Merge "Write 64-bit address in DWARF if we are on 64-bit architecture."
diff --git a/Android.mk b/Android.mk
index 9360355..3467f1d 100644
--- a/Android.mk
+++ b/Android.mk
@@ -405,8 +405,8 @@
adb root
adb wait-for-device shell stop
adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
- adb shell setprop dalvik.vm.dex2oat-filter ""
- adb shell setprop dalvik.vm.image-dex2oat-filter ""
+ adb shell setprop dalvik.vm.dex2oat-filter \"\"
+ adb shell setprop dalvik.vm.image-dex2oat-filter \"\"
adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
adb shell start
@@ -415,18 +415,18 @@
adb root
adb wait-for-device shell stop
adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
- adb shell setprop dalvik.vm.dex2oat-filter ""
- adb shell setprop dalvik.vm.image-dex2oat-filter ""
+ adb shell setprop dalvik.vm.dex2oat-filter \"\"
+ adb shell setprop dalvik.vm.image-dex2oat-filter \"\"
adb shell setprop persist.sys.dalvik.vm.lib.2 libartd.so
adb shell start
-.PHONY: use-art-smart
-use-art-smart:
+.PHONY: use-art-verify-at-runtime
+use-art-verify-at-runtime:
adb root
adb wait-for-device shell stop
adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
- adb shell setprop dalvik.vm.dex2oat-filter "interpret-only"
- adb shell setprop dalvik.vm.image-dex2oat-filter ""
+ adb shell setprop dalvik.vm.dex2oat-filter "verify-at-runtime"
+ adb shell setprop dalvik.vm.image-dex2oat-filter "verify-at-runtime"
adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
adb shell start
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index c60e75b..3e427a3 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -83,19 +83,10 @@
else
ART_TARGET_CLANG := false
endif
-
-ifeq ($(TARGET_ARCH)|$(ART_TARGET_CLANG),mips|true)
- # b/18807290, Clang generated mips assembly code for array.cc
- # cannot be compiled by gas.
- # b/18789639, Clang assembler cannot compile inlined assembly code in
- # valgrind_malloc_space-inl.h:192:5: error: used $at without ".set noat"
- $(warning Clang is disabled for the mips target)
-endif
ART_TARGET_CLANG_arm :=
ART_TARGET_CLANG_arm64 :=
-# TODO: Enable clang mips when b/18807290 and b/18789639 are fixed.
-ART_TARGET_CLANG_mips := false
-ART_TARGET_CLANG_mips64 := false
+ART_TARGET_CLANG_mips :=
+ART_TARGET_CLANG_mips64 :=
ART_TARGET_CLANG_x86 :=
ART_TARGET_CLANG_x86_64 :=
@@ -119,10 +110,6 @@
ART_TARGET_CLANG_CFLAGS_arm64 += \
-DNVALGRIND
-# FIXME: upstream LLVM has a vectorizer bug that needs to be fixed
-ART_TARGET_CLANG_CFLAGS_arm64 += \
- -fno-vectorize
-
# Warn about thread safety violations with clang.
art_clang_cflags := -Wthread-safety
@@ -189,6 +176,7 @@
ART_C_INCLUDES := \
external/gtest/include \
+ external/icu/icu4c/source/common \
external/valgrind/main/include \
external/valgrind/main \
external/vixl/src \
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk
index e0c0b0c..2d6b6a3 100644
--- a/build/Android.common_path.mk
+++ b/build/Android.common_path.mk
@@ -80,7 +80,7 @@
TARGET_CORE_IMG_LOCATION := $(ART_TARGET_TEST_OUT)/core.art
# Jar files for core.art.
-TARGET_CORE_JARS := core-libart conscrypt okhttp core-junit bouncycastle
+TARGET_CORE_JARS := core-libart conscrypt okhttp bouncycastle
HOST_CORE_JARS := $(addsuffix -hostdex,$(TARGET_CORE_JARS))
HOST_CORE_DEX_LOCATIONS := $(foreach jar,$(HOST_CORE_JARS), $(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 8c61871..bfc8956 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -26,9 +26,11 @@
AllFields \
ExceptionHandle \
GetMethodSignature \
+ Instrumentation \
Interfaces \
Main \
MultiDex \
+ MultiDexModifiedSecondary \
MyClass \
MyClassNatives \
Nested \
@@ -61,12 +63,13 @@
# Dex file dependencies for each gtest.
ART_GTEST_class_linker_test_DEX_DEPS := Interfaces MultiDex MyClass Nested Statics StaticsFromCode
-ART_GTEST_compiler_driver_test_DEX_DEPS := AbstractMethod
+ART_GTEST_compiler_driver_test_DEX_DEPS := AbstractMethod StaticLeafMethods
ART_GTEST_dex_file_test_DEX_DEPS := GetMethodSignature Main Nested
ART_GTEST_exception_test_DEX_DEPS := ExceptionHandle
+ART_GTEST_instrumentation_test_DEX_DEPS := Instrumentation
ART_GTEST_jni_compiler_test_DEX_DEPS := MyClassNatives
ART_GTEST_jni_internal_test_DEX_DEPS := AllFields StaticLeafMethods
-ART_GTEST_oat_file_assistant_test_DEX_DEPS := Main MainStripped MultiDex Nested
+ART_GTEST_oat_file_assistant_test_DEX_DEPS := Main MainStripped MultiDex MultiDexModifiedSecondary Nested
ART_GTEST_oat_file_test_DEX_DEPS := Main MultiDex
ART_GTEST_object_test_DEX_DEPS := ProtoCompare ProtoCompare2 StaticsFromCode XandY
ART_GTEST_proxy_test_DEX_DEPS := Interfaces
@@ -157,8 +160,10 @@
runtime/handle_scope_test.cc \
runtime/indenter_test.cc \
runtime/indirect_reference_table_test.cc \
+ runtime/instrumentation_test.cc \
runtime/intern_table_test.cc \
runtime/interpreter/safe_math_test.cc \
+ runtime/interpreter/unstarted_runtime_test.cc \
runtime/java_vm_ext_test.cc \
runtime/jit/jit_code_cache_test.cc \
runtime/leb128_test.cc \
@@ -191,6 +196,7 @@
compiler/dex/mir_graph_test.cc \
compiler/dex/mir_optimization_test.cc \
compiler/dex/quick/quick_cfi_test.cc \
+ compiler/dex/type_inference_test.cc \
compiler/dwarf/dwarf_test.cc \
compiler/driver/compiler_driver_test.cc \
compiler/elf_writer_test.cc \
@@ -227,6 +233,7 @@
compiler/utils/arena_allocator_test.cc \
compiler/utils/dedupe_set_test.cc \
compiler/utils/swap_space_test.cc \
+ compiler/utils/test_dex_file_builder_test.cc \
compiler/utils/arm/managed_register_arm_test.cc \
compiler/utils/arm64/managed_register_arm64_test.cc \
compiler/utils/x86/managed_register_x86_test.cc \
@@ -242,6 +249,7 @@
COMPILER_GTEST_HOST_SRC_FILES := \
$(COMPILER_GTEST_COMMON_SRC_FILES) \
+ compiler/dex/quick/x86/quick_assemble_x86_test.cc \
compiler/utils/arm/assembler_arm32_test.cc \
compiler/utils/arm/assembler_thumb2_test.cc \
compiler/utils/assembler_thumb_test.cc \
@@ -410,7 +418,7 @@
LOCAL_CPP_EXTENSION := $$(ART_CPP_EXTENSION)
LOCAL_SRC_FILES := $$(art_gtest_filename)
LOCAL_C_INCLUDES += $$(ART_C_INCLUDES) art/runtime $$(art_gtest_extra_c_includes)
- LOCAL_SHARED_LIBRARIES += libartd $$(art_gtest_extra_shared_libraries) libart-gtest libart-disassembler
+ LOCAL_SHARED_LIBRARIES += libartd $$(art_gtest_extra_shared_libraries) libart-gtest libartd-disassembler
LOCAL_WHOLE_STATIC_LIBRARIES += libsigchain
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index 9f873b3..1386439 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -23,7 +23,7 @@
#include "gtest/gtest.h"
#define EXPECT_NULL(expected) EXPECT_EQ(reinterpret_cast<const void*>(expected), \
- reinterpret_cast<void*>(NULL));
+ reinterpret_cast<void*>(nullptr));
namespace art {
bool UsuallyEquals(double expected, double actual);
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index 03165ed..e02fe4b 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -579,6 +579,8 @@
log_verbosity.class_linker = true;
} else if (verbose_options[j] == "compiler") {
log_verbosity.compiler = true;
+ } else if (verbose_options[j] == "deopt") {
+ log_verbosity.deopt = true;
} else if (verbose_options[j] == "gc") {
log_verbosity.gc = true;
} else if (verbose_options[j] == "heap") {
diff --git a/compiler/Android.mk b/compiler/Android.mk
index ac95abd..3f5271d 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -23,6 +23,7 @@
dex/global_value_numbering.cc \
dex/gvn_dead_code_elimination.cc \
dex/local_value_numbering.cc \
+ dex/type_inference.cc \
dex/quick/arm/assemble_arm.cc \
dex/quick/arm/call_arm.cc \
dex/quick/arm/fp_arm.cc \
@@ -124,13 +125,14 @@
optimizing/optimizing_compiler.cc \
optimizing/parallel_move_resolver.cc \
optimizing/prepare_for_register_allocation.cc \
+ optimizing/primitive_type_propagation.cc \
+ optimizing/reference_type_propagation.cc \
optimizing/register_allocator.cc \
optimizing/side_effects_analysis.cc \
optimizing/ssa_builder.cc \
optimizing/ssa_liveness_analysis.cc \
optimizing/ssa_phi_elimination.cc \
- optimizing/primitive_type_propagation.cc \
- optimizing/reference_type_propagation.cc \
+ optimizing/stack_map_stream.cc \
trampolines/trampoline_compiler.cc \
utils/arena_bit_vector.cc \
utils/arm/assembler_arm.cc \
diff --git a/compiler/cfi_test.h b/compiler/cfi_test.h
index f7501d2..5e345db 100644
--- a/compiler/cfi_test.h
+++ b/compiler/cfi_test.h
@@ -30,6 +30,8 @@
namespace art {
+constexpr dwarf::CFIFormat kCFIFormat = dwarf::DW_DEBUG_FRAME_FORMAT;
+
class CFITest : public dwarf::DwarfTest {
public:
void GenerateExpected(FILE* f, InstructionSet isa, const char* isa_str,
@@ -46,11 +48,11 @@
// Pretty-print CFI opcodes.
constexpr bool is64bit = false;
dwarf::DebugFrameOpCodeWriter<> initial_opcodes;
- dwarf::WriteEhFrameCIE(is64bit, dwarf::DW_EH_PE_absptr, dwarf::Reg(8),
- initial_opcodes, &eh_frame_data_);
- std::vector<uintptr_t> eh_frame_patches;
- dwarf::WriteEhFrameFDE(is64bit, 0, 0, actual_asm.size(), &actual_cfi,
- &eh_frame_data_, &eh_frame_patches);
+ dwarf::WriteDebugFrameCIE(is64bit, dwarf::DW_EH_PE_absptr, dwarf::Reg(8),
+ initial_opcodes, kCFIFormat, &debug_frame_data_);
+ std::vector<uintptr_t> debug_frame_patches;
+ dwarf::WriteDebugFrameFDE(is64bit, 0, 0, actual_asm.size(), &actual_cfi,
+ kCFIFormat, &debug_frame_data_, &debug_frame_patches);
ReformatCfi(Objdump(false, "-W"), &lines);
// Pretty-print assembly.
auto* opts = new DisassemblerOptions(false, actual_asm.data(), true);
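Note: the switch above from WriteEhFrameCIE/FDE to WriteDebugFrameCIE/FDE threads a CFIFormat through the test, and the commit subject concerns the related address-width fix: FDE addresses must be emitted at the target's pointer size. A minimal sketch of that idea, with PushBytes as a hypothetical stand-in for the writer's actual emit helpers:

    #include <cstdint>
    #include <vector>

    // Sketch only, not ART's writer API: emit a little-endian integer of n bytes.
    static void PushBytes(std::vector<uint8_t>* out, uint64_t v, int n) {
      for (int i = 0; i < n; ++i) {
        out->push_back(static_cast<uint8_t>(v >> (8 * i)));
      }
    }

    // An FDE encodes initial_location and address_range at the target's address
    // size: 8 bytes on a 64-bit architecture, 4 bytes otherwise.
    void WriteFdeAddressRange(bool is64bit, uint64_t pc, uint64_t size,
                              std::vector<uint8_t>* out) {
      const int n = is64bit ? 8 : 4;
      PushBytes(out, pc, n);
      PushBytes(out, size, n);
    }
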
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 8ffc86e..5a9e04f 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -140,6 +140,27 @@
}
}
+// Get the set of image classes given to the compiler-driver in SetUp. Note: the compiler
+// driver assumes ownership of the set, so the test should properly release the set.
+std::unordered_set<std::string>* CommonCompilerTest::GetImageClasses() {
+ // Empty set: by default no classes are retained in the image.
+ return new std::unordered_set<std::string>();
+}
+
+// Get the set of compiled classes given to the compiler-driver in SetUp. Note: the compiler
+// driver assumes ownership of the set, so the test should properly release the set.
+std::unordered_set<std::string>* CommonCompilerTest::GetCompiledClasses() {
+ // Null, no selection of compiled-classes.
+ return nullptr;
+}
+
+// Get the set of compiled methods given to the compiler-driver in SetUp. Note: the compiler
+// driver assumes ownership of the set, so the test should properly release the set.
+std::unordered_set<std::string>* CommonCompilerTest::GetCompiledMethods() {
+ // Null, no selection of compiled-methods.
+ return nullptr;
+}
+
void CommonCompilerTest::SetUp() {
CommonRuntimeTest::SetUp();
{
@@ -165,7 +186,10 @@
method_inliner_map_.get(),
compiler_kind, instruction_set,
instruction_set_features_.get(),
- true, new std::unordered_set<std::string>, nullptr,
+ true,
+ GetImageClasses(),
+ GetCompiledClasses(),
+ GetCompiledMethods(),
2, true, true, "", timer_.get(), -1, ""));
}
// We typically don't generate an image in unit tests, disable this optimization by default.
@@ -239,7 +263,7 @@
mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), class_loader);
CHECK(klass != nullptr) << "Class not found " << class_name;
mirror::ArtMethod* method = klass->FindVirtualMethod(method_name, signature);
- CHECK(method != NULL) << "Virtual method not found: "
+ CHECK(method != nullptr) << "Virtual method not found: "
<< class_name << "." << method_name << signature;
CompileMethod(method);
}
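Note: the three hooks above let a derived test control what the compiler driver sees, and the driver takes ownership of each returned set. A hedged sketch of overriding one of them; the fixture name and method-signature string below are illustrative, not part of the patch:

    // Hypothetical test fixture that restricts compilation to a single method.
    class OnlyMainCompilerTest : public CommonCompilerTest {
     protected:
      std::unordered_set<std::string>* GetCompiledMethods() OVERRIDE {
        // The compiler driver takes ownership of the returned set.
        auto* methods = new std::unordered_set<std::string>();
        methods->insert("void Main.main(java.lang.String[])");
        return methods;
      }
    };
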
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index d7b210d..8d80a2d 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_COMMON_COMPILER_TEST_H_
#include <list>
+#include <unordered_set>
#include <vector>
#include "common_runtime_test.h"
@@ -56,6 +57,18 @@
virtual void SetUpRuntimeOptions(RuntimeOptions *options);
+ // Get the set of image classes given to the compiler-driver in SetUp. Note: the compiler
+ // driver assumes ownership of the set, so the test should properly release the set.
+ virtual std::unordered_set<std::string>* GetImageClasses();
+
+ // Get the set of compiled classes given to the compiler-driver in SetUp. Note: the compiler
+ // driver assumes ownership of the set, so the test should properly release the set.
+ virtual std::unordered_set<std::string>* GetCompiledClasses();
+
+ // Get the set of compiled methods given to the compiler-driver in SetUp. Note: the compiler
+ // driver assumes ownership of the set, so the test should properly release the set.
+ virtual std::unordered_set<std::string>* GetCompiledMethods();
+
virtual void TearDown();
void CompileClass(mirror::ClassLoader* class_loader, const char* class_name)
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index 4f7a970..d1acada 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -108,7 +108,7 @@
}
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
- return NULL;
+ return nullptr;
}
}
diff --git a/compiler/dex/bb_optimizations.h b/compiler/dex/bb_optimizations.h
index 0850f42..02d5327 100644
--- a/compiler/dex/bb_optimizations.h
+++ b/compiler/dex/bb_optimizations.h
@@ -26,6 +26,30 @@
namespace art {
/**
+ * @class StringChange
+ * @brief Converts calls to String.<init> to StringFactory instead.
+ */
+class StringChange : public PassME {
+ public:
+ StringChange() : PassME("StringChange", kNoNodes) {
+ }
+
+ void Start(PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ c_unit->mir_graph->StringChange();
+ }
+
+ bool Gate(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ return c_unit->mir_graph->HasInvokes();
+ }
+};
+
+/**
* @class CacheFieldLoweringInfo
* @brief Cache the lowering info for fields used by IGET/IPUT/SGET/SPUT insns.
*/
@@ -270,7 +294,25 @@
CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
DCHECK(c_unit != nullptr);
c_unit->mir_graph->EliminateDeadCodeEnd();
- down_cast<PassMEDataHolder*>(data)->dirty = !c_unit->mir_graph->MirSsaRepUpToDate();
+ }
+};
+
+/**
+ * @class GlobalValueNumberingCleanupPass
+ * @brief Performs the cleanup after the global value numbering pass and the dependent
+ * dead code elimination pass that needs the GVN data.
+ */
+class GlobalValueNumberingCleanupPass : public PassME {
+ public:
+ GlobalValueNumberingCleanupPass()
+ : PassME("GVNCleanup", kNoNodes, "") {
+ }
+
+ void Start(PassDataHolder* data) const OVERRIDE {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ c_unit->mir_graph->GlobalValueNumberingCleanup();
}
};
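Note: both new passes follow the file's Gate/Start protocol: the pass driver queries Gate and only then runs Start, so StringChange is skipped entirely for methods without invokes. Roughly, and purely as an illustration of the dispatch (this is not the actual PassDriver code):

    // Illustrative dispatch, assuming the PassME interface shown above.
    void ApplyPass(const PassME& pass, PassMEDataHolder* data) {
      if (pass.Gate(data)) {  // e.g. StringChange: only if the method has invokes.
        pass.Start(data);     // e.g. MIRGraph::StringChange() does the rewrite.
      }
    }
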
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index 0acdd42..b78b3d7 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -172,7 +172,6 @@
kMirOpRangeCheck,
kMirOpDivZeroCheck,
kMirOpCheck,
- kMirOpCheckPart2,
kMirOpSelect,
// Vector opcodes:
diff --git a/compiler/dex/dataflow_iterator-inl.h b/compiler/dex/dataflow_iterator-inl.h
index 6e25db6..83dfc28 100644
--- a/compiler/dex/dataflow_iterator-inl.h
+++ b/compiler/dex/dataflow_iterator-inl.h
@@ -23,7 +23,7 @@
// Single forward pass over the nodes.
inline BasicBlock* DataflowIterator::ForwardSingleNext() {
- BasicBlock* res = NULL;
+ BasicBlock* res = nullptr;
// Are we not yet at the end?
if (idx_ < end_idx_) {
@@ -38,7 +38,7 @@
// Repeat full forward passes over all nodes until no change occurs during a complete pass.
inline BasicBlock* DataflowIterator::ForwardRepeatNext() {
- BasicBlock* res = NULL;
+ BasicBlock* res = nullptr;
// Are we at the end and have we changed something?
if ((idx_ >= end_idx_) && changed_ == true) {
@@ -61,7 +61,7 @@
// Single reverse pass over the nodes.
inline BasicBlock* DataflowIterator::ReverseSingleNext() {
- BasicBlock* res = NULL;
+ BasicBlock* res = nullptr;
// Are we not yet at the end?
if (idx_ >= 0) {
@@ -76,7 +76,7 @@
// Repeat full backwards passes over all nodes until no change occurs during a complete pass.
inline BasicBlock* DataflowIterator::ReverseRepeatNext() {
- BasicBlock* res = NULL;
+ BasicBlock* res = nullptr;
// Are we done and we changed something during the last iteration?
if ((idx_ < 0) && changed_) {
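Note: the Repeat variants drive fixed-point computations: the caller reports whether the previous block changed, and the iterator keeps restarting full passes until one completes with no change. The usual calling pattern looks roughly like this (mir_graph is assumed in scope; DoSomething is a placeholder):

    // Typical fixed-point loop over a repeating iterator (sketch).
    RepeatingPreOrderDfsIterator iter(mir_graph);
    bool change = false;
    for (BasicBlock* bb = iter.Next(change); bb != nullptr; bb = iter.Next(change)) {
      change = DoSomething(bb);  // Placeholder: true if bb's state changed.
    }
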
diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h
index 2a06cec..097c2a4 100644
--- a/compiler/dex/dataflow_iterator.h
+++ b/compiler/dex/dataflow_iterator.h
@@ -72,7 +72,7 @@
: mir_graph_(mir_graph),
start_idx_(start_idx),
end_idx_(end_idx),
- block_id_list_(NULL),
+ block_id_list_(nullptr),
idx_(0),
repeats_(0),
changed_(false) {}
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index ef94d8b..d1ddfda 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -301,7 +301,7 @@
art::DexToDexCompilationLevel dex_to_dex_compilation_level) {
UNUSED(invoke_type);
if (dex_to_dex_compilation_level != art::kDontDexToDexCompile) {
- art::DexCompilationUnit unit(NULL, class_loader, art::Runtime::Current()->GetClassLinker(),
+ art::DexCompilationUnit unit(nullptr, class_loader, art::Runtime::Current()->GetClassLinker(),
dex_file, code_item, class_def_idx, method_idx, access_flags,
driver.GetVerifiedMethod(&dex_file, method_idx));
art::optimizer::DexCompiler dex_compiler(driver, unit, dex_to_dex_compilation_level);
diff --git a/compiler/dex/global_value_numbering.cc b/compiler/dex/global_value_numbering.cc
index 30e3ce0..e2b9987 100644
--- a/compiler/dex/global_value_numbering.cc
+++ b/compiler/dex/global_value_numbering.cc
@@ -128,8 +128,9 @@
++bbs_processed_;
merge_lvns_.clear();
- bool change = (lvns_[bb->id] == nullptr) || !lvns_[bb->id]->Equals(*work_lvn_);
+ bool change = false;
if (mode_ == kModeGvn) {
+ change = (lvns_[bb->id] == nullptr) || !lvns_[bb->id]->Equals(*work_lvn_);
// In GVN mode, keep the latest LVN even if Equals() indicates no change. This is
// to keep the correct values of fields that do not contribute to Equals() as long
// as they depend only on predecessor LVNs' fields that do contribute to Equals().
@@ -137,6 +138,9 @@
std::unique_ptr<const LocalValueNumbering> old_lvn(lvns_[bb->id]);
lvns_[bb->id] = work_lvn_.release();
} else {
+ DCHECK_EQ(mode_, kModeGvnPostProcessing); // kModeLvn doesn't use FinishBasicBlock().
+ DCHECK(lvns_[bb->id] != nullptr);
+ DCHECK(lvns_[bb->id]->Equals(*work_lvn_));
work_lvn_.reset();
}
return change;
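Note: the restructured FinishBasicBlock makes the convergence contract explicit: only the main kModeGvn phase reports changes (and keeps the newest LVN), while kModeGvnPostProcessing merely re-derives data that must already match what GVN converged to. Restated as a sketch, not the actual method:

    // Illustrative restatement of the contract.
    bool ReportChange(Mode mode, const LocalValueNumbering* stored,
                      const LocalValueNumbering& recomputed) {
      if (mode == kModeGvn) {
        return stored == nullptr || !stored->Equals(recomputed);
      }
      // kModeGvnPostProcessing: must agree with the converged result.
      DCHECK(stored != nullptr && stored->Equals(recomputed));
      return false;
    }
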
diff --git a/compiler/dex/global_value_numbering_test.cc b/compiler/dex/global_value_numbering_test.cc
index b4559ef..c8aa990 100644
--- a/compiler/dex/global_value_numbering_test.cc
+++ b/compiler/dex/global_value_numbering_test.cc
@@ -15,7 +15,6 @@
*/
#include "base/logging.h"
-#include "dataflow_iterator.h"
#include "dataflow_iterator-inl.h"
#include "dex/mir_field_info.h"
#include "global_value_numbering.h"
@@ -260,10 +259,8 @@
mir->ssa_rep = &ssa_reps_[i];
mir->ssa_rep->num_uses = def->num_uses;
mir->ssa_rep->uses = const_cast<int32_t*>(def->uses); // Not modified by LVN.
- mir->ssa_rep->fp_use = nullptr; // Not used by LVN.
mir->ssa_rep->num_defs = def->num_defs;
mir->ssa_rep->defs = const_cast<int32_t*>(def->defs); // Not modified by LVN.
- mir->ssa_rep->fp_def = nullptr; // Not used by LVN.
mir->dalvikInsn.opcode = def->opcode;
mir->offset = i; // LVN uses offset only for debug output
mir->optimization_flags = 0u;
@@ -293,6 +290,15 @@
DoPrepareVregToSsaMapExit(bb_id, map, count);
}
+ template <size_t count>
+ void MarkAsWideSRegs(const int32_t (&sregs)[count]) {
+ for (int32_t sreg : sregs) {
+ cu_.mir_graph->reg_location_[sreg].wide = true;
+ cu_.mir_graph->reg_location_[sreg + 1].wide = true;
+ cu_.mir_graph->reg_location_[sreg + 1].high_word = true;
+ }
+ }
+
void PerformGVN() {
DoPerformGVN<LoopRepeatingTopologicalSortIterator>();
}
@@ -363,9 +369,11 @@
cu_.access_flags = kAccStatic; // Don't let "this" interfere with this test.
allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
// By default, the zero-initialized reg_location_[.] with ref == false tells LVN that
- // 0 constants are integral, not references. Nothing else is used by LVN/GVN.
+ // 0 constants are integral, not references, and the values are all narrow.
+ // Nothing else is used by LVN/GVN. Tests can override the default values as needed.
cu_.mir_graph->reg_location_ =
cu_.arena.AllocArray<RegLocation>(kMaxSsaRegs, kArenaAllocRegAlloc);
+ cu_.mir_graph->num_ssa_regs_ = kMaxSsaRegs;
// Bind all possible sregs to live vregs for test purposes.
live_in_v_->SetInitialBits(kMaxSsaRegs);
cu_.mir_graph->ssa_base_vregs_.reserve(kMaxSsaRegs);
@@ -913,14 +921,14 @@
DEF_IGET(6, Instruction::AGET_OBJECT, 3u, 200u, 201u), // Same as at the left side.
DEF_AGET(3, Instruction::AGET_WIDE, 4u, 300u, 301u),
- DEF_CONST(5, Instruction::CONST_WIDE, 5u, 1000),
- DEF_APUT(5, Instruction::APUT_WIDE, 5u, 300u, 301u),
- DEF_AGET(6, Instruction::AGET_WIDE, 7u, 300u, 301u), // Differs from the top and the CONST.
+ DEF_CONST(5, Instruction::CONST_WIDE, 6u, 1000),
+ DEF_APUT(5, Instruction::APUT_WIDE, 6u, 300u, 301u),
+ DEF_AGET(6, Instruction::AGET_WIDE, 8u, 300u, 301u), // Differs from the top and the CONST.
- DEF_AGET(3, Instruction::AGET_SHORT, 8u, 400u, 401u),
- DEF_CONST(3, Instruction::CONST, 9u, 2000),
- DEF_APUT(4, Instruction::APUT_SHORT, 9u, 400u, 401u),
- DEF_APUT(5, Instruction::APUT_SHORT, 9u, 400u, 401u),
+ DEF_AGET(3, Instruction::AGET_SHORT, 10u, 400u, 401u),
+ DEF_CONST(3, Instruction::CONST, 11u, 2000),
+ DEF_APUT(4, Instruction::APUT_SHORT, 11u, 400u, 401u),
+ DEF_APUT(5, Instruction::APUT_SHORT, 11u, 400u, 401u),
DEF_AGET(6, Instruction::AGET_SHORT, 12u, 400u, 401u), // Differs from the top, == CONST.
DEF_AGET(3, Instruction::AGET_CHAR, 13u, 500u, 501u),
@@ -942,6 +950,8 @@
};
PrepareMIRs(mirs);
+ static const int32_t wide_sregs[] = { 4, 6, 8 };
+ MarkAsWideSRegs(wide_sregs);
PerformGVN();
ASSERT_EQ(arraysize(mirs), value_names_.size());
EXPECT_EQ(value_names_[0], value_names_[1]);
@@ -1060,6 +1070,12 @@
};
PrepareMIRs(mirs);
+ for (size_t i = 0u; i != arraysize(mirs); ++i) {
+ if ((mirs_[i].ssa_rep->defs[0] % 2) == 0) {
+ const int32_t wide_sregs[] = { mirs_[i].ssa_rep->defs[0] };
+ MarkAsWideSRegs(wide_sregs);
+ }
+ }
PerformGVN();
ASSERT_EQ(arraysize(mirs), value_names_.size());
EXPECT_EQ(value_names_[0], value_names_[7]);
@@ -1496,27 +1512,27 @@
static const MIRDef mirs[] = {
// NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
DEF_AGET(3, Instruction::AGET_WIDE, 0u, 100u, 101u),
- DEF_AGET(4, Instruction::AGET_WIDE, 1u, 100u, 101u), // Same as at the top.
- DEF_AGET(5, Instruction::AGET_WIDE, 2u, 100u, 101u), // Same as at the top.
+ DEF_AGET(4, Instruction::AGET_WIDE, 2u, 100u, 101u), // Same as at the top.
+ DEF_AGET(5, Instruction::AGET_WIDE, 4u, 100u, 101u), // Same as at the top.
- DEF_AGET(3, Instruction::AGET_BYTE, 3u, 200u, 201u),
- DEF_AGET(4, Instruction::AGET_BYTE, 4u, 200u, 201u), // Differs from top...
- DEF_APUT(4, Instruction::APUT_BYTE, 5u, 200u, 201u), // Because of this IPUT.
- DEF_AGET(5, Instruction::AGET_BYTE, 6u, 200u, 201u), // Differs from top and the loop AGET.
+ DEF_AGET(3, Instruction::AGET_BYTE, 6u, 200u, 201u),
+ DEF_AGET(4, Instruction::AGET_BYTE, 7u, 200u, 201u), // Differs from top...
+ DEF_APUT(4, Instruction::APUT_BYTE, 8u, 200u, 201u), // Because of this IPUT.
+ DEF_AGET(5, Instruction::AGET_BYTE, 9u, 200u, 201u), // Differs from top and the loop AGET.
- DEF_AGET(3, Instruction::AGET, 7u, 300u, 301u),
- DEF_APUT(4, Instruction::APUT, 8u, 300u, 301u), // Because of this IPUT...
- DEF_AGET(4, Instruction::AGET, 9u, 300u, 301u), // Differs from top.
- DEF_AGET(5, Instruction::AGET, 10u, 300u, 301u), // Differs from top but == the loop AGET.
+ DEF_AGET(3, Instruction::AGET, 10u, 300u, 301u),
+ DEF_APUT(4, Instruction::APUT, 11u, 300u, 301u), // Because of this IPUT...
+ DEF_AGET(4, Instruction::AGET, 12u, 300u, 301u), // Differs from top.
+ DEF_AGET(5, Instruction::AGET, 13u, 300u, 301u), // Differs from top but == the loop AGET.
- DEF_CONST(3, Instruction::CONST, 11u, 3000),
- DEF_APUT(3, Instruction::APUT_CHAR, 11u, 400u, 401u),
- DEF_APUT(3, Instruction::APUT_CHAR, 11u, 400u, 402u),
- DEF_AGET(4, Instruction::AGET_CHAR, 14u, 400u, 401u), // Differs from 11u and 16u.
- DEF_AGET(4, Instruction::AGET_CHAR, 15u, 400u, 402u), // Same as 14u.
- DEF_CONST(4, Instruction::CONST, 16u, 4000),
- DEF_APUT(4, Instruction::APUT_CHAR, 16u, 400u, 401u),
- DEF_APUT(4, Instruction::APUT_CHAR, 16u, 400u, 402u),
+ DEF_CONST(3, Instruction::CONST, 14u, 3000),
+ DEF_APUT(3, Instruction::APUT_CHAR, 14u, 400u, 401u),
+ DEF_APUT(3, Instruction::APUT_CHAR, 14u, 400u, 402u),
+ DEF_AGET(4, Instruction::AGET_CHAR, 15u, 400u, 401u), // Differs from 11u and 16u.
+ DEF_AGET(4, Instruction::AGET_CHAR, 16u, 400u, 402u), // Same as 14u.
+ DEF_CONST(4, Instruction::CONST, 17u, 4000),
+ DEF_APUT(4, Instruction::APUT_CHAR, 17u, 400u, 401u),
+ DEF_APUT(4, Instruction::APUT_CHAR, 17u, 400u, 402u),
DEF_AGET(5, Instruction::AGET_CHAR, 19u, 400u, 401u), // Differs from 11u and 14u...
DEF_AGET(5, Instruction::AGET_CHAR, 20u, 400u, 402u), // and same as the CONST 16u.
@@ -1534,6 +1550,8 @@
};
PrepareMIRs(mirs);
+ static const int32_t wide_sregs[] = { 0, 2, 4 };
+ MarkAsWideSRegs(wide_sregs);
PerformGVN();
ASSERT_EQ(arraysize(mirs), value_names_.size());
EXPECT_EQ(value_names_[0], value_names_[1]);
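Note: the sreg renumbering in these tests (for example 5u -> 6u and 7u -> 8u) is a consequence of MarkAsWideSRegs: a wide value occupies an SSA register pair, with sreg as the low word and sreg + 1 flagged as the wide high word, so definitions must leave the +1 slot free. An illustrative check of the pairing invariant (a hypothetical helper, not one from the patch):

    // Hypothetical helper: a well-formed wide pair at sreg s.
    bool IsWellFormedWidePair(const RegLocation* locs, int32_t s) {
      return locs[s].wide && !locs[s].high_word &&
             locs[s + 1].wide && locs[s + 1].high_word;
    }
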
diff --git a/compiler/dex/gvn_dead_code_elimination.cc b/compiler/dex/gvn_dead_code_elimination.cc
index ec12221..6d8a7da 100644
--- a/compiler/dex/gvn_dead_code_elimination.cc
+++ b/compiler/dex/gvn_dead_code_elimination.cc
@@ -20,6 +20,7 @@
#include "base/bit_vector-inl.h"
#include "base/macros.h"
+#include "base/allocator.h"
#include "compiler_enums.h"
#include "dataflow_iterator-inl.h"
#include "dex_instruction.h"
@@ -57,14 +58,12 @@
low_def_over_high_word = prev_data->low_def_over_high_word;
} else {
prev_value = prev_data->prev_value_high;
- low_def_over_high_word =
- prev_data->prev_value_high.value != kNPos && !prev_data->high_def_over_low_word;
+ low_def_over_high_word = !prev_data->high_def_over_low_word;
}
} else {
if (prev_data->vreg_def == v_reg) {
prev_value_high = prev_data->prev_value;
- high_def_over_low_word =
- prev_data->prev_value.value != kNPos && !prev_data->low_def_over_high_word;
+ high_def_over_low_word = !prev_data->low_def_over_high_word;
} else {
prev_value_high = prev_data->prev_value_high;
high_def_over_low_word = prev_data->high_def_over_low_word;
@@ -75,6 +74,9 @@
GvnDeadCodeElimination::VRegChains::VRegChains(uint32_t num_vregs, ScopedArenaAllocator* alloc)
: num_vregs_(num_vregs),
vreg_data_(alloc->AllocArray<VRegValue>(num_vregs, kArenaAllocMisc)),
+ vreg_high_words_(num_vregs, false, Allocator::GetNoopAllocator(),
+ BitVector::BitsToWords(num_vregs),
+ alloc->AllocArray<uint32_t>(BitVector::BitsToWords(num_vregs))),
mir_data_(alloc->Adapter()) {
mir_data_.reserve(100);
}
@@ -82,6 +84,7 @@
inline void GvnDeadCodeElimination::VRegChains::Reset() {
DCHECK(mir_data_.empty());
std::fill_n(vreg_data_, num_vregs_, VRegValue());
+ vreg_high_words_.ClearAllBits();
}
void GvnDeadCodeElimination::VRegChains::AddMIRWithDef(MIR* mir, int v_reg, bool wide,
@@ -93,24 +96,26 @@
data->wide_def = wide;
data->vreg_def = v_reg;
- if (vreg_data_[v_reg].change != kNPos &&
- mir_data_[vreg_data_[v_reg].change].vreg_def + 1 == v_reg) {
- data->low_def_over_high_word = true;
- }
- data->prev_value = vreg_data_[v_reg];
DCHECK_LT(static_cast<size_t>(v_reg), num_vregs_);
+ data->prev_value = vreg_data_[v_reg];
+ data->low_def_over_high_word =
+ (vreg_data_[v_reg].change != kNPos)
+ ? GetMIRData(vreg_data_[v_reg].change)->vreg_def + 1 == v_reg
+ : vreg_high_words_.IsBitSet(v_reg);
vreg_data_[v_reg].value = new_value;
vreg_data_[v_reg].change = pos;
+ vreg_high_words_.ClearBit(v_reg);
if (wide) {
- if (vreg_data_[v_reg + 1].change != kNPos &&
- mir_data_[vreg_data_[v_reg + 1].change].vreg_def == v_reg + 1) {
- data->high_def_over_low_word = true;
- }
- data->prev_value_high = vreg_data_[v_reg + 1];
DCHECK_LT(static_cast<size_t>(v_reg + 1), num_vregs_);
+ data->prev_value_high = vreg_data_[v_reg + 1];
+ data->high_def_over_low_word =
+ (vreg_data_[v_reg + 1].change != kNPos)
+ ? GetMIRData(vreg_data_[v_reg + 1].change)->vreg_def == v_reg + 1
+ : !vreg_high_words_.IsBitSet(v_reg + 1);
vreg_data_[v_reg + 1].value = new_value;
vreg_data_[v_reg + 1].change = pos;
+ vreg_high_words_.SetBit(v_reg + 1);
}
}
@@ -123,9 +128,17 @@
if (data->has_def) {
DCHECK_EQ(vreg_data_[data->vreg_def].change, NumMIRs() - 1u);
vreg_data_[data->vreg_def] = data->prev_value;
+ DCHECK(!vreg_high_words_.IsBitSet(data->vreg_def));
+ if (data->low_def_over_high_word) {
+ vreg_high_words_.SetBit(data->vreg_def);
+ }
if (data->wide_def) {
DCHECK_EQ(vreg_data_[data->vreg_def + 1].change, NumMIRs() - 1u);
vreg_data_[data->vreg_def + 1] = data->prev_value_high;
+ DCHECK(vreg_high_words_.IsBitSet(data->vreg_def + 1));
+ if (data->high_def_over_low_word) {
+ vreg_high_words_.ClearBit(data->vreg_def + 1);
+ }
}
}
mir_data_.pop_back();
@@ -169,6 +182,7 @@
uint16_t change = vreg_data_[v_reg].change;
if (change == kNPos) {
vreg_data_[v_reg].value = value;
+ vreg_high_words_.SetBit(v_reg);
} else {
while (true) {
MIRData* data = &mir_data_[change];
@@ -208,6 +222,7 @@
}
}
vreg_data_[v_reg].value = old_value;
+ DCHECK(!vreg_high_words_.IsBitSet(v_reg)); // Keep marked as low word.
}
} else {
DCHECK_LT(static_cast<size_t>(v_reg + 1), num_vregs_);
@@ -223,6 +238,7 @@
old_value = lvn->GetStartingVregValueNumber(v_reg);
}
vreg_data_[v_reg].value = old_value;
+ DCHECK(!vreg_high_words_.IsBitSet(v_reg)); // Keep marked as low word.
}
if (check_high && vreg_data_[v_reg + 1].value == kNoValue) {
uint16_t old_value = lvn->GetStartingVregValueNumber(v_reg + 1);
@@ -234,6 +250,7 @@
}
}
vreg_data_[v_reg + 1].value = old_value;
+ DCHECK(!vreg_high_words_.IsBitSet(v_reg + 1)); // Keep marked as low word.
}
}
}
@@ -300,6 +317,8 @@
if (next_change == kNPos) {
DCHECK_EQ(vreg_data_[v_reg].change, old_change);
vreg_data_[v_reg].change = new_change;
+ DCHECK_EQ(vreg_high_words_.IsBitSet(v_reg), v_reg == old_data->vreg_def + 1);
+ // No change in vreg_high_words_.
} else {
DCHECK_EQ(mir_data_[next_change].PrevChange(v_reg), old_change);
mir_data_[next_change].SetPrevChange(v_reg, new_change);
@@ -316,6 +335,12 @@
if (next_change == kNPos) {
DCHECK_EQ(vreg_data_[v_reg].change, change);
vreg_data_[v_reg] = (data->vreg_def == v_reg) ? data->prev_value : data->prev_value_high;
+ DCHECK_EQ(vreg_high_words_.IsBitSet(v_reg), v_reg == data->vreg_def + 1);
+ if (data->vreg_def == v_reg && data->low_def_over_high_word) {
+ vreg_high_words_.SetBit(v_reg);
+ } else if (data->vreg_def != v_reg && data->high_def_over_low_word) {
+ vreg_high_words_.ClearBit(v_reg);
+ }
} else {
DCHECK_EQ(mir_data_[next_change].PrevChange(v_reg), change);
mir_data_[next_change].RemovePrevChange(v_reg, data);
@@ -347,6 +372,21 @@
return false;
}
+bool GvnDeadCodeElimination::VRegChains::IsVRegUsed(uint16_t first_change, uint16_t last_change,
+ int v_reg, MIRGraph* mir_graph) const {
+ DCHECK_LE(first_change, last_change);
+ DCHECK_LE(last_change, mir_data_.size());
+ for (size_t c = first_change; c != last_change; ++c) {
+ SSARepresentation* ssa_rep = mir_data_[c].mir->ssa_rep;
+ for (int i = 0; i != ssa_rep->num_uses; ++i) {
+ if (mir_graph->SRegToVReg(ssa_rep->uses[i]) == v_reg) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
void GvnDeadCodeElimination::VRegChains::RenameSRegUses(uint16_t first_change, uint16_t last_change,
int old_s_reg, int new_s_reg, bool wide) {
for (size_t c = first_change; c != last_change; ++c) {
@@ -478,7 +518,7 @@
mir->dalvikInsn.opcode - Instruction::ADD_INT_2ADDR + Instruction::ADD_INT);
}
-MIR* GvnDeadCodeElimination::CreatePhi(int s_reg, bool fp) {
+MIR* GvnDeadCodeElimination::CreatePhi(int s_reg) {
int v_reg = mir_graph_->SRegToVReg(s_reg);
MIR* phi = mir_graph_->NewMIR();
phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpPhi);
@@ -491,11 +531,9 @@
mir_graph_->AllocateSSADefData(phi, 1);
phi->ssa_rep->defs[0] = s_reg;
- phi->ssa_rep->fp_def[0] = fp;
size_t num_uses = bb_->predecessors.size();
mir_graph_->AllocateSSAUseData(phi, num_uses);
- std::fill_n(phi->ssa_rep->fp_use, num_uses, fp);
size_t idx = 0u;
for (BasicBlockId pred_id : bb_->predecessors) {
BasicBlock* pred_bb = mir_graph_->GetBasicBlock(pred_id);
@@ -520,17 +558,29 @@
// Just before we kill mir_to_kill, we need to replace the previous SSA reg assigned to the
// same dalvik reg to keep consistency with subsequent instructions. However, if there's no
- // defining MIR for that dalvik reg, the preserved valus must come from its predecessors
+ // defining MIR for that dalvik reg, the preserved values must come from its predecessors
// and we need to create a new Phi (a degenerate Phi if there's only a single predecessor).
if (def_change == kNPos) {
- bool fp = mir_to_kill->ssa_rep->fp_def[0];
if (wide) {
DCHECK_EQ(new_s_reg + 1, mir_to_kill->ssa_rep->defs[1]);
- DCHECK_EQ(fp, mir_to_kill->ssa_rep->fp_def[1]);
DCHECK_EQ(mir_graph_->SRegToVReg(new_s_reg) + 1, mir_graph_->SRegToVReg(new_s_reg + 1));
- CreatePhi(new_s_reg + 1, fp); // High word Phi.
+ CreatePhi(new_s_reg + 1); // High word Phi.
}
- return CreatePhi(new_s_reg, fp);
+ MIR* phi = CreatePhi(new_s_reg);
+ // If this is a degenerate Phi with all inputs being the same SSA reg, we need to rename its uses.
+ DCHECK_NE(phi->ssa_rep->num_uses, 0u);
+ int old_s_reg = phi->ssa_rep->uses[0];
+ bool all_same = true;
+ for (size_t i = 1u, num = phi->ssa_rep->num_uses; i != num; ++i) {
+ if (phi->ssa_rep->uses[i] != old_s_reg) {
+ all_same = false;
+ break;
+ }
+ }
+ if (all_same) {
+ vreg_chains_.RenameSRegUses(0u, last_change, old_s_reg, new_s_reg, wide);
+ }
+ return phi;
} else {
DCHECK_LT(def_change, last_change);
DCHECK_LE(last_change, vreg_chains_.NumMIRs());
@@ -676,8 +726,14 @@
uint16_t src_name =
(d->wide_def ? lvn_->GetSregValueWide(src_s_reg) : lvn_->GetSregValue(src_s_reg));
if (value_name == src_name) {
- RecordPassKillMoveByRenamingSrcDef(check_change, c);
- return;
+ // Check if the move's destination vreg is unused between check_change and the move.
+ uint32_t new_dest_v_reg = mir_graph_->SRegToVReg(d->mir->ssa_rep->defs[0]);
+ if (!vreg_chains_.IsVRegUsed(check_change + 1u, c, new_dest_v_reg, mir_graph_) &&
+ (!d->wide_def ||
+ !vreg_chains_.IsVRegUsed(check_change + 1u, c, new_dest_v_reg + 1, mir_graph_))) {
+ RecordPassKillMoveByRenamingSrcDef(check_change, c);
+ return;
+ }
}
}
}
@@ -967,18 +1023,17 @@
uint16_t opcode = mir->dalvikInsn.opcode;
switch (opcode) {
case kMirOpPhi: {
- // We can't recognize wide variables in Phi from num_defs == 2 as we've got two Phis instead.
+ // Determine if this Phi is merging wide regs.
+ RegLocation raw_dest = gvn_->GetMirGraph()->GetRawDest(mir);
+ if (raw_dest.high_word) {
+ // This is the high part of a wide reg. Ignore the Phi.
+ return false;
+ }
+ bool wide = raw_dest.wide;
+ // Record the value.
DCHECK_EQ(mir->ssa_rep->num_defs, 1);
int s_reg = mir->ssa_rep->defs[0];
- bool wide = false;
- uint16_t new_value = lvn_->GetSregValue(s_reg);
- if (new_value == kNoValue) {
- wide = true;
- new_value = lvn_->GetSregValueWide(s_reg);
- if (new_value == kNoValue) {
- return false; // Ignore the high word Phi.
- }
- }
+ uint16_t new_value = wide ? lvn_->GetSregValueWide(s_reg) : lvn_->GetSregValue(s_reg);
int v_reg = mir_graph_->SRegToVReg(s_reg);
DCHECK_EQ(vreg_chains_.CurrentValue(v_reg), kNoValue); // No previous def for v_reg.
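Note: the new vreg_high_words_ bit vector records which vregs currently hold the high word of a wide value even when no tracked MIR change defines them; that is what the low_def_over_high_word / high_def_over_low_word computations above fall back to. Stripped to its core, the bookkeeping on a def looks like this (names simplified from the patch):

    // Sketch: update high-word tracking when a def lands on vreg v.
    void OnDef(BitVector* high_words, int v, bool wide) {
      high_words->ClearBit(v);      // v now holds a low word or a narrow value.
      if (wide) {
        high_words->SetBit(v + 1);  // v + 1 holds the new pair's high word.
      }
    }
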
diff --git a/compiler/dex/gvn_dead_code_elimination.h b/compiler/dex/gvn_dead_code_elimination.h
index 9a19f29..06022db 100644
--- a/compiler/dex/gvn_dead_code_elimination.h
+++ b/compiler/dex/gvn_dead_code_elimination.h
@@ -111,6 +111,8 @@
void RemoveChange(uint16_t change);
bool IsTopChange(uint16_t change) const;
bool IsSRegUsed(uint16_t first_change, uint16_t last_change, int s_reg) const;
+ bool IsVRegUsed(uint16_t first_change, uint16_t last_change, int v_reg,
+ MIRGraph* mir_graph) const;
void RenameSRegUses(uint16_t first_change, uint16_t last_change,
int old_s_reg, int new_s_reg, bool wide);
void RenameVRegUses(uint16_t first_change, uint16_t last_change,
@@ -119,6 +121,7 @@
private:
const uint32_t num_vregs_;
VRegValue* const vreg_data_;
+ BitVector vreg_high_words_;
ScopedArenaVector<MIRData> mir_data_;
};
@@ -128,7 +131,7 @@
void KillMIR(MIRData* data);
static void KillMIR(MIR* mir);
static void ChangeBinOp2AddrToPlainBinOp(MIR* mir);
- MIR* CreatePhi(int s_reg, bool fp);
+ MIR* CreatePhi(int s_reg);
MIR* RenameSRegDefOrCreatePhi(uint16_t def_change, uint16_t last_change, MIR* mir_to_kill);
// Update state variables going backwards through a MIR.
diff --git a/compiler/dex/gvn_dead_code_elimination_test.cc b/compiler/dex/gvn_dead_code_elimination_test.cc
index 4d2b8b3..de591d0 100644
--- a/compiler/dex/gvn_dead_code_elimination_test.cc
+++ b/compiler/dex/gvn_dead_code_elimination_test.cc
@@ -406,6 +406,15 @@
}
}
+ template <size_t count>
+ void MarkAsWideSRegs(const int32_t (&sregs)[count]) {
+ for (int32_t sreg : sregs) {
+ cu_.mir_graph->reg_location_[sreg].wide = true;
+ cu_.mir_graph->reg_location_[sreg + 1].wide = true;
+ cu_.mir_graph->reg_location_[sreg + 1].high_word = true;
+ }
+ }
+
void PerformDCE() {
FillVregToSsaRegExitMaps();
cu_.mir_graph->GetNumOfCodeAndTempVRs();
@@ -467,9 +476,11 @@
cu_.access_flags = kAccStatic; // Don't let "this" interfere with this test.
allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
// By default, the zero-initialized reg_location_[.] with ref == false tells LVN that
- // 0 constants are integral, not references. Nothing else is used by LVN/GVN.
+ // 0 constants are integral, not references, and the values are all narrow.
+ // Nothing else is used by LVN/GVN. Tests can override the default values as needed.
cu_.mir_graph->reg_location_ = static_cast<RegLocation*>(cu_.arena.Alloc(
kMaxSsaRegs * sizeof(cu_.mir_graph->reg_location_[0]), kArenaAllocRegAlloc));
+ cu_.mir_graph->num_ssa_regs_ = kMaxSsaRegs;
// Bind all possible sregs to live vregs for test purposes.
live_in_v_->SetInitialBits(kMaxSsaRegs);
cu_.mir_graph->ssa_base_vregs_.reserve(kMaxSsaRegs);
@@ -705,6 +716,8 @@
PrepareSRegToVRegMap(sreg_to_vreg_map);
PrepareMIRs(mirs);
+ static const int32_t wide_sregs[] = { 3 };
+ MarkAsWideSRegs(wide_sregs);
PerformGVN_DCE();
ASSERT_EQ(arraysize(mirs), value_names_.size());
@@ -745,6 +758,8 @@
PrepareIFields(ifields);
PrepareMIRs(mirs);
+ static const int32_t wide_sregs[] = { 5 };
+ MarkAsWideSRegs(wide_sregs);
PerformGVN_DCE();
ASSERT_EQ(arraysize(mirs), value_names_.size());
@@ -777,6 +792,8 @@
PrepareSRegToVRegMap(sreg_to_vreg_map);
PrepareMIRs(mirs);
+ static const int32_t wide_sregs[] = { 0, 2 };
+ MarkAsWideSRegs(wide_sregs);
PerformGVN_DCE();
ASSERT_EQ(arraysize(mirs), value_names_.size());
@@ -1030,6 +1047,40 @@
}
}
+TEST_F(GvnDeadCodeEliminationTestSimple, NoRename4) {
+ static const MIRDef mirs[] = {
+ DEF_CONST(3, Instruction::CONST, 0u, 1000u),
+ DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 1u),
+ DEF_CONST(3, Instruction::CONST, 2u, 100u),
+ DEF_CONST(3, Instruction::CONST, 3u, 200u),
+ DEF_BINOP(3, Instruction::OR_INT_2ADDR, 4u, 2u, 3u), // 3. Find definition of the move src.
+ DEF_MOVE(3, Instruction::MOVE, 5u, 0u), // 4. Uses move dest vreg.
+ DEF_MOVE(3, Instruction::MOVE, 6u, 4u), // 2. Find overwritten move src.
+ DEF_CONST(3, Instruction::CONST, 7u, 2000u), // 1. Overwrites 4u, look for moves.
+ };
+
+ static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 2, 4, 0, 2 };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareMIRs(mirs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ static const size_t diff_indexes[] = { 0, 1, 2, 3, 4, 7 };
+ ExpectValueNamesNE(diff_indexes);
+ EXPECT_EQ(value_names_[0], value_names_[5]);
+ EXPECT_EQ(value_names_[4], value_names_[6]);
+
+ static const bool eliminated[] = {
+ false, false, false, false, false, false, false, false
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+}
+
TEST_F(GvnDeadCodeEliminationTestSimple, Simple1) {
static const IFieldDef ifields[] = {
{ 0u, 1u, 0u, false, kDexMemAccessObject },
@@ -1221,6 +1272,8 @@
PrepareIFields(ifields);
PrepareMIRs(mirs);
+ static const int32_t wide_sregs[] = { 1, 6 };
+ MarkAsWideSRegs(wide_sregs);
PerformGVN_DCE();
ASSERT_EQ(arraysize(mirs), value_names_.size());
@@ -1576,6 +1629,52 @@
}
TEST_F(GvnDeadCodeEliminationTestDiamond, CreatePhi2) {
+ static const MIRDef mirs[] = {
+ DEF_CONST(3, Instruction::CONST, 0u, 1000),
+ DEF_MOVE(4, Instruction::MOVE, 1u, 0u),
+ DEF_CONST(4, Instruction::CONST, 2u, 1000),
+ };
+
+ static const int32_t sreg_to_vreg_map[] = { 0, 1, 0 };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareMIRs(mirs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ EXPECT_EQ(value_names_[0], value_names_[1]);
+ EXPECT_EQ(value_names_[0], value_names_[2]);
+
+ static const bool eliminated[] = {
+ false, false, true,
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+ // Check that we've created a single-input Phi to replace the CONST 2u.
+ BasicBlock* bb4 = cu_.mir_graph->GetBasicBlock(4);
+ MIR* phi = bb4->first_mir_insn;
+ ASSERT_TRUE(phi != nullptr);
+ ASSERT_EQ(kMirOpPhi, static_cast<int>(phi->dalvikInsn.opcode));
+ ASSERT_EQ(1, phi->ssa_rep->num_uses);
+ EXPECT_EQ(0, phi->ssa_rep->uses[0]);
+ ASSERT_EQ(1, phi->ssa_rep->num_defs);
+ EXPECT_EQ(2, phi->ssa_rep->defs[0]);
+ EXPECT_EQ(0u, phi->dalvikInsn.vA);
+ MIR* move = phi->next;
+ ASSERT_TRUE(move != nullptr);
+ ASSERT_EQ(Instruction::MOVE, move->dalvikInsn.opcode);
+ ASSERT_EQ(1, move->ssa_rep->num_uses);
+ EXPECT_EQ(2, move->ssa_rep->uses[0]);
+ ASSERT_EQ(1, move->ssa_rep->num_defs);
+ EXPECT_EQ(1, move->ssa_rep->defs[0]);
+ EXPECT_EQ(1u, move->dalvikInsn.vA);
+ EXPECT_EQ(0u, move->dalvikInsn.vB);
+}
+
+TEST_F(GvnDeadCodeEliminationTestDiamond, CreatePhi3) {
static const IFieldDef ifields[] = {
{ 0u, 1u, 0u, false, kDexMemAccessWord },
};
@@ -1797,4 +1896,91 @@
EXPECT_EQ(2u, phi->dalvikInsn.vA);
}
+TEST_F(GvnDeadCodeEliminationTestDiamond, LongOverlaps1) {
+ static const MIRDef mirs[] = {
+ DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 0u, 1000u),
+ DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 2u, 1000u),
+ DEF_MOVE_WIDE(4, Instruction::MOVE_WIDE, 4u, 0u),
+ DEF_MOVE_WIDE(4, Instruction::MOVE_WIDE, 6u, 2u),
+ DEF_MOVE_WIDE(4, Instruction::MOVE_WIDE, 8u, 4u),
+ DEF_MOVE_WIDE(4, Instruction::MOVE_WIDE, 10u, 6u),
+ };
+
+ // The last insn should overlap the first and second.
+ static const int32_t sreg_to_vreg_map[] = { 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3 };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareMIRs(mirs);
+ static const int32_t wide_sregs[] = { 0, 2, 4, 6, 8, 10 };
+ MarkAsWideSRegs(wide_sregs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ EXPECT_EQ(value_names_[0], value_names_[1]);
+ EXPECT_EQ(value_names_[0], value_names_[2]);
+ EXPECT_EQ(value_names_[0], value_names_[3]);
+ EXPECT_EQ(value_names_[0], value_names_[4]);
+
+ static const bool eliminated[] = {
+ false, false, false, false, false, false,
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+}
+
+TEST_F(GvnDeadCodeEliminationTestSimple, MixedOverlaps1) {
+ static const MIRDef mirs[] = {
+ DEF_CONST(3, Instruction::CONST, 0u, 1000u),
+ DEF_MOVE(3, Instruction::MOVE, 1u, 0u),
+ DEF_CONST(3, Instruction::CONST, 2u, 2000u),
+ { 3, Instruction::INT_TO_LONG, 0, 0u, 1, { 2u }, 2, { 3u, 4u } },
+ DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 5u, 3u),
+ DEF_CONST(3, Instruction::CONST, 7u, 3000u),
+ DEF_CONST(3, Instruction::CONST, 8u, 4000u),
+ };
+
+ static const int32_t sreg_to_vreg_map[] = { 1, 2, 0, 0, 1, 3, 4, 0, 1 };
+ PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+ PrepareMIRs(mirs);
+ static const int32_t wide_sregs[] = { 3, 5 };
+ MarkAsWideSRegs(wide_sregs);
+ PerformGVN_DCE();
+
+ ASSERT_EQ(arraysize(mirs), value_names_.size());
+ static const size_t diff_indexes[] = { 0, 2, 3, 5, 6 };
+ ExpectValueNamesNE(diff_indexes);
+ EXPECT_EQ(value_names_[0], value_names_[1]);
+ EXPECT_EQ(value_names_[3], value_names_[4]);
+
+ static const bool eliminated[] = {
+ false, true, false, false, true, false, false,
+ };
+ static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(eliminated); ++i) {
+ bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+ EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+ }
+ // Check renamed registers in CONST.
+ MIR* cst = &mirs_[0];
+ ASSERT_EQ(Instruction::CONST, cst->dalvikInsn.opcode);
+ ASSERT_EQ(0, cst->ssa_rep->num_uses);
+ ASSERT_EQ(1, cst->ssa_rep->num_defs);
+ EXPECT_EQ(1, cst->ssa_rep->defs[0]);
+ EXPECT_EQ(2u, cst->dalvikInsn.vA);
+ // Check renamed registers in INT_TO_LONG.
+ MIR* int_to_long = &mirs_[3];
+ ASSERT_EQ(Instruction::INT_TO_LONG, int_to_long->dalvikInsn.opcode);
+ ASSERT_EQ(1, int_to_long->ssa_rep->num_uses);
+ EXPECT_EQ(2, int_to_long->ssa_rep->uses[0]);
+ ASSERT_EQ(2, int_to_long->ssa_rep->num_defs);
+ EXPECT_EQ(5, int_to_long->ssa_rep->defs[0]);
+ EXPECT_EQ(6, int_to_long->ssa_rep->defs[1]);
+ EXPECT_EQ(3u, int_to_long->dalvikInsn.vA);
+ EXPECT_EQ(0u, int_to_long->dalvikInsn.vB);
+}
+
} // namespace art
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index cdf5e38..cc9dbe4 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -1152,28 +1152,20 @@
// Running LVN without a full GVN?
return kNoValue;
}
- int32_t* uses = mir->ssa_rep->uses;
- // Try to find out if this is merging wide regs.
- if (mir->ssa_rep->defs[0] != 0 &&
- sreg_wide_value_map_.count(mir->ssa_rep->defs[0] - 1) != 0u) {
+ // Determine if this Phi is merging wide regs.
+ RegLocation raw_dest = gvn_->GetMirGraph()->GetRawDest(mir);
+ if (raw_dest.high_word) {
// This is the high part of a wide reg. Ignore the Phi.
return kNoValue;
}
- BasicBlockId* incoming = mir->meta.phi_incoming;
- int16_t pos = 0;
- // Check if we're merging a wide value based on the first merged LVN.
- const LocalValueNumbering* first_lvn = gvn_->merge_lvns_[0];
- DCHECK_LT(pos, mir->ssa_rep->num_uses);
- while (incoming[pos] != first_lvn->Id()) {
- ++pos;
- DCHECK_LT(pos, mir->ssa_rep->num_uses);
- }
- int first_s_reg = uses[pos];
- bool wide = (first_lvn->sreg_wide_value_map_.count(first_s_reg) != 0u);
+ bool wide = raw_dest.wide;
// Iterate over *merge_lvns_ and skip incoming sregs for BBs without associated LVN.
merge_names_.clear();
uint16_t value_name = kNoValue;
bool same_values = true;
+ BasicBlockId* incoming = mir->meta.phi_incoming;
+ int32_t* uses = mir->ssa_rep->uses;
+ int16_t pos = 0;
for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
DCHECK_LT(pos, mir->ssa_rep->num_uses);
while (incoming[pos] != lvn->Id()) {
@@ -1994,6 +1986,9 @@
if (s_reg == INVALID_SREG) {
return kNoValue;
}
+ if (gvn_->GetMirGraph()->GetRegLocation(s_reg).wide != wide) {
+ return kNoValue;
+ }
if (wide) {
int high_s_reg = bb->data_flow_info->vreg_to_ssa_map_exit[v_reg + 1];
if (high_s_reg != s_reg + 1) {
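Note: the added width check refuses to reuse a predecessor's value number when the SSA register's declared width disagrees with the requested lookup; for a wide lookup, the high half must additionally map to the adjacent SSA register. Condensed into a sketch (a restatement, not the verbatim code):

    // Sketch of the width-consistency gate.
    bool WidthConsistent(MIRGraph* graph, const BasicBlock* bb,
                         int v_reg, int s_reg, bool wide) {
      if (graph->GetRegLocation(s_reg).wide != wide) {
        return false;  // Mismatched width: caller returns kNoValue.
      }
      if (wide) {
        int high_s_reg = bb->data_flow_info->vreg_to_ssa_map_exit[v_reg + 1];
        return high_s_reg == s_reg + 1;  // High word must be the adjacent sreg.
      }
      return true;
    }
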
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
index 379c952..67fb647 100644
--- a/compiler/dex/local_value_numbering.h
+++ b/compiler/dex/local_value_numbering.h
@@ -53,10 +53,12 @@
}
uint16_t GetSregValue(uint16_t s_reg) const {
+ DCHECK(!gvn_->GetMirGraph()->GetRegLocation(s_reg).wide);
return GetSregValueImpl(s_reg, &sreg_value_map_);
}
uint16_t GetSregValueWide(uint16_t s_reg) const {
+ DCHECK(gvn_->GetMirGraph()->GetRegLocation(s_reg).wide);
return GetSregValueImpl(s_reg, &sreg_wide_value_map_);
}
@@ -123,21 +125,27 @@
void SetOperandValue(uint16_t s_reg, uint16_t value) {
DCHECK_EQ(sreg_wide_value_map_.count(s_reg), 0u);
+ DCHECK(!gvn_->GetMirGraph()->GetRegLocation(s_reg).wide);
SetOperandValueImpl(s_reg, value, &sreg_value_map_);
}
uint16_t GetOperandValue(int s_reg) const {
DCHECK_EQ(sreg_wide_value_map_.count(s_reg), 0u);
+ DCHECK(!gvn_->GetMirGraph()->GetRegLocation(s_reg).wide);
return GetOperandValueImpl(s_reg, &sreg_value_map_);
}
void SetOperandValueWide(uint16_t s_reg, uint16_t value) {
DCHECK_EQ(sreg_value_map_.count(s_reg), 0u);
+ DCHECK(gvn_->GetMirGraph()->GetRegLocation(s_reg).wide);
+ DCHECK(!gvn_->GetMirGraph()->GetRegLocation(s_reg).high_word);
SetOperandValueImpl(s_reg, value, &sreg_wide_value_map_);
}
uint16_t GetOperandValueWide(int s_reg) const {
DCHECK_EQ(sreg_value_map_.count(s_reg), 0u);
+ DCHECK(gvn_->GetMirGraph()->GetRegLocation(s_reg).wide);
+ DCHECK(!gvn_->GetMirGraph()->GetRegLocation(s_reg).high_word);
return GetOperandValueImpl(s_reg, &sreg_wide_value_map_);
}
@@ -331,7 +339,7 @@
void CopyLiveSregValues(SregValueMap* dest, const SregValueMap& src);
- // Intersect maps as sets. The value type must be equality-comparable.
+ // Intersect SSA reg value maps as sets, ignore dead regs.
template <SregValueMap LocalValueNumbering::* map_ptr>
void IntersectSregValueMaps();
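Note: the new DCHECKs pin down an invariant: a narrow SSA register may only appear in sreg_value_map_, a wide low word only in sreg_wide_value_map_, and a wide high word never carries its own entry. A toy restatement, using std::unordered_map in place of ART's SregValueMap:

    #include <cstdint>
    #include <unordered_map>

    using ValueMap = std::unordered_map<uint16_t, uint16_t>;

    // Each s_reg lives in exactly one map, chosen by its declared width;
    // callers never insert an entry for a wide high word.
    void SetSregValue(bool wide, uint16_t s_reg, uint16_t value,
                      ValueMap* narrow_map, ValueMap* wide_map) {
      (wide ? wide_map : narrow_map)->emplace(s_reg, value);
    }
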
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index 566527a..bd00690 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -158,10 +158,8 @@
mir->ssa_rep = &ssa_reps_[i];
mir->ssa_rep->num_uses = def->num_uses;
mir->ssa_rep->uses = const_cast<int32_t*>(def->uses); // Not modified by LVN.
- mir->ssa_rep->fp_use = nullptr; // Not used by LVN.
mir->ssa_rep->num_defs = def->num_defs;
mir->ssa_rep->defs = const_cast<int32_t*>(def->defs); // Not modified by LVN.
- mir->ssa_rep->fp_def = nullptr; // Not used by LVN.
mir->dalvikInsn.opcode = def->opcode;
mir->offset = i; // LVN uses offset only for debug output
mir->optimization_flags = 0u;
@@ -184,6 +182,15 @@
~MirSFieldLoweringInfo::kFlagClassIsInitialized;
}
+ template <size_t count>
+ void MarkAsWideSRegs(const int32_t (&sregs)[count]) {
+ for (int32_t sreg : sregs) {
+ cu_.mir_graph->reg_location_[sreg].wide = true;
+ cu_.mir_graph->reg_location_[sreg + 1].wide = true;
+ cu_.mir_graph->reg_location_[sreg + 1].high_word = true;
+ }
+ }
+
void PerformLVN() {
cu_.mir_graph->temp_.gvn.ifield_ids = GlobalValueNumbering::PrepareGvnFieldIds(
allocator_.get(), cu_.mir_graph->ifield_lowering_infos_);
@@ -212,9 +219,11 @@
cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
// By default, the zero-initialized reg_location_[.] with ref == false tells LVN that
- // 0 constants are integral, not references. Nothing else is used by LVN/GVN.
+ // 0 constants are integral, not references, and the values are all narrow.
+ // Nothing else is used by LVN/GVN. Tests can override the default values as needed.
cu_.mir_graph->reg_location_ = static_cast<RegLocation*>(cu_.arena.Alloc(
kMaxSsaRegs * sizeof(cu_.mir_graph->reg_location_[0]), kArenaAllocRegAlloc));
+ cu_.mir_graph->num_ssa_regs_ = kMaxSsaRegs;
}
static constexpr size_t kMaxSsaRegs = 16384u;
@@ -381,26 +390,28 @@
{ 3u, 0u, 0u, false, kDexMemAccessWord }, // Unresolved field.
};
static const MIRDef mirs[] = {
- DEF_UNIQUE_REF(Instruction::NEW_INSTANCE, 20u),
- DEF_IGET(Instruction::IGET, 1u, 20u, 0u), // Resolved field #1, unique object.
- DEF_IGET(Instruction::IGET, 2u, 21u, 0u), // Resolved field #1.
- DEF_IGET_WIDE(Instruction::IGET_WIDE, 3u, 21u, 1u), // Resolved field #2.
- DEF_IGET(Instruction::IGET, 4u, 22u, 2u), // Unresolved IGET can be "acquire".
- DEF_IGET(Instruction::IGET, 5u, 20u, 0u), // Resolved field #1, unique object.
- DEF_IGET(Instruction::IGET, 6u, 21u, 0u), // Resolved field #1.
- DEF_IGET_WIDE(Instruction::IGET_WIDE, 7u, 21u, 1u), // Resolved field #2.
- DEF_IPUT(Instruction::IPUT, 8u, 22u, 2u), // IPUT clobbers field #1 (#2 is wide).
- DEF_IGET(Instruction::IGET, 9u, 20u, 0u), // Resolved field #1, unique object.
- DEF_IGET(Instruction::IGET, 10u, 21u, 0u), // Resolved field #1, new value name.
- DEF_IGET_WIDE(Instruction::IGET_WIDE, 11u, 21u, 1u), // Resolved field #2.
- DEF_IGET_WIDE(Instruction::IGET_WIDE, 12u, 20u, 1u), // Resolved field #2, unique object.
- DEF_IPUT(Instruction::IPUT, 13u, 20u, 2u), // IPUT clobbers field #1 (#2 is wide).
- DEF_IGET(Instruction::IGET, 14u, 20u, 0u), // Resolved field #1, unique object.
- DEF_IGET_WIDE(Instruction::IGET_WIDE, 15u, 20u, 1u), // Resolved field #2, unique object.
+ DEF_UNIQUE_REF(Instruction::NEW_INSTANCE, 30u),
+ DEF_IGET(Instruction::IGET, 1u, 30u, 0u), // Resolved field #1, unique object.
+ DEF_IGET(Instruction::IGET, 2u, 31u, 0u), // Resolved field #1.
+ DEF_IGET_WIDE(Instruction::IGET_WIDE, 3u, 31u, 1u), // Resolved field #2.
+ DEF_IGET(Instruction::IGET, 5u, 32u, 2u), // Unresolved IGET can be "acquire".
+ DEF_IGET(Instruction::IGET, 6u, 30u, 0u), // Resolved field #1, unique object.
+ DEF_IGET(Instruction::IGET, 7u, 31u, 0u), // Resolved field #1.
+ DEF_IGET_WIDE(Instruction::IGET_WIDE, 8u, 31u, 1u), // Resolved field #2.
+ DEF_IPUT(Instruction::IPUT, 10u, 32u, 2u), // IPUT clobbers field #1 (#2 is wide).
+ DEF_IGET(Instruction::IGET, 11u, 30u, 0u), // Resolved field #1, unique object.
+ DEF_IGET(Instruction::IGET, 12u, 31u, 0u), // Resolved field #1, new value name.
+ DEF_IGET_WIDE(Instruction::IGET_WIDE, 13u, 31u, 1u), // Resolved field #2.
+ DEF_IGET_WIDE(Instruction::IGET_WIDE, 15u, 30u, 1u), // Resolved field #2, unique object.
+ DEF_IPUT(Instruction::IPUT, 17u, 30u, 2u), // IPUT clobbers field #1 (#2 is wide).
+ DEF_IGET(Instruction::IGET, 18u, 30u, 0u), // Resolved field #1, unique object.
+ DEF_IGET_WIDE(Instruction::IGET_WIDE, 19u, 30u, 1u), // Resolved field #2, unique object.
};
PrepareIFields(ifields);
PrepareMIRs(mirs);
+ static const int32_t wide_sregs[] = { 3, 8, 13, 15, 19 };
+ MarkAsWideSRegs(wide_sregs);
PerformLVN();
ASSERT_EQ(value_names_.size(), 16u);
// Unresolved field is potentially volatile, so we need to adhere to the volatile semantics.
@@ -432,16 +443,18 @@
static const MIRDef mirs[] = {
DEF_SGET(Instruction::SGET, 0u, 0u), // Resolved field #1.
DEF_SGET_WIDE(Instruction::SGET_WIDE, 1u, 1u), // Resolved field #2.
- DEF_SGET(Instruction::SGET, 2u, 2u), // Unresolved SGET can be "acquire".
- DEF_SGET(Instruction::SGET, 3u, 0u), // Resolved field #1.
- DEF_SGET_WIDE(Instruction::SGET_WIDE, 4u, 1u), // Resolved field #2.
- DEF_SPUT(Instruction::SPUT, 5u, 2u), // SPUT clobbers field #1 (#2 is wide).
- DEF_SGET(Instruction::SGET, 6u, 0u), // Resolved field #1.
- DEF_SGET_WIDE(Instruction::SGET_WIDE, 7u, 1u), // Resolved field #2.
+ DEF_SGET(Instruction::SGET, 3u, 2u), // Unresolved SGET can be "acquire".
+ DEF_SGET(Instruction::SGET, 4u, 0u), // Resolved field #1.
+ DEF_SGET_WIDE(Instruction::SGET_WIDE, 5u, 1u), // Resolved field #2.
+ DEF_SPUT(Instruction::SPUT, 7u, 2u), // SPUT clobbers field #1 (#2 is wide).
+ DEF_SGET(Instruction::SGET, 8u, 0u), // Resolved field #1.
+ DEF_SGET_WIDE(Instruction::SGET_WIDE, 9u, 1u), // Resolved field #2.
};
PrepareSFields(sfields);
PrepareMIRs(mirs);
+ static const int32_t wide_sregs[] = { 1, 5, 9 };
+ MarkAsWideSRegs(wide_sregs);
PerformLVN();
ASSERT_EQ(value_names_.size(), 8u);
// Unresolved field is potentially volatile, so we need to adhere to the volatile semantics.
@@ -587,18 +600,20 @@
DEF_IGET(Instruction::IGET, 7u, 20u, 0u), // New value.
DEF_IGET(Instruction::IGET, 8u, 20u, 1u), // Still the same.
DEF_IPUT_WIDE(Instruction::IPUT_WIDE, 9u, 31u, 3u), // No aliasing, different type.
- DEF_IGET(Instruction::IGET, 10u, 20u, 0u),
- DEF_IGET(Instruction::IGET, 11u, 20u, 1u),
- DEF_IPUT_WIDE(Instruction::IPUT_WIDE, 12u, 31u, 5u), // No aliasing, different type.
- DEF_IGET(Instruction::IGET, 13u, 20u, 0u),
- DEF_IGET(Instruction::IGET, 14u, 20u, 1u),
- DEF_IPUT(Instruction::IPUT, 15u, 31u, 4u), // Aliasing, same type.
- DEF_IGET(Instruction::IGET, 16u, 20u, 0u),
- DEF_IGET(Instruction::IGET, 17u, 20u, 1u),
+ DEF_IGET(Instruction::IGET, 11u, 20u, 0u),
+ DEF_IGET(Instruction::IGET, 12u, 20u, 1u),
+ DEF_IPUT_WIDE(Instruction::IPUT_WIDE, 13u, 31u, 5u), // No aliasing, different type.
+ DEF_IGET(Instruction::IGET, 15u, 20u, 0u),
+ DEF_IGET(Instruction::IGET, 16u, 20u, 1u),
+ DEF_IPUT(Instruction::IPUT, 17u, 31u, 4u), // Aliasing, same type.
+ DEF_IGET(Instruction::IGET, 18u, 20u, 0u),
+ DEF_IGET(Instruction::IGET, 19u, 20u, 1u),
};
PrepareIFields(ifields);
PrepareMIRs(mirs);
+ static const int32_t wide_sregs[] = { 9, 13 };
+ MarkAsWideSRegs(wide_sregs);
PerformLVN();
ASSERT_EQ(value_names_.size(), 18u);
EXPECT_EQ(value_names_[1], value_names_[4]);
@@ -628,14 +643,16 @@
DEF_AGET(Instruction::AGET, 4u, 20u, 40u),
DEF_AGET(Instruction::AGET, 5u, 20u, 41u),
DEF_APUT_WIDE(Instruction::APUT_WIDE, 6u, 31u, 43u), // No aliasing, different type.
- DEF_AGET(Instruction::AGET, 7u, 20u, 40u),
- DEF_AGET(Instruction::AGET, 8u, 20u, 41u),
- DEF_APUT(Instruction::APUT, 9u, 32u, 40u), // May alias with all elements.
- DEF_AGET(Instruction::AGET, 10u, 20u, 40u), // New value (same index name).
- DEF_AGET(Instruction::AGET, 11u, 20u, 41u), // New value (different index name).
+ DEF_AGET(Instruction::AGET, 8u, 20u, 40u),
+ DEF_AGET(Instruction::AGET, 9u, 20u, 41u),
+ DEF_APUT(Instruction::APUT, 10u, 32u, 40u), // May alias with all elements.
+ DEF_AGET(Instruction::AGET, 11u, 20u, 40u), // New value (same index name).
+ DEF_AGET(Instruction::AGET, 12u, 20u, 41u), // New value (different index name).
};
PrepareMIRs(mirs);
+ static const int32_t wide_sregs[] = { 6 };
+ MarkAsWideSRegs(wide_sregs);
PerformLVN();
ASSERT_EQ(value_names_.size(), 12u);
EXPECT_EQ(value_names_[1], value_names_[4]);
@@ -771,6 +788,8 @@
};
PrepareMIRs(mirs);
+ static const int32_t wide_sregs[] = { 5, 7, 12, 14, 16 };
+ MarkAsWideSRegs(wide_sregs);
PerformLVN();
for (size_t i = 0u; i != mir_count_; ++i) {
int expected = expected_ignore_div_zero_check[i] ? MIR_IGNORE_DIV_ZERO_CHECK : 0u;
@@ -782,51 +801,55 @@
static const MIRDef mirs[] = {
// Core reg constants.
DEF_CONST(Instruction::CONST_WIDE_16, 0u, 0),
- DEF_CONST(Instruction::CONST_WIDE_16, 1u, 1),
- DEF_CONST(Instruction::CONST_WIDE_16, 2u, -1),
- DEF_CONST(Instruction::CONST_WIDE_32, 3u, 1 << 16),
- DEF_CONST(Instruction::CONST_WIDE_32, 4u, -1 << 16),
- DEF_CONST(Instruction::CONST_WIDE_32, 5u, (1 << 16) + 1),
- DEF_CONST(Instruction::CONST_WIDE_32, 6u, (1 << 16) - 1),
- DEF_CONST(Instruction::CONST_WIDE_32, 7u, -(1 << 16) + 1),
- DEF_CONST(Instruction::CONST_WIDE_32, 8u, -(1 << 16) - 1),
- DEF_CONST(Instruction::CONST_WIDE, 9u, INT64_C(1) << 32),
- DEF_CONST(Instruction::CONST_WIDE, 10u, INT64_C(-1) << 32),
- DEF_CONST(Instruction::CONST_WIDE, 11u, (INT64_C(1) << 32) + 1),
- DEF_CONST(Instruction::CONST_WIDE, 12u, (INT64_C(1) << 32) - 1),
- DEF_CONST(Instruction::CONST_WIDE, 13u, (INT64_C(-1) << 32) + 1),
- DEF_CONST(Instruction::CONST_WIDE, 14u, (INT64_C(-1) << 32) - 1),
- DEF_CONST(Instruction::CONST_WIDE_HIGH16, 15u, 1), // Effectively 1 << 48.
- DEF_CONST(Instruction::CONST_WIDE_HIGH16, 16u, 0xffff), // Effectively -1 << 48.
- DEF_CONST(Instruction::CONST_WIDE, 17u, (INT64_C(1) << 48) + 1),
- DEF_CONST(Instruction::CONST_WIDE, 18u, (INT64_C(1) << 48) - 1),
- DEF_CONST(Instruction::CONST_WIDE, 19u, (INT64_C(-1) << 48) + 1),
- DEF_CONST(Instruction::CONST_WIDE, 20u, (INT64_C(-1) << 48) - 1),
+ DEF_CONST(Instruction::CONST_WIDE_16, 2u, 1),
+ DEF_CONST(Instruction::CONST_WIDE_16, 4u, -1),
+ DEF_CONST(Instruction::CONST_WIDE_32, 6u, 1 << 16),
+ DEF_CONST(Instruction::CONST_WIDE_32, 8u, -1 << 16),
+ DEF_CONST(Instruction::CONST_WIDE_32, 10u, (1 << 16) + 1),
+ DEF_CONST(Instruction::CONST_WIDE_32, 12u, (1 << 16) - 1),
+ DEF_CONST(Instruction::CONST_WIDE_32, 14u, -(1 << 16) + 1),
+ DEF_CONST(Instruction::CONST_WIDE_32, 16u, -(1 << 16) - 1),
+ DEF_CONST(Instruction::CONST_WIDE, 18u, INT64_C(1) << 32),
+ DEF_CONST(Instruction::CONST_WIDE, 20u, INT64_C(-1) << 32),
+ DEF_CONST(Instruction::CONST_WIDE, 22u, (INT64_C(1) << 32) + 1),
+ DEF_CONST(Instruction::CONST_WIDE, 24u, (INT64_C(1) << 32) - 1),
+ DEF_CONST(Instruction::CONST_WIDE, 26u, (INT64_C(-1) << 32) + 1),
+ DEF_CONST(Instruction::CONST_WIDE, 28u, (INT64_C(-1) << 32) - 1),
+ DEF_CONST(Instruction::CONST_WIDE_HIGH16, 30u, 1), // Effectively 1 << 48.
+ DEF_CONST(Instruction::CONST_WIDE_HIGH16, 32u, 0xffff), // Effectively -1 << 48.
+ DEF_CONST(Instruction::CONST_WIDE, 34u, (INT64_C(1) << 48) + 1),
+ DEF_CONST(Instruction::CONST_WIDE, 36u, (INT64_C(1) << 48) - 1),
+ DEF_CONST(Instruction::CONST_WIDE, 38u, (INT64_C(-1) << 48) + 1),
+ DEF_CONST(Instruction::CONST_WIDE, 40u, (INT64_C(-1) << 48) - 1),
// FP reg constants.
- DEF_CONST(Instruction::CONST_WIDE_16, 21u, 0),
- DEF_CONST(Instruction::CONST_WIDE_16, 22u, 1),
- DEF_CONST(Instruction::CONST_WIDE_16, 23u, -1),
- DEF_CONST(Instruction::CONST_WIDE_32, 24u, 1 << 16),
- DEF_CONST(Instruction::CONST_WIDE_32, 25u, -1 << 16),
- DEF_CONST(Instruction::CONST_WIDE_32, 26u, (1 << 16) + 1),
- DEF_CONST(Instruction::CONST_WIDE_32, 27u, (1 << 16) - 1),
- DEF_CONST(Instruction::CONST_WIDE_32, 28u, -(1 << 16) + 1),
- DEF_CONST(Instruction::CONST_WIDE_32, 29u, -(1 << 16) - 1),
- DEF_CONST(Instruction::CONST_WIDE, 30u, INT64_C(1) << 32),
- DEF_CONST(Instruction::CONST_WIDE, 31u, INT64_C(-1) << 32),
- DEF_CONST(Instruction::CONST_WIDE, 32u, (INT64_C(1) << 32) + 1),
- DEF_CONST(Instruction::CONST_WIDE, 33u, (INT64_C(1) << 32) - 1),
- DEF_CONST(Instruction::CONST_WIDE, 34u, (INT64_C(-1) << 32) + 1),
- DEF_CONST(Instruction::CONST_WIDE, 35u, (INT64_C(-1) << 32) - 1),
- DEF_CONST(Instruction::CONST_WIDE_HIGH16, 36u, 1), // Effectively 1 << 48.
- DEF_CONST(Instruction::CONST_WIDE_HIGH16, 37u, 0xffff), // Effectively -1 << 48.
- DEF_CONST(Instruction::CONST_WIDE, 38u, (INT64_C(1) << 48) + 1),
- DEF_CONST(Instruction::CONST_WIDE, 39u, (INT64_C(1) << 48) - 1),
- DEF_CONST(Instruction::CONST_WIDE, 40u, (INT64_C(-1) << 48) + 1),
- DEF_CONST(Instruction::CONST_WIDE, 41u, (INT64_C(-1) << 48) - 1),
+ DEF_CONST(Instruction::CONST_WIDE_16, 42u, 0),
+ DEF_CONST(Instruction::CONST_WIDE_16, 44u, 1),
+ DEF_CONST(Instruction::CONST_WIDE_16, 46u, -1),
+ DEF_CONST(Instruction::CONST_WIDE_32, 48u, 1 << 16),
+ DEF_CONST(Instruction::CONST_WIDE_32, 50u, -1 << 16),
+ DEF_CONST(Instruction::CONST_WIDE_32, 52u, (1 << 16) + 1),
+ DEF_CONST(Instruction::CONST_WIDE_32, 54u, (1 << 16) - 1),
+ DEF_CONST(Instruction::CONST_WIDE_32, 56u, -(1 << 16) + 1),
+ DEF_CONST(Instruction::CONST_WIDE_32, 58u, -(1 << 16) - 1),
+ DEF_CONST(Instruction::CONST_WIDE, 60u, INT64_C(1) << 32),
+ DEF_CONST(Instruction::CONST_WIDE, 62u, INT64_C(-1) << 32),
+ DEF_CONST(Instruction::CONST_WIDE, 64u, (INT64_C(1) << 32) + 1),
+ DEF_CONST(Instruction::CONST_WIDE, 66u, (INT64_C(1) << 32) - 1),
+ DEF_CONST(Instruction::CONST_WIDE, 68u, (INT64_C(-1) << 32) + 1),
+ DEF_CONST(Instruction::CONST_WIDE, 70u, (INT64_C(-1) << 32) - 1),
+ DEF_CONST(Instruction::CONST_WIDE_HIGH16, 72u, 1), // Effectively 1 << 48.
+ DEF_CONST(Instruction::CONST_WIDE_HIGH16, 74u, 0xffff), // Effectively -1 << 48.
+ DEF_CONST(Instruction::CONST_WIDE, 76u, (INT64_C(1) << 48) + 1),
+ DEF_CONST(Instruction::CONST_WIDE, 78u, (INT64_C(1) << 48) - 1),
+ DEF_CONST(Instruction::CONST_WIDE, 80u, (INT64_C(-1) << 48) + 1),
+ DEF_CONST(Instruction::CONST_WIDE, 82u, (INT64_C(-1) << 48) - 1),
};
PrepareMIRs(mirs);
+ for (size_t i = 0; i != arraysize(mirs); ++i) {
+ const int32_t wide_sregs[] = { mirs_[i].ssa_rep->defs[0] };
+ MarkAsWideSRegs(wide_sregs);
+ }
for (size_t i = arraysize(mirs) / 2u; i != arraysize(mirs); ++i) {
cu_.mir_graph->reg_location_[mirs_[i].ssa_rep->defs[0]].fp = true;
}
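
For context on the MarkAsWideSRegs() calls threaded through these tests: a wide (64-bit) value occupies two consecutive s-regs, which is why the new defs skip every other number and why each test lists only the low s-reg of each pair. A minimal standalone sketch of that convention, plus the CONST_WIDE_HIGH16 semantics the comments mention — the names here are illustrative, not ART's API:

#include <cstdint>
#include <initializer_list>
#include <iostream>
#include <set>

std::set<int32_t> wide_lows;  // low s-regs of wide (64-bit) values

void MarkAsWide(std::initializer_list<int32_t> sregs) {
  for (int32_t s : sregs) {
    wide_lows.insert(s);  // the value occupies {s, s + 1}
  }
}

bool IsWideLow(int32_t s) { return wide_lows.count(s) != 0; }

// CONST_WIDE_HIGH16 places its 16-bit literal in bits 63..48 of the value.
int64_t ConstWideHigh16(uint16_t literal) {
  return static_cast<int64_t>(static_cast<uint64_t>(literal) << 48);
}

int main() {
  MarkAsWide({3, 8, 13});  // mirrors the wide_sregs[] arrays above
  std::cout << IsWideLow(3) << IsWideLow(4) << "\n";    // "10": s-reg 4 is a high half
  std::cout << std::hex << ConstWideHigh16(1) << "\n";  // 1000000000000, i.e. 1 << 48
  std::cout << ConstWideHigh16(0xffff) << "\n";         // ffff000000000000, i.e. -1 << 48
}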
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index 3d7a640..9099e8a 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -968,7 +968,7 @@
* edges until we reach an explicit branch or return.
*/
BasicBlock* ending_bb = bb;
- if (ending_bb->last_mir_insn != NULL) {
+ if (ending_bb->last_mir_insn != nullptr) {
uint32_t ending_flags = kAnalysisAttributes[ending_bb->last_mir_insn->dalvikInsn.opcode];
while ((ending_flags & kAnBranch) == 0) {
ending_bb = GetBasicBlock(ending_bb->fall_through);
@@ -998,7 +998,7 @@
bool done = false;
while (!done) {
tbb->visited = true;
- for (MIR* mir = tbb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = tbb->first_mir_insn; mir != nullptr; mir = mir->next) {
if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
// Skip any MIR pseudo-op.
continue;
@@ -1195,7 +1195,7 @@
ClearAllVisitedFlags();
AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
AnalyzeBlock(bb, &stats);
}
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index 2a920a4..a7ba061 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -123,7 +123,7 @@
DF_UA | DF_NULL_CHK_A | DF_REF_A,
// 1F CHK_CAST vAA, type@BBBB
- DF_UA | DF_REF_A | DF_UMS,
+ DF_UA | DF_REF_A | DF_CHK_CAST | DF_UMS,
// 20 INSTANCE_OF vA, vB, type@CCCC
DF_DA | DF_UB | DF_CORE_A | DF_REF_B | DF_UMS,
@@ -159,10 +159,10 @@
DF_NOP,
// 2B PACKED_SWITCH vAA, +BBBBBBBB
- DF_UA,
+ DF_UA | DF_CORE_A,
// 2C SPARSE_SWITCH vAA, +BBBBBBBB
- DF_UA,
+ DF_UA | DF_CORE_A,
// 2D CMPL_FLOAT vAA, vBB, vCC
DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
@@ -180,22 +180,22 @@
DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
// 32 IF_EQ vA, vB, +CCCC
- DF_UA | DF_UB,
+ DF_UA | DF_UB | DF_SAME_TYPE_AB,
// 33 IF_NE vA, vB, +CCCC
- DF_UA | DF_UB,
+ DF_UA | DF_UB | DF_SAME_TYPE_AB,
// 34 IF_LT vA, vB, +CCCC
- DF_UA | DF_UB,
+ DF_UA | DF_UB | DF_SAME_TYPE_AB,
// 35 IF_GE vA, vB, +CCCC
- DF_UA | DF_UB,
+ DF_UA | DF_UB | DF_SAME_TYPE_AB,
// 36 IF_GT vA, vB, +CCCC
- DF_UA | DF_UB,
+ DF_UA | DF_UB | DF_SAME_TYPE_AB,
// 37 IF_LE vA, vB, +CCCC
- DF_UA | DF_UB,
+ DF_UA | DF_UB | DF_SAME_TYPE_AB,
// 38 IF_EQZ vAA, +BBBB
DF_UA,
@@ -834,9 +834,6 @@
// 10B MIR_CHECK
0,
- // 10C MIR_CHECKPART2
- 0,
-
// 10D MIR_SELECT
DF_DA | DF_UB,
@@ -989,7 +986,7 @@
MIR* mir;
ArenaBitVector *use_v, *def_v, *live_in_v;
- if (bb->data_flow_info == NULL) return false;
+ if (bb->data_flow_info == nullptr) return false;
use_v = bb->data_flow_info->use_v =
new (arena_) ArenaBitVector(arena_, GetNumOfCodeAndTempVRs(), false, kBitMapUse);
@@ -998,7 +995,7 @@
live_in_v = bb->data_flow_info->live_in_v =
new (arena_) ArenaBitVector(arena_, GetNumOfCodeAndTempVRs(), false, kBitMapLiveIn);
- for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
uint64_t df_attributes = GetDataFlowAttributes(mir);
MIR::DecodedInstruction* d_insn = &mir->dalvikInsn;
@@ -1080,8 +1077,6 @@
if (mir->ssa_rep->num_uses_allocated < num_uses) {
mir->ssa_rep->uses = arena_->AllocArray<int32_t>(num_uses, kArenaAllocDFInfo);
- // NOTE: will be filled in during type & size inference pass
- mir->ssa_rep->fp_use = arena_->AllocArray<bool>(num_uses, kArenaAllocDFInfo);
}
}
@@ -1090,7 +1085,6 @@
if (mir->ssa_rep->num_defs_allocated < num_defs) {
mir->ssa_rep->defs = arena_->AllocArray<int32_t>(num_defs, kArenaAllocDFInfo);
- mir->ssa_rep->fp_def = arena_->AllocArray<bool>(num_defs, kArenaAllocDFInfo);
}
}
@@ -1191,7 +1185,7 @@
/* Entry function to convert a block into SSA representation */
bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
- if (bb->data_flow_info == NULL) return false;
+ if (bb->data_flow_info == nullptr) return false;
/*
* Pruned SSA form: Insert phi nodes for each dalvik register marked in phi_node_blocks
@@ -1214,7 +1208,7 @@
}
}
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
mir->ssa_rep =
static_cast<struct SSARepresentation *>(arena_->Alloc(sizeof(SSARepresentation),
kArenaAllocDFInfo));
@@ -1287,35 +1281,27 @@
if (df_attributes & DF_HAS_USES) {
num_uses = 0;
if (df_attributes & DF_UA) {
- mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_A;
HandleSSAUse(mir->ssa_rep->uses, d_insn->vA, num_uses++);
if (df_attributes & DF_A_WIDE) {
- mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_A;
HandleSSAUse(mir->ssa_rep->uses, d_insn->vA+1, num_uses++);
}
}
if (df_attributes & DF_UB) {
- mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_B;
HandleSSAUse(mir->ssa_rep->uses, d_insn->vB, num_uses++);
if (df_attributes & DF_B_WIDE) {
- mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_B;
HandleSSAUse(mir->ssa_rep->uses, d_insn->vB+1, num_uses++);
}
}
if (df_attributes & DF_UC) {
- mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_C;
HandleSSAUse(mir->ssa_rep->uses, d_insn->vC, num_uses++);
if (df_attributes & DF_C_WIDE) {
- mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_C;
HandleSSAUse(mir->ssa_rep->uses, d_insn->vC+1, num_uses++);
}
}
}
if (df_attributes & DF_HAS_DEFS) {
- mir->ssa_rep->fp_def[0] = df_attributes & DF_FP_A;
HandleSSADef(mir->ssa_rep->defs, d_insn->vA, 0);
if (df_attributes & DF_A_WIDE) {
- mir->ssa_rep->fp_def[1] = df_attributes & DF_FP_A;
HandleSSADef(mir->ssa_rep->defs, d_insn->vA+1, 1);
}
}
@@ -1413,8 +1399,8 @@
return;
}
uint32_t weight = GetUseCountWeight(bb);
- for (MIR* mir = bb->first_mir_insn; (mir != NULL); mir = mir->next) {
- if (mir->ssa_rep == NULL) {
+ for (MIR* mir = bb->first_mir_insn; (mir != nullptr); mir = mir->next) {
+ if (mir->ssa_rep == nullptr) {
continue;
}
for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
@@ -1459,7 +1445,7 @@
void MIRGraph::VerifyDataflow() {
/* Verify if all blocks are connected as claimed */
AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
VerifyPredInfo(bb);
}
}
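
The table edits above attach new semantic bits — DF_CHK_CAST on CHECK_CAST and DF_SAME_TYPE_AB on the two-register IF_cc opcodes — for the new type-inference pass to consume. A toy sketch of the attribute-table pattern, with made-up bit positions (ART derives the real ones from its enum):

#include <cstdint>
#include <cstdio>

// Made-up bit positions; illustrative only.
constexpr uint64_t DF_UA           = UINT64_C(1) << 0;
constexpr uint64_t DF_UB           = UINT64_C(1) << 1;
constexpr uint64_t DF_CHK_CAST     = UINT64_C(1) << 2;
constexpr uint64_t DF_SAME_TYPE_AB = UINT64_C(1) << 3;

enum Opcode { kCheckCast, kIfEq, kNumOpcodes };

constexpr uint64_t kAttrs[kNumOpcodes] = {
  /* CHECK_CAST vAA, type@BBBB */ DF_UA | DF_CHK_CAST,
  /* IF_EQ vA, vB, +CCCC       */ DF_UA | DF_UB | DF_SAME_TYPE_AB,
};

int main() {
  for (int op = 0; op != kNumOpcodes; ++op) {
    if ((kAttrs[op] & DF_SAME_TYPE_AB) != 0) {
      // A type-inference pass would unify the (core/ref/fp) types of vA and vB.
      std::printf("opcode %d: vA and vB must share a type\n", op);
    }
  }
  return 0;
}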
diff --git a/compiler/dex/mir_field_info.h b/compiler/dex/mir_field_info.h
index ca56958..e4570fd 100644
--- a/compiler/dex/mir_field_info.h
+++ b/compiler/dex/mir_field_info.h
@@ -124,7 +124,7 @@
uint16_t declaring_field_idx_;
// The type index of the class declaring the field, 0 if unresolved.
uint16_t declaring_class_idx_;
- // The dex file that defines the class containing the field and the field, nullptr if unresolved.
+ // The dex file that defines the class containing the field and the field, null if unresolved.
const DexFile* declaring_dex_file_;
};
@@ -179,6 +179,7 @@
friend class GlobalValueNumberingTest;
friend class GvnDeadCodeEliminationTest;
friend class LocalValueNumberingTest;
+ friend class TypeInferenceTest;
};
class MirSFieldLoweringInfo : public MirFieldInfo {
@@ -254,6 +255,7 @@
friend class GlobalValueNumberingTest;
friend class GvnDeadCodeEliminationTest;
friend class LocalValueNumberingTest;
+ friend class TypeInferenceTest;
};
} // namespace art
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 4d34038..1871f07 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -52,8 +52,7 @@
"OpNullCheck",
"OpRangeCheck",
"OpDivZeroCheck",
- "Check1",
- "Check2",
+ "Check",
"Select",
"ConstVector",
"MoveVector",
@@ -81,15 +80,15 @@
};
MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
- : reg_location_(NULL),
+ : reg_location_(nullptr),
block_id_map_(std::less<unsigned int>(), arena->Adapter()),
cu_(cu),
ssa_base_vregs_(arena->Adapter(kArenaAllocSSAToDalvikMap)),
ssa_subscripts_(arena->Adapter(kArenaAllocSSAToDalvikMap)),
- vreg_to_ssa_map_(NULL),
- ssa_last_defs_(NULL),
- is_constant_v_(NULL),
- constant_values_(NULL),
+ vreg_to_ssa_map_(nullptr),
+ ssa_last_defs_(nullptr),
+ is_constant_v_(nullptr),
+ constant_values_(nullptr),
use_counts_(arena->Adapter()),
raw_use_counts_(arena->Adapter()),
num_reachable_blocks_(0),
@@ -106,24 +105,24 @@
topological_order_indexes_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
topological_order_loop_head_stack_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
max_nested_loops_(0u),
- i_dom_list_(NULL),
+ i_dom_list_(nullptr),
temp_scoped_alloc_(),
block_list_(arena->Adapter(kArenaAllocBBList)),
- try_block_addr_(NULL),
- entry_block_(NULL),
- exit_block_(NULL),
- current_code_item_(NULL),
+ try_block_addr_(nullptr),
+ entry_block_(nullptr),
+ exit_block_(nullptr),
+ current_code_item_(nullptr),
m_units_(arena->Adapter()),
method_stack_(arena->Adapter()),
current_method_(kInvalidEntry),
current_offset_(kInvalidEntry),
def_count_(0),
- opcode_count_(NULL),
+ opcode_count_(nullptr),
num_ssa_regs_(0),
extended_basic_blocks_(arena->Adapter()),
method_sreg_(0),
attributes_(METHOD_IS_LEAF), // Start with leaf assumption, change on encountering invoke.
- checkstats_(NULL),
+ checkstats_(nullptr),
arena_(arena),
backward_branches_(0),
forward_branches_(0),
@@ -185,13 +184,13 @@
BasicBlock* orig_block, BasicBlock** immed_pred_block_p) {
DCHECK_GT(code_offset, orig_block->start_offset);
MIR* insn = orig_block->first_mir_insn;
- MIR* prev = NULL; // Will be set to instruction before split.
+ MIR* prev = nullptr; // Will be set to instruction before split.
while (insn) {
if (insn->offset == code_offset) break;
prev = insn;
insn = insn->next;
}
- if (insn == NULL) {
+ if (insn == nullptr) {
LOG(FATAL) << "Break split failed";
}
// Now insn is at the instruction where we want to split, namely
@@ -291,8 +290,12 @@
BasicBlock* MIRGraph::FindBlock(DexOffset code_offset, bool create,
BasicBlock** immed_pred_block_p,
ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
- if (code_offset >= current_code_item_->insns_size_in_code_units_) {
- return nullptr;
+ if (UNLIKELY(code_offset >= current_code_item_->insns_size_in_code_units_)) {
+ // There can be a fall-through out of the method code. We shall record such a block
+ // here (assuming create==true) and check that it's dead at the end of InlineMethod().
+ // Though we're only aware of cases where code_offset is exactly equal to
+ // insns_size_in_code_units_, treat a greater code_offset the same way just in case.
+ code_offset = current_code_item_->insns_size_in_code_units_;
}
int block_id = (*dex_pc_to_block_map)[code_offset];
@@ -483,6 +486,7 @@
BasicBlock* taken_block = FindBlock(target, /* create */ true,
/* immed_pred_block_p */ &cur_block,
dex_pc_to_block_map);
+ DCHECK(taken_block != nullptr);
cur_block->taken = taken_block->id;
taken_block->predecessors.push_back(cur_block->id);
@@ -494,6 +498,7 @@
/* immed_pred_block_p */
&cur_block,
dex_pc_to_block_map);
+ DCHECK(fallthrough_block != nullptr);
cur_block->fall_through = fallthrough_block->id;
fallthrough_block->predecessors.push_back(cur_block->id);
} else if (code_ptr < code_end) {
@@ -508,7 +513,8 @@
ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
UNUSED(flags);
const uint16_t* switch_data =
- reinterpret_cast<const uint16_t*>(GetCurrentInsns() + cur_offset + insn->dalvikInsn.vB);
+ reinterpret_cast<const uint16_t*>(GetCurrentInsns() + cur_offset +
+ static_cast<int32_t>(insn->dalvikInsn.vB));
int size;
const int* keyTable;
const int* target_table;
@@ -530,7 +536,7 @@
size = switch_data[1];
first_key = switch_data[2] | (switch_data[3] << 16);
target_table = reinterpret_cast<const int*>(&switch_data[4]);
- keyTable = NULL; // Make the compiler happy.
+ keyTable = nullptr; // Make the compiler happy.
/*
* Sparse switch data format:
* ushort ident = 0x0200 magic value
@@ -561,6 +567,7 @@
BasicBlock* case_block = FindBlock(cur_offset + target_table[i], /* create */ true,
/* immed_pred_block_p */ &cur_block,
dex_pc_to_block_map);
+ DCHECK(case_block != nullptr);
SuccessorBlockInfo* successor_block_info =
static_cast<SuccessorBlockInfo*>(arena_->Alloc(sizeof(SuccessorBlockInfo),
kArenaAllocSuccessor));
@@ -576,6 +583,7 @@
BasicBlock* fallthrough_block = FindBlock(cur_offset + width, /* create */ true,
/* immed_pred_block_p */ nullptr,
dex_pc_to_block_map);
+ DCHECK(fallthrough_block != nullptr);
cur_block->fall_through = fallthrough_block->id;
fallthrough_block->predecessors.push_back(cur_block->id);
return cur_block;
@@ -695,9 +703,10 @@
current_method_ = m_units_.size();
current_offset_ = 0;
// TODO: will need to snapshot stack image and use that as the mir context identification.
- m_units_.push_back(new DexCompilationUnit(cu_, class_loader, Runtime::Current()->GetClassLinker(),
- dex_file, current_code_item_, class_def_idx, method_idx, access_flags,
- cu_->compiler_driver->GetVerifiedMethod(&dex_file, method_idx)));
+ m_units_.push_back(new (arena_) DexCompilationUnit(
+ cu_, class_loader, Runtime::Current()->GetClassLinker(), dex_file,
+ current_code_item_, class_def_idx, method_idx, access_flags,
+ cu_->compiler_driver->GetVerifiedMethod(&dex_file, method_idx)));
const uint16_t* code_ptr = current_code_item_->insns_;
const uint16_t* code_end =
current_code_item_->insns_ + current_code_item_->insns_size_in_code_units_;
@@ -708,8 +717,8 @@
// FindBlock lookup cache.
ScopedArenaAllocator allocator(&cu_->arena_stack);
ScopedArenaVector<uint16_t> dex_pc_to_block_map(allocator.Adapter());
- dex_pc_to_block_map.resize(dex_pc_to_block_map.size() +
- current_code_item_->insns_size_in_code_units_);
+ dex_pc_to_block_map.resize(current_code_item_->insns_size_in_code_units_ +
+ 1 /* Fall-through on last insn; dead or punt to interpreter. */);
// TODO: replace with explicit resize routine. Using automatic extension side effect for now.
try_block_addr_->SetBit(current_code_item_->insns_size_in_code_units_);
@@ -717,8 +726,8 @@
// If this is the first method, set up default entry and exit blocks.
if (current_method_ == 0) {
- DCHECK(entry_block_ == NULL);
- DCHECK(exit_block_ == NULL);
+ DCHECK(entry_block_ == nullptr);
+ DCHECK(exit_block_ == nullptr);
DCHECK_EQ(GetNumBlocks(), 0U);
// Use id 0 to represent a null block.
BasicBlock* null_block = CreateNewBB(kNullBlock);
@@ -754,7 +763,7 @@
insn->m_unit_index = current_method_;
int width = ParseInsn(code_ptr, &insn->dalvikInsn);
Instruction::Code opcode = insn->dalvikInsn.opcode;
- if (opcode_count_ != NULL) {
+ if (opcode_count_ != nullptr) {
opcode_count_[static_cast<int>(opcode)]++;
}
@@ -875,10 +884,24 @@
if (cu_->verbose) {
DumpMIRGraph();
}
+
+ // Check if there's been a fall-through out of the method code.
+ BasicBlockId out_bb_id = dex_pc_to_block_map[current_code_item_->insns_size_in_code_units_];
+ if (UNLIKELY(out_bb_id != NullBasicBlockId)) {
+ // Eagerly calculate DFS order to determine if the block is dead.
+ DCHECK(!DfsOrdersUpToDate());
+ ComputeDFSOrders();
+ BasicBlock* out_bb = GetBasicBlock(out_bb_id);
+ DCHECK(out_bb != nullptr);
+ if (out_bb->block_type != kDead) {
+ LOG(WARNING) << "Live fall-through out of method in " << PrettyMethod(method_idx, dex_file);
+ SetPuntToInterpreter(true);
+ }
+ }
}
void MIRGraph::ShowOpcodeStats() {
- DCHECK(opcode_count_ != NULL);
+ DCHECK(opcode_count_ != nullptr);
LOG(INFO) << "Opcode Count";
for (int i = 0; i < kNumPackedOpcodes; i++) {
if (opcode_count_[i] != 0) {
@@ -946,7 +969,7 @@
return;
}
file = fopen(fpath.c_str(), "w");
- if (file == NULL) {
+ if (file == nullptr) {
PLOG(ERROR) << "Could not open " << fpath << " for DumpCFG.";
return;
}
@@ -960,7 +983,7 @@
for (idx = 0; idx < num_blocks; idx++) {
int block_idx = all_blocks ? idx : dfs_order_[idx];
BasicBlock* bb = GetBasicBlock(block_idx);
- if (bb == NULL) continue;
+ if (bb == nullptr) continue;
if (bb->block_type == kDead) continue;
if (bb->hidden) continue;
if (bb->block_type == kEntryBlock) {
@@ -1484,7 +1507,7 @@
Instruction::Format dalvik_format = Instruction::k10x; // Default to no-operand format.
// Handle special cases that recover the original dalvik instruction.
- if ((opcode == kMirOpCheck) || (opcode == kMirOpCheckPart2)) {
+ if (opcode == kMirOpCheck) {
str.append(extended_mir_op_names_[opcode - kMirOpFirst]);
str.append(": ");
// Recover the original Dex instruction.
@@ -1500,8 +1523,8 @@
}
nop = true;
}
- int defs = (ssa_rep != NULL) ? ssa_rep->num_defs : 0;
- int uses = (ssa_rep != NULL) ? ssa_rep->num_uses : 0;
+ int defs = (ssa_rep != nullptr) ? ssa_rep->num_defs : 0;
+ int uses = (ssa_rep != nullptr) ? ssa_rep->num_uses : 0;
if (MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
// Note that this does not check the MIR's opcode in all cases. In cases where it
@@ -1529,7 +1552,7 @@
for (int i = 0; i < uses; i++) {
str.append(" ");
str.append(GetSSANameWithConst(ssa_rep->uses[i], show_singles));
- if (!show_singles && (reg_location_ != NULL) && reg_location_[i].wide) {
+ if (!show_singles && (reg_location_ != nullptr) && reg_location_[i].wide) {
// For the listing, skip the high sreg.
i++;
}
@@ -1622,7 +1645,7 @@
// Similar to GetSSAName, but if ssa name represents an immediate show that as well.
std::string MIRGraph::GetSSANameWithConst(int ssa_reg, bool singles_only) {
- if (reg_location_ == NULL) {
+ if (reg_location_ == nullptr) {
// Pre-SSA - just use the standard name.
return GetSSAName(ssa_reg);
}
@@ -1715,7 +1738,7 @@
CallInfo* info = static_cast<CallInfo*>(arena_->Alloc(sizeof(CallInfo),
kArenaAllocMisc));
MIR* move_result_mir = FindMoveResult(bb, mir);
- if (move_result_mir == NULL) {
+ if (move_result_mir == nullptr) {
info->result.location = kLocInvalid;
} else {
info->result = GetRawDest(move_result_mir);
@@ -2293,7 +2316,7 @@
void BasicBlock::ResetOptimizationFlags(uint16_t reset_flags) {
// Reset flags for all MIRs in bb.
- for (MIR* mir = first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) {
mir->optimization_flags &= (~reset_flags);
}
}
@@ -2493,8 +2516,6 @@
return Instruction::kContinue | Instruction::kThrow;
case kMirOpCheck:
return Instruction::kContinue | Instruction::kThrow;
- case kMirOpCheckPart2:
- return Instruction::kContinue;
case kMirOpSelect:
return Instruction::kContinue;
case kMirOpConstVector:
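
The switch-processing hunks above walk the packed/sparse payloads described in the in-code comment. A self-contained sketch of both layouts, assuming host byte order and illustrative names (the real code casts to int* directly; this version assembles ints from uint16_t halves since payloads are only 16-bit aligned):

#include <cstdint>
#include <cstdio>

int32_t ReadInt(const uint16_t* p) {
  return p[0] | (static_cast<int32_t>(p[1]) << 16);
}

// packed-switch: ident(0x0100), size, first_key(int32), targets[size](int32)
// sparse-switch: ident(0x0200), size, keys[size](int32), targets[size](int32)
void DecodeSwitch(const uint16_t* data) {
  uint16_t size = data[1];
  if (data[0] == 0x0100) {  // packed: consecutive keys starting at first_key
    int32_t first_key = ReadInt(&data[2]);
    for (uint16_t i = 0; i != size; ++i) {
      std::printf("case %d -> +%d\n", first_key + i, ReadInt(&data[4 + 2 * i]));
    }
  } else {                  // 0x0200, sparse: explicit key table
    for (uint16_t i = 0; i != size; ++i) {
      std::printf("case %d -> +%d\n",
                  ReadInt(&data[2 + 2 * i]), ReadInt(&data[2 + 2 * size + 2 * i]));
    }
  }
}

int main() {
  // Packed payload: cases 10 and 11 branch to offsets +0x20 and +0x30.
  const uint16_t payload[] = {0x0100, 2, 10, 0, 0x20, 0, 0x30, 0};
  DecodeSwitch(payload);
}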
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 85b1344..7385a8b 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -39,6 +39,7 @@
class GlobalValueNumbering;
class GvnDeadCodeElimination;
class PassManager;
+class TypeInference;
// Forward declaration.
class MIRGraph;
@@ -64,6 +65,7 @@
kNullTransferSrc0, // Object copy src[0] -> dst.
kNullTransferSrcN, // Phi null check state transfer.
kRangeCheckC, // Range check of C.
+ kCheckCastA, // Check cast of A.
kFPA,
kFPB,
kFPC,
@@ -73,6 +75,7 @@
kRefA,
kRefB,
kRefC,
+ kSameTypeAB, // A and B have the same type but it can be core/ref/fp (IF_cc).
kUsesMethodStar, // Implicit use of Method*.
kUsesIField, // Accesses an instance field (IGET/IPUT).
kUsesSField, // Accesses a static field (SGET/SPUT).
@@ -101,6 +104,7 @@
#define DF_NULL_TRANSFER_0 (UINT64_C(1) << kNullTransferSrc0)
#define DF_NULL_TRANSFER_N (UINT64_C(1) << kNullTransferSrcN)
#define DF_RANGE_CHK_C (UINT64_C(1) << kRangeCheckC)
+#define DF_CHK_CAST (UINT64_C(1) << kCheckCastA)
#define DF_FP_A (UINT64_C(1) << kFPA)
#define DF_FP_B (UINT64_C(1) << kFPB)
#define DF_FP_C (UINT64_C(1) << kFPC)
@@ -110,6 +114,7 @@
#define DF_REF_A (UINT64_C(1) << kRefA)
#define DF_REF_B (UINT64_C(1) << kRefB)
#define DF_REF_C (UINT64_C(1) << kRefC)
+#define DF_SAME_TYPE_AB (UINT64_C(1) << kSameTypeAB)
#define DF_UMS (UINT64_C(1) << kUsesMethodStar)
#define DF_IFIELD (UINT64_C(1) << kUsesIField)
#define DF_SFIELD (UINT64_C(1) << kUsesSField)
@@ -217,13 +222,11 @@
*/
struct SSARepresentation {
int32_t* uses;
- bool* fp_use;
int32_t* defs;
- bool* fp_def;
- int16_t num_uses_allocated;
- int16_t num_defs_allocated;
- int16_t num_uses;
- int16_t num_defs;
+ uint16_t num_uses_allocated;
+ uint16_t num_defs_allocated;
+ uint16_t num_uses;
+ uint16_t num_defs;
static uint32_t GetStartUseIndex(Instruction::Code opcode);
};
@@ -334,7 +337,8 @@
// SGET/SPUT lowering info index, points to MIRGraph::sfield_lowering_infos_. Due to limit on
// the number of code points (64K) and size of SGET/SPUT insn (2), this will never exceed 32K.
uint32_t sfield_lowering_info;
- // INVOKE data index, points to MIRGraph::method_lowering_infos_.
+ // INVOKE data index, points to MIRGraph::method_lowering_infos_. Also used for inlined
+ // CONST and MOVE insn (with MIR_CALLEE) to remember the invoke for type inference.
uint32_t method_lowering_info;
} meta;
@@ -515,6 +519,7 @@
bool is_range;
DexOffset offset; // Offset in code units.
MIR* mir;
+ int32_t string_init_offset;
};
@@ -598,7 +603,7 @@
BasicBlock* GetBasicBlock(unsigned int block_id) const {
DCHECK_LT(block_id, block_list_.size()); // NOTE: NullBasicBlockId is 0.
- return (block_id == NullBasicBlockId) ? NULL : block_list_[block_id];
+ return (block_id == NullBasicBlockId) ? nullptr : block_list_[block_id];
}
size_t GetBasicBlockListCount() const {
@@ -647,6 +652,10 @@
*/
void DumpCFG(const char* dir_prefix, bool all_blocks, const char* suffix = nullptr);
+ bool HasCheckCast() const {
+ return (merged_df_flags_ & DF_CHK_CAST) != 0u;
+ }
+
bool HasFieldAccess() const {
return (merged_df_flags_ & (DF_IFIELD | DF_SFIELD)) != 0u;
}
@@ -691,8 +700,16 @@
void DoCacheMethodLoweringInfo();
const MirMethodLoweringInfo& GetMethodLoweringInfo(MIR* mir) const {
- DCHECK_LT(mir->meta.method_lowering_info, method_lowering_infos_.size());
- return method_lowering_infos_[mir->meta.method_lowering_info];
+ return GetMethodLoweringInfo(mir->meta.method_lowering_info);
+ }
+
+ const MirMethodLoweringInfo& GetMethodLoweringInfo(uint32_t lowering_info) const {
+ DCHECK_LT(lowering_info, method_lowering_infos_.size());
+ return method_lowering_infos_[lowering_info];
+ }
+
+ size_t GetMethodLoweringInfoCount() const {
+ return method_lowering_infos_.size();
}
void ComputeInlineIFieldLoweringInfo(uint16_t field_idx, MIR* invoke, MIR* iget_or_iput);
@@ -707,6 +724,8 @@
void BasicBlockOptimization();
void BasicBlockOptimizationEnd();
+ void StringChange();
+
const ArenaVector<BasicBlockId>& GetTopologicalSortOrder() {
DCHECK(!topological_order_.empty());
return topological_order_;
@@ -1073,7 +1092,9 @@
bool EliminateNullChecksGate();
bool EliminateNullChecks(BasicBlock* bb);
void EliminateNullChecksEnd();
+ void InferTypesStart();
bool InferTypes(BasicBlock* bb);
+ void InferTypesEnd();
bool EliminateClassInitChecksGate();
bool EliminateClassInitChecks(BasicBlock* bb);
void EliminateClassInitChecksEnd();
@@ -1083,6 +1104,7 @@
bool EliminateDeadCodeGate();
bool EliminateDeadCode(BasicBlock* bb);
void EliminateDeadCodeEnd();
+ void GlobalValueNumberingCleanup();
bool EliminateSuspendChecksGate();
bool EliminateSuspendChecks(BasicBlock* bb);
@@ -1100,34 +1122,6 @@
return temp_.gvn.sfield_ids[mir->meta.sfield_lowering_info];
}
- /*
- * Type inference handling helpers. Because Dalvik's bytecode is not fully typed,
- * we have to do some work to figure out the sreg type. For some operations it is
- * clear based on the opcode (i.e. ADD_FLOAT v0, v1, v2), but for others (MOVE), we
- * may never know the "real" type.
- *
- * We perform the type inference operation by using an iterative walk over
- * the graph, propagating types "defined" by typed opcodes to uses and defs in
- * non-typed opcodes (such as MOVE). The Setxx(index) helpers are used to set defined
- * types on typed opcodes (such as ADD_INT). The Setxx(index, is_xx) form is used to
- * propagate types through non-typed opcodes such as PHI and MOVE. The is_xx flag
- * tells whether our guess of the type is based on a previously typed definition.
- * If so, the defined type takes precedence. Note that it's possible to have the same sreg
- * show multiple defined types because dx treats constants as untyped bit patterns.
- * The return value of the Setxx() helpers says whether or not the Setxx() action changed
- * the current guess, and is used to know when to terminate the iterative walk.
- */
- bool SetFp(int index, bool is_fp);
- bool SetFp(int index);
- bool SetCore(int index, bool is_core);
- bool SetCore(int index);
- bool SetRef(int index, bool is_ref);
- bool SetRef(int index);
- bool SetWide(int index, bool is_wide);
- bool SetWide(int index);
- bool SetHigh(int index, bool is_high);
- bool SetHigh(int index);
-
bool PuntToInterpreter() {
return punt_to_interpreter_;
}
@@ -1252,7 +1246,6 @@
static const char* extended_mir_op_names_[kMirOpLast - kMirOpFirst];
void HandleSSADef(int* defs, int dalvik_reg, int reg_index);
- bool InferTypeAndSize(BasicBlock* bb, MIR* mir, bool changed);
protected:
int FindCommonParent(int block1, int block2);
@@ -1399,6 +1392,7 @@
ArenaBitVector* work_live_vregs;
ArenaBitVector** def_block_matrix; // num_vregs x num_blocks_.
ArenaBitVector** phi_node_blocks; // num_vregs x num_blocks_.
+ TypeInference* ti;
} ssa;
// Global value numbering.
struct {
@@ -1458,7 +1452,9 @@
friend class GvnDeadCodeEliminationTest;
friend class LocalValueNumberingTest;
friend class TopologicalSortOrderTest;
+ friend class TypeInferenceTest;
friend class QuickCFITest;
+ friend class QuickAssembleX86TestBase;
};
} // namespace art
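
The new HasCheckCast() accessor follows the merged-flags pattern already used by HasFieldAccess(): per-instruction attribute words are OR-ed together once, then passes cheaply skip methods that never set a relevant bit. A small sketch of that gate, with illustrative bit values:

#include <cstdint>
#include <iostream>
#include <vector>

constexpr uint64_t DF_CHK_CAST = UINT64_C(1) << 0;  // illustrative bit values
constexpr uint64_t DF_IFIELD   = UINT64_C(1) << 1;

struct MethodFlags {
  std::vector<uint64_t> insn_attrs;  // one attribute word per instruction
  uint64_t merged = 0;

  void Merge() {
    for (uint64_t a : insn_attrs) {
      merged |= a;  // computed once, queried many times afterwards
    }
  }
  bool HasCheckCast() const { return (merged & DF_CHK_CAST) != 0; }
};

int main() {
  MethodFlags m;
  m.insn_attrs = {DF_IFIELD, DF_IFIELD | DF_CHK_CAST};
  m.Merge();
  if (m.HasCheckCast()) {
    std::cout << "method contains a check-cast; run the dependent pass\n";
  }
}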
diff --git a/compiler/dex/mir_method_info.cc b/compiler/dex/mir_method_info.cc
index 0c84b82..94be1fd 100644
--- a/compiler/dex/mir_method_info.cc
+++ b/compiler/dex/mir_method_info.cc
@@ -16,6 +16,7 @@
# include "mir_method_info.h"
+#include "dex/compiler_ir.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex/verified_method.h"
@@ -83,6 +84,13 @@
MethodReference* devirt_target = (it->target_dex_file_ != nullptr) ? &devirt_ref : nullptr;
InvokeType invoke_type = it->GetInvokeType();
mirror::ArtMethod* resolved_method = nullptr;
+
+ bool string_init = false;
+ if (default_inliner->IsStringInitMethodIndex(it->MethodIndex())) {
+ string_init = true;
+ invoke_type = kDirect;
+ }
+
if (!it->IsQuickened()) {
it->target_dex_file_ = dex_file;
it->target_method_idx_ = it->MethodIndex();
@@ -161,7 +169,8 @@
~(kFlagFastPath | kFlagIsIntrinsic | kFlagIsSpecial | kFlagClassIsInitialized |
(kInvokeTypeMask << kBitSharpTypeBegin));
it->flags_ = other_flags |
- (fast_path_flags != 0 ? kFlagFastPath : 0u) |
+ // String init path is a special always-fast path.
+ (fast_path_flags != 0 || string_init ? kFlagFastPath : 0u) |
((is_intrinsic_or_special & kInlineIntrinsic) != 0 ? kFlagIsIntrinsic : 0u) |
((is_intrinsic_or_special & kInlineSpecial) != 0 ? kFlagIsSpecial : 0u) |
(static_cast<uint16_t>(invoke_type) << kBitSharpTypeBegin) |
@@ -170,6 +179,9 @@
it->target_dex_file_ = target_method.dex_file;
it->target_method_idx_ = target_method.dex_method_index;
it->stats_flags_ = fast_path_flags;
+ if (string_init) {
+ it->direct_code_ = 0;
+ }
}
}
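
The flag composition above boils down to: a recognized String.<init> invoke gets the fast-path bit even when the resolver reported none. A minimal sketch of just that predicate (kFlagFastPath's bit position is illustrative):

#include <cstdint>
#include <iostream>

constexpr uint16_t kFlagFastPath = 1u << 0;  // illustrative bit position

uint16_t ComposeStatsFlags(int fast_path_flags, bool string_init) {
  // String-init invokes are a special always-fast path.
  return (fast_path_flags != 0 || string_init) ? kFlagFastPath : 0u;
}

int main() {
  std::cout << ComposeStatsFlags(0, true) << "\n";   // 1
  std::cout << ComposeStatsFlags(0, false) << "\n";  // 0
}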
diff --git a/compiler/dex/mir_method_info.h b/compiler/dex/mir_method_info.h
index 7230c46..946c74b 100644
--- a/compiler/dex/mir_method_info.h
+++ b/compiler/dex/mir_method_info.h
@@ -88,7 +88,7 @@
// The type index of the class declaring the method, 0 if unresolved.
uint16_t declaring_class_idx_;
// The dex file that defines the class containing the method and the method,
- // nullptr if unresolved.
+ // null if unresolved.
const DexFile* declaring_dex_file_;
};
@@ -223,7 +223,7 @@
uintptr_t direct_code_;
uintptr_t direct_method_;
// Before Resolve(), target_dex_file_ and target_method_idx_ hold the verification-based
- // devirtualized invoke target if available, nullptr and 0u otherwise.
+ // devirtualized invoke target if available, null and 0u otherwise.
// After Resolve() they hold the actual target method that will be called; it will be either
// a devirtualized target method or the compilation's unit's dex file and MethodIndex().
const DexFile* target_dex_file_;
@@ -232,6 +232,7 @@
int stats_flags_;
friend class MirOptimizationTest;
+ friend class TypeInferenceTest;
};
} // namespace art
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 9d7b4b4..217dbee 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -18,6 +18,7 @@
#include "base/logging.h"
#include "base/scoped_arena_containers.h"
#include "dataflow_iterator-inl.h"
+#include "dex/verified_method.h"
#include "dex_flags.h"
#include "driver/compiler_driver.h"
#include "driver/dex_compilation_unit.h"
@@ -25,9 +26,11 @@
#include "gvn_dead_code_elimination.h"
#include "local_value_numbering.h"
#include "mir_field_info.h"
+#include "mirror/string.h"
#include "quick/dex_file_method_inliner.h"
#include "quick/dex_file_to_method_inliner_map.h"
#include "stack.h"
+#include "type_inference.h"
namespace art {
@@ -54,7 +57,7 @@
void MIRGraph::DoConstantPropagation(BasicBlock* bb) {
MIR* mir;
- for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
// Skip pass if BB has MIR without SSA representation.
if (mir->ssa_rep == nullptr) {
return;
@@ -115,11 +118,11 @@
/* Advance to next strictly dominated MIR node in an extended basic block */
MIR* MIRGraph::AdvanceMIR(BasicBlock** p_bb, MIR* mir) {
BasicBlock* bb = *p_bb;
- if (mir != NULL) {
+ if (mir != nullptr) {
mir = mir->next;
- while (mir == NULL) {
+ while (mir == nullptr) {
bb = GetBasicBlock(bb->fall_through);
- if ((bb == NULL) || Predecessors(bb) != 1) {
+ if ((bb == nullptr) || Predecessors(bb) != 1) {
// mir is null and we cannot proceed further.
break;
} else {
@@ -133,7 +136,7 @@
/*
* To be used at an invoke mir. If the logically next mir node represents
- * a move-result, return it. Else, return NULL. If a move-result exists,
+ * a move-result, return it. Else, return nullptr. If a move-result exists,
* it is required to immediately follow the invoke with no intervening
* opcodes or incoming arcs. However, if the result of the invoke is not
* used, a move-result may not be present.
@@ -141,7 +144,7 @@
MIR* MIRGraph::FindMoveResult(BasicBlock* bb, MIR* mir) {
BasicBlock* tbb = bb;
mir = AdvanceMIR(&tbb, mir);
- while (mir != NULL) {
+ while (mir != nullptr) {
if ((mir->dalvikInsn.opcode == Instruction::MOVE_RESULT) ||
(mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) ||
(mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_WIDE)) {
@@ -151,7 +154,7 @@
if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
mir = AdvanceMIR(&tbb, mir);
} else {
- mir = NULL;
+ mir = nullptr;
}
}
return mir;
@@ -159,29 +162,29 @@
BasicBlock* MIRGraph::NextDominatedBlock(BasicBlock* bb) {
if (bb->block_type == kDead) {
- return NULL;
+ return nullptr;
}
DCHECK((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
|| (bb->block_type == kExitBlock));
BasicBlock* bb_taken = GetBasicBlock(bb->taken);
BasicBlock* bb_fall_through = GetBasicBlock(bb->fall_through);
- if (((bb_fall_through == NULL) && (bb_taken != NULL)) &&
+ if (((bb_fall_through == nullptr) && (bb_taken != nullptr)) &&
((bb_taken->block_type == kDalvikByteCode) || (bb_taken->block_type == kExitBlock))) {
// Follow simple unconditional branches.
bb = bb_taken;
} else {
// Follow simple fallthrough
- bb = (bb_taken != NULL) ? NULL : bb_fall_through;
+ bb = (bb_taken != nullptr) ? nullptr : bb_fall_through;
}
- if (bb == NULL || (Predecessors(bb) != 1)) {
- return NULL;
+ if (bb == nullptr || (Predecessors(bb) != 1)) {
+ return nullptr;
}
DCHECK((bb->block_type == kDalvikByteCode) || (bb->block_type == kExitBlock));
return bb;
}
static MIR* FindPhi(BasicBlock* bb, int ssa_name) {
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
if (static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi) {
for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
if (mir->ssa_rep->uses[i] == ssa_name) {
@@ -190,11 +193,11 @@
}
}
}
- return NULL;
+ return nullptr;
}
static SelectInstructionKind SelectKind(MIR* mir) {
- // Work with the case when mir is nullptr.
+ // Work with the case when mir is null.
if (mir == nullptr) {
return kSelectNone;
}
@@ -255,7 +258,8 @@
}
// Calculate remaining ME temps available.
- size_t remaining_me_temps = max_available_non_special_compiler_temps_ - reserved_temps_for_backend_;
+ size_t remaining_me_temps = max_available_non_special_compiler_temps_ -
+ reserved_temps_for_backend_;
if (num_non_special_compiler_temps_ >= remaining_me_temps) {
return 0;
@@ -346,7 +350,8 @@
size_t available_temps = GetNumAvailableVRTemps();
if (available_temps <= 0 || (available_temps <= 1 && wide)) {
if (verbose) {
- LOG(INFO) << "CompilerTemps: Not enough temp(s) of type " << ct_type_str << " are available.";
+ LOG(INFO) << "CompilerTemps: Not enough temp(s) of type " << ct_type_str
+ << " are available.";
}
return nullptr;
}
@@ -364,8 +369,8 @@
compiler_temp->s_reg_low = AddNewSReg(compiler_temp->v_reg);
if (verbose) {
- LOG(INFO) << "CompilerTemps: New temp of type " << ct_type_str << " with v" << compiler_temp->v_reg
- << " and s" << compiler_temp->s_reg_low << " has been created.";
+ LOG(INFO) << "CompilerTemps: New temp of type " << ct_type_str << " with v"
+ << compiler_temp->v_reg << " and s" << compiler_temp->s_reg_low << " has been created.";
}
if (wide) {
@@ -477,8 +482,8 @@
local_valnum.reset(new (allocator.get()) LocalValueNumbering(global_valnum.get(), bb->id,
allocator.get()));
}
- while (bb != NULL) {
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ while (bb != nullptr) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
// TUNING: use the returned value number for CSE.
if (use_lvn) {
local_valnum->GetValueNumber(mir);
@@ -537,7 +542,7 @@
// Bitcode doesn't allow this optimization.
break;
}
- if (mir->next != NULL) {
+ if (mir->next != nullptr) {
MIR* mir_next = mir->next;
// Make sure result of cmp is used by next insn and nowhere else
if (IsInstructionIfCcZ(mir_next->dalvikInsn.opcode) &&
@@ -574,7 +579,6 @@
// Copy the SSA information that is relevant.
mir_next->ssa_rep->num_uses = mir->ssa_rep->num_uses;
mir_next->ssa_rep->uses = mir->ssa_rep->uses;
- mir_next->ssa_rep->fp_use = mir->ssa_rep->fp_use;
mir_next->ssa_rep->num_defs = 0;
mir->ssa_rep->num_uses = 0;
mir->ssa_rep->num_defs = 0;
@@ -594,12 +598,12 @@
cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) &&
IsInstructionIfCcZ(mir->dalvikInsn.opcode)) {
BasicBlock* ft = GetBasicBlock(bb->fall_through);
- DCHECK(ft != NULL);
+ DCHECK(ft != nullptr);
BasicBlock* ft_ft = GetBasicBlock(ft->fall_through);
BasicBlock* ft_tk = GetBasicBlock(ft->taken);
BasicBlock* tk = GetBasicBlock(bb->taken);
- DCHECK(tk != NULL);
+ DCHECK(tk != nullptr);
BasicBlock* tk_ft = GetBasicBlock(tk->fall_through);
BasicBlock* tk_tk = GetBasicBlock(tk->taken);
@@ -608,7 +612,7 @@
* transfers to the rejoin block and the fall_though edge goes to a block that
* unconditionally falls through to the rejoin block.
*/
- if ((tk_ft == NULL) && (ft_tk == NULL) && (tk_tk == ft_ft) &&
+ if ((tk_ft == nullptr) && (ft_tk == nullptr) && (tk_tk == ft_ft) &&
(Predecessors(tk) == 1) && (Predecessors(ft) == 1)) {
/*
* Okay - we have the basic diamond shape.
@@ -628,7 +632,7 @@
MIR* if_false = ft->first_mir_insn;
// It's possible that the target of the select isn't used - skip those (rare) cases.
MIR* phi = FindPhi(tk_tk, if_true->ssa_rep->defs[0]);
- if ((phi != NULL) && (if_true->dalvikInsn.vA == if_false->dalvikInsn.vA)) {
+ if ((phi != nullptr) && (if_true->dalvikInsn.vA == if_false->dalvikInsn.vA)) {
/*
* We'll convert the IF_EQZ/IF_NEZ to a SELECT. We need to find the
* Phi node in the merge block and delete it (while using the SSA name
@@ -668,16 +672,7 @@
mir->ssa_rep->uses = src_ssa;
mir->ssa_rep->num_uses = 3;
}
- mir->ssa_rep->num_defs = 1;
- mir->ssa_rep->defs = arena_->AllocArray<int32_t>(1, kArenaAllocDFInfo);
- mir->ssa_rep->fp_def = arena_->AllocArray<bool>(1, kArenaAllocDFInfo);
- mir->ssa_rep->fp_def[0] = if_true->ssa_rep->fp_def[0];
- // Match type of uses to def.
- mir->ssa_rep->fp_use = arena_->AllocArray<bool>(mir->ssa_rep->num_uses,
- kArenaAllocDFInfo);
- for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
- mir->ssa_rep->fp_use[i] = mir->ssa_rep->fp_def[0];
- }
+ AllocateSSADefData(mir, 1);
/*
* There is usually a Phi node in the join block for our two cases. If the
* Phi node only contains our two cases as input, we will use the result
@@ -721,7 +716,8 @@
}
}
}
- bb = ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) ? NextDominatedBlock(bb) : NULL;
+ bb = ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) ? NextDominatedBlock(bb) :
+ nullptr;
}
if (use_lvn && UNLIKELY(!global_valnum->Good())) {
LOG(WARNING) << "LVN overflow in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
@@ -732,9 +728,9 @@
/* Collect stats on number of checks removed */
void MIRGraph::CountChecks(class BasicBlock* bb) {
- if (bb->data_flow_info != NULL) {
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
- if (mir->ssa_rep == NULL) {
+ if (bb->data_flow_info != nullptr) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+ if (mir->ssa_rep == nullptr) {
continue;
}
uint64_t df_attributes = GetDataFlowAttributes(mir);
@@ -935,7 +931,7 @@
// reset MIR_MARK
AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
mir->optimization_flags &= ~MIR_MARK;
}
}
@@ -1010,7 +1006,7 @@
// no intervening uses.
// Walk through the instruction in the block, updating as necessary
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
uint64_t df_attributes = GetDataFlowAttributes(mir);
if ((df_attributes & DF_NULL_TRANSFER_N) != 0u) {
@@ -1121,7 +1117,7 @@
// converge MIR_MARK with MIR_IGNORE_NULL_CHECK
AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
constexpr int kMarkToIgnoreNullCheckShift = kMIRMark - kMIRIgnoreNullCheck;
static_assert(kMarkToIgnoreNullCheckShift > 0, "Not a valid right-shift");
uint16_t mirMarkAdjustedToIgnoreNullCheck =
@@ -1131,23 +1127,26 @@
}
}
+void MIRGraph::InferTypesStart() {
+ DCHECK(temp_scoped_alloc_ != nullptr);
+ temp_.ssa.ti = new (temp_scoped_alloc_.get()) TypeInference(this, temp_scoped_alloc_.get());
+}
+
/*
* Perform type and size inference for a basic block.
*/
bool MIRGraph::InferTypes(BasicBlock* bb) {
if (bb->data_flow_info == nullptr) return false;
- bool infer_changed = false;
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
- if (mir->ssa_rep == NULL) {
- continue;
- }
+ DCHECK(temp_.ssa.ti != nullptr);
+ return temp_.ssa.ti->Apply(bb);
+}
- // Propagate type info.
- infer_changed = InferTypeAndSize(bb, mir, infer_changed);
- }
-
- return infer_changed;
+void MIRGraph::InferTypesEnd() {
+ DCHECK(temp_.ssa.ti != nullptr);
+ temp_.ssa.ti->Finish();
+ delete temp_.ssa.ti;
+ temp_.ssa.ti = nullptr;
}
bool MIRGraph::EliminateClassInitChecksGate() {
@@ -1358,8 +1357,13 @@
temp_scoped_alloc_.reset();
}
+static void DisableGVNDependentOptimizations(CompilationUnit* cu) {
+ cu->disable_opt |= (1u << kGvnDeadCodeElimination);
+}
+
bool MIRGraph::ApplyGlobalValueNumberingGate() {
if (GlobalValueNumbering::Skip(cu_)) {
+ DisableGVNDependentOptimizations(cu_);
return false;
}
@@ -1410,16 +1414,12 @@
cu_->disable_opt |= (1u << kLocalValueNumbering);
} else {
LOG(WARNING) << "GVN failed for " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
- cu_->disable_opt |= (1u << kGvnDeadCodeElimination);
+ DisableGVNDependentOptimizations(cu_);
}
-
- if ((cu_->disable_opt & (1 << kGvnDeadCodeElimination)) != 0) {
- EliminateDeadCodeEnd();
- } // else preserve GVN data for CSE.
}
bool MIRGraph::EliminateDeadCodeGate() {
- if ((cu_->disable_opt & (1 << kGvnDeadCodeElimination)) != 0) {
+ if ((cu_->disable_opt & (1 << kGvnDeadCodeElimination)) != 0 || temp_.gvn.gvn == nullptr) {
return false;
}
DCHECK(temp_scoped_alloc_ != nullptr);
@@ -1440,16 +1440,26 @@
}
void MIRGraph::EliminateDeadCodeEnd() {
- DCHECK_EQ(temp_.gvn.dce != nullptr, (cu_->disable_opt & (1 << kGvnDeadCodeElimination)) == 0);
- if (temp_.gvn.dce != nullptr) {
- delete temp_.gvn.dce;
- temp_.gvn.dce = nullptr;
+ if (kIsDebugBuild) {
+ // DCE can make some previously dead vregs alive again. Make sure the obsolete
+ // live-in information is not used anymore.
+ AllNodesIterator iter(this);
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+ if (bb->data_flow_info != nullptr) {
+ bb->data_flow_info->live_in_v = nullptr;
+ }
+ }
}
+}
+
+void MIRGraph::GlobalValueNumberingCleanup() {
+ // If the GVN didn't run, these pointers should be null and everything is effectively a no-op.
+ delete temp_.gvn.dce;
+ temp_.gvn.dce = nullptr;
delete temp_.gvn.gvn;
temp_.gvn.gvn = nullptr;
temp_.gvn.ifield_ids = nullptr;
temp_.gvn.sfield_ids = nullptr;
- DCHECK(temp_scoped_alloc_ != nullptr);
temp_scoped_alloc_.reset();
}
@@ -1509,7 +1519,7 @@
if (bb->block_type != kDalvikByteCode) {
return;
}
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
continue;
}
@@ -1540,7 +1550,8 @@
->GenInline(this, bb, mir, target.dex_method_index)) {
if (cu_->verbose || cu_->print_pass) {
LOG(INFO) << "SpecialMethodInliner: Inlined " << method_info.GetInvokeType() << " ("
- << sharp_type << ") call to \"" << PrettyMethod(target.dex_method_index, *target.dex_file)
+ << sharp_type << ") call to \"" << PrettyMethod(target.dex_method_index,
+ *target.dex_file)
<< "\" from \"" << PrettyMethod(cu_->method_idx, *cu_->dex_file)
<< "\" @0x" << std::hex << mir->offset;
}
@@ -1564,7 +1575,7 @@
static_cast<Checkstats*>(arena_->Alloc(sizeof(Checkstats), kArenaAllocDFInfo));
checkstats_ = stats;
AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
CountChecks(bb);
}
if (stats->null_checks > 0) {
@@ -1597,7 +1608,7 @@
bool terminated_by_return = false;
bool do_local_value_numbering = false;
// Visit blocks strictly dominated by this head.
- while (bb != NULL) {
+ while (bb != nullptr) {
bb->visited = true;
terminated_by_return |= bb->terminated_by_return;
do_local_value_numbering |= bb->use_lvn;
@@ -1606,7 +1617,7 @@
if (terminated_by_return || do_local_value_numbering) {
// Do lvn for all blocks in this extended set.
bb = start_bb;
- while (bb != NULL) {
+ while (bb != nullptr) {
bb->use_lvn = do_local_value_numbering;
bb->dominates_return = terminated_by_return;
bb = NextDominatedBlock(bb);
@@ -1629,7 +1640,7 @@
if ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) {
ClearAllVisitedFlags();
PreOrderDfsIterator iter2(this);
- for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
+ for (BasicBlock* bb = iter2.Next(); bb != nullptr; bb = iter2.Next()) {
BuildExtendedBBList(bb);
}
// Perform extended basic block optimizations.
@@ -1638,7 +1649,7 @@
}
} else {
PreOrderDfsIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
BasicBlockOpt(bb);
}
}
@@ -1651,6 +1662,77 @@
temp_scoped_alloc_.reset();
}
+void MIRGraph::StringChange() {
+ AllNodesIterator iter(this);
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+ // Look for new instance opcodes, skip otherwise
+ Instruction::Code opcode = mir->dalvikInsn.opcode;
+ if (opcode == Instruction::NEW_INSTANCE) {
+ uint32_t type_idx = mir->dalvikInsn.vB;
+ if (cu_->compiler_driver->IsStringTypeIndex(type_idx, cu_->dex_file)) {
+ // Change the NEW_INSTANCE and the throwing half of the insn (if it exists) into CONST_4 of 0.
+ mir->dalvikInsn.opcode = Instruction::CONST_4;
+ mir->dalvikInsn.vB = 0;
+ MIR* check_mir = GetBasicBlock(bb->predecessors[0])->last_mir_insn;
+ if (check_mir != nullptr &&
+ static_cast<int>(check_mir->dalvikInsn.opcode) == kMirOpCheck) {
+ check_mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
+ check_mir->dalvikInsn.vB = 0;
+ }
+ }
+ } else if ((opcode == Instruction::INVOKE_DIRECT) ||
+ (opcode == Instruction::INVOKE_DIRECT_RANGE)) {
+ uint32_t method_idx = mir->dalvikInsn.vB;
+ DexFileMethodInliner* inliner =
+ cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file);
+ if (inliner->IsStringInitMethodIndex(method_idx)) {
+ bool is_range = (opcode == Instruction::INVOKE_DIRECT_RANGE);
+ uint32_t orig_this_reg = is_range ? mir->dalvikInsn.vC : mir->dalvikInsn.arg[0];
+ // Remove this pointer from string init and change to static call.
+ mir->dalvikInsn.vA--;
+ if (!is_range) {
+ mir->dalvikInsn.opcode = Instruction::INVOKE_STATIC;
+ for (uint32_t i = 0; i < mir->dalvikInsn.vA; i++) {
+ mir->dalvikInsn.arg[i] = mir->dalvikInsn.arg[i + 1];
+ }
+ } else {
+ mir->dalvikInsn.opcode = Instruction::INVOKE_STATIC_RANGE;
+ mir->dalvikInsn.vC++;
+ }
+ // Insert a move-result instruction to the original this pointer reg.
+ MIR* move_result_mir = static_cast<MIR*>(arena_->Alloc(sizeof(MIR), kArenaAllocMIR));
+ move_result_mir->dalvikInsn.opcode = Instruction::MOVE_RESULT_OBJECT;
+ move_result_mir->dalvikInsn.vA = orig_this_reg;
+ move_result_mir->offset = mir->offset;
+ move_result_mir->m_unit_index = mir->m_unit_index;
+ bb->InsertMIRAfter(mir, move_result_mir);
+ // Add additional moves if this pointer was copied to other registers.
+ const VerifiedMethod* verified_method =
+ cu_->compiler_driver->GetVerifiedMethod(cu_->dex_file, cu_->method_idx);
+ DCHECK(verified_method != nullptr);
+ const SafeMap<uint32_t, std::set<uint32_t>>& string_init_map =
+ verified_method->GetStringInitPcRegMap();
+ auto map_it = string_init_map.find(mir->offset);
+ if (map_it != string_init_map.end()) {
+ const std::set<uint32_t>& reg_set = map_it->second;
+ for (auto set_it = reg_set.begin(); set_it != reg_set.end(); ++set_it) {
+ MIR* move_mir = static_cast<MIR*>(arena_->Alloc(sizeof(MIR), kArenaAllocMIR));
+ move_mir->dalvikInsn.opcode = Instruction::MOVE_OBJECT;
+ move_mir->dalvikInsn.vA = *set_it;
+ move_mir->dalvikInsn.vB = orig_this_reg;
+ move_mir->offset = mir->offset;
+ move_mir->m_unit_index = mir->m_unit_index;
+ bb->InsertMIRAfter(move_result_mir, move_mir);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+
bool MIRGraph::EliminateSuspendChecksGate() {
if ((cu_->disable_opt & (1 << kSuspendCheckElimination)) != 0 || // Disabled.
GetMaxNestedLoops() == 0u || // Nothing to do.
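
The core register shuffle in StringChange() deserves a worked example: the "this" register is dropped from the argument list and becomes the destination of an inserted move-result-object. A standalone sketch of the non-range case, with illustrative structures rather than ART's MIR:

// before:  new-instance v0, Ljava/lang/String;
//          invoke-direct {v0, v1}, Ljava/lang/String;-><init>(...)
// after:   const/4 v0, #0
//          invoke-static {v1}, <string factory>(...)
//          move-result-object v0

#include <cstdint>
#include <cstdio>

struct Insn {
  uint32_t vA;      // argument count (non-range invoke)
  uint32_t arg[5];  // argument registers
};

// Drop the "this" register (arg[0]) and shift the rest left, as the pass does.
uint32_t DropThisArg(Insn* insn) {
  uint32_t orig_this_reg = insn->arg[0];
  insn->vA--;
  for (uint32_t i = 0; i < insn->vA; i++) {
    insn->arg[i] = insn->arg[i + 1];
  }
  return orig_this_reg;  // becomes the destination of move-result-object
}

int main() {
  Insn invoke = {2, {0, 1, 0, 0, 0}};  // invoke-direct {v0, v1}
  uint32_t dest = DropThisArg(&invoke);
  std::printf("invoke-static {v%u}; move-result-object v%u\n",
              static_cast<unsigned>(invoke.arg[0]), static_cast<unsigned>(dest));
}

For the range variant the same effect is achieved by incrementing vC instead of shifting, since range arguments are consecutive registers.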
diff --git a/compiler/dex/pass_driver.h b/compiler/dex/pass_driver.h
index 671bcec..8762b53 100644
--- a/compiler/dex/pass_driver.h
+++ b/compiler/dex/pass_driver.h
@@ -68,7 +68,7 @@
* @return whether the pass was applied.
*/
virtual bool RunPass(const char* pass_name) {
- // Paranoid: c_unit cannot be nullptr and we need a pass name.
+ // Paranoid: c_unit cannot be null and we need a pass name.
DCHECK(pass_name != nullptr);
DCHECK_NE(pass_name[0], 0);
diff --git a/compiler/dex/pass_driver_me.h b/compiler/dex/pass_driver_me.h
index 94eef22..cbe4a02 100644
--- a/compiler/dex/pass_driver_me.h
+++ b/compiler/dex/pass_driver_me.h
@@ -88,7 +88,7 @@
}
bool RunPass(const Pass* pass, bool time_split) OVERRIDE {
- // Paranoid: c_unit and pass cannot be nullptr, and the pass should have a name
+ // Paranoid: c_unit and pass cannot be null, and the pass should have a name.
DCHECK(pass != nullptr);
DCHECK(pass->GetName() != nullptr && pass->GetName()[0] != 0);
CompilationUnit* c_unit = pass_me_data_holder_.c_unit;
@@ -211,8 +211,9 @@
* @param settings_to_fill Fills the options to contain the mapping of name of option to the new
* configuration.
*/
- static void FillOverriddenPassSettings(const PassManagerOptions* options, const char* pass_name,
- SafeMap<const std::string, const OptionContent>& settings_to_fill) {
+ static void FillOverriddenPassSettings(
+ const PassManagerOptions* options, const char* pass_name,
+ SafeMap<const std::string, const OptionContent>& settings_to_fill) {
const std::string& settings = options->GetOverriddenPassOptions();
const size_t settings_len = settings.size();
diff --git a/compiler/dex/pass_driver_me_opts.cc b/compiler/dex/pass_driver_me_opts.cc
index 2e871da..375003b 100644
--- a/compiler/dex/pass_driver_me_opts.cc
+++ b/compiler/dex/pass_driver_me_opts.cc
@@ -35,6 +35,7 @@
* Disadvantage is that the passes can't change their internal states depending on CompilationUnit:
* - This is not yet an issue: no current pass would require it.
*/
+ pass_manager->AddPass(new StringChange);
pass_manager->AddPass(new CacheFieldLoweringInfo);
pass_manager->AddPass(new CacheMethodLoweringInfo);
pass_manager->AddPass(new CalculatePredecessors);
@@ -46,6 +47,7 @@
pass_manager->AddPass(new CodeLayout);
pass_manager->AddPass(new GlobalValueNumberingPass);
pass_manager->AddPass(new DeadCodeEliminationPass);
+ pass_manager->AddPass(new GlobalValueNumberingCleanupPass);
pass_manager->AddPass(new ConstantPropagation);
pass_manager->AddPass(new MethodUseCount);
pass_manager->AddPass(new BBOptimizations);
diff --git a/compiler/dex/pass_driver_me_post_opt.cc b/compiler/dex/pass_driver_me_post_opt.cc
index a8b8a54..b35bc3d 100644
--- a/compiler/dex/pass_driver_me_post_opt.cc
+++ b/compiler/dex/pass_driver_me_post_opt.cc
@@ -41,7 +41,7 @@
pass_manager->AddPass(new SSAConversion);
pass_manager->AddPass(new PhiNodeOperands);
pass_manager->AddPass(new PerformInitRegLocations);
- pass_manager->AddPass(new TypeInference);
+ pass_manager->AddPass(new TypeInferencePass);
pass_manager->AddPass(new FinishSSATransformation);
}
diff --git a/compiler/dex/post_opt_passes.h b/compiler/dex/post_opt_passes.h
index 1ab8625..e9fa0eb 100644
--- a/compiler/dex/post_opt_passes.h
+++ b/compiler/dex/post_opt_passes.h
@@ -263,12 +263,19 @@
};
/**
- * @class TypeInference
+ * @class TypeInferencePass
* @brief Type inference pass.
*/
-class TypeInference : public PassMEMirSsaRep {
+class TypeInferencePass : public PassMEMirSsaRep {
public:
- TypeInference() : PassMEMirSsaRep("TypeInference", kRepeatingPreOrderDFSTraversal) {
+ TypeInferencePass() : PassMEMirSsaRep("TypeInference", kRepeatingPreOrderDFSTraversal) {
+ }
+
+ void Start(PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ c_unit->mir_graph->InferTypesStart();
}
bool Worker(PassDataHolder* data) const {
@@ -280,6 +287,13 @@
DCHECK(bb != nullptr);
return c_unit->mir_graph->InferTypes(bb);
}
+
+ void End(PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ c_unit->mir_graph->InferTypesEnd();
+ }
};
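The new Start/Worker/End hooks split the pass into per-method setup, per-block work, and per-method teardown. A simplified sketch of how a kRepeatingPreOrderDFSTraversal driver exercises them (the loop structure and PreOrderDfsIterator name are illustrative; the real driver lives in PassDriverME):

    pass->Start(&data);                    // MIRGraph::InferTypesStart()
    bool changed;
    do {
      changed = false;
      for (BasicBlock* bb : PreOrderDfsIterator(mir_graph)) {  // hypothetical
        data.bb = bb;
        changed |= pass->Worker(&data);    // MIRGraph::InferTypes(bb)
      }
    } while (changed);                     // repeat until a fixed point
    pass->End(&data);                      // MIRGraph::InferTypesEnd()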
/**
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index c5ac4c1..df4a9f2 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -1055,7 +1055,7 @@
// new_lir replaces orig_lir in the pcrel_fixup list.
void ArmMir2Lir::ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
new_lir->u.a.pcrel_next = orig_lir->u.a.pcrel_next;
- if (UNLIKELY(prev_lir == NULL)) {
+ if (UNLIKELY(prev_lir == nullptr)) {
first_fixup_ = new_lir;
} else {
prev_lir->u.a.pcrel_next = new_lir;
@@ -1066,7 +1066,7 @@
// new_lir is inserted before orig_lir in the pcrel_fixup list.
void ArmMir2Lir::InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
new_lir->u.a.pcrel_next = orig_lir;
- if (UNLIKELY(prev_lir == NULL)) {
+ if (UNLIKELY(prev_lir == nullptr)) {
first_fixup_ = new_lir;
} else {
DCHECK(prev_lir->u.a.pcrel_next == orig_lir);
@@ -1084,7 +1084,7 @@
uint8_t* ArmMir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
uint8_t* const write_buffer = write_pos;
- for (; lir != NULL; lir = NEXT_LIR(lir)) {
+ for (; lir != nullptr; lir = NEXT_LIR(lir)) {
lir->offset = (write_pos - write_buffer);
if (!lir->flags.is_nop) {
int opcode = lir->opcode;
@@ -1258,8 +1258,8 @@
generation ^= 1;
// Note: nodes requiring possible fixup linked in ascending order.
lir = first_fixup_;
- prev_lir = NULL;
- while (lir != NULL) {
+ prev_lir = nullptr;
+ while (lir != nullptr) {
/*
* NOTE: the lir being considered here will be encoded following the switch (so long as
* we're not in a retry situation). However, any new non-pc_rel instructions inserted
@@ -1506,7 +1506,7 @@
case kFixupAdr: {
const EmbeddedData* tab_rec = UnwrapPointer<EmbeddedData>(lir->operands[2]);
LIR* target = lir->target;
- int32_t target_disp = (tab_rec != NULL) ? tab_rec->offset + offset_adjustment
+ int32_t target_disp = (tab_rec != nullptr) ? tab_rec->offset + offset_adjustment
: target->offset + ((target->flags.generation == lir->flags.generation) ? 0 :
offset_adjustment);
int32_t disp = target_disp - ((lir->offset + 4) & ~3);
@@ -1642,7 +1642,7 @@
uint32_t ArmMir2Lir::LinkFixupInsns(LIR* head_lir, LIR* tail_lir, uint32_t offset) {
LIR* end_lir = tail_lir->next;
- LIR* last_fixup = NULL;
+ LIR* last_fixup = nullptr;
for (LIR* lir = head_lir; lir != end_lir; lir = NEXT_LIR(lir)) {
if (!lir->flags.is_nop) {
if (lir->flags.fixup != kFixupNone) {
@@ -1658,8 +1658,8 @@
}
// Link into the fixup chain.
lir->flags.use_def_invalid = true;
- lir->u.a.pcrel_next = NULL;
- if (first_fixup_ == NULL) {
+ lir->u.a.pcrel_next = nullptr;
+ if (first_fixup_ == nullptr) {
first_fixup_ = lir;
} else {
last_fixup->u.a.pcrel_next = lir;
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 3d18af6..2b2d6af 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -21,6 +21,7 @@
#include "arm_lir.h"
#include "base/logging.h"
#include "dex/mir_graph.h"
+#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
@@ -124,7 +125,7 @@
}
// Bounds check - if < 0 or >= size continue following switch
OpRegImm(kOpCmp, keyReg, size-1);
- LIR* branch_over = OpCondBranch(kCondHi, NULL);
+ LIR* branch_over = OpCondBranch(kCondHi, nullptr);
// Load the displacement from the switch table
RegStorage disp_reg = AllocTemp();
@@ -156,7 +157,7 @@
} else {
// If the null-check fails, it's handled by the slow-path to reduce exception-related meta-data.
if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
- null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
+ null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, nullptr);
}
}
Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
@@ -165,12 +166,12 @@
MarkPossibleNullPointerException(opt_flags);
// Zero out the read barrier bits.
OpRegRegImm(kOpAnd, rs_r3, rs_r1, LockWord::kReadBarrierStateMaskShiftedToggled);
- LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_r3, 0, NULL);
+ LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_r3, 0, nullptr);
// r1 is zero except for the rb bits here. Copy the read barrier bits into r2.
OpRegRegReg(kOpOr, rs_r2, rs_r2, rs_r1);
NewLIR4(kThumb2Strex, rs_r1.GetReg(), rs_r2.GetReg(), rs_r0.GetReg(),
mirror::Object::MonitorOffset().Int32Value() >> 2);
- LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_r1, 0, NULL);
+ LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_r1, 0, nullptr);
LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
@@ -238,7 +239,7 @@
} else {
// If the null-check fails, it's handled by the slow-path to reduce exception-related meta-data.
if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
- null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
+ null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, nullptr);
}
}
if (!kUseReadBarrier) {
@@ -252,16 +253,16 @@
OpRegRegImm(kOpAnd, rs_r3, rs_r1, LockWord::kReadBarrierStateMaskShiftedToggled);
// Zero out except the read barrier bits.
OpRegRegImm(kOpAnd, rs_r1, rs_r1, LockWord::kReadBarrierStateMaskShifted);
- LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_r3, rs_r2, NULL);
+ LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_r3, rs_r2, nullptr);
GenMemBarrier(kAnyStore);
LIR* unlock_success_branch;
if (!kUseReadBarrier) {
Store32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1);
- unlock_success_branch = OpUnconditionalBranch(NULL);
+ unlock_success_branch = OpUnconditionalBranch(nullptr);
} else {
NewLIR4(kThumb2Strex, rs_r2.GetReg(), rs_r1.GetReg(), rs_r0.GetReg(),
mirror::Object::MonitorOffset().Int32Value() >> 2);
- unlock_success_branch = OpCmpImmBranch(kCondEq, rs_r2, 0, NULL);
+ unlock_success_branch = OpCmpImmBranch(kCondEq, rs_r2, 0, nullptr);
}
LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
slow_unlock_branch->target = slow_path_target;
@@ -619,13 +620,31 @@
* Bit of a hack here - in the absence of a real scheduling pass,
* emit the next instruction in static & direct invoke sequences.
*/
-int ArmMir2Lir::ArmNextSDCallInsn(CompilationUnit* cu, CallInfo* info ATTRIBUTE_UNUSED,
+int ArmMir2Lir::ArmNextSDCallInsn(CompilationUnit* cu, CallInfo* info,
int state, const MethodReference& target_method,
uint32_t unused_idx ATTRIBUTE_UNUSED,
uintptr_t direct_code, uintptr_t direct_method,
InvokeType type) {
ArmMir2Lir* cg = static_cast<ArmMir2Lir*>(cu->cg.get());
- if (direct_code != 0 && direct_method != 0) {
+ if (info->string_init_offset != 0) {
+ RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
+ switch (state) {
+ case 0: { // Grab target method* from thread pointer
+ cg->LoadRefDisp(rs_rARM_SELF, info->string_init_offset, arg0_ref, kNotVolatile);
+ break;
+ }
+ case 1: // Grab the code from the method*
+ if (direct_code == 0) {
+ // kInvokeTgt := arg0_ref->entrypoint
+ cg->LoadWordDisp(arg0_ref,
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArmPointerSize).Int32Value(), cg->TargetPtrReg(kInvokeTgt));
+ }
+ break;
+ default:
+ return -1;
+ }
+ } else if (direct_code != 0 && direct_method != 0) {
switch (state) {
case 0: // Get the current Method* [sets kArg0]
if (direct_code != static_cast<uintptr_t>(-1)) {
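In plain C++ terms, the two string-init states above amount to the following loads (a sketch: `self` stands for the thread register rs_rARM_SELF, and `entry_point_offset` for EntryPointFromQuickCompiledCodeOffset(kArmPointerSize)):

    // state 0: kArg0 := the StringFactory ArtMethod* stashed in a
    //          thread-local entrypoint slot.
    ArtMethod* target = *reinterpret_cast<ArtMethod**>(
        reinterpret_cast<uint8_t*>(self) + info->string_init_offset);
    // state 1: kInvokeTgt := that method's quick code entrypoint (only when
    //          no direct code pointer is available).
    void* code = *reinterpret_cast<void**>(
        reinterpret_cast<uint8_t*>(target) + entry_point_offset);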
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
index eb1383f..94fc474 100644
--- a/compiler/dex/quick/arm/fp_arm.cc
+++ b/compiler/dex/quick/arm/fp_arm.cc
@@ -187,7 +187,8 @@
return;
}
case Instruction::FLOAT_TO_LONG:
- GenConversionCall(kQuickF2l, rl_dest, rl_src);
+ CheckEntrypointTypes<kQuickF2l, int64_t, float>(); // int64_t -> kCoreReg
+ GenConversionCall(kQuickF2l, rl_dest, rl_src, kCoreReg);
return;
case Instruction::LONG_TO_FLOAT: {
rl_src = LoadValueWide(rl_src, kFPReg);
@@ -217,7 +218,8 @@
return;
}
case Instruction::DOUBLE_TO_LONG:
- GenConversionCall(kQuickD2l, rl_dest, rl_src);
+ CheckEntrypointTypes<kQuickD2l, int64_t, double>(); // int64_t -> kCoreReg
+ GenConversionCall(kQuickD2l, rl_dest, rl_src, kCoreReg);
return;
default:
LOG(FATAL) << "Unexpected opcode: " << opcode;
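CheckEntrypointTypes<kQuickF2l, int64_t, float>() is a compile-time assertion that the entrypoint's signature is int64_t(float); because the runtime returns the long in core registers, the result must be fetched with kCoreReg rather than whatever LocToRegClass() would infer for the destination. A minimal sketch of the idea behind such a check (names illustrative):

    #include <type_traits>
    template <typename ExpectedRet, typename ActualRet>
    void AssertReturnType() {
      static_assert(std::is_same<ExpectedRet, ActualRet>::value,
                    "entrypoint return type mismatch");
    }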
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 47669db..7598e50 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -138,10 +138,10 @@
RegStorage t_reg = AllocTemp();
LoadConstant(t_reg, -1);
OpRegReg(kOpCmp, rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
- LIR* branch1 = OpCondBranch(kCondLt, NULL);
- LIR* branch2 = OpCondBranch(kCondGt, NULL);
+ LIR* branch1 = OpCondBranch(kCondLt, nullptr);
+ LIR* branch2 = OpCondBranch(kCondGt, nullptr);
OpRegRegReg(kOpSub, t_reg, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
- LIR* branch3 = OpCondBranch(kCondEq, NULL);
+ LIR* branch3 = OpCondBranch(kCondEq, nullptr);
LIR* it = OpIT(kCondHi, "E");
NewLIR2(kThumb2MovI8M, t_reg.GetReg(), ModifiedImmediate(-1));
@@ -389,7 +389,7 @@
* generate the long form in an attempt to avoid an extra assembly pass.
* TODO: consider interspersing slowpaths in code following unconditional branches.
*/
- bool skip = ((target != NULL) && (target->opcode == kPseudoThrowTarget));
+ bool skip = ((target != nullptr) && (target->opcode == kPseudoThrowTarget));
skip &= ((mir_graph_->GetNumDalvikInsns() - current_dalvik_offset_) > 64);
if (!skip && reg.Low8() && (check_value == 0)) {
if (arm_cond == kArmCondEq || arm_cond == kArmCondNe) {
@@ -882,7 +882,7 @@
RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
RegLocation rl_new_value;
if (!is_long) {
- rl_new_value = LoadValue(rl_src_new_value, LocToRegClass(rl_src_new_value));
+ rl_new_value = LoadValue(rl_src_new_value, is_object ? kRefReg : kCoreReg);
} else if (load_early) {
rl_new_value = LoadValueWide(rl_src_new_value, kCoreReg);
}
@@ -905,7 +905,7 @@
RegLocation rl_expected;
if (!is_long) {
- rl_expected = LoadValue(rl_src_expected, LocToRegClass(rl_src_new_value));
+ rl_expected = LoadValue(rl_src_expected, is_object ? kRefReg : kCoreReg);
} else if (load_early) {
rl_expected = LoadValueWide(rl_src_expected, kCoreReg);
} else {
@@ -1159,12 +1159,12 @@
LIR* ArmMir2Lir::OpTestSuspend(LIR* target) {
#ifdef ARM_R4_SUSPEND_FLAG
NewLIR2(kThumbSubRI8, rs_rARM_SUSPEND.GetReg(), 1);
- return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
+ return OpCondBranch((target == nullptr) ? kCondEq : kCondNe, target);
#else
RegStorage t_reg = AllocTemp();
LoadBaseDisp(rs_rARM_SELF, Thread::ThreadFlagsOffset<4>().Int32Value(),
t_reg, kUnsignedHalf, kNotVolatile);
- LIR* cmp_branch = OpCmpImmBranch((target == NULL) ? kCondNe : kCondEq, t_reg,
+ LIR* cmp_branch = OpCmpImmBranch((target == nullptr) ? kCondNe : kCondEq, t_reg,
0, target);
FreeTemp(t_reg);
return cmp_branch;
@@ -1326,11 +1326,6 @@
}
}
- // Now, restore lr to its non-temp status.
- FreeTemp(tmp1);
- Clobber(rs_rARM_LR);
- UnmarkTemp(rs_rARM_LR);
-
if (reg_status != 0) {
// We had manually allocated registers for rl_result.
// Now construct a RegLocation.
@@ -1338,7 +1333,14 @@
rl_result.reg = RegStorage::MakeRegPair(res_lo, res_hi);
}
+ // Free tmp1 but keep LR as temp for StoreValueWide() if needed.
+ FreeTemp(tmp1);
+
StoreValueWide(rl_dest, rl_result);
+
+ // Now, restore lr to its non-temp status.
+ Clobber(rs_rARM_LR);
+ UnmarkTemp(rs_rARM_LR);
}
void ArmMir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 25ea694..2ef92f8 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -90,7 +90,7 @@
}
}
LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWordData(&literal_list_, value);
}
ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
@@ -411,7 +411,7 @@
return NewLIR4(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_src2.GetReg(), shift);
} else {
LOG(FATAL) << "Unexpected encoding operand count";
- return NULL;
+ return nullptr;
}
}
@@ -695,7 +695,7 @@
}
LIR* ArmMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
- LIR* res = NULL;
+ LIR* res = nullptr;
int32_t val_lo = Low32Bits(value);
int32_t val_hi = High32Bits(value);
if (r_dest.IsFloat()) {
@@ -721,10 +721,10 @@
LoadConstantNoClobber(r_dest.GetHigh(), val_hi);
}
}
- if (res == NULL) {
+ if (res == nullptr) {
// No short form - load from the literal pool.
LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWideData(&literal_list_, val_lo, val_hi);
}
ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
@@ -814,7 +814,7 @@
LIR* ArmMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
int scale, OpSize size) {
bool all_low_regs = r_base.Low8() && r_index.Low8() && r_src.Low8();
- LIR* store = NULL;
+ LIR* store = nullptr;
ArmOpcode opcode = kThumbBkpt;
bool thumb_form = (all_low_regs && (scale == 0));
RegStorage reg_ptr;
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index 2f1ae66..b78fb80 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -663,7 +663,7 @@
// new_lir replaces orig_lir in the pcrel_fixup list.
void Arm64Mir2Lir::ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
new_lir->u.a.pcrel_next = orig_lir->u.a.pcrel_next;
- if (UNLIKELY(prev_lir == NULL)) {
+ if (UNLIKELY(prev_lir == nullptr)) {
first_fixup_ = new_lir;
} else {
prev_lir->u.a.pcrel_next = new_lir;
@@ -674,7 +674,7 @@
// new_lir is inserted before orig_lir in the pcrel_fixup list.
void Arm64Mir2Lir::InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
new_lir->u.a.pcrel_next = orig_lir;
- if (UNLIKELY(prev_lir == NULL)) {
+ if (UNLIKELY(prev_lir == nullptr)) {
first_fixup_ = new_lir;
} else {
DCHECK(prev_lir->u.a.pcrel_next == orig_lir);
@@ -889,8 +889,8 @@
generation ^= 1;
// Note: nodes requiring possible fixup linked in ascending order.
lir = first_fixup_;
- prev_lir = NULL;
- while (lir != NULL) {
+ prev_lir = nullptr;
+ while (lir != nullptr) {
// NOTE: Any new non-pc_rel instructions inserted due to retry must be explicitly encoded at
// the time of insertion. Note that inserted instructions don't need use/def flags, but do
// need size and pc-rel status properly updated.
@@ -1037,7 +1037,7 @@
// Check that the instruction preceding the multiply-accumulate is a load or store.
if ((prev_insn_flags & IS_LOAD) != 0 || (prev_insn_flags & IS_STORE) != 0) {
// Insert a NOP between the load/store and the multiply-accumulate.
- LIR* new_lir = RawLIR(lir->dalvik_offset, kA64Nop0, 0, 0, 0, 0, 0, NULL);
+ LIR* new_lir = RawLIR(lir->dalvik_offset, kA64Nop0, 0, 0, 0, 0, 0, nullptr);
new_lir->offset = lir->offset;
new_lir->flags.fixup = kFixupNone;
new_lir->flags.size = EncodingMap[kA64Nop0].size;
@@ -1108,7 +1108,7 @@
uint32_t Arm64Mir2Lir::LinkFixupInsns(LIR* head_lir, LIR* tail_lir, uint32_t offset) {
LIR* end_lir = tail_lir->next;
- LIR* last_fixup = NULL;
+ LIR* last_fixup = nullptr;
for (LIR* lir = head_lir; lir != end_lir; lir = NEXT_LIR(lir)) {
A64Opcode opcode = UNWIDE(lir->opcode);
if (!lir->flags.is_nop) {
@@ -1123,8 +1123,8 @@
}
// Link into the fixup chain.
lir->flags.use_def_invalid = true;
- lir->u.a.pcrel_next = NULL;
- if (first_fixup_ == NULL) {
+ lir->u.a.pcrel_next = nullptr;
+ if (first_fixup_ == nullptr) {
first_fixup_ = lir;
} else {
last_fixup->u.a.pcrel_next = lir;
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index 4abbd77..e49e40d 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -21,6 +21,7 @@
#include "arm64_lir.h"
#include "base/logging.h"
#include "dex/mir_graph.h"
+#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
@@ -127,7 +128,7 @@
}
// Bounds check - if < 0 or >= size continue following switch
OpRegImm(kOpCmp, key_reg, size - 1);
- LIR* branch_over = OpCondBranch(kCondHi, NULL);
+ LIR* branch_over = OpCondBranch(kCondHi, nullptr);
// Load the displacement from the switch table
RegStorage disp_reg = AllocTemp();
@@ -167,7 +168,7 @@
} else {
// If the null-check fails, it's handled by the slow-path to reduce exception-related meta-data.
if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
- null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL);
+ null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, nullptr);
}
}
Load32Disp(rs_xSELF, Thread::ThinLockIdOffset<8>().Int32Value(), rs_w1);
@@ -176,12 +177,12 @@
MarkPossibleNullPointerException(opt_flags);
// Zero out the read barrier bits.
OpRegRegImm(kOpAnd, rs_w2, rs_w3, LockWord::kReadBarrierStateMaskShiftedToggled);
- LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_w2, 0, NULL);
+ LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_w2, 0, nullptr);
// w3 is zero except for the rb bits here. Copy the read barrier bits into w1.
OpRegRegReg(kOpOr, rs_w1, rs_w1, rs_w3);
OpRegRegImm(kOpAdd, rs_x2, rs_x0, mirror::Object::MonitorOffset().Int32Value());
NewLIR3(kA64Stxr3wrX, rw3, rw1, rx2);
- LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_w3, 0, NULL);
+ LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_w3, 0, nullptr);
LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
not_unlocked_branch->target = slow_path_target;
@@ -220,7 +221,7 @@
} else {
// If the null-check fails, it's handled by the slow-path to reduce exception-related meta-data.
if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
- null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL);
+ null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, nullptr);
}
}
Load32Disp(rs_xSELF, Thread::ThinLockIdOffset<8>().Int32Value(), rs_w1);
@@ -235,16 +236,16 @@
OpRegRegImm(kOpAnd, rs_w3, rs_w2, LockWord::kReadBarrierStateMaskShiftedToggled);
// Zero out except the read barrier bits.
OpRegRegImm(kOpAnd, rs_w2, rs_w2, LockWord::kReadBarrierStateMaskShifted);
- LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_w3, rs_w1, NULL);
+ LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_w3, rs_w1, nullptr);
GenMemBarrier(kAnyStore);
LIR* unlock_success_branch;
if (!kUseReadBarrier) {
Store32Disp(rs_x0, mirror::Object::MonitorOffset().Int32Value(), rs_w2);
- unlock_success_branch = OpUnconditionalBranch(NULL);
+ unlock_success_branch = OpUnconditionalBranch(nullptr);
} else {
OpRegRegImm(kOpAdd, rs_x3, rs_x0, mirror::Object::MonitorOffset().Int32Value());
NewLIR3(kA64Stxr3wrX, rw1, rw2, rx3);
- unlock_success_branch = OpCmpImmBranch(kCondEq, rs_w1, 0, NULL);
+ unlock_success_branch = OpCmpImmBranch(kCondEq, rs_w1, 0, nullptr);
}
LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
slow_unlock_branch->target = slow_path_target;
@@ -460,7 +461,25 @@
InvokeType type) {
UNUSED(info, unused_idx);
Arm64Mir2Lir* cg = static_cast<Arm64Mir2Lir*>(cu->cg.get());
- if (direct_code != 0 && direct_method != 0) {
+ if (info->string_init_offset != 0) {
+ RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
+ switch (state) {
+ case 0: { // Grab target method* from thread pointer
+ cg->LoadRefDisp(rs_xSELF, info->string_init_offset, arg0_ref, kNotVolatile);
+ break;
+ }
+ case 1: // Grab the code from the method*
+ if (direct_code == 0) {
+ // kInvokeTgt := arg0_ref->entrypoint
+ cg->LoadWordDisp(arg0_ref,
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArm64PointerSize).Int32Value(), cg->TargetPtrReg(kInvokeTgt));
+ }
+ break;
+ default:
+ return -1;
+ }
+ } else if (direct_code != 0 && direct_method != 0) {
switch (state) {
case 0: // Get the current Method* [sets kArg0]
if (direct_code != static_cast<uintptr_t>(-1)) {
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index b7dbd0a..9340d01 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -803,7 +803,7 @@
NewLIR2(kA64Ldaxr2rX | wide, r_tmp_stored.GetReg(), r_ptr.GetReg());
OpRegReg(kOpCmp, r_tmp, rl_expected.reg);
DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
- LIR* early_exit = OpCondBranch(kCondNe, NULL);
+ LIR* early_exit = OpCondBranch(kCondNe, nullptr);
NewLIR3(kA64Stlxr3wrX | wide, r_tmp32.GetReg(), rl_new_value_stored.GetReg(), r_ptr.GetReg());
NewLIR3(kA64Cmp3RdT, r_tmp32.GetReg(), 0, ENCODE_NO_SHIFT);
DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index e9ad8ba..483231f 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -121,7 +121,7 @@
}
LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
// Wide, as we need 8B alignment.
data_target = AddWideData(&literal_list_, value, 0);
}
@@ -148,7 +148,7 @@
int32_t val_lo = Low32Bits(value);
int32_t val_hi = High32Bits(value);
LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWideData(&literal_list_, val_lo, val_hi);
}
@@ -525,7 +525,7 @@
int32_t val_lo = Low32Bits(value);
int32_t val_hi = High32Bits(value);
LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWideData(&literal_list_, val_lo, val_hi);
}
@@ -624,7 +624,7 @@
}
LOG(FATAL) << "Unexpected encoding operand count";
- return NULL;
+ return nullptr;
}
LIR* Arm64Mir2Lir::OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2,
@@ -658,7 +658,7 @@
}
LOG(FATAL) << "Unexpected encoding operand count";
- return NULL;
+ return nullptr;
}
LIR* Arm64Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
@@ -1190,7 +1190,7 @@
*/
LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
OpSize size) {
- LIR* load = NULL;
+ LIR* load = nullptr;
A64Opcode opcode = kA64Brk1d;
A64Opcode alt_opcode = kA64Brk1d;
int scale = 0;
@@ -1286,7 +1286,7 @@
LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
OpSize size) {
- LIR* store = NULL;
+ LIR* store = nullptr;
A64Opcode opcode = kA64Brk1d;
A64Opcode alt_opcode = kA64Brk1d;
int scale = 0;
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 9f4a318..86bb69d 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -1080,7 +1080,7 @@
reginfo_map_.reserve(RegStorage::kMaxRegs);
pointer_storage_.reserve(128);
slow_paths_.reserve(32);
- // Reserve pointer id 0 for nullptr.
+ // Reserve pointer id 0 for null.
size_t null_idx = WrapPointer<void>(nullptr);
DCHECK_EQ(null_idx, 0U);
}
@@ -1391,22 +1391,6 @@
}
}
}
- if (bb->block_type != kEntryBlock && bb->first_mir_insn != nullptr &&
- static_cast<int>(bb->first_mir_insn->dalvikInsn.opcode) == kMirOpCheckPart2) {
- // In Mir2Lir::MethodBlockCodeGen() we have artificially moved the throwing
- // instruction to the previous block. However, the MIRGraph data used above
- // doesn't reflect that, so we still need to process that MIR insn here.
- MIR* mir = nullptr;
- BasicBlock* pred_bb = bb;
- // Traverse empty blocks.
- while (mir == nullptr && pred_bb->predecessors.size() == 1u) {
- pred_bb = mir_graph_->GetBasicBlock(bb->predecessors[0]);
- DCHECK(pred_bb != nullptr);
- mir = pred_bb->last_mir_insn;
- }
- DCHECK(mir != nullptr);
- UpdateReferenceVRegsLocal(nullptr, mir, references);
- }
}
bool Mir2Lir::UpdateReferenceVRegsLocal(MIR* mir, MIR* prev_mir, BitVector* references) {
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 4ac6c0c..2568ee3 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -55,8 +55,12 @@
false, // kIntrinsicReferenceGetReferent
false, // kIntrinsicCharAt
false, // kIntrinsicCompareTo
+ false, // kIntrinsicGetCharsNoCheck
false, // kIntrinsicIsEmptyOrLength
false, // kIntrinsicIndexOf
+ true, // kIntrinsicNewStringFromBytes
+ true, // kIntrinsicNewStringFromChars
+ true, // kIntrinsicNewStringFromString
true, // kIntrinsicCurrentThread
true, // kIntrinsicPeek
true, // kIntrinsicPoke
@@ -88,8 +92,15 @@
static_assert(!kIntrinsicIsStatic[kIntrinsicReferenceGetReferent], "Get must not be static");
static_assert(!kIntrinsicIsStatic[kIntrinsicCharAt], "CharAt must not be static");
static_assert(!kIntrinsicIsStatic[kIntrinsicCompareTo], "CompareTo must not be static");
+static_assert(!kIntrinsicIsStatic[kIntrinsicGetCharsNoCheck], "GetCharsNoCheck must not be static");
static_assert(!kIntrinsicIsStatic[kIntrinsicIsEmptyOrLength], "IsEmptyOrLength must not be static");
static_assert(!kIntrinsicIsStatic[kIntrinsicIndexOf], "IndexOf must not be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicNewStringFromBytes],
+ "NewStringFromBytes must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicNewStringFromChars],
+ "NewStringFromChars must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicNewStringFromString],
+ "NewStringFromString must be static");
static_assert(kIntrinsicIsStatic[kIntrinsicCurrentThread], "CurrentThread must be static");
static_assert(kIntrinsicIsStatic[kIntrinsicPeek], "Peek must be static");
static_assert(kIntrinsicIsStatic[kIntrinsicPoke], "Poke must be static");
@@ -137,9 +148,15 @@
"F", // kClassCacheFloat
"D", // kClassCacheDouble
"V", // kClassCacheVoid
+ "[B", // kClassCacheJavaLangByteArray
+ "[C", // kClassCacheJavaLangCharArray
+ "[I", // kClassCacheJavaLangIntArray
"Ljava/lang/Object;", // kClassCacheJavaLangObject
- "Ljava/lang/ref/Reference;", // kClassCacheJavaLangRefReference
+ "Ljava/lang/ref/Reference;", // kClassCacheJavaLangRefReference
"Ljava/lang/String;", // kClassCacheJavaLangString
+ "Ljava/lang/StringBuffer;", // kClassCacheJavaLangStringBuffer
+ "Ljava/lang/StringBuilder;", // kClassCacheJavaLangStringBuilder
+ "Ljava/lang/StringFactory;", // kClassCacheJavaLangStringFactory
"Ljava/lang/Double;", // kClassCacheJavaLangDouble
"Ljava/lang/Float;", // kClassCacheJavaLangFloat
"Ljava/lang/Integer;", // kClassCacheJavaLangInteger
@@ -148,10 +165,10 @@
"Ljava/lang/Math;", // kClassCacheJavaLangMath
"Ljava/lang/StrictMath;", // kClassCacheJavaLangStrictMath
"Ljava/lang/Thread;", // kClassCacheJavaLangThread
+ "Ljava/nio/charset/Charset;", // kClassCacheJavaNioCharsetCharset
"Llibcore/io/Memory;", // kClassCacheLibcoreIoMemory
"Lsun/misc/Unsafe;", // kClassCacheSunMiscUnsafe
"Ljava/lang/System;", // kClassCacheJavaLangSystem
- "[C" // kClassCacheJavaLangCharArray
};
const char* const DexFileMethodInliner::kNameCacheNames[] = {
@@ -172,9 +189,14 @@
"getReferent", // kNameCacheReferenceGet
"charAt", // kNameCacheCharAt
"compareTo", // kNameCacheCompareTo
+ "getCharsNoCheck", // kNameCacheGetCharsNoCheck
"isEmpty", // kNameCacheIsEmpty
"indexOf", // kNameCacheIndexOf
"length", // kNameCacheLength
+ "<init>", // kNameCacheInit
+ "newStringFromBytes", // kNameCacheNewStringFromBytes
+ "newStringFromChars", // kNameCacheNewStringFromChars
+ "newStringFromString", // kNameCacheNewStringFromString
"currentThread", // kNameCacheCurrentThread
"peekByte", // kNameCachePeekByte
"peekIntNative", // kNameCachePeekIntNative
@@ -282,7 +304,53 @@
kClassCacheJavaLangObject } },
// kProtoCacheCharArrayICharArrayII_V
{ kClassCacheVoid, 5, {kClassCacheJavaLangCharArray, kClassCacheInt,
- kClassCacheJavaLangCharArray, kClassCacheInt, kClassCacheInt}}
+ kClassCacheJavaLangCharArray, kClassCacheInt, kClassCacheInt} },
+ // kProtoCacheIICharArrayI_V
+ { kClassCacheVoid, 4, { kClassCacheInt, kClassCacheInt, kClassCacheJavaLangCharArray,
+ kClassCacheInt } },
+ // kProtoCacheByteArrayIII_String
+ { kClassCacheJavaLangString, 4, { kClassCacheJavaLangByteArray, kClassCacheInt, kClassCacheInt,
+ kClassCacheInt } },
+ // kProtoCacheIICharArray_String
+ { kClassCacheJavaLangString, 3, { kClassCacheInt, kClassCacheInt,
+ kClassCacheJavaLangCharArray } },
+ // kProtoCacheString_String
+ { kClassCacheJavaLangString, 1, { kClassCacheJavaLangString } },
+ // kProtoCache_V
+ { kClassCacheVoid, 0, { } },
+ // kProtoCacheByteArray_V
+ { kClassCacheVoid, 1, { kClassCacheJavaLangByteArray } },
+ // kProtoCacheByteArrayI_V
+ { kClassCacheVoid, 2, { kClassCacheJavaLangByteArray, kClassCacheInt } },
+ // kProtoCacheByteArrayII_V
+ { kClassCacheVoid, 3, { kClassCacheJavaLangByteArray, kClassCacheInt, kClassCacheInt } },
+ // kProtoCacheByteArrayIII_V
+ { kClassCacheVoid, 4, { kClassCacheJavaLangByteArray, kClassCacheInt, kClassCacheInt,
+ kClassCacheInt } },
+ // kProtoCacheByteArrayIIString_V
+ { kClassCacheVoid, 4, { kClassCacheJavaLangByteArray, kClassCacheInt, kClassCacheInt,
+ kClassCacheJavaLangString } },
+ // kProtoCacheByteArrayString_V
+ { kClassCacheVoid, 2, { kClassCacheJavaLangByteArray, kClassCacheJavaLangString } },
+ // kProtoCacheByteArrayIICharset_V
+ { kClassCacheVoid, 4, { kClassCacheJavaLangByteArray, kClassCacheInt, kClassCacheInt,
+ kClassCacheJavaNioCharsetCharset } },
+ // kProtoCacheByteArrayCharset_V
+ { kClassCacheVoid, 2, { kClassCacheJavaLangByteArray, kClassCacheJavaNioCharsetCharset } },
+ // kProtoCacheCharArray_V
+ { kClassCacheVoid, 1, { kClassCacheJavaLangCharArray } },
+ // kProtoCacheCharArrayII_V
+ { kClassCacheVoid, 3, { kClassCacheJavaLangCharArray, kClassCacheInt, kClassCacheInt } },
+ // kProtoCacheIICharArray_V
+ { kClassCacheVoid, 3, { kClassCacheInt, kClassCacheInt, kClassCacheJavaLangCharArray } },
+ // kProtoCacheIntArrayII_V
+ { kClassCacheVoid, 3, { kClassCacheJavaLangIntArray, kClassCacheInt, kClassCacheInt } },
+ // kProtoCacheString_V
+ { kClassCacheVoid, 1, { kClassCacheJavaLangString } },
+ // kProtoCacheStringBuffer_V
+ { kClassCacheVoid, 1, { kClassCacheJavaLangStringBuffer } },
+ // kProtoCacheStringBuilder_V
+ { kClassCacheVoid, 1, { kClassCacheJavaLangStringBuilder } },
};
const DexFileMethodInliner::IntrinsicDef DexFileMethodInliner::kIntrinsicMethods[] = {
@@ -343,6 +411,7 @@
INTRINSIC(JavaLangString, CharAt, I_C, kIntrinsicCharAt, 0),
INTRINSIC(JavaLangString, CompareTo, String_I, kIntrinsicCompareTo, 0),
+ INTRINSIC(JavaLangString, GetCharsNoCheck, IICharArrayI_V, kIntrinsicGetCharsNoCheck, 0),
INTRINSIC(JavaLangString, IsEmpty, _Z, kIntrinsicIsEmptyOrLength, kIntrinsicFlagIsEmpty),
INTRINSIC(JavaLangString, IndexOf, II_I, kIntrinsicIndexOf, kIntrinsicFlagNone),
INTRINSIC(JavaLangString, IndexOf, I_I, kIntrinsicIndexOf, kIntrinsicFlagBase0),
@@ -368,9 +437,9 @@
#define UNSAFE_GET_PUT(type, code, type_flags) \
INTRINSIC(SunMiscUnsafe, Get ## type, ObjectJ_ ## code, kIntrinsicUnsafeGet, \
- type_flags & ~kIntrinsicFlagIsObject), \
+ type_flags), \
INTRINSIC(SunMiscUnsafe, Get ## type ## Volatile, ObjectJ_ ## code, kIntrinsicUnsafeGet, \
- (type_flags | kIntrinsicFlagIsVolatile) & ~kIntrinsicFlagIsObject), \
+ type_flags | kIntrinsicFlagIsVolatile), \
INTRINSIC(SunMiscUnsafe, Put ## type, ObjectJ ## code ## _V, kIntrinsicUnsafePut, \
type_flags), \
INTRINSIC(SunMiscUnsafe, Put ## type ## Volatile, ObjectJ ## code ## _V, kIntrinsicUnsafePut, \
@@ -386,13 +455,34 @@
INTRINSIC(JavaLangSystem, ArrayCopy, CharArrayICharArrayII_V , kIntrinsicSystemArrayCopyCharArray,
0),
-
#undef INTRINSIC
+
+#define SPECIAL(c, n, p, o, d) \
+ { { kClassCache ## c, kNameCache ## n, kProtoCache ## p }, { o, kInlineSpecial, { d } } }
+
+ SPECIAL(JavaLangString, Init, _V, kInlineStringInit, 0),
+ SPECIAL(JavaLangString, Init, ByteArray_V, kInlineStringInit, 1),
+ SPECIAL(JavaLangString, Init, ByteArrayI_V, kInlineStringInit, 2),
+ SPECIAL(JavaLangString, Init, ByteArrayII_V, kInlineStringInit, 3),
+ SPECIAL(JavaLangString, Init, ByteArrayIII_V, kInlineStringInit, 4),
+ SPECIAL(JavaLangString, Init, ByteArrayIIString_V, kInlineStringInit, 5),
+ SPECIAL(JavaLangString, Init, ByteArrayString_V, kInlineStringInit, 6),
+ SPECIAL(JavaLangString, Init, ByteArrayIICharset_V, kInlineStringInit, 7),
+ SPECIAL(JavaLangString, Init, ByteArrayCharset_V, kInlineStringInit, 8),
+ SPECIAL(JavaLangString, Init, CharArray_V, kInlineStringInit, 9),
+ SPECIAL(JavaLangString, Init, CharArrayII_V, kInlineStringInit, 10),
+ SPECIAL(JavaLangString, Init, IICharArray_V, kInlineStringInit, 11),
+ SPECIAL(JavaLangString, Init, IntArrayII_V, kInlineStringInit, 12),
+ SPECIAL(JavaLangString, Init, String_V, kInlineStringInit, 13),
+ SPECIAL(JavaLangString, Init, StringBuffer_V, kInlineStringInit, 14),
+ SPECIAL(JavaLangString, Init, StringBuilder_V, kInlineStringInit, 15),
+
+#undef SPECIAL
};
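For reference, one SPECIAL line expanded by hand (the final argument becomes the d.data value that selects the StringFactory variant in GetOffsetForStringInit below):

    // SPECIAL(JavaLangString, Init, ByteArray_V, kInlineStringInit, 1)
    // expands to roughly:
    { { kClassCacheJavaLangString, kNameCacheInit, kProtoCacheByteArray_V },
      { kInlineStringInit, kInlineSpecial, { 1 } } }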
DexFileMethodInliner::DexFileMethodInliner()
: lock_("DexFileMethodInliner lock", kDexFileMethodInlinerLock),
- dex_file_(NULL) {
+ dex_file_(nullptr) {
static_assert(kClassCacheFirst == 0, "kClassCacheFirst not 0");
static_assert(arraysize(kClassCacheNames) == kClassCacheLast,
"bad arraysize for kClassCacheNames");
@@ -491,11 +581,19 @@
return backend->GenInlinedCharAt(info);
case kIntrinsicCompareTo:
return backend->GenInlinedStringCompareTo(info);
+ case kIntrinsicGetCharsNoCheck:
+ return backend->GenInlinedStringGetCharsNoCheck(info);
case kIntrinsicIsEmptyOrLength:
return backend->GenInlinedStringIsEmptyOrLength(
info, intrinsic.d.data & kIntrinsicFlagIsEmpty);
case kIntrinsicIndexOf:
return backend->GenInlinedIndexOf(info, intrinsic.d.data & kIntrinsicFlagBase0);
+ case kIntrinsicNewStringFromBytes:
+ return backend->GenInlinedStringFactoryNewStringFromBytes(info);
+ case kIntrinsicNewStringFromChars:
+ return backend->GenInlinedStringFactoryNewStringFromChars(info);
+ case kIntrinsicNewStringFromString:
+ return backend->GenInlinedStringFactoryNewStringFromString(info);
case kIntrinsicCurrentThread:
return backend->GenInlinedCurrentThread(info);
case kIntrinsicPeek:
@@ -507,6 +605,7 @@
intrinsic.d.data & kIntrinsicFlagIsObject);
case kIntrinsicUnsafeGet:
return backend->GenInlinedUnsafeGet(info, intrinsic.d.data & kIntrinsicFlagIsLong,
+ intrinsic.d.data & kIntrinsicFlagIsObject,
intrinsic.d.data & kIntrinsicFlagIsVolatile);
case kIntrinsicUnsafePut:
return backend->GenInlinedUnsafePut(info, intrinsic.d.data & kIntrinsicFlagIsLong,
@@ -573,6 +672,8 @@
move_result = mir_graph->FindMoveResult(bb, invoke);
result = GenInlineIPut(mir_graph, bb, invoke, move_result, method);
break;
+ case kInlineStringInit:
+ return false;
default:
LOG(FATAL) << "Unexpected inline op: " << method.opcode;
break;
@@ -752,6 +853,7 @@
insn->dalvikInsn.opcode = Instruction::CONST;
insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
insn->dalvikInsn.vB = method.d.data;
+ insn->meta.method_lowering_info = invoke->meta.method_lowering_info; // Preserve type info.
bb->InsertMIRAfter(move_result, insn);
return true;
}
@@ -790,6 +892,7 @@
insn->dalvikInsn.opcode = opcode;
insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
insn->dalvikInsn.vB = arg;
+ insn->meta.method_lowering_info = invoke->meta.method_lowering_info; // Preserve type info.
bb->InsertMIRAfter(move_result, insn);
return true;
}
@@ -912,9 +1015,27 @@
}
move->dalvikInsn.vA = move_result->dalvikInsn.vA;
move->dalvikInsn.vB = return_reg;
+ move->meta.method_lowering_info = invoke->meta.method_lowering_info; // Preserve type info.
bb->InsertMIRAfter(insn, move);
}
return true;
}
+uint32_t DexFileMethodInliner::GetOffsetForStringInit(uint32_t method_index, size_t pointer_size) {
+ ReaderMutexLock mu(Thread::Current(), lock_);
+ auto it = inline_methods_.find(method_index);
+ if (it != inline_methods_.end() && (it->second.opcode == kInlineStringInit)) {
+ uint32_t string_init_base_offset = Thread::QuickEntryPointOffsetWithSize(
+ OFFSETOF_MEMBER(QuickEntryPoints, pNewEmptyString), pointer_size);
+ return string_init_base_offset + it->second.d.data * pointer_size;
+ }
+ return 0;
+}
+
+bool DexFileMethodInliner::IsStringInitMethodIndex(uint32_t method_index) {
+ ReaderMutexLock mu(Thread::Current(), lock_);
+ auto it = inline_methods_.find(method_index);
+ return (it != inline_methods_.end()) && (it->second.opcode == kInlineStringInit);
+}
+
} // namespace art
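Since the string-init entrypoints occupy a contiguous run of QuickEntryPoints slots starting at pNewEmptyString, the arithmetic in GetOffsetForStringInit reduces to base + variant * pointer_size. A worked sketch with made-up numbers:

    // Assumption: pNewEmptyString sits at thread offset 0x1a0 on a 32-bit
    // target (illustrative value, not the real layout).
    constexpr size_t kPointerSize = 4;
    constexpr uint32_t kBase = 0x1a0;            // offset of pNewEmptyString
    // Variant 4 (the ByteArrayIII_V constructor per the SPECIAL table):
    uint32_t offset = kBase + 4 * kPointerSize;  // == 0x1b0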
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index d1e5621..26b41bf 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -96,6 +96,17 @@
LOCKS_EXCLUDED(lock_);
/**
+ * Gets the thread pointer entrypoint offset for a string init method index and pointer size.
+ */
+ uint32_t GetOffsetForStringInit(uint32_t method_index, size_t pointer_size)
+ LOCKS_EXCLUDED(lock_);
+
+ /**
+ * Check whether a particular method index is a string init.
+ */
+ bool IsStringInitMethodIndex(uint32_t method_index) LOCKS_EXCLUDED(lock_);
+
+ /**
* To avoid multiple lookups of a class by its descriptor, we cache its
* type index in the IndexCache. These are the indexes into the IndexCache
* class_indexes array.
@@ -111,9 +122,15 @@
kClassCacheFloat,
kClassCacheDouble,
kClassCacheVoid,
+ kClassCacheJavaLangByteArray,
+ kClassCacheJavaLangCharArray,
+ kClassCacheJavaLangIntArray,
kClassCacheJavaLangObject,
kClassCacheJavaLangRefReference,
kClassCacheJavaLangString,
+ kClassCacheJavaLangStringBuffer,
+ kClassCacheJavaLangStringBuilder,
+ kClassCacheJavaLangStringFactory,
kClassCacheJavaLangDouble,
kClassCacheJavaLangFloat,
kClassCacheJavaLangInteger,
@@ -122,10 +139,10 @@
kClassCacheJavaLangMath,
kClassCacheJavaLangStrictMath,
kClassCacheJavaLangThread,
+ kClassCacheJavaNioCharsetCharset,
kClassCacheLibcoreIoMemory,
kClassCacheSunMiscUnsafe,
kClassCacheJavaLangSystem,
- kClassCacheJavaLangCharArray,
kClassCacheLast
};
@@ -153,9 +170,14 @@
kNameCacheReferenceGetReferent,
kNameCacheCharAt,
kNameCacheCompareTo,
+ kNameCacheGetCharsNoCheck,
kNameCacheIsEmpty,
kNameCacheIndexOf,
kNameCacheLength,
+ kNameCacheInit,
+ kNameCacheNewStringFromBytes,
+ kNameCacheNewStringFromChars,
+ kNameCacheNewStringFromString,
kNameCacheCurrentThread,
kNameCachePeekByte,
kNameCachePeekIntNative,
@@ -230,6 +252,26 @@
kProtoCacheObjectJ_Object,
kProtoCacheObjectJObject_V,
kProtoCacheCharArrayICharArrayII_V,
+ kProtoCacheIICharArrayI_V,
+ kProtoCacheByteArrayIII_String,
+ kProtoCacheIICharArray_String,
+ kProtoCacheString_String,
+ kProtoCache_V,
+ kProtoCacheByteArray_V,
+ kProtoCacheByteArrayI_V,
+ kProtoCacheByteArrayII_V,
+ kProtoCacheByteArrayIII_V,
+ kProtoCacheByteArrayIIString_V,
+ kProtoCacheByteArrayString_V,
+ kProtoCacheByteArrayIICharset_V,
+ kProtoCacheByteArrayCharset_V,
+ kProtoCacheCharArray_V,
+ kProtoCacheCharArrayII_V,
+ kProtoCacheIICharArray_V,
+ kProtoCacheIntArrayII_V,
+ kProtoCacheString_V,
+ kProtoCacheStringBuffer_V,
+ kProtoCacheStringBuilder_V,
kProtoCacheLast
};
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index b132c4c..0592c74 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -58,24 +58,19 @@
return (cu->enable_debug & (1 << kDebugSlowTypePath)) != 0;
}
-void Mir2Lir::GenIfNullUseHelperImmMethod(
- RegStorage r_result, QuickEntrypointEnum trampoline, int imm, RegStorage r_method) {
+void Mir2Lir::GenIfNullUseHelperImm(RegStorage r_result, QuickEntrypointEnum trampoline, int imm) {
class CallHelperImmMethodSlowPath : public LIRSlowPath {
public:
CallHelperImmMethodSlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont,
QuickEntrypointEnum trampoline_in, int imm_in,
- RegStorage r_method_in, RegStorage r_result_in)
+ RegStorage r_result_in)
: LIRSlowPath(m2l, fromfast, cont), trampoline_(trampoline_in),
- imm_(imm_in), r_method_(r_method_in), r_result_(r_result_in) {
+ imm_(imm_in), r_result_(r_result_in) {
}
void Compile() {
GenerateTargetLabel();
- if (r_method_.Valid()) {
- m2l_->CallRuntimeHelperImmReg(trampoline_, imm_, r_method_, true);
- } else {
- m2l_->CallRuntimeHelperImmMethod(trampoline_, imm_, true);
- }
+ m2l_->CallRuntimeHelperImm(trampoline_, imm_, true);
m2l_->OpRegCopy(r_result_, m2l_->TargetReg(kRet0, kRef));
m2l_->OpUnconditionalBranch(cont_);
}
@@ -83,15 +78,14 @@
private:
QuickEntrypointEnum trampoline_;
const int imm_;
- const RegStorage r_method_;
const RegStorage r_result_;
};
- LIR* branch = OpCmpImmBranch(kCondEq, r_result, 0, NULL);
+ LIR* branch = OpCmpImmBranch(kCondEq, r_result, 0, nullptr);
LIR* cont = NewLIR0(kPseudoTargetLabel);
AddSlowPath(new (arena_) CallHelperImmMethodSlowPath(this, branch, cont, trampoline, imm,
- r_method, r_result));
+ r_result));
}
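The emitted fast/slow structure, written out as plain C++ (a sketch: FastPathValue and Trampoline stand in for the caller's lookup and the runtime entrypoint):

    Object* result = FastPathValue();   // r_result, loaded by the caller
    if (result == nullptr) {            // OpCmpImmBranch(kCondEq, ..., 0)
      result = Trampoline(imm);         // slow path: CallRuntimeHelperImm()
    }                                   // falls through to `cont`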
RegStorage Mir2Lir::GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& field_info,
@@ -101,22 +95,21 @@
FlushAllRegs();
RegStorage r_base = TargetReg(kArg0, kRef);
LockTemp(r_base);
- RegStorage r_method = RegStorage::InvalidReg(); // Loaded lazily, maybe in the slow-path.
if (CanUseOpPcRelDexCacheArrayLoad()) {
uint32_t offset = dex_cache_arrays_layout_.TypeOffset(field_info.StorageIndex());
OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, r_base);
} else {
// Using fixed register to sync with possible call to runtime support.
- r_method = LoadCurrMethodWithHint(TargetReg(kArg1, kRef));
+ RegStorage r_method = LoadCurrMethodWithHint(r_base);
LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base,
kNotVolatile);
int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
LoadRefDisp(r_base, offset_of_field, r_base, kNotVolatile);
}
- // r_base now points at static storage (Class*) or nullptr if the type is not yet resolved.
+ // r_base now points at static storage (Class*) or null if the type is not yet resolved.
LIR* unresolved_branch = nullptr;
if (!field_info.IsClassInDexCache() && (opt_flags & MIR_CLASS_IS_IN_DEX_CACHE) == 0) {
- // Check if r_base is nullptr.
+ // Check if r_base is null.
unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, nullptr);
}
LIR* uninit_branch = nullptr;
@@ -136,13 +129,13 @@
class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath {
public:
// There are up to two branches to the static field slow path, the "unresolved" when the type
- // entry in the dex cache is nullptr, and the "uninit" when the class is not yet initialized.
- // At least one will be non-nullptr here, otherwise we wouldn't generate the slow path.
+ // entry in the dex cache is null, and the "uninit" when the class is not yet initialized.
+ // At least one will be non-null here, otherwise we wouldn't generate the slow path.
StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont, int storage_index,
- RegStorage r_base_in, RegStorage r_method_in)
+ RegStorage r_base_in)
: LIRSlowPath(m2l, unresolved != nullptr ? unresolved : uninit, cont),
second_branch_(unresolved != nullptr ? uninit : nullptr),
- storage_index_(storage_index), r_base_(r_base_in), r_method_(r_method_in) {
+ storage_index_(storage_index), r_base_(r_base_in) {
}
void Compile() {
@@ -150,14 +143,7 @@
if (second_branch_ != nullptr) {
second_branch_->target = target;
}
- if (r_method_.Valid()) {
- // ArtMethod* was loaded in normal path - use it.
- m2l_->CallRuntimeHelperImmReg(kQuickInitializeStaticStorage, storage_index_, r_method_,
- true);
- } else {
- // ArtMethod* wasn't loaded in normal path - use a helper that loads it.
- m2l_->CallRuntimeHelperImmMethod(kQuickInitializeStaticStorage, storage_index_, true);
- }
+ m2l_->CallRuntimeHelperImm(kQuickInitializeStaticStorage, storage_index_, true);
// Copy helper's result into r_base, a no-op on all but MIPS.
m2l_->OpRegCopy(r_base_, m2l_->TargetReg(kRet0, kRef));
@@ -165,22 +151,18 @@
}
private:
- // Second branch to the slow path, or nullptr if there's only one branch.
+ // Second branch to the slow path, or null if there's only one branch.
LIR* const second_branch_;
const int storage_index_;
const RegStorage r_base_;
- RegStorage r_method_;
};
- // The slow path is invoked if the r_base is nullptr or the class pointed
+ // The slow path is invoked if r_base is null or the class pointed
// to by it is not initialized.
LIR* cont = NewLIR0(kPseudoTargetLabel);
AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
- field_info.StorageIndex(), r_base, r_method));
- }
- if (IsTemp(r_method)) {
- FreeTemp(r_method);
+ field_info.StorageIndex(), r_base));
}
return r_base;
}
@@ -319,7 +301,7 @@
/* Perform an explicit null-check on a register. */
LIR* Mir2Lir::GenExplicitNullCheck(RegStorage m_reg, int opt_flags) {
if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
- return NULL;
+ return nullptr;
}
return GenNullCheck(m_reg);
}
@@ -1042,22 +1024,19 @@
type_idx)) {
// Call out to helper which resolves type and verifies access.
// Resolved type returned in kRet0.
- CallRuntimeHelperImmMethod(kQuickInitializeTypeAndVerifyAccess, type_idx, true);
+ CallRuntimeHelperImm(kQuickInitializeTypeAndVerifyAccess, type_idx, true);
rl_result = GetReturn(kRefReg);
} else {
rl_result = EvalLoc(rl_dest, kRefReg, true);
// We don't need access checks, load type from dex cache
- RegStorage r_method = RegStorage::InvalidReg();
if (CanUseOpPcRelDexCacheArrayLoad()) {
size_t offset = dex_cache_arrays_layout_.TypeOffset(type_idx);
OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, rl_result.reg);
} else {
- RegLocation rl_method = LoadCurrMethod();
- CheckRegLocation(rl_method);
- r_method = rl_method.reg;
int32_t dex_cache_offset =
mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
RegStorage res_reg = AllocTempRef();
+ RegStorage r_method = LoadCurrMethodWithHint(res_reg);
LoadRefDisp(r_method, dex_cache_offset, res_reg, kNotVolatile);
int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
LoadRefDisp(res_reg, offset_of_type, rl_result.reg, kNotVolatile);
@@ -1067,7 +1046,7 @@
type_idx) || ForceSlowTypePath(cu_)) {
// Slow path, at runtime test if type is null and if so initialize
FlushAllRegs();
- GenIfNullUseHelperImmMethod(rl_result.reg, kQuickInitializeType, type_idx, r_method);
+ GenIfNullUseHelperImm(rl_result.reg, kQuickInitializeType, type_idx);
}
}
StoreValue(rl_dest, rl_result);
@@ -1085,14 +1064,13 @@
// Might call out to helper, which will return resolved string in kRet0
RegStorage ret0 = TargetReg(kRet0, kRef);
- RegStorage r_method = RegStorage::InvalidReg();
if (CanUseOpPcRelDexCacheArrayLoad()) {
size_t offset = dex_cache_arrays_layout_.StringOffset(string_idx);
OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, ret0);
} else {
- r_method = LoadCurrMethodWithHint(TargetReg(kArg1, kRef));
// Method to declaring class.
RegStorage arg0 = TargetReg(kArg0, kRef);
+ RegStorage r_method = LoadCurrMethodWithHint(arg0);
LoadRefDisp(r_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
arg0, kNotVolatile);
// Declaring class to dex cache strings.
@@ -1100,7 +1078,7 @@
LoadRefDisp(arg0, offset_of_string, ret0, kNotVolatile);
}
- GenIfNullUseHelperImmMethod(ret0, kQuickResolveString, string_idx, r_method);
+ GenIfNullUseHelperImm(ret0, kQuickResolveString, string_idx);
GenBarrier();
StoreValue(rl_dest, GetReturn(kRefReg));
@@ -1188,7 +1166,7 @@
DCHECK(!IsSameReg(result_reg, object.reg));
}
LoadConstant(result_reg, 0); // assume false
- LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL);
+ LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, nullptr);
RegStorage check_class = AllocTypedTemp(false, kRefReg);
RegStorage object_class = AllocTypedTemp(false, kRefReg);
@@ -1262,12 +1240,11 @@
LoadValueDirectFixed(rl_src, ref_reg); // kArg0 <= ref
}
- RegStorage r_method = RegStorage::InvalidReg();
if (CanUseOpPcRelDexCacheArrayLoad()) {
size_t offset = dex_cache_arrays_layout_.TypeOffset(type_idx);
OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, class_reg);
} else {
- r_method = LoadCurrMethodWithHint(TargetReg(kArg1, kRef));
+ RegStorage r_method = LoadCurrMethodWithHint(class_reg);
// Load dex cache entry into class_reg (kArg2)
LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
class_reg, kNotVolatile);
@@ -1275,7 +1252,7 @@
LoadRefDisp(class_reg, offset_of_type, class_reg, kNotVolatile);
}
if (!can_assume_type_is_in_dex_cache) {
- GenIfNullUseHelperImmMethod(class_reg, kQuickInitializeType, type_idx, r_method);
+ GenIfNullUseHelperImm(class_reg, kQuickInitializeType, type_idx);
// Should load value here.
LoadValueDirectFixed(rl_src, ref_reg); // kArg0 <= ref
@@ -1287,7 +1264,7 @@
// On MIPS and x86_64 rArg0 != rl_result, place false in result if branch is taken.
LoadConstant(rl_result.reg, 0);
}
- LIR* branch1 = OpCmpImmBranch(kCondEq, ref_reg, 0, NULL);
+ LIR* branch1 = OpCmpImmBranch(kCondEq, ref_reg, 0, nullptr);
/* load object->klass_ */
RegStorage ref_class_reg = TargetReg(kArg1, kRef); // kArg1 will hold the Class* of ref.
@@ -1295,7 +1272,7 @@
LoadRefDisp(ref_reg, mirror::Object::ClassOffset().Int32Value(),
ref_class_reg, kNotVolatile);
/* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
- LIR* branchover = NULL;
+ LIR* branchover = nullptr;
if (type_known_final) {
// rl_result == ref == class.
GenSelectConst32(ref_class_reg, class_reg, kCondEq, 1, 0, rl_result.reg,
@@ -1320,7 +1297,7 @@
if (!type_known_abstract) {
/* Uses branchovers */
LoadConstant(rl_result.reg, 1); // assume true
- branchover = OpCmpBranch(kCondEq, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef), NULL);
+ branchover = OpCmpBranch(kCondEq, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef), nullptr);
}
OpRegCopy(TargetReg(kArg0, kRef), class_reg); // .ne case - arg0 <= class
@@ -1394,12 +1371,11 @@
class_reg, kNotVolatile);
} else {
// Load dex cache entry into class_reg (kArg2)
- RegStorage r_method = RegStorage::InvalidReg();
if (CanUseOpPcRelDexCacheArrayLoad()) {
size_t offset = dex_cache_arrays_layout_.TypeOffset(type_idx);
OpPcRelDexCacheArrayLoad(cu_->dex_file, offset, class_reg);
} else {
- r_method = LoadCurrMethodWithHint(TargetReg(kArg1, kRef));
+ RegStorage r_method = LoadCurrMethodWithHint(class_reg);
LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
class_reg, kNotVolatile);
@@ -1408,7 +1384,7 @@
}
if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) {
// Need to test presence of type in dex cache at runtime
- GenIfNullUseHelperImmMethod(class_reg, kQuickInitializeType, type_idx, r_method);
+ GenIfNullUseHelperImm(class_reg, kQuickInitializeType, type_idx);
}
}
// At this point, class_reg (kArg2) has class
@@ -2088,7 +2064,7 @@
}
void Mir2Lir::GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest,
- RegLocation rl_src) {
+ RegLocation rl_src, RegisterClass return_reg_class) {
/*
* Don't optimize the register usage since it calls out to support
* functions
@@ -2097,12 +2073,10 @@
FlushAllRegs(); /* Send everything to home location */
CallRuntimeHelperRegLocation(trampoline, rl_src, false);
if (rl_dest.wide) {
- RegLocation rl_result;
- rl_result = GetReturnWide(LocToRegClass(rl_dest));
+ RegLocation rl_result = GetReturnWide(return_reg_class);
StoreValueWide(rl_dest, rl_result);
} else {
- RegLocation rl_result;
- rl_result = GetReturn(LocToRegClass(rl_dest));
+ RegLocation rl_result = GetReturn(return_reg_class);
StoreValue(rl_dest, rl_result);
}
}
@@ -2131,7 +2105,7 @@
}
if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitSuspendChecks()) {
FlushAllRegs();
- LIR* branch = OpTestSuspend(NULL);
+ LIR* branch = OpTestSuspend(nullptr);
LIR* cont = NewLIR0(kPseudoTargetLabel);
AddSlowPath(new (arena_) SuspendCheckSlowPath(this, branch, cont));
} else {
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index db7095d..ab011fc 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -375,6 +375,18 @@
CallHelper(r_tgt, trampoline, safepoint_pc);
}
+void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocationRegLocation(
+ QuickEntrypointEnum trampoline, RegLocation arg0, RegLocation arg1, RegLocation arg2,
+ RegLocation arg3, bool safepoint_pc) {
+ RegStorage r_tgt = CallHelperSetup(trampoline);
+ LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0));
+ LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
+ LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
+ LoadValueDirectFixed(arg3, TargetReg(kArg3, arg3));
+ ClobberCallerSave();
+ CallHelper(r_tgt, trampoline, safepoint_pc);
+}
+
/*
* If there are any ins passed in registers that have not been promoted
* to a callee-save register, flush them to the frame. Perform initial
@@ -882,8 +894,6 @@
ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
} else {
res = info->result;
- DCHECK_EQ(LocToRegClass(res),
- ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
}
return res;
}
@@ -896,8 +906,6 @@
mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
} else {
res = info->result;
- DCHECK_EQ(LocToRegClass(res),
- ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
}
return res;
}
@@ -970,14 +978,10 @@
}
bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
- // Location of reference to data array
+ // Location of char array data
int value_offset = mirror::String::ValueOffset().Int32Value();
// Location of count
int count_offset = mirror::String::CountOffset().Int32Value();
- // Starting offset within data array
- int offset_offset = mirror::String::OffsetOffset().Int32Value();
- // Start of char data with array_
- int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
RegLocation rl_obj = info->args[0];
RegLocation rl_idx = info->args[1];
@@ -987,38 +991,21 @@
GenNullCheck(rl_obj.reg, info->opt_flags);
bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
LIR* range_check_branch = nullptr;
- RegStorage reg_off;
- RegStorage reg_ptr;
- reg_off = AllocTemp();
- reg_ptr = AllocTempRef();
if (range_check) {
reg_max = AllocTemp();
Load32Disp(rl_obj.reg, count_offset, reg_max);
MarkPossibleNullPointerException(info->opt_flags);
- }
- Load32Disp(rl_obj.reg, offset_offset, reg_off);
- MarkPossibleNullPointerException(info->opt_flags);
- LoadRefDisp(rl_obj.reg, value_offset, reg_ptr, kNotVolatile);
- if (range_check) {
- // Set up a slow path to allow retry in case of bounds violation */
+ // Set up a slow path to allow retry in case of bounds violation
OpRegReg(kOpCmp, rl_idx.reg, reg_max);
FreeTemp(reg_max);
range_check_branch = OpCondBranch(kCondUge, nullptr);
}
- OpRegImm(kOpAdd, reg_ptr, data_offset);
- if (rl_idx.is_const) {
- OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg));
- } else {
- OpRegReg(kOpAdd, reg_off, rl_idx.reg);
- }
+ RegStorage reg_ptr = AllocTempRef();
+ OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg, value_offset);
FreeTemp(rl_obj.reg);
- if (rl_idx.location == kLocPhysReg) {
- FreeTemp(rl_idx.reg);
- }
RegLocation rl_dest = InlineTarget(info);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadBaseIndexed(reg_ptr, reg_off, rl_result.reg, 1, kUnsignedHalf);
- FreeTemp(reg_off);
+ LoadBaseIndexed(reg_ptr, rl_idx.reg, rl_result.reg, 1, kUnsignedHalf);
FreeTemp(reg_ptr);
StoreValue(rl_dest, rl_result);
if (range_check) {
@@ -1029,6 +1016,59 @@
return true;
}
+bool Mir2Lir::GenInlinedStringGetCharsNoCheck(CallInfo* info) {
+ if (cu_->instruction_set == kMips) {
+ // TODO - add Mips implementation
+ return false;
+ }
+ size_t char_component_size = Primitive::ComponentSize(Primitive::kPrimChar);
+ // Location of data in char array buffer
+ int data_offset = mirror::Array::DataOffset(char_component_size).Int32Value();
+ // Location of char array data in string
+ int value_offset = mirror::String::ValueOffset().Int32Value();
+
+ RegLocation rl_obj = info->args[0];
+ RegLocation rl_start = info->args[1];
+ RegLocation rl_end = info->args[2];
+ RegLocation rl_buffer = info->args[3];
+ RegLocation rl_index = info->args[4];
+
+ ClobberCallerSave();
+ LockCallTemps(); // Using fixed registers
+ RegStorage reg_dst_ptr = TargetReg(kArg0, kRef);
+ RegStorage reg_src_ptr = TargetReg(kArg1, kRef);
+ RegStorage reg_length = TargetReg(kArg2, kNotWide);
+ RegStorage reg_tmp = TargetReg(kArg3, kNotWide);
+ RegStorage reg_tmp_ptr = RegStorage(RegStorage::k64BitSolo, reg_tmp.GetRawBits() & RegStorage::kRegTypeMask);
+
+ LoadValueDirectFixed(rl_buffer, reg_dst_ptr);
+ OpRegImm(kOpAdd, reg_dst_ptr, data_offset);
+ LoadValueDirectFixed(rl_index, reg_tmp);
+ OpRegRegImm(kOpLsl, reg_tmp, reg_tmp, 1);
+ OpRegReg(kOpAdd, reg_dst_ptr, cu_->instruction_set == kArm64 ? reg_tmp_ptr : reg_tmp);
+
+ LoadValueDirectFixed(rl_start, reg_tmp);
+ LoadValueDirectFixed(rl_end, reg_length);
+ OpRegReg(kOpSub, reg_length, reg_tmp);
+ OpRegRegImm(kOpLsl, reg_length, reg_length, 1);
+ LoadValueDirectFixed(rl_obj, reg_src_ptr);
+
+ OpRegImm(kOpAdd, reg_src_ptr, value_offset);
+ OpRegRegImm(kOpLsl, reg_tmp, reg_tmp, 1);
+ OpRegReg(kOpAdd, reg_src_ptr, cu_->instruction_set == kArm64 ? reg_tmp_ptr : reg_tmp);
+
+ RegStorage r_tgt;
+ if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
+ r_tgt = LoadHelper(kQuickMemcpy);
+ } else {
+ r_tgt = RegStorage::InvalidReg();
+ }
+ // NOTE: not a safepoint
+ CallHelper(r_tgt, kQuickMemcpy, false, true);
+
+ return true;
+}
+
// Generates an inlined String.is_empty or String.length.
bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
@@ -1062,6 +1102,58 @@
return true;
}
+bool Mir2Lir::GenInlinedStringFactoryNewStringFromBytes(CallInfo* info) {
+ if (cu_->instruction_set == kMips) {
+ // TODO - add Mips implementation
+ return false;
+ }
+ RegLocation rl_data = info->args[0];
+ RegLocation rl_high = info->args[1];
+ RegLocation rl_offset = info->args[2];
+ RegLocation rl_count = info->args[3];
+ rl_data = LoadValue(rl_data, kRefReg);
+ LIR* data_null_check_branch = OpCmpImmBranch(kCondEq, rl_data.reg, 0, nullptr);
+ AddIntrinsicSlowPath(info, data_null_check_branch);
+ CallRuntimeHelperRegLocationRegLocationRegLocationRegLocation(
+ kQuickAllocStringFromBytes, rl_data, rl_high, rl_offset, rl_count, true);
+ RegLocation rl_return = GetReturn(kRefReg);
+ RegLocation rl_dest = InlineTarget(info);
+ StoreValue(rl_dest, rl_return);
+ return true;
+}
+
+bool Mir2Lir::GenInlinedStringFactoryNewStringFromChars(CallInfo* info) {
+ if (cu_->instruction_set == kMips) {
+ // TODO - add Mips implementation
+ return false;
+ }
+ RegLocation rl_offset = info->args[0];
+ RegLocation rl_count = info->args[1];
+ RegLocation rl_data = info->args[2];
+ CallRuntimeHelperRegLocationRegLocationRegLocation(
+ kQuickAllocStringFromChars, rl_offset, rl_count, rl_data, true);
+ RegLocation rl_return = GetReturn(kRefReg);
+ RegLocation rl_dest = InlineTarget(info);
+ StoreValue(rl_dest, rl_return);
+ return true;
+}
+
+bool Mir2Lir::GenInlinedStringFactoryNewStringFromString(CallInfo* info) {
+ if (cu_->instruction_set == kMips) {
+ // TODO - add Mips implementation
+ return false;
+ }
+ RegLocation rl_string = info->args[0];
+ rl_string = LoadValue(rl_string, kRefReg);
+ LIR* string_null_check_branch = OpCmpImmBranch(kCondEq, rl_string.reg, 0, nullptr);
+ AddIntrinsicSlowPath(info, string_null_check_branch);
+ CallRuntimeHelperRegLocation(kQuickAllocStringFromString, rl_string, true);
+ RegLocation rl_return = GetReturn(kRefReg);
+ RegLocation rl_dest = InlineTarget(info);
+ StoreValue(rl_dest, rl_return);
+ return true;
+}
+
bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) {
if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
// TODO: add Mips and Mips64 implementations.
@@ -1338,7 +1430,7 @@
}
bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
- bool is_long, bool is_volatile) {
+ bool is_long, bool is_object, bool is_volatile) {
if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
// TODO: add Mips and Mips64 implementations.
return false;
@@ -1351,7 +1443,7 @@
RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
- RegLocation rl_result = EvalLoc(rl_dest, LocToRegClass(rl_dest), true);
+ RegLocation rl_result = EvalLoc(rl_dest, is_object ? kRefReg : kCoreReg, true);
if (is_long) {
if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64
|| cu_->instruction_set == kArm64) {
@@ -1411,7 +1503,7 @@
FreeTemp(rl_temp_offset);
}
} else {
- rl_value = LoadValue(rl_src_value, LocToRegClass(rl_src_value));
+ rl_value = LoadValue(rl_src_value, is_object ? kRefReg : kCoreReg);
if (rl_value.ref) {
StoreRefIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0);
} else {
@@ -1455,9 +1547,22 @@
LockCallTemps();
const MirMethodLoweringInfo& method_info = mir_graph_->GetMethodLoweringInfo(info->mir);
+ MethodReference target_method = method_info.GetTargetMethod();
cu_->compiler_driver->ProcessedInvoke(method_info.GetInvokeType(), method_info.StatsFlags());
InvokeType original_type = static_cast<InvokeType>(method_info.GetInvokeType());
info->type = method_info.GetSharpType();
+ bool is_string_init = false;
+ if (method_info.IsSpecial()) {
+ DexFileMethodInliner* inliner = cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(
+ target_method.dex_file);
+ if (inliner->IsStringInitMethodIndex(target_method.dex_method_index)) {
+ is_string_init = true;
+ size_t pointer_size = GetInstructionSetPointerSize(cu_->instruction_set);
+ info->string_init_offset = inliner->GetOffsetForStringInit(target_method.dex_method_index,
+ pointer_size);
+ info->type = kStatic;
+ }
+ }
bool fast_path = method_info.FastPath();
bool skip_this;
@@ -1482,7 +1587,6 @@
next_call_insn = fast_path ? NextVCallInsn : NextVCallInsnSP;
skip_this = fast_path;
}
- MethodReference target_method = method_info.GetTargetMethod();
call_state = GenDalvikArgs(info, call_state, p_null_ck,
next_call_insn, target_method, method_info.VTableIndex(),
method_info.DirectCode(), method_info.DirectMethod(),
@@ -1499,11 +1603,13 @@
FreeCallTemps();
if (info->result.location != kLocInvalid) {
// We have a following MOVE_RESULT - do it now.
+ RegisterClass reg_class = is_string_init ? kRefReg :
+ ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]);
if (info->result.wide) {
- RegLocation ret_loc = GetReturnWide(LocToRegClass(info->result));
+ RegLocation ret_loc = GetReturnWide(reg_class);
StoreValueWide(info->result, ret_loc);
} else {
- RegLocation ret_loc = GetReturn(LocToRegClass(info->result));
+ RegLocation ret_loc = GetReturn(reg_class);
StoreValue(info->result, ret_loc);
}
}
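The GenInlinedCharAt rewrite above reflects the String layout change this
series builds on: character data now lives inline in the String object at
value_offset, so there is no separate char[] to load, no offset_ field, and
no array data_offset to add; charAt collapses to one indexed halfword load.
A minimal standalone sketch of the resulting address math, assuming a
16-bit char buffer embedded at value_offset (names here are illustrative,
not ART's):

    #include <cstdint>
    #include <cstring>

    // One indexed halfword load: base + value_offset + (index << 1).
    uint16_t InlineCharAt(const uint8_t* string_obj, int32_t value_offset,
                          int32_t index) {
      uint16_t c;
      std::memcpy(&c, string_obj + value_offset + (index << 1), sizeof(c));
      return c;
    }

The new StringFactory intrinsics follow the same model: they null-check the
input where needed, then hand allocation off to the
kQuickAllocStringFromBytes/Chars/String entrypoints.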
diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc
index 54e5742..4215e8b 100644
--- a/compiler/dex/quick/gen_loadstore.cc
+++ b/compiler/dex/quick/gen_loadstore.cc
@@ -46,7 +46,7 @@
if (rl_src.location == kLocPhysReg) {
OpRegCopy(r_dest, rl_src.reg);
} else if (IsInexpensiveConstant(rl_src)) {
- // On 64-bit targets, will sign extend. Make sure constant reference is always NULL.
+ // On 64-bit targets, will sign extend. Make sure constant reference is always null.
DCHECK(!rl_src.ref || (mir_graph_->ConstantValue(rl_src) == 0));
LoadConstantNoClobber(r_dest, mir_graph_->ConstantValue(rl_src));
} else {
diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc
index 936ff42..f9b9684 100644
--- a/compiler/dex/quick/mips/assemble_mips.cc
+++ b/compiler/dex/quick/mips/assemble_mips.cc
@@ -613,7 +613,7 @@
LOG(FATAL) << "Unexpected branch kind " << opcode;
UNREACHABLE();
}
- LIR* hop_target = NULL;
+ LIR* hop_target = nullptr;
if (!unconditional) {
hop_target = RawLIR(dalvik_offset, kPseudoTargetLabel);
LIR* hop_branch = RawLIR(dalvik_offset, opcode, lir->operands[0],
@@ -650,7 +650,7 @@
LIR *lir;
AssemblerStatus res = kSuccess; // Assume success.
- for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+ for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
if (lir->opcode < 0) {
continue;
}
@@ -668,7 +668,7 @@
* (label2 - label1), where label1 is a standard
* kPseudoTargetLabel and is stored in operands[2].
* If operands[3] is null, then label2 is a kPseudoTargetLabel
- * and is found in lir->target. If operands[3] is non-NULL,
+ * and is found in lir->target. If operands[3] is non-nullptr,
* then it is a Switch/Data table.
*/
int offset1 = UnwrapPointer<LIR>(lir->operands[2])->offset;
@@ -863,7 +863,7 @@
LIR* lir;
int offset = 0;
- for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+ for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
lir->offset = offset;
if (LIKELY(lir->opcode >= 0)) {
if (!lir->flags.is_nop) {
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 05570e4..3d25384 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -20,7 +20,9 @@
#include "base/logging.h"
#include "dex/mir_graph.h"
+#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex/quick/mir_to_lir-inl.h"
+#include "driver/compiler_driver.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "gc/accounting/card_table.h"
#include "mips_lir.h"
@@ -112,7 +114,7 @@
// Test loop.
RegStorage r_key = AllocTemp();
LIR* loop_label = NewLIR0(kPseudoTargetLabel);
- LIR* exit_branch = OpCmpBranch(kCondEq, r_base, r_end, NULL);
+ LIR* exit_branch = OpCmpBranch(kCondEq, r_base, r_end, nullptr);
Load32Disp(r_base, 0, r_key);
OpRegImm(kOpAdd, r_base, 8);
OpCmpBranch(kCondNe, rl_src.reg, r_key, loop_label);
@@ -188,7 +190,7 @@
tab_rec->anchor = base_label;
// Bounds check - if < 0 or >= size continue following switch.
- LIR* branch_over = OpCmpImmBranch(kCondHi, r_key, size-1, NULL);
+ LIR* branch_over = OpCmpImmBranch(kCondHi, r_key, size-1, nullptr);
// Materialize the table base pointer.
RegStorage r_base = AllocPtrSizeTemp();
@@ -397,11 +399,28 @@
* Bit of a hack here - in the absence of a real scheduling pass,
* emit the next instruction in static & direct invoke sequences.
*/
-static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info ATTRIBUTE_UNUSED, int state,
+static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method, uint32_t, uintptr_t direct_code,
uintptr_t direct_method, InvokeType type) {
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
- if (direct_code != 0 && direct_method != 0) {
+ if (info->string_init_offset != 0) {
+ RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
+ switch (state) {
+ case 0: { // Grab target method* from thread pointer
+ cg->LoadRefDisp(cg->TargetPtrReg(kSelf), info->string_init_offset, arg0_ref, kNotVolatile);
+ break;
+ }
+ case 1: // Grab the code from the method*
+ if (direct_code == 0) {
+ int32_t offset = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ InstructionSetPointerSize(cu->instruction_set)).Int32Value();
+ cg->LoadWordDisp(arg0_ref, offset, cg->TargetPtrReg(kInvokeTgt));
+ }
+ break;
+ default:
+ return -1;
+ }
+ } else if (direct_code != 0 && direct_method != 0) {
switch (state) {
case 0: // Get the current Method* [sets kArg0]
if (direct_code != static_cast<uintptr_t>(-1)) {
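For the string-init path added above, NextSDCallInsn no longer resolves the
target through the dex cache: state 0 reads the StringFactory ArtMethod*
straight out of the Thread object at info->string_init_offset, and state 1
loads that method's entrypoint. A rough skeleton of the state-machine
convention these NextCallInsn helpers follow (shape assumed for
illustration, not the real signature):

    // The driver calls repeatedly with increasing state; each call emits
    // the instruction(s) for that state until the helper returns -1.
    int NextStringInitCallInsn(int state) {
      switch (state) {
        case 0:
          // kArg0 <- [Thread* + string_init_offset]   (the ArtMethod*)
          break;
        case 1:
          // kInvokeTgt <- [kArg0 + entrypoint_offset] (the code pointer)
          break;
        default:
          return -1;  // Sequence complete.
      }
      return state + 1;
    }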
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index 1ca8bb6..9319c64 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -68,7 +68,7 @@
NewLIR3(kMipsSlt, t0.GetReg(), rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
NewLIR3(kMipsSlt, t1.GetReg(), rl_src2.reg.GetHighReg(), rl_src1.reg.GetHighReg());
NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1.GetReg(), t0.GetReg());
- LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg, 0, NULL);
+ LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg, 0, nullptr);
NewLIR3(kMipsSltu, t0.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
NewLIR3(kMipsSltu, t1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetLowReg());
NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1.GetReg(), t0.GetReg());
@@ -128,7 +128,7 @@
break;
default:
LOG(FATAL) << "No support for ConditionCode: " << cond;
- return NULL;
+ return nullptr;
}
if (cmp_zero) {
branch = NewLIR2(br_op, src1.GetReg(), src2.GetReg());
@@ -278,7 +278,7 @@
// Implement as a branch-over.
// TODO: Conditional move?
LoadConstant(rs_dest, true_val);
- LIR* ne_branchover = OpCmpBranch(code, left_op, right_op, NULL);
+ LIR* ne_branchover = OpCmpBranch(code, left_op, right_op, nullptr);
LoadConstant(rs_dest, false_val);
LIR* target_label = NewLIR0(kPseudoTargetLabel);
ne_branchover->target = target_label;
@@ -447,7 +447,7 @@
// Test suspend flag, return target of taken suspend branch.
LIR* MipsMir2Lir::OpTestSuspend(LIR* target) {
OpRegImm(kOpSub, TargetPtrReg(kSuspend), 1);
- return OpCmpImmBranch((target == NULL) ? kCondEq : kCondNe, TargetPtrReg(kSuspend), 0, target);
+ return OpCmpImmBranch((target == nullptr) ? kCondEq : kCondNe, TargetPtrReg(kSuspend), 0, target);
}
// Decrement register and branch on condition.
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 8ab5422..95c61cd 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -566,7 +566,7 @@
/* Load value from base + scaled index. */
LIR* MipsMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
int scale, OpSize size) {
- LIR *first = NULL;
+ LIR *first = nullptr;
LIR *res;
MipsOpCode opcode = kMipsNop;
bool is64bit = cu_->target64 && r_dest.Is64Bit();
@@ -640,7 +640,7 @@
// Store value at base + scaled index.
LIR* MipsMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
int scale, OpSize size) {
- LIR *first = NULL;
+ LIR *first = nullptr;
MipsOpCode opcode = kMipsNop;
RegStorage t_reg = AllocTemp();
@@ -696,8 +696,8 @@
* rlp and then restore.
*/
LIR *res;
- LIR *load = NULL;
- LIR *load2 = NULL;
+ LIR *load = nullptr;
+ LIR *load2 = nullptr;
MipsOpCode opcode = kMipsNop;
bool short_form = IS_SIMM16(displacement);
bool is64bit = false;
@@ -857,8 +857,8 @@
LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
OpSize size) {
LIR *res;
- LIR *store = NULL;
- LIR *store2 = NULL;
+ LIR *store = nullptr;
+ LIR *store2 = nullptr;
MipsOpCode opcode = kMipsNop;
bool short_form = IS_SIMM16(displacement);
bool is64bit = false;
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 961cd4f..e3e87ec 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -104,19 +104,6 @@
return res;
}
-RegisterClass Mir2Lir::LocToRegClass(RegLocation loc) {
- RegisterClass res;
- if (loc.fp) {
- DCHECK(!loc.ref) << "At most, one of ref/fp may be set";
- res = kFPReg;
- } else if (loc.ref) {
- res = kRefReg;
- } else {
- res = kCoreReg;
- }
- return res;
-}
-
void Mir2Lir::LockArg(size_t in_position) {
RegStorage reg_arg = in_to_reg_storage_mapping_.GetReg(in_position);
@@ -560,25 +547,20 @@
if (!kLeafOptimization || !mir_graph_->MethodIsLeaf()) {
GenSuspendTest(opt_flags);
}
- DCHECK_EQ(LocToRegClass(rl_src[0]), ShortyToRegClass(cu_->shorty[0]));
- StoreValue(GetReturn(LocToRegClass(rl_src[0])), rl_src[0]);
+ StoreValue(GetReturn(ShortyToRegClass(cu_->shorty[0])), rl_src[0]);
break;
case Instruction::RETURN_WIDE:
if (!kLeafOptimization || !mir_graph_->MethodIsLeaf()) {
GenSuspendTest(opt_flags);
}
- DCHECK_EQ(LocToRegClass(rl_src[0]), ShortyToRegClass(cu_->shorty[0]));
- StoreValueWide(GetReturnWide(LocToRegClass(rl_src[0])), rl_src[0]);
- break;
-
- case Instruction::MOVE_RESULT_WIDE:
- StoreValueWide(rl_dest, GetReturnWide(LocToRegClass(rl_dest)));
+ StoreValueWide(GetReturnWide(ShortyToRegClass(cu_->shorty[0])), rl_src[0]);
break;
case Instruction::MOVE_RESULT:
+ case Instruction::MOVE_RESULT_WIDE:
case Instruction::MOVE_RESULT_OBJECT:
- StoreValue(rl_dest, GetReturn(LocToRegClass(rl_dest)));
+ // Already processed with invoke or filled-new-array.
break;
case Instruction::MOVE:
@@ -1205,7 +1187,6 @@
case kMirOpRangeCheck:
case kMirOpDivZeroCheck:
case kMirOpCheck:
- case kMirOpCheckPart2:
// Ignore these known opcodes
break;
default:
@@ -1237,7 +1218,7 @@
block_label_list_[block_id].flags.fixup = kFixupLabel;
AppendLIR(&block_label_list_[block_id]);
- LIR* head_lir = NULL;
+ LIR* head_lir = nullptr;
// If this is a catch block, export the start address.
if (bb->catch_entry) {
@@ -1263,7 +1244,7 @@
DCHECK_EQ(cfi_.GetCurrentCFAOffset(), frame_size_);
}
- for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
ResetRegPool();
if (cu_->disable_opt & (1 << kTrackLiveTemps)) {
ClobberAllTemps();
@@ -1287,27 +1268,13 @@
GenPrintLabel(mir);
// Remember the first LIR for this block.
- if (head_lir == NULL) {
+ if (head_lir == nullptr) {
head_lir = &block_label_list_[bb->id];
// Set the first label as a scheduling barrier.
DCHECK(!head_lir->flags.use_def_invalid);
head_lir->u.m.def_mask = &kEncodeAll;
}
- if (opcode == kMirOpCheck) {
- // Combine check and work halves of throwing instruction.
- MIR* work_half = mir->meta.throw_insn;
- mir->dalvikInsn = work_half->dalvikInsn;
- mir->optimization_flags = work_half->optimization_flags;
- mir->meta = work_half->meta; // Whatever the work_half had, we need to copy it.
- opcode = work_half->dalvikInsn.opcode;
- SSARepresentation* ssa_rep = work_half->ssa_rep;
- work_half->ssa_rep = mir->ssa_rep;
- mir->ssa_rep = ssa_rep;
- work_half->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpCheckPart2);
- work_half->meta.throw_insn = mir;
- }
-
if (MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
HandleExtendedMethodMIR(bb, mir);
continue;
@@ -1327,7 +1294,7 @@
cu_->NewTimingSplit("SpecialMIR2LIR");
// Find the first DalvikByteCode block.
DCHECK_EQ(mir_graph_->GetNumReachableBlocks(), mir_graph_->GetDfsOrder().size());
- BasicBlock*bb = NULL;
+ BasicBlock*bb = nullptr;
for (BasicBlockId dfs_id : mir_graph_->GetDfsOrder()) {
BasicBlock* candidate = mir_graph_->GetBasicBlock(dfs_id);
if (candidate->block_type == kDalvikByteCode) {
@@ -1335,11 +1302,11 @@
break;
}
}
- if (bb == NULL) {
+ if (bb == nullptr) {
return false;
}
DCHECK_EQ(bb->start_offset, 0);
- DCHECK(bb->first_mir_insn != NULL);
+ DCHECK(bb->first_mir_insn != nullptr);
// Get the first instruction.
MIR* mir = bb->first_mir_insn;
@@ -1361,17 +1328,17 @@
PreOrderDfsIterator iter(mir_graph_);
BasicBlock* curr_bb = iter.Next();
BasicBlock* next_bb = iter.Next();
- while (curr_bb != NULL) {
+ while (curr_bb != nullptr) {
MethodBlockCodeGen(curr_bb);
// If the fall_through block is no longer laid out consecutively, drop in a branch.
BasicBlock* curr_bb_fall_through = mir_graph_->GetBasicBlock(curr_bb->fall_through);
- if ((curr_bb_fall_through != NULL) && (curr_bb_fall_through != next_bb)) {
+ if ((curr_bb_fall_through != nullptr) && (curr_bb_fall_through != next_bb)) {
OpUnconditionalBranch(&block_label_list_[curr_bb->fall_through]);
}
curr_bb = next_bb;
do {
next_bb = iter.Next();
- } while ((next_bb != NULL) && (next_bb->block_type == kDead));
+ } while ((next_bb != nullptr) && (next_bb->block_type == kDead));
}
HandleSlowPaths();
}
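The mir_to_lir.cc changes above remove LocToRegClass entirely: the register
class for a return value is now derived from the method's shorty, which is
authoritative, rather than from a RegLocation whose ref/fp bits may not
match the callee. MOVE_RESULT, MOVE_RESULT_WIDE and MOVE_RESULT_OBJECT
become no-ops here because the preceding invoke or filled-new-array already
stored the result. A hedged sketch of the shorty mapping the new code leans
on (the real ShortyToRegClass is declared in mir_to_lir.h):

    enum RegClassSketch { kCoreRegS, kFPRegS, kRefRegS };

    RegClassSketch ShortyCharToRegClass(char shorty_type) {
      switch (shorty_type) {
        case 'L': return kRefRegS;   // reference
        case 'F':
        case 'D': return kFPRegS;    // float / double
        default:  return kCoreRegS;  // Z, B, S, C, I, J
      }
    }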
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index db59714..4fdc728 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -388,7 +388,7 @@
LIR* DefEnd() { return def_end_; }
void SetDefEnd(LIR* def_end) { def_end_ = def_end; }
void ResetDefBody() { def_start_ = def_end_ = nullptr; }
- // Find member of aliased set matching storage_used; return nullptr if none.
+ // Find member of aliased set matching storage_used; return null if none.
RegisterInfo* FindMatchingView(uint32_t storage_used) {
RegisterInfo* res = Master();
for (; res != nullptr; res = res->GetAliasChain()) {
@@ -605,7 +605,7 @@
char* ArenaStrdup(const char* str) {
size_t len = strlen(str) + 1;
char* res = arena_->AllocArray<char>(len, kArenaAllocMisc);
- if (res != NULL) {
+ if (res != nullptr) {
strncpy(res, str, len);
}
return res;
@@ -634,7 +634,6 @@
}
RegisterClass ShortyToRegClass(char shorty_type);
- RegisterClass LocToRegClass(RegLocation loc);
int ComputeFrameSize();
void Materialize();
virtual CompiledMethod* GetCompiledMethod();
@@ -651,7 +650,7 @@
void DumpPromotionMap();
void CodegenDump();
LIR* RawLIR(DexOffset dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
- int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = NULL);
+ int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = nullptr);
LIR* NewLIR0(int opcode);
LIR* NewLIR1(int opcode, int dest);
LIR* NewLIR2(int opcode, int dest, int src1);
@@ -846,7 +845,8 @@
RegLocation rl_src, int lit);
virtual void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2, int flags);
- void GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest, RegLocation rl_src);
+ void GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest, RegLocation rl_src,
+ RegisterClass return_reg_class);
void GenSuspendTest(int opt_flags);
void GenSuspendTestAndBranch(int opt_flags, LIR* target);
@@ -897,6 +897,10 @@
RegLocation arg0, RegLocation arg1,
RegLocation arg2,
bool safepoint_pc);
+ void CallRuntimeHelperRegLocationRegLocationRegLocationRegLocation(
+ QuickEntrypointEnum trampoline, RegLocation arg0, RegLocation arg1,
+ RegLocation arg2, RegLocation arg3, bool safepoint_pc);
+
void GenInvoke(CallInfo* info);
void GenInvokeNoInline(CallInfo* info);
virtual NextCallInsn GetNextSDCallInsn() = 0;
@@ -937,7 +941,11 @@
bool GenInlinedReferenceGetReferent(CallInfo* info);
virtual bool GenInlinedCharAt(CallInfo* info);
+ bool GenInlinedStringGetCharsNoCheck(CallInfo* info);
bool GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty);
+ bool GenInlinedStringFactoryNewStringFromBytes(CallInfo* info);
+ bool GenInlinedStringFactoryNewStringFromChars(CallInfo* info);
+ bool GenInlinedStringFactoryNewStringFromString(CallInfo* info);
virtual bool GenInlinedReverseBits(CallInfo* info, OpSize size);
bool GenInlinedReverseBytes(CallInfo* info, OpSize size);
virtual bool GenInlinedAbsInt(CallInfo* info);
@@ -954,7 +962,7 @@
virtual bool GenInlinedIndexOf(CallInfo* info, bool zero_based);
bool GenInlinedStringCompareTo(CallInfo* info);
virtual bool GenInlinedCurrentThread(CallInfo* info);
- bool GenInlinedUnsafeGet(CallInfo* info, bool is_long, bool is_volatile);
+ bool GenInlinedUnsafeGet(CallInfo* info, bool is_long, bool is_object, bool is_volatile);
bool GenInlinedUnsafePut(CallInfo* info, bool is_long, bool is_object,
bool is_volatile, bool is_ordered);
@@ -1120,8 +1128,8 @@
* @param base_reg The register holding the base address.
* @param offset The offset from the base.
* @param check_value The immediate to compare to.
- * @param target branch target (or nullptr)
- * @param compare output for getting LIR for comparison (or nullptr)
+ * @param target branch target (or null)
+ * @param compare output for getting LIR for comparison (or null)
* @returns The branch instruction that was generated.
*/
virtual LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
@@ -1459,26 +1467,6 @@
return InexpensiveConstantInt(value);
}
- /**
- * @brief Whether division by the given divisor can be converted to multiply by its reciprocal.
- * @param divisor A constant divisor bits of float type.
- * @return Returns true iff, x/divisor == x*(1.0f/divisor), for every float x.
- */
- bool CanDivideByReciprocalMultiplyFloat(int32_t divisor) {
- // True, if float value significand bits are 0.
- return ((divisor & 0x7fffff) == 0);
- }
-
- /**
- * @brief Whether division by the given divisor can be converted to multiply by its reciprocal.
- * @param divisor A constant divisor bits of double type.
- * @return Returns true iff, x/divisor == x*(1.0/divisor), for every double x.
- */
- bool CanDivideByReciprocalMultiplyDouble(int64_t divisor) {
- // True, if double value significand bits are 0.
- return ((divisor & ((UINT64_C(1) << 52) - 1)) == 0);
- }
-
// May be optimized by targets.
virtual void GenMonitorEnter(int opt_flags, RegLocation rl_src);
virtual void GenMonitorExit(int opt_flags, RegLocation rl_src);
@@ -1692,10 +1680,8 @@
* @param r_result the result register.
* @param trampoline the helper to call in slow path.
* @param imm the immediate passed to the helper.
- * @param r_method the register with ArtMethod* if available, otherwise RegStorage::Invalid().
*/
- void GenIfNullUseHelperImmMethod(
- RegStorage r_result, QuickEntrypointEnum trampoline, int imm, RegStorage r_method);
+ void GenIfNullUseHelperImm(RegStorage r_result, QuickEntrypointEnum trampoline, int imm);
/**
* @brief Generate code to retrieve Class* for another type to be used by SGET/SPUT.
@@ -1854,7 +1840,7 @@
// to deduplicate the masks.
ResourceMaskCache mask_cache_;
- // Record the MIR that generated a given safepoint (nullptr for prologue safepoints).
+ // Record the MIR that generated a given safepoint (null for prologue safepoints).
ArenaVector<std::pair<LIR*, MIR*>> safepoints_;
// The layout of the cu_->dex_file's dex cache arrays for PC-relative addressing.
@@ -1869,7 +1855,7 @@
// For architectures that don't have true PC-relative addressing (see pc_rel_temp_
// above) and also have a limited range of offsets for loads, it can be useful to
// know the minimum offset into the dex cache arrays, so we calculate that as well
- // if pc_rel_temp_ isn't nullptr.
+ // if pc_rel_temp_ isn't null.
uint32_t dex_cache_arrays_min_offset_;
dwarf::LazyDebugFrameOpCodeWriter cfi_;
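The two CanDivideByReciprocalMultiply* helpers deleted above encoded a
compact numeric fact: x / c can be rewritten as x * (1.0 / c) for every x
exactly when c is a power of two, which is when all significand bits of c
are zero. Restated standalone, mirroring the deleted bodies:

    #include <cstdint>

    // float has 23 significand bits, double has 52.
    bool FloatDivisorIsPowerOfTwo(int32_t divisor_bits) {
      return (divisor_bits & 0x7fffff) == 0;
    }
    bool DoubleDivisorIsPowerOfTwo(int64_t divisor_bits) {
      return (divisor_bits & ((INT64_C(1) << 52) - 1)) == 0;
    }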
diff --git a/compiler/dex/quick/quick_cfi_test.cc b/compiler/dex/quick/quick_cfi_test.cc
index d276457..b3c7355 100644
--- a/compiler/dex/quick/quick_cfi_test.cc
+++ b/compiler/dex/quick/quick_cfi_test.cc
@@ -76,7 +76,7 @@
isa_features.reset(InstructionSetFeatures::FromVariant(isa, "default", &error));
CompilerDriver driver(&compiler_options, &verification_results, &method_inliner_map,
Compiler::kQuick, isa, isa_features.get(),
- false, 0, 0, 0, false, false, "", 0, -1, "");
+ false, nullptr, nullptr, nullptr, 0, false, false, "", 0, -1, "");
ClassLinker* linker = nullptr;
CompilationUnit cu(&pool, isa, &driver, linker);
DexFile::CodeItem code_item { 0, 0, 0, 0, 0, 0, { 0 } }; // NOLINT
@@ -100,7 +100,7 @@
}
}
m2l->AdjustSpillMask();
- m2l->GenEntrySequence(NULL, m2l->LocCReturnRef());
+ m2l->GenEntrySequence(nullptr, m2l->LocCReturnRef());
m2l->GenExitSequence();
m2l->HandleSlowPaths();
m2l->AssembleLIR();
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index fc3e687..7ca4382 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -102,7 +102,7 @@
static_assert(sizeof(kDisabledOptimizationsPerISA) == 8 * sizeof(uint32_t),
"kDisabledOpts unexpected");
-// Supported shorty types per instruction set. nullptr means that all are available.
+// Supported shorty types per instruction set. null means that all are available.
// Z : boolean
// B : byte
// S : short
@@ -403,7 +403,6 @@
kMirOpRangeCheck,
kMirOpDivZeroCheck,
kMirOpCheck,
- kMirOpCheckPart2,
kMirOpSelect,
};
@@ -422,7 +421,7 @@
Instruction::INVOKE_VIRTUAL_RANGE_QUICK,
};
-// Unsupported opcodes. nullptr can be used when everything is supported. Size of the lists is
+// Unsupported opcodes. null can be used when everything is supported. Size of the lists is
// recorded below.
static const int* kUnsupportedOpcodes[] = {
// 0 = kNone.
@@ -515,7 +514,7 @@
for (unsigned int idx = 0; idx < cu->mir_graph->GetNumBlocks(); idx++) {
BasicBlock* bb = cu->mir_graph->GetBasicBlock(idx);
- if (bb == NULL) continue;
+ if (bb == nullptr) continue;
if (bb->block_type == kDead) continue;
for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
int opcode = mir->dalvikInsn.opcode;
@@ -575,7 +574,7 @@
// (1 << kNullCheckElimination) |
// (1 << kClassInitCheckElimination) |
// (1 << kGlobalValueNumbering) |
- (1 << kGvnDeadCodeElimination) |
+ // (1 << kGvnDeadCodeElimination) |
// (1 << kLocalValueNumbering) |
// (1 << kPromoteRegs) |
// (1 << kTrackLiveTemps) |
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index e779479..8ec86fa 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -935,7 +935,7 @@
RegStorage my_reg = info->GetReg();
RegStorage partner_reg = info->Partner();
RegisterInfo* partner = GetRegInfo(partner_reg);
- DCHECK(partner != NULL);
+ DCHECK(partner != nullptr);
DCHECK(partner->IsWide());
DCHECK_EQ(my_reg.GetReg(), partner->Partner().GetReg());
DCHECK(partner->IsLive());
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index af19f5e..8467b71 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -409,7 +409,7 @@
EXT_0F_ENCODING_MAP(Paddq, 0x66, 0xD4, REG_DEF0_USE0),
EXT_0F_ENCODING_MAP(Psadbw, 0x66, 0xF6, REG_DEF0_USE0),
EXT_0F_ENCODING_MAP(Addps, 0x00, 0x58, REG_DEF0_USE0),
- EXT_0F_ENCODING_MAP(Addpd, 0xF2, 0x58, REG_DEF0_USE0),
+ EXT_0F_ENCODING_MAP(Addpd, 0x66, 0x58, REG_DEF0_USE0),
EXT_0F_ENCODING_MAP(Psubb, 0x66, 0xF8, REG_DEF0_USE0),
EXT_0F_ENCODING_MAP(Psubw, 0x66, 0xF9, REG_DEF0_USE0),
EXT_0F_ENCODING_MAP(Psubd, 0x66, 0xFA, REG_DEF0_USE0),
@@ -428,7 +428,7 @@
{ kX86PextrwRRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0x66, 0, 0x0F, 0xC5, 0x00, 0, 0, 1, false }, "PextwRRI", "!0r,!1r,!2d" },
{ kX86PextrdRRI, kRegRegImmStore, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0x66, 0, 0x0F, 0x3A, 0x16, 0, 0, 1, false }, "PextdRRI", "!0r,!1r,!2d" },
{ kX86PextrbMRI, kMemRegImm, IS_QUAD_OP | REG_USE02 | IS_STORE, { 0x66, 0, 0x0F, 0x3A, 0x16, 0, 0, 1, false }, "PextrbMRI", "[!0r+!1d],!2r,!3d" },
- { kX86PextrwMRI, kMemRegImm, IS_QUAD_OP | REG_USE02 | IS_STORE, { 0x66, 0, 0x0F, 0x3A, 0x16, 0, 0, 1, false }, "PextrwMRI", "[!0r+!1d],!2r,!3d" },
+ { kX86PextrwMRI, kMemRegImm, IS_QUAD_OP | REG_USE02 | IS_STORE, { 0x66, 0, 0x0F, 0x3A, 0x15, 0, 0, 1, false }, "PextrwMRI", "[!0r+!1d],!2r,!3d" },
{ kX86PextrdMRI, kMemRegImm, IS_QUAD_OP | REG_USE02 | IS_STORE, { 0x66, 0, 0x0F, 0x3A, 0x16, 0, 0, 1, false }, "PextrdMRI", "[!0r+!1d],!2r,!3d" },
{ kX86PshuflwRRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0xF2, 0, 0x0F, 0x70, 0, 0, 0, 1, false }, "PshuflwRRI", "!0r,!1r,!2d" },
@@ -1627,13 +1627,13 @@
* instruction. In those cases we will try to substitute a new code
* sequence or request that the trace be shortened and retried.
*/
-AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
+AssemblerStatus X86Mir2Lir::AssembleInstructions(LIR* first_lir_insn, CodeOffset start_addr) {
UNUSED(start_addr);
LIR *lir;
AssemblerStatus res = kSuccess; // Assume success
const bool kVerbosePcFixup = false;
- for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+ for (lir = first_lir_insn; lir != nullptr; lir = NEXT_LIR(lir)) {
if (IsPseudoLirOp(lir->opcode)) {
continue;
}
@@ -1646,7 +1646,7 @@
switch (lir->opcode) {
case kX86Jcc8: {
LIR *target_lir = lir->target;
- DCHECK(target_lir != NULL);
+ DCHECK(target_lir != nullptr);
int delta = 0;
CodeOffset pc;
if (IS_SIMM8(lir->operands[0])) {
@@ -1679,7 +1679,7 @@
}
case kX86Jcc32: {
LIR *target_lir = lir->target;
- DCHECK(target_lir != NULL);
+ DCHECK(target_lir != nullptr);
CodeOffset pc = lir->offset + 6 /* 2 byte opcode + rel32 */;
CodeOffset target = target_lir->offset;
int delta = target - pc;
@@ -1695,7 +1695,7 @@
}
case kX86Jecxz8: {
LIR *target_lir = lir->target;
- DCHECK(target_lir != NULL);
+ DCHECK(target_lir != nullptr);
CodeOffset pc;
pc = lir->offset + 2; // opcode + rel8
CodeOffset target = target_lir->offset;
@@ -1706,7 +1706,7 @@
}
case kX86Jmp8: {
LIR *target_lir = lir->target;
- DCHECK(target_lir != NULL);
+ DCHECK(target_lir != nullptr);
int delta = 0;
CodeOffset pc;
if (IS_SIMM8(lir->operands[0])) {
@@ -1738,7 +1738,7 @@
}
case kX86Jmp32: {
LIR *target_lir = lir->target;
- DCHECK(target_lir != NULL);
+ DCHECK(target_lir != nullptr);
CodeOffset pc = lir->offset + 5 /* opcode + rel32 */;
CodeOffset target = target_lir->offset;
int delta = target - pc;
@@ -1748,7 +1748,7 @@
default:
if (lir->flags.fixup == kFixupLoad) {
LIR *target_lir = lir->target;
- DCHECK(target_lir != NULL);
+ DCHECK(target_lir != nullptr);
CodeOffset target = target_lir->offset;
// Handle 64 bit RIP addressing.
if (lir->operands[1] == kRIPReg) {
@@ -1950,7 +1950,7 @@
LIR* lir;
int offset = 0;
- for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+ for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
lir->offset = offset;
if (LIKELY(!IsPseudoLirOp(lir->opcode))) {
if (!lir->flags.is_nop) {
@@ -2034,7 +2034,7 @@
*/
while (true) {
- AssemblerStatus res = AssembleInstructions(0);
+ AssemblerStatus res = AssembleInstructions(first_lir_insn_, 0);
if (res == kSuccess) {
break;
} else {
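Besides refactoring AssembleInstructions to take its LIR list as a
parameter (which lets the new quick_assemble_x86_test drive it on a single
hand-built instruction), the hunks above fix two encoding-table bugs: addpd
needs the 0x66 operand-size prefix (0xF2 0x0F 0x58 encodes scalar addsd),
and the memory form of pextrw is opcode 0x0F 0x3A 0x15 (0x16 is pextrd). A
tiny self-check of the prefix distinction, using standard SSE encodings
rather than ART code:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint8_t addpd[] = {0x66, 0x0F, 0x58, 0xC1};  // addpd xmm0, xmm1
      const uint8_t addsd[] = {0xF2, 0x0F, 0x58, 0xC1};  // addsd xmm0, xmm1
      // Same opcode bytes; only the mandatory prefix selects packed
      // versus scalar double addition.
      assert(addpd[1] == addsd[1] && addpd[2] == addsd[2]);
      assert(addpd[0] != addsd[0]);
      return 0;
    }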
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index d7a5eb0..2495757 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -19,6 +19,7 @@
#include "codegen_x86.h"
#include "base/logging.h"
+#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
@@ -80,7 +81,7 @@
// Bounds check - if < 0 or >= size continue following switch
OpRegImm(kOpCmp, keyReg, size - 1);
- LIR* branch_over = OpCondBranch(kCondHi, NULL);
+ LIR* branch_over = OpCondBranch(kCondHi, nullptr);
RegStorage addr_for_jump;
if (cu_->target64) {
@@ -343,11 +344,20 @@
int X86Mir2Lir::X86NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
int state, const MethodReference& target_method,
uint32_t,
- uintptr_t direct_code, uintptr_t direct_method,
+ uintptr_t direct_code ATTRIBUTE_UNUSED, uintptr_t direct_method,
InvokeType type) {
- UNUSED(info, direct_code);
X86Mir2Lir* cg = static_cast<X86Mir2Lir*>(cu->cg.get());
- if (direct_method != 0) {
+ if (info->string_init_offset != 0) {
+ RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
+ switch (state) {
+ case 0: { // Grab target method* from thread pointer
+ cg->NewLIR2(kX86Mov32RT, arg0_ref.GetReg(), info->string_init_offset);
+ break;
+ }
+ default:
+ return -1;
+ }
+ } else if (direct_method != 0) {
switch (state) {
case 0: // Get the current Method* [sets kArg0]
if (direct_method != static_cast<uintptr_t>(-1)) {
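The x86 string-init sequence above is shorter than the MIPS one because the
Thread object is addressed through a segment register, so fetching the
StringFactory method* is a single thread-relative move (kX86Mov32RT), and
the call can later go straight through the ArtMethod*, so no entrypoint-load
state appears here. Functionally the load is just (illustrative sketch, not
ART types):

    #include <cstdint>
    #include <cstring>

    // kArg0 <- [Thread* + string_init_offset]
    const void* LoadStringInitMethod(const uint8_t* thread_self,
                                     uint32_t string_init_offset) {
      const void* method;
      std::memcpy(&method, thread_self + string_init_offset, sizeof(method));
      return method;
    }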
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 72580a3..5a46520 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -432,7 +432,7 @@
int AssignInsnOffsets();
void AssignOffsets();
- AssemblerStatus AssembleInstructions(CodeOffset start_addr);
+ AssemblerStatus AssembleInstructions(LIR* first_lir_insn, CodeOffset start_addr);
size_t ComputeSize(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_index,
int32_t raw_base, int32_t displacement);
@@ -972,6 +972,9 @@
static const X86EncodingMap EncodingMap[kX86Last];
friend std::ostream& operator<<(std::ostream& os, const X86OpCode& rhs);
+ friend class QuickAssembleX86Test;
+ friend class QuickAssembleX86MacroTest;
+ friend class QuickAssembleX86LowLevelTest;
DISALLOW_COPY_AND_ASSIGN(X86Mir2Lir);
};
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index cfe0480..8e81746 100755
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -309,7 +309,8 @@
branch_normal->target = NewLIR0(kPseudoTargetLabel);
StoreValueWide(rl_dest, rl_result);
} else {
- GenConversionCall(kQuickF2l, rl_dest, rl_src);
+ CheckEntrypointTypes<kQuickF2l, int64_t, float>(); // int64_t -> kCoreReg
+ GenConversionCall(kQuickF2l, rl_dest, rl_src, kCoreReg);
}
return;
case Instruction::DOUBLE_TO_LONG:
@@ -334,7 +335,8 @@
branch_normal->target = NewLIR0(kPseudoTargetLabel);
StoreValueWide(rl_dest, rl_result);
} else {
- GenConversionCall(kQuickD2l, rl_dest, rl_src);
+ CheckEntrypointTypes<kQuickD2l, int64_t, double>(); // int64_t -> kCoreReg
+ GenConversionCall(kQuickD2l, rl_dest, rl_src, kCoreReg);
}
return;
default:
@@ -482,13 +484,13 @@
} else {
NewLIR2(kX86UcomisdRR, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
}
- LIR* branch = NULL;
+ LIR* branch = nullptr;
if (unordered_gt) {
branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
}
// If the result reg can't be byte accessed, use a jump and move instead of a set.
if (!IsByteRegister(rl_result.reg)) {
- LIR* branch2 = NULL;
+ LIR* branch2 = nullptr;
if (unordered_gt) {
branch2 = NewLIR2(kX86Jcc8, 0, kX86CondA);
NewLIR2(kX86Mov32RI, rl_result.reg.GetReg(), 0x0);
@@ -511,7 +513,7 @@
bool is_double) {
LIR* taken = &block_label_list_[bb->taken];
LIR* not_taken = &block_label_list_[bb->fall_through];
- LIR* branch = NULL;
+ LIR* branch = nullptr;
RegLocation rl_src1;
RegLocation rl_src2;
if (is_double) {
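GenConversionCall now takes the return register class explicitly because it
can no longer be inferred from the RegLocation: kQuickF2l and kQuickD2l
return int64_t, so the result arrives in core registers even though the
inputs are floating point, and the CheckEntrypointTypes<> calls document
that signature at compile time. A minimal sketch of that kind of
compile-time signature check (assumed shape, with a hypothetical stub
standing in for the entrypoint):

    #include <cstdint>
    #include <type_traits>

    int64_t QuickF2lStub(float);  // hypothetical entrypoint signature

    template <typename ExpectedRet, typename ActualRet>
    void CheckReturnType() {
      static_assert(std::is_same<ExpectedRet, ActualRet>::value,
                    "entrypoint return type mismatch");
    }

    void Demo() {
      // Unevaluated decltype: compiles away entirely if the types match.
      CheckReturnType<int64_t, decltype(QuickF2lStub(0.0f))>();
    }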
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 1043815..943bfc0 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -1229,7 +1229,7 @@
LockTemp(rs_r0);
RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
- RegLocation rl_new_value = LoadValue(rl_src_new_value, LocToRegClass(rl_src_new_value));
+ RegLocation rl_new_value = LoadValue(rl_src_new_value, is_object ? kRefReg : kCoreReg);
if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
// Mark card for object assuming new value is stored.
@@ -1569,7 +1569,7 @@
} else {
OpTlsCmp(Thread::ThreadFlagsOffset<4>(), 0);
}
- return OpCondBranch((target == NULL) ? kCondNe : kCondEq, target);
+ return OpCondBranch((target == nullptr) ? kCondNe : kCondEq, target);
}
// Decrement register and branch on condition
@@ -3005,7 +3005,7 @@
// Assume that there is no match.
LoadConstant(result_reg, 0);
- LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL);
+ LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, nullptr);
// We will use this register to compare to memory below.
// References are 32 bit in memory, and 64 bit in registers (in 64 bit mode).
diff --git a/compiler/dex/quick/x86/quick_assemble_x86_test.cc b/compiler/dex/quick/x86/quick_assemble_x86_test.cc
new file mode 100644
index 0000000..f58f206
--- /dev/null
+++ b/compiler/dex/quick/x86/quick_assemble_x86_test.cc
@@ -0,0 +1,270 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dex/quick/quick_compiler.h"
+#include "dex/pass_manager.h"
+#include "dex/verification_results.h"
+#include "dex/quick/dex_file_to_method_inliner_map.h"
+#include "runtime/dex_file.h"
+#include "driver/compiler_options.h"
+#include "driver/compiler_driver.h"
+#include "codegen_x86.h"
+#include "gtest/gtest.h"
+#include "utils/assembler_test_base.h"
+
+namespace art {
+
+class QuickAssembleX86TestBase : public testing::Test {
+ protected:
+ X86Mir2Lir* Prepare(InstructionSet target) {
+ isa_ = target;
+ pool_.reset(new ArenaPool());
+ compiler_options_.reset(new CompilerOptions(
+ CompilerOptions::kDefaultCompilerFilter,
+ CompilerOptions::kDefaultHugeMethodThreshold,
+ CompilerOptions::kDefaultLargeMethodThreshold,
+ CompilerOptions::kDefaultSmallMethodThreshold,
+ CompilerOptions::kDefaultTinyMethodThreshold,
+ CompilerOptions::kDefaultNumDexMethodsThreshold,
+ false,
+ CompilerOptions::kDefaultTopKProfileThreshold,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ nullptr,
+ new PassManagerOptions(),
+ nullptr,
+ false));
+ verification_results_.reset(new VerificationResults(compiler_options_.get()));
+ method_inliner_map_.reset(new DexFileToMethodInlinerMap());
+ compiler_driver_.reset(new CompilerDriver(
+ compiler_options_.get(),
+ verification_results_.get(),
+ method_inliner_map_.get(),
+ Compiler::kQuick,
+ isa_,
+ nullptr,
+ false,
+ nullptr,
+ nullptr,
+ nullptr,
+ 0,
+ false,
+ false,
+ "",
+ 0,
+ -1,
+ ""));
+ cu_.reset(new CompilationUnit(pool_.get(), isa_, compiler_driver_.get(), nullptr));
+ DexFile::CodeItem* code_item = static_cast<DexFile::CodeItem*>(
+ cu_->arena.Alloc(sizeof(DexFile::CodeItem), kArenaAllocMisc));
+ memset(code_item, 0, sizeof(DexFile::CodeItem));
+ cu_->mir_graph.reset(new MIRGraph(cu_.get(), &cu_->arena));
+ cu_->mir_graph->current_code_item_ = code_item;
+ cu_->cg.reset(QuickCompiler::GetCodeGenerator(cu_.get(), nullptr));
+
+ test_helper_.reset(new AssemblerTestInfrastructure(
+ isa_ == kX86 ? "x86" : "x86_64",
+ "as",
+ isa_ == kX86 ? " --32" : "",
+ "objdump",
+ " -h",
+ "objdump",
+ isa_ == kX86 ?
+ " -D -bbinary -mi386 --no-show-raw-insn" :
+ " -D -bbinary -mi386:x86-64 -Mx86-64,addr64,data32 --no-show-raw-insn",
+ nullptr));
+
+ X86Mir2Lir* m2l = static_cast<X86Mir2Lir*>(cu_->cg.get());
+ m2l->CompilerInitializeRegAlloc();
+ return m2l;
+ }
+
+ void Release() {
+ cu_.reset();
+ compiler_driver_.reset();
+ method_inliner_map_.reset();
+ verification_results_.reset();
+ compiler_options_.reset();
+ pool_.reset();
+
+ test_helper_.reset();
+ }
+
+ void TearDown() OVERRIDE {
+ Release();
+ }
+
+ bool CheckTools(InstructionSet target) {
+ Prepare(target);
+ bool result = test_helper_->CheckTools();
+ Release();
+ return result;
+ }
+
+ std::unique_ptr<CompilationUnit> cu_;
+ std::unique_ptr<AssemblerTestInfrastructure> test_helper_;
+
+ private:
+ InstructionSet isa_;
+ std::unique_ptr<ArenaPool> pool_;
+ std::unique_ptr<CompilerOptions> compiler_options_;
+ std::unique_ptr<VerificationResults> verification_results_;
+ std::unique_ptr<DexFileToMethodInlinerMap> method_inliner_map_;
+ std::unique_ptr<CompilerDriver> compiler_driver_;
+};
+
+class QuickAssembleX86LowLevelTest : public QuickAssembleX86TestBase {
+ protected:
+ void Test(InstructionSet target, std::string test_name, std::string gcc_asm,
+ int opcode, int op0 = 0, int op1 = 0, int op2 = 0, int op3 = 0, int op4 = 0) {
+ X86Mir2Lir* m2l = Prepare(target);
+
+ LIR lir;
+ memset(&lir, 0, sizeof(LIR));
+ lir.opcode = opcode;
+ lir.operands[0] = op0;
+ lir.operands[1] = op1;
+ lir.operands[2] = op2;
+ lir.operands[3] = op3;
+ lir.operands[4] = op4;
+ lir.flags.size = m2l->GetInsnSize(&lir);
+
+ AssemblerStatus status = m2l->AssembleInstructions(&lir, 0);
+ // We don't expect a retry.
+ ASSERT_EQ(status, AssemblerStatus::kSuccess);
+
+ // Need a "base" std::vector.
+ std::vector<uint8_t> buffer(m2l->code_buffer_.begin(), m2l->code_buffer_.end());
+ test_helper_->Driver(buffer, gcc_asm, test_name);
+
+ Release();
+ }
+};
+
+TEST_F(QuickAssembleX86LowLevelTest, Addpd) {
+ Test(kX86, "Addpd", "addpd %xmm1, %xmm0\n", kX86AddpdRR,
+ RegStorage::Solo128(0).GetReg(), RegStorage::Solo128(1).GetReg());
+ Test(kX86_64, "Addpd", "addpd %xmm1, %xmm0\n", kX86AddpdRR,
+ RegStorage::Solo128(0).GetReg(), RegStorage::Solo128(1).GetReg());
+}
+
+TEST_F(QuickAssembleX86LowLevelTest, Subpd) {
+ Test(kX86, "Subpd", "subpd %xmm1, %xmm0\n", kX86SubpdRR,
+ RegStorage::Solo128(0).GetReg(), RegStorage::Solo128(1).GetReg());
+ Test(kX86_64, "Subpd", "subpd %xmm1, %xmm0\n", kX86SubpdRR,
+ RegStorage::Solo128(0).GetReg(), RegStorage::Solo128(1).GetReg());
+}
+
+TEST_F(QuickAssembleX86LowLevelTest, Mulpd) {
+ Test(kX86, "Mulpd", "mulpd %xmm1, %xmm0\n", kX86MulpdRR,
+ RegStorage::Solo128(0).GetReg(), RegStorage::Solo128(1).GetReg());
+ Test(kX86_64, "Mulpd", "mulpd %xmm1, %xmm0\n", kX86MulpdRR,
+ RegStorage::Solo128(0).GetReg(), RegStorage::Solo128(1).GetReg());
+}
+
+TEST_F(QuickAssembleX86LowLevelTest, Pextrw) {
+ Test(kX86, "Pextrw", "pextrw $7, %xmm3, 8(%eax)\n", kX86PextrwMRI,
+ RegStorage::Solo32(r0).GetReg(), 8, RegStorage::Solo128(3).GetReg(), 7);
+ Test(kX86_64, "Pextrw", "pextrw $7, %xmm8, 8(%r10)\n", kX86PextrwMRI,
+ RegStorage::Solo64(r10q).GetReg(), 8, RegStorage::Solo128(8).GetReg(), 7);
+}
+
+class QuickAssembleX86MacroTest : public QuickAssembleX86TestBase {
+ protected:
+ typedef void (X86Mir2Lir::*AsmFn)(MIR*);
+
+ void TestVectorFn(InstructionSet target,
+ Instruction::Code opcode,
+ AsmFn f,
+ std::string inst_string) {
+ X86Mir2Lir *m2l = Prepare(target);
+
+ // Create a vector MIR.
+ MIR* mir = cu_->mir_graph->NewMIR();
+ mir->dalvikInsn.opcode = opcode;
+ mir->dalvikInsn.vA = 0; // Destination and source.
+ mir->dalvikInsn.vB = 1; // Source.
+ int vector_size = 128;
+ int vector_type = kDouble;
+ mir->dalvikInsn.vC = (vector_type << 16) | vector_size; // Type size.
+ (m2l->*f)(mir);
+ m2l->AssembleLIR();
+
+ std::string gcc_asm = inst_string + " %xmm1, %xmm0\n";
+ // Need a "base" std::vector.
+ std::vector<uint8_t> buffer(m2l->code_buffer_.begin(), m2l->code_buffer_.end());
+ test_helper_->Driver(buffer, gcc_asm, inst_string);
+
+ Release();
+ }
+
+ // Tests are member functions as many of the assembler functions are protected or private,
+ // and it would be inelegant to define ART_FRIEND_TEST for all the tests.
+
+ void TestAddpd() {
+ TestVectorFn(kX86,
+ static_cast<Instruction::Code>(kMirOpPackedAddition),
+ &X86Mir2Lir::GenAddVector,
+ "addpd");
+ TestVectorFn(kX86_64,
+ static_cast<Instruction::Code>(kMirOpPackedAddition),
+ &X86Mir2Lir::GenAddVector,
+ "addpd");
+ }
+
+ void TestSubpd() {
+ TestVectorFn(kX86,
+ static_cast<Instruction::Code>(kMirOpPackedSubtract),
+ &X86Mir2Lir::GenSubtractVector,
+ "subpd");
+ TestVectorFn(kX86_64,
+ static_cast<Instruction::Code>(kMirOpPackedSubtract),
+ &X86Mir2Lir::GenSubtractVector,
+ "subpd");
+ }
+
+ void TestMulpd() {
+ TestVectorFn(kX86,
+ static_cast<Instruction::Code>(kMirOpPackedMultiply),
+ &X86Mir2Lir::GenMultiplyVector,
+ "mulpd");
+ TestVectorFn(kX86_64,
+ static_cast<Instruction::Code>(kMirOpPackedMultiply),
+ &X86Mir2Lir::GenMultiplyVector,
+ "mulpd");
+ }
+};
+
+TEST_F(QuickAssembleX86MacroTest, CheckTools) {
+ ASSERT_TRUE(CheckTools(kX86)) << "x86 tools not found.";
+ ASSERT_TRUE(CheckTools(kX86_64)) << "x86_64 tools not found.";
+}
+
+#define DECLARE_TEST(name) \
+ TEST_F(QuickAssembleX86MacroTest, name) { \
+ Test ## name(); \
+ }
+
+DECLARE_TEST(Addpd)
+DECLARE_TEST(Subpd)
+DECLARE_TEST(Mulpd)
+
+} // namespace art
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index a16e242..2f211da 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -1281,7 +1281,7 @@
RegLocation rl_return = GetReturn(kCoreReg);
RegLocation rl_dest = InlineTarget(info);
- // Is the string non-NULL?
+ // Is the string non-null?
LoadValueDirectFixed(rl_obj, rs_rDX);
GenNullCheck(rs_rDX, info->opt_flags);
info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've null checked.
@@ -1302,10 +1302,6 @@
int value_offset = mirror::String::ValueOffset().Int32Value();
// Location of count within the String object.
int count_offset = mirror::String::CountOffset().Int32Value();
- // Starting offset within data array.
- int offset_offset = mirror::String::OffsetOffset().Int32Value();
- // Start of char data with array_.
- int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
// Compute the number of words to search in to rCX.
Load32Disp(rs_rDX, count_offset, rs_rCX);
@@ -1388,15 +1384,13 @@
// Load the address of the string into EDI.
// In case of start index we have to add the address to existing value in EDI.
- // The string starts at VALUE(String) + 2 * OFFSET(String) + DATA_OFFSET.
if (zero_based || (!zero_based && rl_start.is_const && start_value == 0)) {
- Load32Disp(rs_rDX, offset_offset, rs_rDI);
+ OpRegRegImm(kOpAdd, rs_rDI, rs_rDX, value_offset);
} else {
- OpRegMem(kOpAdd, rs_rDI, rs_rDX, offset_offset);
+ OpRegImm(kOpLsl, rs_rDI, 1);
+ OpRegReg(kOpAdd, rs_rDI, rs_rDX);
+ OpRegImm(kOpAdd, rs_rDI, value_offset);
}
- OpRegImm(kOpLsl, rs_rDI, 1);
- OpRegMem(kOpAdd, rs_rDI, rs_rDX, value_offset);
- OpRegImm(kOpAdd, rs_rDI, data_offset);
// EDI now contains the start of the string to be searched.
// We are all prepared to do the search for the character.
@@ -2423,24 +2417,15 @@
int value_offset = mirror::String::ValueOffset().Int32Value();
// Location of count
int count_offset = mirror::String::CountOffset().Int32Value();
- // Starting offset within data array
- int offset_offset = mirror::String::OffsetOffset().Int32Value();
- // Start of char data with array_
- int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
RegLocation rl_obj = info->args[0];
RegLocation rl_idx = info->args[1];
rl_obj = LoadValue(rl_obj, kRefReg);
- // X86 wants to avoid putting a constant index into a register.
- if (!rl_idx.is_const) {
- rl_idx = LoadValue(rl_idx, kCoreReg);
- }
+ rl_idx = LoadValue(rl_idx, kCoreReg);
RegStorage reg_max;
GenNullCheck(rl_obj.reg, info->opt_flags);
bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
LIR* range_check_branch = nullptr;
- RegStorage reg_off;
- RegStorage reg_ptr;
if (range_check) {
// On x86, we can compare to memory directly
// Set up a launch pad to allow retry in case of bounds violation
@@ -2456,24 +2441,11 @@
range_check_branch = OpCondBranch(kCondUge, nullptr);
}
}
- reg_off = AllocTemp();
- reg_ptr = AllocTempRef();
- Load32Disp(rl_obj.reg, offset_offset, reg_off);
- LoadRefDisp(rl_obj.reg, value_offset, reg_ptr, kNotVolatile);
- if (rl_idx.is_const) {
- OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg));
- } else {
- OpRegReg(kOpAdd, reg_off, rl_idx.reg);
- }
- FreeTemp(rl_obj.reg);
- if (rl_idx.location == kLocPhysReg) {
- FreeTemp(rl_idx.reg);
- }
RegLocation rl_dest = InlineTarget(info);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.reg, kUnsignedHalf);
- FreeTemp(reg_off);
- FreeTemp(reg_ptr);
+ LoadBaseIndexedDisp(rl_obj.reg, rl_idx.reg, 1, value_offset, rl_result.reg, kUnsignedHalf);
+ FreeTemp(rl_idx.reg);
+ FreeTemp(rl_obj.reg);
StoreValue(rl_dest, rl_result);
if (range_check) {
DCHECK(range_check_branch != nullptr);
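The target_x86.cc hunks apply the same String layout change to the x86
intrinsics: indexOf and charAt now address characters at
string + value_offset + 2 * index, dropping the offset_ load and the array
data_offset, which is also why the constant-index special case disappears
(the index is simply loaded into a register for the scaled, indexed
access). The start address the new indexOf sequence computes, restated as
plain C++ (illustrative):

    #include <cstdint>

    // start == 0:  edi = string + value_offset
    // start != 0:  edi = (start << 1) + string + value_offset
    const uint8_t* SearchStart(const uint8_t* string_obj,
                               int32_t value_offset, int32_t start) {
      return string_obj + value_offset + (start << 1);
    }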
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index efcb9ee..61a1bec 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -578,7 +578,7 @@
} else if (pc_rel_base_reg_.Valid() || cu_->target64) {
// We will load the value from the literal area.
LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWideData(&literal_list_, val_lo, val_hi);
}
@@ -642,8 +642,8 @@
LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
int displacement, RegStorage r_dest, OpSize size) {
- LIR *load = NULL;
- LIR *load2 = NULL;
+ LIR *load = nullptr;
+ LIR *load2 = nullptr;
bool is_array = r_index.Valid();
bool pair = r_dest.IsPair();
bool is64bit = ((size == k64) || (size == kDouble));
@@ -763,7 +763,7 @@
}
}
- // Always return first load generated as this might cause a fault if base is nullptr.
+ // Always return first load generated as this might cause a fault if base is null.
return load;
}
@@ -791,8 +791,8 @@
LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
int displacement, RegStorage r_src, OpSize size,
int opt_flags) {
- LIR *store = NULL;
- LIR *store2 = NULL;
+ LIR *store = nullptr;
+ LIR *store2 = nullptr;
bool is_array = r_index.Valid();
bool pair = r_src.IsPair();
bool is64bit = (size == k64) || (size == kDouble);
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index 197f66d..939bf40 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -26,15 +26,15 @@
void MIRGraph::ClearAllVisitedFlags() {
AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
bb->visited = false;
}
}
BasicBlock* MIRGraph::NeedsVisit(BasicBlock* bb) {
- if (bb != NULL) {
+ if (bb != nullptr) {
if (bb->visited || bb->hidden) {
- bb = NULL;
+ bb = nullptr;
}
}
return bb;
@@ -42,13 +42,13 @@
BasicBlock* MIRGraph::NextUnvisitedSuccessor(BasicBlock* bb) {
BasicBlock* res = NeedsVisit(GetBasicBlock(bb->fall_through));
- if (res == NULL) {
+ if (res == nullptr) {
res = NeedsVisit(GetBasicBlock(bb->taken));
- if (res == NULL) {
+ if (res == nullptr) {
if (bb->successor_block_list_type != kNotUsed) {
for (SuccessorBlockInfo* sbi : bb->successor_blocks) {
res = NeedsVisit(GetBasicBlock(sbi->block));
- if (res != NULL) {
+ if (res != nullptr) {
break;
}
}
@@ -75,7 +75,7 @@
while (!succ.empty()) {
BasicBlock* curr = succ.back();
BasicBlock* next_successor = NextUnvisitedSuccessor(curr);
- if (next_successor != NULL) {
+ if (next_successor != nullptr) {
MarkPreOrder(next_successor);
succ.push_back(next_successor);
continue;
@@ -107,7 +107,7 @@
if (num_reachable_blocks_ != GetNumBlocks()) {
// Kill all unreachable blocks.
AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
if (!bb->visited) {
bb->Kill(this);
}
@@ -121,7 +121,7 @@
* register idx is defined in BasicBlock bb.
*/
bool MIRGraph::FillDefBlockMatrix(BasicBlock* bb) {
- if (bb->data_flow_info == NULL) {
+ if (bb->data_flow_info == nullptr) {
return false;
}
@@ -149,11 +149,11 @@
}
AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
FindLocalLiveIn(bb);
}
AllNodesIterator iter2(this);
- for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
+ for (BasicBlock* bb = iter2.Next(); bb != nullptr; bb = iter2.Next()) {
FillDefBlockMatrix(bb);
}
@@ -247,7 +247,7 @@
void MIRGraph::InitializeDominationInfo(BasicBlock* bb) {
int num_total_blocks = GetBasicBlockListCount();
- if (bb->dominators == NULL) {
+ if (bb->dominators == nullptr) {
bb->dominators = new (arena_) ArenaBitVector(arena_, num_total_blocks,
true /* expandable */, kBitMapDominators);
bb->i_dominated = new (arena_) ArenaBitVector(arena_, num_total_blocks,
@@ -357,7 +357,7 @@
/* Initialize domination-related data structures */
PreOrderDfsIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
InitializeDominationInfo(bb);
}
@@ -376,7 +376,7 @@
/* Compute the immediate dominators */
RepeatingReversePostOrderDfsIterator iter2(this);
bool change = false;
- for (BasicBlock* bb = iter2.Next(false); bb != NULL; bb = iter2.Next(change)) {
+ for (BasicBlock* bb = iter2.Next(false); bb != nullptr; bb = iter2.Next(change)) {
change = ComputeblockIDom(bb);
}
@@ -387,19 +387,19 @@
GetEntryBlock()->i_dom = 0;
PreOrderDfsIterator iter3(this);
- for (BasicBlock* bb = iter3.Next(); bb != NULL; bb = iter3.Next()) {
+ for (BasicBlock* bb = iter3.Next(); bb != nullptr; bb = iter3.Next()) {
SetDominators(bb);
}
ReversePostOrderDfsIterator iter4(this);
- for (BasicBlock* bb = iter4.Next(); bb != NULL; bb = iter4.Next()) {
+ for (BasicBlock* bb = iter4.Next(); bb != nullptr; bb = iter4.Next()) {
ComputeBlockDominators(bb);
}
// Compute the dominance frontier for each block.
ComputeDomPostOrderTraversal(GetEntryBlock());
PostOrderDOMIterator iter5(this);
- for (BasicBlock* bb = iter5.Next(); bb != NULL; bb = iter5.Next()) {
+ for (BasicBlock* bb = iter5.Next(); bb != nullptr; bb = iter5.Next()) {
ComputeDominanceFrontier(bb);
}
@@ -434,7 +434,7 @@
DCHECK_EQ(temp_.ssa.num_vregs, cu_->mir_graph.get()->GetNumOfCodeAndTempVRs());
ArenaBitVector* temp_live_vregs = temp_.ssa.work_live_vregs;
- if (bb->data_flow_info == NULL) {
+ if (bb->data_flow_info == nullptr) {
return false;
}
temp_live_vregs->Copy(bb->data_flow_info->live_in_v);
@@ -466,7 +466,7 @@
void MIRGraph::FindPhiNodeBlocks() {
RepeatingPostOrderDfsIterator iter(this);
bool change = false;
- for (BasicBlock* bb = iter.Next(false); bb != NULL; bb = iter.Next(change)) {
+ for (BasicBlock* bb = iter.Next(false); bb != nullptr; bb = iter.Next(change)) {
change = ComputeBlockLiveIns(bb);
}
@@ -505,7 +505,7 @@
*/
bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb) {
/* Phi nodes are at the beginning of each block */
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
if (mir->dalvikInsn.opcode != static_cast<Instruction::Code>(kMirOpPhi))
return true;
int ssa_reg = mir->ssa_rep->defs[0];
diff --git a/compiler/dex/type_inference.cc b/compiler/dex/type_inference.cc
new file mode 100644
index 0000000..19d591b
--- /dev/null
+++ b/compiler/dex/type_inference.cc
@@ -0,0 +1,1067 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "type_inference.h"
+
+#include "base/bit_vector-inl.h"
+#include "compiler_ir.h"
+#include "dataflow_iterator-inl.h"
+#include "dex_flags.h"
+#include "dex_file-inl.h"
+#include "driver/dex_compilation_unit.h"
+#include "mir_field_info.h"
+#include "mir_graph.h"
+#include "mir_method_info.h"
+
+namespace art {
+
+inline TypeInference::Type TypeInference::Type::ArrayType(uint32_t array_depth, Type nested_type) {
+ DCHECK_NE(array_depth, 0u);
+ return Type(kFlagNarrow | kFlagRef | kFlagLowWord | (array_depth << kBitArrayDepthStart) |
+ ((nested_type.raw_bits_ & kMaskWideAndType) << kArrayTypeShift));
+}
+
+inline TypeInference::Type TypeInference::Type::ArrayTypeFromComponent(Type component_type) {
+ if (component_type.ArrayDepth() == 0u) {
+ return ArrayType(1u, component_type);
+ }
+ if (UNLIKELY(component_type.ArrayDepth() == kMaxArrayDepth)) {
+ return component_type;
+ }
+ return Type(component_type.raw_bits_ + (1u << kBitArrayDepthStart)); // array_depth + 1u;
+}
+
+TypeInference::Type TypeInference::Type::ShortyType(char shorty) {
+ switch (shorty) {
+ case 'L':
+ return Type(kFlagLowWord | kFlagNarrow | kFlagRef);
+ case 'D':
+ return Type(kFlagLowWord | kFlagWide | kFlagFp);
+ case 'J':
+ return Type(kFlagLowWord | kFlagWide | kFlagCore);
+ case 'F':
+ return Type(kFlagLowWord | kFlagNarrow | kFlagFp);
+ default:
+ DCHECK(shorty == 'I' || shorty == 'S' || shorty == 'C' || shorty == 'B' || shorty == 'Z');
+ return Type(kFlagLowWord | kFlagNarrow | kFlagCore);
+ }
+}
+
+TypeInference::Type TypeInference::Type::DexType(const DexFile* dex_file, uint32_t type_idx) {
+ const char* desc = dex_file->GetTypeDescriptor(dex_file->GetTypeId(type_idx));
+ if (UNLIKELY(desc[0] == 'V')) {
+ return Unknown();
+ } else if (UNLIKELY(desc[0] == '[')) {
+ size_t array_depth = 0u;
+ while (*desc == '[') {
+ ++array_depth;
+ ++desc;
+ }
+ if (UNLIKELY(array_depth > kMaxArrayDepth)) {
+ LOG(WARNING) << "Array depth exceeds " << kMaxArrayDepth << ": " << array_depth
+ << " in dex file " << dex_file->GetLocation() << " type index " << type_idx;
+ array_depth = kMaxArrayDepth;
+ }
+ Type shorty_result = Type::ShortyType(desc[0]);
+ return ArrayType(array_depth, shorty_result);
+ } else {
+ return ShortyType(desc[0]);
+ }
+}
+
+bool TypeInference::Type::MergeArrayConflict(Type src_type) {
+ DCHECK(Ref());
+ DCHECK_NE(ArrayDepth(), src_type.ArrayDepth());
+ DCHECK_GE(std::min(ArrayDepth(), src_type.ArrayDepth()), 1u);
+ bool size_conflict =
+ (ArrayDepth() == 1u && (raw_bits_ & kFlagArrayWide) != 0u) ||
+ (src_type.ArrayDepth() == 1u && (src_type.raw_bits_ & kFlagArrayWide) != 0u);
+ // Mark all three array type bits so that merging any other type bits will not change this type.
+ return Copy(Type((raw_bits_ & kMaskNonArray) |
+ (1u << kBitArrayDepthStart) | kFlagArrayCore | kFlagArrayRef | kFlagArrayFp |
+ kFlagArrayNarrow | (size_conflict ? kFlagArrayWide : 0u)));
+}
+
+bool TypeInference::Type::MergeStrong(Type src_type) {
+ bool changed = MergeNonArrayFlags(src_type);
+ if (src_type.ArrayDepth() != 0u) {
+ if (ArrayDepth() == 0u) {
+ DCHECK_EQ(raw_bits_ & ~kMaskNonArray, 0u);
+ DCHECK_NE(src_type.raw_bits_ & kFlagRef, 0u);
+ raw_bits_ |= src_type.raw_bits_ & (~kMaskNonArray | kFlagRef);
+ changed = true;
+ } else if (ArrayDepth() == src_type.ArrayDepth()) {
+ changed |= MergeBits(src_type, kMaskArrayWideAndType);
+ } else if (src_type.ArrayDepth() == 1u &&
+ (((src_type.raw_bits_ ^ UnknownArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u ||
+ ((src_type.raw_bits_ ^ ObjectArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u)) {
+ // Source type is [L or [? but current type is at least [[, preserve it.
+ } else if (ArrayDepth() == 1u &&
+ (((raw_bits_ ^ UnknownArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u ||
+ ((raw_bits_ ^ ObjectArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u)) {
+ // Overwrite [? or [L with the source array type which is at least [[.
+ raw_bits_ = (raw_bits_ & kMaskNonArray) | (src_type.raw_bits_ & ~kMaskNonArray);
+ changed = true;
+ } else {
+ // Mark the array value type with conflict - both ref and fp.
+ changed |= MergeArrayConflict(src_type);
+ }
+ }
+ return changed;
+}
+
+bool TypeInference::Type::MergeWeak(Type src_type) {
+ bool changed = MergeNonArrayFlags(src_type);
+ if (src_type.ArrayDepth() != 0u && src_type.NonNull()) {
+ DCHECK_NE(src_type.ArrayDepth(), 0u);
+ if (ArrayDepth() == 0u) {
+ DCHECK_EQ(raw_bits_ & ~kMaskNonArray, 0u);
+ // Preserve current type.
+ } else if (ArrayDepth() == src_type.ArrayDepth()) {
+ changed |= MergeBits(src_type, kMaskArrayWideAndType);
+ } else if (src_type.ArrayDepth() == 1u &&
+ (((src_type.raw_bits_ ^ UnknownArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u ||
+ ((src_type.raw_bits_ ^ ObjectArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u)) {
+ // Source type is [L or [? but current type is at least [[, preserve it.
+ } else if (ArrayDepth() == 1u &&
+ (((raw_bits_ ^ UnknownArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u ||
+ ((raw_bits_ ^ ObjectArrayType().raw_bits_) & kMaskArrayWideAndType) == 0u)) {
+ // We have [? or [L. If it's [?, upgrade to [L as the source array type is at least [[.
+ changed |= MergeBits(ObjectArrayType(), kMaskArrayWideAndType);
+ } else {
+ // Mark the array value type with conflict - both ref and fp.
+ changed |= MergeArrayConflict(src_type);
+ }
+ }
+ return changed;
+}
+
+TypeInference::CheckCastData::CheckCastData(MIRGraph* mir_graph, ScopedArenaAllocator* alloc)
+ : mir_graph_(mir_graph),
+ alloc_(alloc),
+ num_blocks_(mir_graph->GetNumBlocks()),
+ num_sregs_(mir_graph->GetNumSSARegs()),
+ check_cast_map_(std::less<MIR*>(), alloc->Adapter()),
+ split_sreg_data_(std::less<int32_t>(), alloc->Adapter()) {
+}
+
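+// Record one check-cast: allocate a fresh "modified" SSA reg for the value after
+// the cast and lazily create the split-tracking data for the original SSA reg.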
+void TypeInference::CheckCastData::AddCheckCast(MIR* check_cast, Type type) {
+ DCHECK_EQ(check_cast->dalvikInsn.opcode, Instruction::CHECK_CAST);
+ type.CheckPureRef();
+ int32_t extra_s_reg = static_cast<int32_t>(num_sregs_);
+ num_sregs_ += 1;
+ check_cast_map_.Put(check_cast, CheckCastMapValue{extra_s_reg, type}); // NOLINT
+ int32_t s_reg = check_cast->ssa_rep->uses[0];
+ auto lb = split_sreg_data_.lower_bound(s_reg);
+ if (lb == split_sreg_data_.end() || split_sreg_data_.key_comp()(s_reg, lb->first)) {
+ SplitSRegData split_s_reg_data = {
+ 0,
+ alloc_->AllocArray<int32_t>(num_blocks_, kArenaAllocMisc),
+ alloc_->AllocArray<int32_t>(num_blocks_, kArenaAllocMisc),
+ new (alloc_) ArenaBitVector(alloc_, num_blocks_, false)
+ };
+ std::fill_n(split_s_reg_data.starting_mod_s_reg, num_blocks_, INVALID_SREG);
+ std::fill_n(split_s_reg_data.ending_mod_s_reg, num_blocks_, INVALID_SREG);
+ split_s_reg_data.def_phi_blocks_->ClearAllBits();
+ BasicBlock* def_bb = FindDefBlock(check_cast);
+ split_s_reg_data.ending_mod_s_reg[def_bb->id] = s_reg;
+ split_s_reg_data.def_phi_blocks_->SetBit(def_bb->id);
+ lb = split_sreg_data_.PutBefore(lb, s_reg, split_s_reg_data);
+ }
+ lb->second.ending_mod_s_reg[check_cast->bb] = extra_s_reg;
+ lb->second.def_phi_blocks_->SetBit(check_cast->bb);
+}
+
+void TypeInference::CheckCastData::AddPseudoPhis() {
+ // Look for pseudo-phis where a split SSA reg merges with a differently typed version
+ // and initialize all starting_mod_s_reg.
+ DCHECK(!split_sreg_data_.empty());
+ ArenaBitVector* phi_blocks = new (alloc_) ArenaBitVector(alloc_, num_blocks_, false);
+
+ for (auto& entry : split_sreg_data_) {
+ SplitSRegData& data = entry.second;
+
+ // Find pseudo-phi nodes.
+ phi_blocks->ClearAllBits();
+ ArenaBitVector* input_blocks = data.def_phi_blocks_;
+ do {
+ for (uint32_t idx : input_blocks->Indexes()) {
+ BasicBlock* def_bb = mir_graph_->GetBasicBlock(idx);
+ if (def_bb->dom_frontier != nullptr) {
+ phi_blocks->Union(def_bb->dom_frontier);
+ }
+ }
+ } while (input_blocks->Union(phi_blocks));
+
+ // Find live pseudo-phis. Make sure they're merging the same SSA reg.
+ data.def_phi_blocks_->ClearAllBits();
+ int32_t s_reg = entry.first;
+ int v_reg = mir_graph_->SRegToVReg(s_reg);
+ for (uint32_t phi_bb_id : phi_blocks->Indexes()) {
+ BasicBlock* phi_bb = mir_graph_->GetBasicBlock(phi_bb_id);
+ DCHECK(phi_bb != nullptr);
+ DCHECK(phi_bb->data_flow_info != nullptr);
+ DCHECK(phi_bb->data_flow_info->live_in_v != nullptr);
+ if (IsSRegLiveAtStart(phi_bb, v_reg, s_reg)) {
+ int32_t extra_s_reg = static_cast<int32_t>(num_sregs_);
+ num_sregs_ += 1;
+ data.starting_mod_s_reg[phi_bb_id] = extra_s_reg;
+ data.def_phi_blocks_->SetBit(phi_bb_id);
+ }
+ }
+
+ // SSA rename for s_reg.
+ TopologicalSortIterator iter(mir_graph_);
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+ if (bb->data_flow_info == nullptr || bb->block_type == kEntryBlock) {
+ continue;
+ }
+ BasicBlockId bb_id = bb->id;
+ if (data.def_phi_blocks_->IsBitSet(bb_id)) {
+ DCHECK_NE(data.starting_mod_s_reg[bb_id], INVALID_SREG);
+ } else {
+ DCHECK_EQ(data.starting_mod_s_reg[bb_id], INVALID_SREG);
+ if (IsSRegLiveAtStart(bb, v_reg, s_reg)) {
+ // The earliest predecessor must have been processed already.
+ BasicBlock* pred_bb = FindTopologicallyEarliestPredecessor(bb);
+ int32_t mod_s_reg = data.ending_mod_s_reg[pred_bb->id];
+ data.starting_mod_s_reg[bb_id] = (mod_s_reg != INVALID_SREG) ? mod_s_reg : s_reg;
+ } else if (data.ending_mod_s_reg[bb_id] != INVALID_SREG) {
+ // Start the original defining block with s_reg.
+ data.starting_mod_s_reg[bb_id] = s_reg;
+ }
+ }
+ if (data.ending_mod_s_reg[bb_id] == INVALID_SREG) {
+ // If the block doesn't define the modified SSA reg, it propagates the starting type.
+ data.ending_mod_s_reg[bb_id] = data.starting_mod_s_reg[bb_id];
+ }
+ }
+ }
+}
+
+void TypeInference::CheckCastData::InitializeCheckCastSRegs(Type* sregs) const {
+ for (const auto& entry : check_cast_map_) {
+ DCHECK_LT(static_cast<size_t>(entry.second.modified_s_reg), num_sregs_);
+ sregs[entry.second.modified_s_reg] = entry.second.type.AsNonNull();
+ }
+}
+
+void TypeInference::CheckCastData::MergeCheckCastConflicts(Type* sregs) const {
+ for (const auto& entry : check_cast_map_) {
+ DCHECK_LT(static_cast<size_t>(entry.second.modified_s_reg), num_sregs_);
+ sregs[entry.first->ssa_rep->uses[0]].MergeNonArrayFlags(
+ sregs[entry.second.modified_s_reg].AsNull());
+ }
+}
+
+void TypeInference::CheckCastData::MarkPseudoPhiBlocks(uint64_t* bb_df_attrs) const {
+ for (auto& entry : split_sreg_data_) {
+ for (uint32_t bb_id : entry.second.def_phi_blocks_->Indexes()) {
+ bb_df_attrs[bb_id] |= DF_NULL_TRANSFER_N;
+ }
+ }
+}
+
+void TypeInference::CheckCastData::Start(BasicBlock* bb) {
+ for (auto& entry : split_sreg_data_) {
+ entry.second.current_mod_s_reg = entry.second.starting_mod_s_reg[bb->id];
+ }
+}
+
+bool TypeInference::CheckCastData::ProcessPseudoPhis(BasicBlock* bb, Type* sregs) {
+ bool changed = false;
+ for (auto& entry : split_sreg_data_) {
+ DCHECK_EQ(entry.second.current_mod_s_reg, entry.second.starting_mod_s_reg[bb->id]);
+ if (entry.second.def_phi_blocks_->IsBitSet(bb->id)) {
+ int32_t* ending_mod_s_reg = entry.second.ending_mod_s_reg;
+ Type merged_type = sregs[entry.second.current_mod_s_reg];
+ for (BasicBlockId pred_id : bb->predecessors) {
+ DCHECK_LT(static_cast<size_t>(ending_mod_s_reg[pred_id]), num_sregs_);
+ merged_type.MergeWeak(sregs[ending_mod_s_reg[pred_id]]);
+ }
+ if (UNLIKELY(!merged_type.IsDefined())) {
+ // This can happen during an initial merge of a loop head if the original def is
+ // actually an untyped null. (All other definitions are typed using the check-cast.)
+ } else if (merged_type.Wide()) {
+ // Ignore the pseudo-phi, just remember that there's a size mismatch.
+ sregs[entry.second.current_mod_s_reg].MarkSizeConflict();
+ } else {
+ DCHECK(merged_type.Narrow() && merged_type.LowWord() && !merged_type.HighWord());
+ // Propagate both down (fully) and up (without the "non-null" flag).
+ changed |= sregs[entry.second.current_mod_s_reg].Copy(merged_type);
+ merged_type = merged_type.AsNull();
+ for (BasicBlockId pred_id : bb->predecessors) {
+ DCHECK_LT(static_cast<size_t>(ending_mod_s_reg[pred_id]), num_sregs_);
+ sregs[ending_mod_s_reg[pred_id]].MergeStrong(merged_type);
+ }
+ }
+ }
+ }
+ return changed;
+}
+
+void TypeInference::CheckCastData::ProcessCheckCast(MIR* mir) {
+ auto mir_it = check_cast_map_.find(mir);
+ DCHECK(mir_it != check_cast_map_.end());
+ auto sreg_it = split_sreg_data_.find(mir->ssa_rep->uses[0]);
+ DCHECK(sreg_it != split_sreg_data_.end());
+ sreg_it->second.current_mod_s_reg = mir_it->second.modified_s_reg;
+}
+
+TypeInference::SplitSRegData* TypeInference::CheckCastData::GetSplitSRegData(int32_t s_reg) {
+ auto it = split_sreg_data_.find(s_reg);
+ return (it == split_sreg_data_.end()) ? nullptr : &it->second;
+}
+
+BasicBlock* TypeInference::CheckCastData::FindDefBlock(MIR* check_cast) {
+ // Find the initial definition of the SSA reg used by the check-cast.
+ DCHECK_EQ(check_cast->dalvikInsn.opcode, Instruction::CHECK_CAST);
+ int32_t s_reg = check_cast->ssa_rep->uses[0];
+ if (mir_graph_->IsInVReg(s_reg)) {
+ return mir_graph_->GetEntryBlock();
+ }
+ int v_reg = mir_graph_->SRegToVReg(s_reg);
+ BasicBlock* bb = mir_graph_->GetBasicBlock(check_cast->bb);
+ DCHECK(bb != nullptr);
+ while (true) {
+ // Find the earliest predecessor in the topological sort order to ensure we don't
+ // go in a loop.
+ BasicBlock* pred_bb = FindTopologicallyEarliestPredecessor(bb);
+ DCHECK(pred_bb != nullptr);
+ DCHECK(pred_bb->data_flow_info != nullptr);
+ DCHECK(pred_bb->data_flow_info->vreg_to_ssa_map_exit != nullptr);
+ if (pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg] != s_reg) {
+ // The s_reg was not valid at the end of pred_bb, so it must have been defined in bb.
+ return bb;
+ }
+ bb = pred_bb;
+ }
+}
+
+BasicBlock* TypeInference::CheckCastData::FindTopologicallyEarliestPredecessor(BasicBlock* bb) {
+ DCHECK(!bb->predecessors.empty());
+ const auto& indexes = mir_graph_->GetTopologicalSortOrderIndexes();
+ DCHECK_LT(bb->id, indexes.size());
+ size_t best_idx = indexes[bb->id];
+ BasicBlockId best_id = NullBasicBlockId;
+ for (BasicBlockId pred_id : bb->predecessors) {
+ DCHECK_LT(pred_id, indexes.size());
+ if (best_idx > indexes[pred_id]) {
+ best_idx = indexes[pred_id];
+ best_id = pred_id;
+ }
+ }
+ // There must be at least one predecessor earlier than the bb.
+ DCHECK_LT(best_idx, indexes[bb->id]);
+ return mir_graph_->GetBasicBlock(best_id);
+}
+
+bool TypeInference::CheckCastData::IsSRegLiveAtStart(BasicBlock* bb, int v_reg, int32_t s_reg) {
+ DCHECK_EQ(v_reg, mir_graph_->SRegToVReg(s_reg));
+ DCHECK(bb != nullptr);
+ DCHECK(bb->data_flow_info != nullptr);
+ DCHECK(bb->data_flow_info->live_in_v != nullptr);
+ if (!bb->data_flow_info->live_in_v->IsBitSet(v_reg)) {
+ return false;
+ }
+ for (BasicBlockId pred_id : bb->predecessors) {
+ BasicBlock* pred_bb = mir_graph_->GetBasicBlock(pred_id);
+ DCHECK(pred_bb != nullptr);
+ DCHECK(pred_bb->data_flow_info != nullptr);
+ DCHECK(pred_bb->data_flow_info->vreg_to_ssa_map_exit != nullptr);
+ if (pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg] != s_reg) {
+ return false;
+ }
+ }
+ return true;
+}
+
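+// NOTE: Member declaration order matters here: check_cast_data_ is initialized
+// before num_sregs_ because check-casts may allocate extra modified SSA regs.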
+TypeInference::TypeInference(MIRGraph* mir_graph, ScopedArenaAllocator* alloc)
+ : mir_graph_(mir_graph),
+ cu_(mir_graph->GetCurrentDexCompilationUnit()->GetCompilationUnit()),
+ check_cast_data_(!mir_graph->HasCheckCast() ? nullptr :
+ InitializeCheckCastData(mir_graph, alloc)),
+ num_sregs_(
+ check_cast_data_ != nullptr ? check_cast_data_->NumSRegs() : mir_graph->GetNumSSARegs()),
+ ifields_(mir_graph->GetIFieldLoweringInfoCount() == 0u ? nullptr :
+ PrepareIFieldTypes(cu_->dex_file, mir_graph, alloc)),
+ sfields_(mir_graph->GetSFieldLoweringInfoCount() == 0u ? nullptr :
+ PrepareSFieldTypes(cu_->dex_file, mir_graph, alloc)),
+ signatures_(mir_graph->GetMethodLoweringInfoCount() == 0u ? nullptr :
+ PrepareSignatures(cu_->dex_file, mir_graph, alloc)),
+ current_method_signature_(
+ Signature(cu_->dex_file, cu_->method_idx, (cu_->access_flags & kAccStatic) != 0, alloc)),
+ sregs_(alloc->AllocArray<Type>(num_sregs_, kArenaAllocMisc)),
+ bb_df_attrs_(alloc->AllocArray<uint64_t>(mir_graph->GetNumBlocks(), kArenaAllocDFInfo)) {
+ InitializeSRegs();
+}
+
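+// Propagate types through one basic block. Returns true if any type changed; the
+// caller is expected to repeat this over the graph until a fixed point is reached.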
+bool TypeInference::Apply(BasicBlock* bb) {
+ bool changed = false;
+ uint64_t bb_df_attrs = bb_df_attrs_[bb->id];
+ if (bb_df_attrs != 0u) {
+ if (UNLIKELY(check_cast_data_ != nullptr)) {
+ check_cast_data_->Start(bb);
+ if (bb_df_attrs & DF_NULL_TRANSFER_N) {
+ changed |= check_cast_data_->ProcessPseudoPhis(bb, sregs_);
+ }
+ }
+ MIR* mir = bb->first_mir_insn;
+ MIR* main_mirs_end = ((bb_df_attrs & DF_SAME_TYPE_AB) != 0u) ? bb->last_mir_insn : nullptr;
+ for (; mir != main_mirs_end && static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi;
+ mir = mir->next) {
+ // Special-case handling for Phi comes first because we have 2 Phis instead of a wide one.
+ // At least one input must have been previously processed. Look for the first
+ // occurrence of a high_word or low_word flag to determine the type.
+ size_t num_uses = mir->ssa_rep->num_uses;
+ const int32_t* uses = mir->ssa_rep->uses;
+ const int32_t* defs = mir->ssa_rep->defs;
+ DCHECK_EQ(bb->predecessors.size(), num_uses);
+ Type merged_type = sregs_[defs[0]];
+ for (size_t pred_idx = 0; pred_idx != num_uses; ++pred_idx) {
+ int32_t input_mod_s_reg = PhiInputModifiedSReg(uses[pred_idx], bb, pred_idx);
+ merged_type.MergeWeak(sregs_[input_mod_s_reg]);
+ }
+ if (UNLIKELY(!merged_type.IsDefined())) {
+ // No change
+ } else if (merged_type.HighWord()) {
+ // Ignore the high word phi, just remember if there's a size mismatch.
+ if (UNLIKELY(merged_type.LowWord())) {
+ sregs_[defs[0]].MarkSizeConflict();
+ }
+ } else {
+ // Propagate both down (fully) and up (without the "non-null" flag).
+ changed |= sregs_[defs[0]].Copy(merged_type);
+ merged_type = merged_type.AsNull();
+ for (size_t pred_idx = 0; pred_idx != num_uses; ++pred_idx) {
+ int32_t input_mod_s_reg = PhiInputModifiedSReg(uses[pred_idx], bb, pred_idx);
+ changed |= UpdateSRegFromLowWordType(input_mod_s_reg, merged_type);
+ }
+ }
+ }
+
+ // Propagate types with MOVEs and AGETs, process CHECK_CASTs for modified SSA reg tracking.
+ for (; mir != main_mirs_end; mir = mir->next) {
+ uint64_t attrs = MIRGraph::GetDataFlowAttributes(mir);
+ size_t num_uses = mir->ssa_rep->num_uses;
+ const int32_t* uses = mir->ssa_rep->uses;
+ const int32_t* defs = mir->ssa_rep->defs;
+
+ // Special handling for moves. Propagate type both ways.
+ if ((attrs & DF_IS_MOVE) != 0) {
+ int32_t used_mod_s_reg = ModifiedSReg(uses[0]);
+ int32_t defd_mod_s_reg = defs[0];
+
+ // The "non-null" flag is propagated only downwards from actual definitions and it's
+        // not initially marked for moves, so the used sreg must be marked before the defined one.
+ // The only exception is an inlined move where we know the type from the original invoke.
+ DCHECK(sregs_[used_mod_s_reg].NonNull() || !sregs_[defd_mod_s_reg].NonNull() ||
+ (mir->optimization_flags & MIR_CALLEE) != 0);
+ changed |= UpdateSRegFromLowWordType(used_mod_s_reg, sregs_[defd_mod_s_reg].AsNull());
+
+ // The value is the same, so either both registers are null or no register is.
+ // In any case we can safely propagate the array type down.
+ changed |= UpdateSRegFromLowWordType(defd_mod_s_reg, sregs_[used_mod_s_reg]);
+ if (UNLIKELY((attrs & DF_REF_A) == 0 && sregs_[used_mod_s_reg].Ref())) {
+ // Mark type conflict: move instead of move-object.
+ sregs_[used_mod_s_reg].MarkTypeConflict();
+ }
+ continue;
+ }
+
+ // Handle AGET/APUT.
+ if ((attrs & DF_HAS_RANGE_CHKS) != 0) {
+ int32_t base_mod_s_reg = ModifiedSReg(uses[num_uses - 2u]);
+ int32_t mod_s_reg = (attrs & DF_DA) != 0 ? defs[0] : ModifiedSReg(uses[0]);
+ DCHECK_NE(sregs_[base_mod_s_reg].ArrayDepth(), 0u);
+ if (!sregs_[base_mod_s_reg].NonNull()) {
+ // If the base is null, don't propagate anything. All that we could determine
+ // has already been merged in the previous stage.
+ } else {
+ changed |= UpdateSRegFromLowWordType(mod_s_reg, sregs_[base_mod_s_reg].ComponentType());
+ Type array_type = Type::ArrayTypeFromComponent(sregs_[mod_s_reg]);
+ if ((attrs & DF_DA) != 0) {
+ changed |= sregs_[base_mod_s_reg].MergeStrong(array_type);
+ } else {
+ changed |= sregs_[base_mod_s_reg].MergeWeak(array_type);
+ }
+ }
+ if (UNLIKELY((attrs & DF_REF_A) == 0 && sregs_[mod_s_reg].Ref())) {
+ // Mark type conflict: aget/aput instead of aget/aput-object.
+ sregs_[mod_s_reg].MarkTypeConflict();
+ }
+ continue;
+ }
+
+ // Special-case handling for check-cast to advance modified SSA reg.
+ if (UNLIKELY((attrs & DF_CHK_CAST) != 0)) {
+ DCHECK(check_cast_data_ != nullptr);
+ check_cast_data_->ProcessCheckCast(mir);
+ }
+ }
+
+ // Propagate types for IF_cc if present.
+ if (mir != nullptr) {
+ DCHECK(mir == bb->last_mir_insn);
+ DCHECK(mir->next == nullptr);
+ DCHECK_NE(MIRGraph::GetDataFlowAttributes(mir) & DF_SAME_TYPE_AB, 0u);
+ DCHECK_EQ(mir->ssa_rep->num_uses, 2u);
+ const int32_t* uses = mir->ssa_rep->uses;
+ int32_t mod_s_reg0 = ModifiedSReg(uses[0]);
+ int32_t mod_s_reg1 = ModifiedSReg(uses[1]);
+ changed |= sregs_[mod_s_reg0].MergeWeak(sregs_[mod_s_reg1].AsNull());
+ changed |= sregs_[mod_s_reg1].MergeWeak(sregs_[mod_s_reg0].AsNull());
+ }
+ }
+ return changed;
+}
+
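+// Copy the inferred types to reg_location_ and handle conflicts: a size conflict
+// punts the method to the interpreter, a type conflict only disables register
+// promotion.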
+void TypeInference::Finish() {
+ if (UNLIKELY(check_cast_data_ != nullptr)) {
+ check_cast_data_->MergeCheckCastConflicts(sregs_);
+ }
+
+ size_t num_sregs = mir_graph_->GetNumSSARegs(); // Without the extra SSA regs.
+ for (size_t s_reg = 0; s_reg != num_sregs; ++s_reg) {
+ if (sregs_[s_reg].SizeConflict()) {
+ /*
+       * The dex bytecode definition does not explicitly outlaw using the same virtual
+       * register in both a 32-bit and a 64-bit pair context. However, dx does not
+       * generate this pattern (at least recently). Further, the next revision of dex
+       * will forbid this. To support the few cases in the wild, detect this pattern
+ * and punt to the interpreter.
+ */
+ LOG(WARNING) << PrettyMethod(cu_->method_idx, *cu_->dex_file)
+ << " has size conflict block for sreg " << s_reg
+ << ", punting to interpreter.";
+ mir_graph_->SetPuntToInterpreter(true);
+ return;
+ }
+ }
+
+ size_t conflict_s_reg = 0;
+ bool type_conflict = false;
+ for (size_t s_reg = 0; s_reg != num_sregs; ++s_reg) {
+ Type type = sregs_[s_reg];
+ RegLocation* loc = &mir_graph_->reg_location_[s_reg];
+ loc->wide = type.Wide();
+ loc->defined = type.IsDefined();
+ loc->fp = type.Fp();
+ loc->core = type.Core();
+ loc->ref = type.Ref();
+ loc->high_word = type.HighWord();
+ if (UNLIKELY(type.TypeConflict())) {
+ type_conflict = true;
+ conflict_s_reg = s_reg;
+ }
+ }
+
+ if (type_conflict) {
+ /*
+ * We don't normally expect to see a Dalvik register definition used both as a
+ * floating point and core value, though technically it could happen with constants.
+ * Until we have proper typing, detect this situation and disable register promotion
+     * (which relies on the distinction between core and fp usages).
+ */
+ LOG(WARNING) << PrettyMethod(cu_->method_idx, *cu_->dex_file)
+ << " has type conflict block for sreg " << conflict_s_reg
+ << ", disabling register promotion.";
+ cu_->disable_opt |= (1 << kPromoteRegs);
+ }
+}
+
+TypeInference::Type TypeInference::FieldType(const DexFile* dex_file, uint32_t field_idx) {
+ uint32_t type_idx = dex_file->GetFieldId(field_idx).type_idx_;
+ Type result = Type::DexType(dex_file, type_idx);
+ return result;
+}
+
+TypeInference::Type* TypeInference::PrepareIFieldTypes(const DexFile* dex_file,
+ MIRGraph* mir_graph,
+ ScopedArenaAllocator* alloc) {
+ size_t count = mir_graph->GetIFieldLoweringInfoCount();
+ Type* ifields = alloc->AllocArray<Type>(count, kArenaAllocDFInfo);
+ for (uint32_t i = 0u; i != count; ++i) {
+ // NOTE: Quickened field accesses have invalid FieldIndex() but they are always resolved.
+ const MirFieldInfo& info = mir_graph->GetIFieldLoweringInfo(i);
+ const DexFile* current_dex_file = info.IsResolved() ? info.DeclaringDexFile() : dex_file;
+ uint32_t field_idx = info.IsResolved() ? info.DeclaringFieldIndex() : info.FieldIndex();
+ ifields[i] = FieldType(current_dex_file, field_idx);
+ DCHECK_EQ(info.MemAccessType() == kDexMemAccessWide, ifields[i].Wide());
+ DCHECK_EQ(info.MemAccessType() == kDexMemAccessObject, ifields[i].Ref());
+ }
+ return ifields;
+}
+
+TypeInference::Type* TypeInference::PrepareSFieldTypes(const DexFile* dex_file,
+ MIRGraph* mir_graph,
+ ScopedArenaAllocator* alloc) {
+ size_t count = mir_graph->GetSFieldLoweringInfoCount();
+ Type* sfields = alloc->AllocArray<Type>(count, kArenaAllocDFInfo);
+ for (uint32_t i = 0u; i != count; ++i) {
+ // FieldIndex() is always valid for static fields (no quickened instructions).
+ sfields[i] = FieldType(dex_file, mir_graph->GetSFieldLoweringInfo(i).FieldIndex());
+ }
+ return sfields;
+}
+
+TypeInference::MethodSignature TypeInference::Signature(const DexFile* dex_file,
+ uint32_t method_idx,
+ bool is_static,
+ ScopedArenaAllocator* alloc) {
+ const DexFile::MethodId& method_id = dex_file->GetMethodId(method_idx);
+ const DexFile::ProtoId& proto_id = dex_file->GetMethodPrototype(method_id);
+ Type return_type = Type::DexType(dex_file, proto_id.return_type_idx_);
+ const DexFile::TypeList* type_list = dex_file->GetProtoParameters(proto_id);
+ size_t this_size = (is_static ? 0u : 1u);
+ size_t param_size = ((type_list != nullptr) ? type_list->Size() : 0u);
+ size_t size = this_size + param_size;
+ Type* param_types = (size != 0u) ? alloc->AllocArray<Type>(size, kArenaAllocDFInfo) : nullptr;
+ if (!is_static) {
+ param_types[0] = Type::DexType(dex_file, method_id.class_idx_);
+ }
+ for (size_t i = 0; i != param_size; ++i) {
+ uint32_t type_idx = type_list->GetTypeItem(i).type_idx_;
+ param_types[this_size + i] = Type::DexType(dex_file, type_idx);
+ }
+ return MethodSignature{ return_type, size, param_types }; // NOLINT
+}
+
+TypeInference::MethodSignature* TypeInference::PrepareSignatures(const DexFile* dex_file,
+ MIRGraph* mir_graph,
+ ScopedArenaAllocator* alloc) {
+ size_t count = mir_graph->GetMethodLoweringInfoCount();
+ MethodSignature* signatures = alloc->AllocArray<MethodSignature>(count, kArenaAllocDFInfo);
+ for (uint32_t i = 0u; i != count; ++i) {
+ // NOTE: Quickened invokes have invalid MethodIndex() but they are always resolved.
+ const MirMethodInfo& info = mir_graph->GetMethodLoweringInfo(i);
+ uint32_t method_idx = info.IsResolved() ? info.DeclaringMethodIndex() : info.MethodIndex();
+ const DexFile* current_dex_file = info.IsResolved() ? info.DeclaringDexFile() : dex_file;
+ signatures[i] = Signature(current_dex_file, method_idx, info.IsStatic(), alloc);
+ }
+ return signatures;
+}
+
+TypeInference::CheckCastData* TypeInference::InitializeCheckCastData(MIRGraph* mir_graph,
+ ScopedArenaAllocator* alloc) {
+ if (!mir_graph->HasCheckCast()) {
+ return nullptr;
+ }
+
+ CheckCastData* data = nullptr;
+ const DexFile* dex_file = nullptr;
+ PreOrderDfsIterator iter(mir_graph);
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+ if (mir->dalvikInsn.opcode == Instruction::CHECK_CAST) {
+ if (data == nullptr) {
+ data = new (alloc) CheckCastData(mir_graph, alloc);
+ dex_file = mir_graph->GetCurrentDexCompilationUnit()->GetCompilationUnit()->dex_file;
+ }
+ Type type = Type::DexType(dex_file, mir->dalvikInsn.vB);
+ data->AddCheckCast(mir, type);
+ }
+ }
+ }
+ if (data != nullptr) {
+ data->AddPseudoPhis();
+ }
+ return data;
+}
+
+void TypeInference::InitializeSRegs() {
+ std::fill_n(sregs_, num_sregs_, Type::Unknown());
+
+ /* Treat ArtMethod* as a normal reference */
+ sregs_[mir_graph_->GetMethodSReg()] = Type::NonArrayRefType();
+
+ // Initialize parameter SSA regs at method entry.
+ int32_t entry_param_s_reg = mir_graph_->GetFirstInVR();
+ for (size_t i = 0, size = current_method_signature_.num_params; i != size; ++i) {
+ Type param_type = current_method_signature_.param_types[i].AsNonNull();
+ sregs_[entry_param_s_reg] = param_type;
+ entry_param_s_reg += param_type.Wide() ? 2 : 1;
+ }
+ DCHECK_EQ(static_cast<uint32_t>(entry_param_s_reg),
+ mir_graph_->GetFirstInVR() + mir_graph_->GetNumOfInVRs());
+
+ // Initialize check-cast types.
+ if (UNLIKELY(check_cast_data_ != nullptr)) {
+ check_cast_data_->InitializeCheckCastSRegs(sregs_);
+ }
+
+ // Initialize well-known SSA register definition types. Merge inferred types
+ // upwards where a single merge is enough (INVOKE arguments and return type,
+ // RETURN type, IPUT/SPUT source type).
+ // NOTE: Using topological sort order to make sure the definition comes before
+ // any upward merging. This allows simple assignment of the defined types
+ // instead of MergeStrong().
+ TopologicalSortIterator iter(mir_graph_);
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+ uint64_t bb_df_attrs = 0u;
+ if (UNLIKELY(check_cast_data_ != nullptr)) {
+ check_cast_data_->Start(bb);
+ }
+ // Ignore pseudo-phis, we're not setting types for SSA regs that depend on them in this pass.
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+ uint64_t attrs = MIRGraph::GetDataFlowAttributes(mir);
+ bb_df_attrs |= attrs;
+
+ const uint32_t num_uses = mir->ssa_rep->num_uses;
+ const int32_t* uses = mir->ssa_rep->uses;
+ const int32_t* defs = mir->ssa_rep->defs;
+
+ uint16_t opcode = mir->dalvikInsn.opcode;
+ switch (opcode) {
+ case Instruction::CONST_4:
+ case Instruction::CONST_16:
+ case Instruction::CONST:
+ case Instruction::CONST_HIGH16:
+ case Instruction::CONST_WIDE_16:
+ case Instruction::CONST_WIDE_32:
+ case Instruction::CONST_WIDE:
+ case Instruction::CONST_WIDE_HIGH16:
+ case Instruction::MOVE:
+ case Instruction::MOVE_FROM16:
+ case Instruction::MOVE_16:
+ case Instruction::MOVE_WIDE:
+ case Instruction::MOVE_WIDE_FROM16:
+ case Instruction::MOVE_WIDE_16:
+ case Instruction::MOVE_OBJECT:
+ case Instruction::MOVE_OBJECT_FROM16:
+ case Instruction::MOVE_OBJECT_16:
+ if ((mir->optimization_flags & MIR_CALLEE) != 0) {
+ // Inlined const/move keeps method_lowering_info for type inference.
+ DCHECK_LT(mir->meta.method_lowering_info, mir_graph_->GetMethodLoweringInfoCount());
+ Type return_type = signatures_[mir->meta.method_lowering_info].return_type;
+ DCHECK(return_type.IsDefined()); // Method return type can't be void.
+ sregs_[defs[0]] = return_type.AsNonNull();
+ if (return_type.Wide()) {
+ DCHECK_EQ(defs[0] + 1, defs[1]);
+ sregs_[defs[1]] = return_type.ToHighWord();
+ }
+ break;
+ }
+ FALLTHROUGH_INTENDED;
+ case kMirOpPhi:
+ // These cannot be determined in this simple pass and will be processed later.
+ break;
+
+ case Instruction::MOVE_RESULT:
+ case Instruction::MOVE_RESULT_WIDE:
+ case Instruction::MOVE_RESULT_OBJECT:
+ // Nothing to do, handled with invoke-* or filled-new-array/-range.
+ break;
+ case Instruction::MOVE_EXCEPTION:
+ // NOTE: We can never catch an array.
+ sregs_[defs[0]] = Type::NonArrayRefType().AsNonNull();
+ break;
+ case Instruction::CONST_STRING:
+ case Instruction::CONST_STRING_JUMBO:
+ sregs_[defs[0]] = Type::NonArrayRefType().AsNonNull();
+ break;
+ case Instruction::CONST_CLASS:
+ sregs_[defs[0]] = Type::NonArrayRefType().AsNonNull();
+ break;
+ case Instruction::CHECK_CAST:
+ DCHECK(check_cast_data_ != nullptr);
+ check_cast_data_->ProcessCheckCast(mir);
+ break;
+ case Instruction::ARRAY_LENGTH:
+ sregs_[ModifiedSReg(uses[0])].MergeStrong(Type::UnknownArrayType());
+ break;
+ case Instruction::NEW_INSTANCE:
+ sregs_[defs[0]] = Type::DexType(cu_->dex_file, mir->dalvikInsn.vB).AsNonNull();
+ DCHECK(sregs_[defs[0]].Ref());
+ DCHECK_EQ(sregs_[defs[0]].ArrayDepth(), 0u);
+ break;
+ case Instruction::NEW_ARRAY:
+ sregs_[defs[0]] = Type::DexType(cu_->dex_file, mir->dalvikInsn.vC).AsNonNull();
+ DCHECK(sregs_[defs[0]].Ref());
+ DCHECK_NE(sregs_[defs[0]].ArrayDepth(), 0u);
+ break;
+ case Instruction::FILLED_NEW_ARRAY:
+ case Instruction::FILLED_NEW_ARRAY_RANGE: {
+ Type array_type = Type::DexType(cu_->dex_file, mir->dalvikInsn.vB);
+ array_type.CheckPureRef(); // Previously checked by the method verifier.
+ DCHECK_NE(array_type.ArrayDepth(), 0u);
+ Type component_type = array_type.ComponentType();
+ DCHECK(!component_type.Wide());
+ MIR* move_result_mir = mir_graph_->FindMoveResult(bb, mir);
+ if (move_result_mir != nullptr) {
+ DCHECK_EQ(move_result_mir->dalvikInsn.opcode, Instruction::MOVE_RESULT_OBJECT);
+ sregs_[move_result_mir->ssa_rep->defs[0]] = array_type.AsNonNull();
+ }
+ DCHECK_EQ(num_uses, mir->dalvikInsn.vA);
+ for (size_t next = 0u; next != num_uses; ++next) {
+ int32_t input_mod_s_reg = ModifiedSReg(uses[next]);
+ sregs_[input_mod_s_reg].MergeStrong(component_type);
+ }
+ break;
+ }
+ case Instruction::INVOKE_VIRTUAL:
+ case Instruction::INVOKE_SUPER:
+ case Instruction::INVOKE_DIRECT:
+ case Instruction::INVOKE_STATIC:
+ case Instruction::INVOKE_INTERFACE:
+ case Instruction::INVOKE_VIRTUAL_RANGE:
+ case Instruction::INVOKE_SUPER_RANGE:
+ case Instruction::INVOKE_DIRECT_RANGE:
+ case Instruction::INVOKE_STATIC_RANGE:
+ case Instruction::INVOKE_INTERFACE_RANGE:
+ case Instruction::INVOKE_VIRTUAL_QUICK:
+ case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
+ const MethodSignature* signature = &signatures_[mir->meta.method_lowering_info];
+ MIR* move_result_mir = mir_graph_->FindMoveResult(bb, mir);
+ if (move_result_mir != nullptr) {
+ Type return_type = signature->return_type;
+ sregs_[move_result_mir->ssa_rep->defs[0]] = return_type.AsNonNull();
+ if (return_type.Wide()) {
+ DCHECK_EQ(move_result_mir->ssa_rep->defs[0] + 1, move_result_mir->ssa_rep->defs[1]);
+ sregs_[move_result_mir->ssa_rep->defs[1]] = return_type.ToHighWord();
+ }
+ }
+ size_t next = 0u;
+ for (size_t i = 0, size = signature->num_params; i != size; ++i) {
+ Type param_type = signature->param_types[i];
+ int32_t param_s_reg = ModifiedSReg(uses[next]);
+ DCHECK(!param_type.Wide() || uses[next] + 1 == uses[next + 1]);
+ UpdateSRegFromLowWordType(param_s_reg, param_type);
+ next += param_type.Wide() ? 2 : 1;
+ }
+ DCHECK_EQ(next, num_uses);
+ DCHECK_EQ(next, mir->dalvikInsn.vA);
+ break;
+ }
+
+ case Instruction::RETURN_WIDE:
+ DCHECK(current_method_signature_.return_type.Wide());
+ DCHECK_EQ(uses[0] + 1, uses[1]);
+ DCHECK_EQ(ModifiedSReg(uses[0]), uses[0]);
+ FALLTHROUGH_INTENDED;
+ case Instruction::RETURN:
+ case Instruction::RETURN_OBJECT: {
+ int32_t mod_s_reg = ModifiedSReg(uses[0]);
+ UpdateSRegFromLowWordType(mod_s_reg, current_method_signature_.return_type);
+ break;
+ }
+
+ // NOTE: For AGET/APUT we set only the array type. The operand type is set
+ // below based on the data flow attributes.
+ case Instruction::AGET:
+ case Instruction::APUT:
+ sregs_[ModifiedSReg(uses[num_uses - 2u])].MergeStrong(Type::NarrowArrayType());
+ break;
+ case Instruction::AGET_WIDE:
+ case Instruction::APUT_WIDE:
+ sregs_[ModifiedSReg(uses[num_uses - 2u])].MergeStrong(Type::WideArrayType());
+ break;
+ case Instruction::AGET_OBJECT:
+ sregs_[defs[0]] = sregs_[defs[0]].AsNonNull();
+ FALLTHROUGH_INTENDED;
+ case Instruction::APUT_OBJECT:
+ sregs_[ModifiedSReg(uses[num_uses - 2u])].MergeStrong(Type::ObjectArrayType());
+ break;
+ case Instruction::AGET_BOOLEAN:
+ case Instruction::APUT_BOOLEAN:
+ case Instruction::AGET_BYTE:
+ case Instruction::APUT_BYTE:
+ case Instruction::AGET_CHAR:
+ case Instruction::APUT_CHAR:
+ case Instruction::AGET_SHORT:
+ case Instruction::APUT_SHORT:
+ sregs_[ModifiedSReg(uses[num_uses - 2u])].MergeStrong(Type::NarrowCoreArrayType());
+ break;
+
+ case Instruction::IGET_WIDE:
+ case Instruction::IGET_WIDE_QUICK:
+ DCHECK_EQ(defs[0] + 1, defs[1]);
+ DCHECK_LT(mir->meta.ifield_lowering_info, mir_graph_->GetIFieldLoweringInfoCount());
+ sregs_[defs[1]] = ifields_[mir->meta.ifield_lowering_info].ToHighWord();
+ FALLTHROUGH_INTENDED;
+ case Instruction::IGET:
+ case Instruction::IGET_OBJECT:
+ case Instruction::IGET_BOOLEAN:
+ case Instruction::IGET_BYTE:
+ case Instruction::IGET_CHAR:
+ case Instruction::IGET_SHORT:
+ case Instruction::IGET_QUICK:
+ case Instruction::IGET_OBJECT_QUICK:
+ case Instruction::IGET_BOOLEAN_QUICK:
+ case Instruction::IGET_BYTE_QUICK:
+ case Instruction::IGET_CHAR_QUICK:
+ case Instruction::IGET_SHORT_QUICK:
+ DCHECK_LT(mir->meta.ifield_lowering_info, mir_graph_->GetIFieldLoweringInfoCount());
+ sregs_[defs[0]] = ifields_[mir->meta.ifield_lowering_info].AsNonNull();
+ break;
+ case Instruction::IPUT_WIDE:
+ case Instruction::IPUT_WIDE_QUICK:
+ DCHECK_EQ(uses[0] + 1, uses[1]);
+ FALLTHROUGH_INTENDED;
+ case Instruction::IPUT:
+ case Instruction::IPUT_OBJECT:
+ case Instruction::IPUT_BOOLEAN:
+ case Instruction::IPUT_BYTE:
+ case Instruction::IPUT_CHAR:
+ case Instruction::IPUT_SHORT:
+ case Instruction::IPUT_QUICK:
+ case Instruction::IPUT_OBJECT_QUICK:
+ case Instruction::IPUT_BOOLEAN_QUICK:
+ case Instruction::IPUT_BYTE_QUICK:
+ case Instruction::IPUT_CHAR_QUICK:
+ case Instruction::IPUT_SHORT_QUICK:
+ DCHECK_LT(mir->meta.ifield_lowering_info, mir_graph_->GetIFieldLoweringInfoCount());
+ UpdateSRegFromLowWordType(ModifiedSReg(uses[0]),
+ ifields_[mir->meta.ifield_lowering_info]);
+ break;
+ case Instruction::SGET_WIDE:
+ DCHECK_EQ(defs[0] + 1, defs[1]);
+ DCHECK_LT(mir->meta.sfield_lowering_info, mir_graph_->GetSFieldLoweringInfoCount());
+ sregs_[defs[1]] = sfields_[mir->meta.sfield_lowering_info].ToHighWord();
+ FALLTHROUGH_INTENDED;
+ case Instruction::SGET:
+ case Instruction::SGET_OBJECT:
+ case Instruction::SGET_BOOLEAN:
+ case Instruction::SGET_BYTE:
+ case Instruction::SGET_CHAR:
+ case Instruction::SGET_SHORT:
+ DCHECK_LT(mir->meta.sfield_lowering_info, mir_graph_->GetSFieldLoweringInfoCount());
+ sregs_[defs[0]] = sfields_[mir->meta.sfield_lowering_info].AsNonNull();
+ break;
+ case Instruction::SPUT_WIDE:
+ DCHECK_EQ(uses[0] + 1, uses[1]);
+ FALLTHROUGH_INTENDED;
+ case Instruction::SPUT:
+ case Instruction::SPUT_OBJECT:
+ case Instruction::SPUT_BOOLEAN:
+ case Instruction::SPUT_BYTE:
+ case Instruction::SPUT_CHAR:
+ case Instruction::SPUT_SHORT:
+ DCHECK_LT(mir->meta.sfield_lowering_info, mir_graph_->GetSFieldLoweringInfoCount());
+ UpdateSRegFromLowWordType(ModifiedSReg(uses[0]),
+ sfields_[mir->meta.sfield_lowering_info]);
+ break;
+
+ default:
+ // No invokes or reference definitions here.
+ DCHECK_EQ(attrs & (DF_FORMAT_35C | DF_FORMAT_3RC), 0u);
+ DCHECK_NE(attrs & (DF_DA | DF_REF_A), (DF_DA | DF_REF_A));
+ break;
+ }
+
+ if ((attrs & DF_NULL_TRANSFER_N) != 0) {
+ // Don't process Phis at this stage.
+ continue;
+ }
+
+ // Handle defs
+ if (attrs & DF_DA) {
+ int32_t s_reg = defs[0];
+ sregs_[s_reg].SetLowWord();
+ if (attrs & DF_FP_A) {
+ sregs_[s_reg].SetFp();
+ }
+ if (attrs & DF_CORE_A) {
+ sregs_[s_reg].SetCore();
+ }
+ if (attrs & DF_REF_A) {
+ sregs_[s_reg].SetRef();
+ }
+ if (attrs & DF_A_WIDE) {
+ sregs_[s_reg].SetWide();
+ DCHECK_EQ(s_reg + 1, ModifiedSReg(defs[1]));
+ sregs_[s_reg + 1].MergeHighWord(sregs_[s_reg]);
+ } else {
+ sregs_[s_reg].SetNarrow();
+ }
+ }
+
+      // Handle uses.
+ size_t next = 0;
+ #define PROCESS(REG) \
+ if (attrs & DF_U##REG) { \
+ int32_t mod_s_reg = ModifiedSReg(uses[next]); \
+ sregs_[mod_s_reg].SetLowWord(); \
+ if (attrs & DF_FP_##REG) { \
+ sregs_[mod_s_reg].SetFp(); \
+ } \
+ if (attrs & DF_CORE_##REG) { \
+ sregs_[mod_s_reg].SetCore(); \
+ } \
+ if (attrs & DF_REF_##REG) { \
+ sregs_[mod_s_reg].SetRef(); \
+ } \
+ if (attrs & DF_##REG##_WIDE) { \
+ sregs_[mod_s_reg].SetWide(); \
+ DCHECK_EQ(mod_s_reg + 1, ModifiedSReg(uses[next + 1])); \
+ sregs_[mod_s_reg + 1].SetWide(); \
+ sregs_[mod_s_reg + 1].MergeHighWord(sregs_[mod_s_reg]); \
+ next += 2; \
+ } else { \
+ sregs_[mod_s_reg].SetNarrow(); \
+ next++; \
+ } \
+ }
+ PROCESS(A)
+ PROCESS(B)
+ PROCESS(C)
+ #undef PROCESS
+ DCHECK(next == mir->ssa_rep->num_uses || (attrs & (DF_FORMAT_35C | DF_FORMAT_3RC)) != 0);
+ }
+ // Record relevant attributes.
+ bb_df_attrs_[bb->id] = bb_df_attrs &
+ (DF_NULL_TRANSFER_N | DF_CHK_CAST | DF_IS_MOVE | DF_HAS_RANGE_CHKS | DF_SAME_TYPE_AB);
+ }
+
+ if (UNLIKELY(check_cast_data_ != nullptr)) {
+ check_cast_data_->MarkPseudoPhiBlocks(bb_df_attrs_);
+ }
+}
+
+int32_t TypeInference::ModifiedSReg(int32_t s_reg) {
+ if (UNLIKELY(check_cast_data_ != nullptr)) {
+ SplitSRegData* split_data = check_cast_data_->GetSplitSRegData(s_reg);
+ if (UNLIKELY(split_data != nullptr)) {
+ DCHECK_NE(split_data->current_mod_s_reg, INVALID_SREG);
+ return split_data->current_mod_s_reg;
+ }
+ }
+ return s_reg;
+}
+
+int32_t TypeInference::PhiInputModifiedSReg(int32_t s_reg, BasicBlock* bb, size_t pred_idx) {
+ DCHECK_LT(pred_idx, bb->predecessors.size());
+ if (UNLIKELY(check_cast_data_ != nullptr)) {
+ SplitSRegData* split_data = check_cast_data_->GetSplitSRegData(s_reg);
+ if (UNLIKELY(split_data != nullptr)) {
+ return split_data->ending_mod_s_reg[bb->predecessors[pred_idx]];
+ }
+ }
+ return s_reg;
+}
+
+bool TypeInference::UpdateSRegFromLowWordType(int32_t mod_s_reg, Type low_word_type) {
+ DCHECK(low_word_type.LowWord());
+ bool changed = sregs_[mod_s_reg].MergeStrong(low_word_type);
+ if (!sregs_[mod_s_reg].Narrow()) { // Wide without conflict with narrow.
+ DCHECK(!low_word_type.Narrow());
+ DCHECK_LT(mod_s_reg, mir_graph_->GetNumSSARegs()); // Original SSA reg.
+ changed |= sregs_[mod_s_reg + 1].MergeHighWord(sregs_[mod_s_reg]);
+ }
+ return changed;
+}
+
+} // namespace art
diff --git a/compiler/dex/type_inference.h b/compiler/dex/type_inference.h
new file mode 100644
index 0000000..c9b29bf
--- /dev/null
+++ b/compiler/dex/type_inference.h
@@ -0,0 +1,443 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_TYPE_INFERENCE_H_
+#define ART_COMPILER_DEX_TYPE_INFERENCE_H_
+
+#include "base/logging.h"
+#include "base/arena_object.h"
+#include "base/scoped_arena_containers.h"
+
+namespace art {
+
+class ArenaBitVector;
+class BasicBlock;
+struct CompilationUnit;
+class DexFile;
+class MirFieldInfo;
+class MirMethodInfo;
+class MIR;
+class MIRGraph;
+
+/**
+ * @brief Determine the type of SSA registers.
+ *
+ * @details
+ * Because Dalvik's bytecode is not fully typed, we have to do some work to figure
+ * out the sreg type. For some operations it is clear based on the opcode (e.g.
+ * ADD_FLOAT v0, v1, v2), but for others (MOVE), we may never know the "real" type.
+ *
+ * We perform the type inference operation in two phases:
+ * 1. First, we make one pass over all insns in the topological sort order and
+ * extract known type information from all insns for their defs and uses.
+ * 2. Then we repeatedly go through the graph to process insns that can propagate
+ * types from inputs to outputs and vice versa. These insns are just the MOVEs,
+ * AGET/APUTs, IF_ccs and Phis (including pseudo-Phis, see below).
+ *
+ * Since the main purpose is to determine the basic FP/core/reference type, we don't
+ * need to record the precise reference type; we record only the array type, which
+ * determines the result types of AGETs and the source types of APUTs.
+ *
+ * One complication is the check-cast instruction that effectively defines a new
+ * virtual register that has a different type than the original sreg. We need to
+ * track these virtual sregs and insert pseudo-phis where they merge.
+ *
+ * Another problem is with null references. The same zero constant can be used
+ * as differently typed null and moved around with move-object which would normally
+ * be an ill-formed assignment. So we need to keep track of values that can be null
+ * and values that cannot.
+ *
+ * Note that it's possible to have the same sreg show multiple defined types because dx
+ * treats constants as untyped bit patterns. We disable register promotion in that case.
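+ *
+ * A typical driving loop looks like the sketch below (the actual pass plumbing
+ * lives elsewhere; the repeating pre-order iterator is an assumption based on
+ * other repeating dataflow passes):
+ *
+ *   TypeInference inference(mir_graph, &allocator);
+ *   RepeatingPreOrderDfsIterator iter(mir_graph);
+ *   bool changed = false;
+ *   for (BasicBlock* bb = iter.Next(false); bb != nullptr; bb = iter.Next(changed)) {
+ *     changed = inference.Apply(bb);  // Repeat until no type changes.
+ *   }
+ *   inference.Finish();  // Write results to reg_location_ and handle conflicts.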
+ */
+class TypeInference : public DeletableArenaObject<kArenaAllocMisc> {
+ public:
+ TypeInference(MIRGraph* mir_graph, ScopedArenaAllocator* alloc);
+
+ bool Apply(BasicBlock* bb);
+ void Finish();
+
+ private:
+ struct Type {
+ static Type Unknown() {
+ return Type(0u);
+ }
+
+ static Type NonArrayRefType() {
+ return Type(kFlagLowWord | kFlagNarrow | kFlagRef);
+ }
+
+ static Type ObjectArrayType() {
+ return Type(kFlagNarrow | kFlagRef | kFlagLowWord |
+ (1u << kBitArrayDepthStart) | kFlagArrayNarrow | kFlagArrayRef);
+ }
+
+ static Type WideArrayType() {
+ // Core or FP unknown.
+ return Type(kFlagNarrow | kFlagRef | kFlagLowWord |
+ (1u << kBitArrayDepthStart) | kFlagArrayWide);
+ }
+
+ static Type NarrowArrayType() {
+ // Core or FP unknown.
+ return Type(kFlagNarrow | kFlagRef | kFlagLowWord |
+ (1u << kBitArrayDepthStart) | kFlagArrayNarrow);
+ }
+
+ static Type NarrowCoreArrayType() {
+ return Type(kFlagNarrow | kFlagRef | kFlagLowWord |
+ (1u << kBitArrayDepthStart) | kFlagArrayNarrow | kFlagArrayCore);
+ }
+
+ static Type UnknownArrayType() {
+ return Type(kFlagNarrow | kFlagRef | kFlagLowWord | (1u << kBitArrayDepthStart));
+ }
+
+ static Type ArrayType(uint32_t array_depth, Type nested_type);
+ static Type ArrayTypeFromComponent(Type component_type);
+ static Type ShortyType(char shorty);
+ static Type DexType(const DexFile* dex_file, uint32_t type_idx);
+
+    bool IsDefined() const {
+ return raw_bits_ != 0u;
+ }
+
+ bool SizeConflict() const {
+ // NOTE: Ignore array element conflicts that don't propagate to direct conflicts.
+ return (Wide() && Narrow()) || (HighWord() && LowWord());
+ }
+
+ bool TypeConflict() const {
+ // NOTE: Ignore array element conflicts that don't propagate to direct conflicts.
+ return (raw_bits_ & kMaskType) != 0u && !IsPowerOfTwo(raw_bits_ & kMaskType); // 2+ bits.
+ }
+
+ void MarkSizeConflict() {
+ SetBits(kFlagLowWord | kFlagHighWord);
+ }
+
+ void MarkTypeConflict() {
+ // Mark all three type bits so that merging any other type bits will not change this type.
+ SetBits(kFlagFp | kFlagCore | kFlagRef);
+ }
+
+ void CheckPureRef() const {
+ DCHECK_EQ(raw_bits_ & (kMaskWideAndType | kMaskWord), kFlagNarrow | kFlagRef | kFlagLowWord);
+ }
+
+ // If reference, don't treat as possible null and require precise type.
+ //
+ // References without this flag are allowed to have a type conflict and their
+ // type will not be propagated down. However, for simplicity we allow propagation
+ // of other flags up as it will affect only other null references; should those
+ // references be marked non-null later, we would have to do it anyway.
+  // NOTE: This is a negative "non-null" flag rather than a positive "is-null"
+ // to simplify merging together with other non-array flags.
+ bool NonNull() const {
+ return IsBitSet(kFlagNonNull);
+ }
+
+ bool Wide() const {
+ return IsBitSet(kFlagWide);
+ }
+
+ bool Narrow() const {
+ return IsBitSet(kFlagNarrow);
+ }
+
+ bool Fp() const {
+ return IsBitSet(kFlagFp);
+ }
+
+ bool Core() const {
+ return IsBitSet(kFlagCore);
+ }
+
+ bool Ref() const {
+ return IsBitSet(kFlagRef);
+ }
+
+ bool LowWord() const {
+ return IsBitSet(kFlagLowWord);
+ }
+
+ bool HighWord() const {
+ return IsBitSet(kFlagHighWord);
+ }
+
+ uint32_t ArrayDepth() const {
+ return raw_bits_ >> kBitArrayDepthStart;
+ }
+
+ Type NestedType() const {
+ DCHECK_NE(ArrayDepth(), 0u);
+ return Type(kFlagLowWord | ((raw_bits_ & kMaskArrayWideAndType) >> kArrayTypeShift));
+ }
+
+ Type ComponentType() const {
+ DCHECK_NE(ArrayDepth(), 0u);
+ Type temp(raw_bits_ - (1u << kBitArrayDepthStart)); // array_depth - 1u;
+ return (temp.ArrayDepth() != 0u) ? temp.AsNull() : NestedType();
+ }
+
+ void SetWide() {
+ SetBits(kFlagWide);
+ }
+
+ void SetNarrow() {
+ SetBits(kFlagNarrow);
+ }
+
+ void SetFp() {
+ SetBits(kFlagFp);
+ }
+
+ void SetCore() {
+ SetBits(kFlagCore);
+ }
+
+ void SetRef() {
+ SetBits(kFlagRef);
+ }
+
+ void SetLowWord() {
+ SetBits(kFlagLowWord);
+ }
+
+ void SetHighWord() {
+ SetBits(kFlagHighWord);
+ }
+
+ Type ToHighWord() const {
+ DCHECK_EQ(raw_bits_ & (kMaskWide | kMaskWord), kFlagWide | kFlagLowWord);
+ return Type(raw_bits_ ^ (kFlagLowWord | kFlagHighWord));
+ }
+
+ bool MergeHighWord(Type low_word_type) {
+ // NOTE: low_word_type may be also Narrow() or HighWord().
+ DCHECK(low_word_type.Wide() && low_word_type.LowWord());
+ return MergeBits(Type(low_word_type.raw_bits_ | kFlagHighWord),
+ kMaskWideAndType | kFlagHighWord);
+ }
+
+ bool Copy(Type type) {
+ if (raw_bits_ != type.raw_bits_) {
+ raw_bits_ = type.raw_bits_;
+ return true;
+ }
+ return false;
+ }
+
+ // Merge non-array flags.
+ bool MergeNonArrayFlags(Type src_type) {
+ return MergeBits(src_type, kMaskNonArray);
+ }
+
+ // Merge array flags for conflict.
+ bool MergeArrayConflict(Type src_type);
+
+  // Merge all flags; a more specific source array type may overwrite the current
+  // one. Used when propagating a known type down from a definition.
+ bool MergeStrong(Type src_type);
+
+  // Merge all flags, but take array type information only from a non-null source
+  // and never adopt a deeper array depth. Used when merging possibly-null inputs,
+  // e.g. in Phis and IF_cc operands.
+ bool MergeWeak(Type src_type);
+
+ // Get the same type but mark that it should not be treated as null.
+ Type AsNonNull() const {
+ return Type(raw_bits_ | kFlagNonNull);
+ }
+
+ // Get the same type but mark that it can be treated as null.
+ Type AsNull() const {
+ return Type(raw_bits_ & ~kFlagNonNull);
+ }
+
+ private:
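+    // Flag layout (an explanatory summary derived from the definitions below): the
+    // low bits describe the value itself (null-ness, size, fp/core/ref, word
+    // position); the kBitArray* bits mirror the size and type bits, shifted by
+    // kArrayTypeShift, for the innermost element of an array; the high bits from
+    // kBitArrayDepthStart up store the array depth. E.g. "[[F" becomes a narrow
+    // low-word ref with depth 2 and array-narrow + array-fp element bits.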
+ enum FlagBits {
+ kBitNonNull = 0,
+ kBitWide,
+ kBitNarrow,
+ kBitFp,
+ kBitCore,
+ kBitRef,
+ kBitLowWord,
+ kBitHighWord,
+ kBitArrayWide,
+ kBitArrayNarrow,
+ kBitArrayFp,
+ kBitArrayCore,
+ kBitArrayRef,
+ kBitArrayDepthStart,
+ };
+ static constexpr size_t kArrayDepthBits = sizeof(uint32_t) * 8u - kBitArrayDepthStart;
+
+ static constexpr uint32_t kFlagNonNull = 1u << kBitNonNull;
+ static constexpr uint32_t kFlagWide = 1u << kBitWide;
+ static constexpr uint32_t kFlagNarrow = 1u << kBitNarrow;
+ static constexpr uint32_t kFlagFp = 1u << kBitFp;
+ static constexpr uint32_t kFlagCore = 1u << kBitCore;
+ static constexpr uint32_t kFlagRef = 1u << kBitRef;
+ static constexpr uint32_t kFlagLowWord = 1u << kBitLowWord;
+ static constexpr uint32_t kFlagHighWord = 1u << kBitHighWord;
+ static constexpr uint32_t kFlagArrayWide = 1u << kBitArrayWide;
+ static constexpr uint32_t kFlagArrayNarrow = 1u << kBitArrayNarrow;
+ static constexpr uint32_t kFlagArrayFp = 1u << kBitArrayFp;
+ static constexpr uint32_t kFlagArrayCore = 1u << kBitArrayCore;
+ static constexpr uint32_t kFlagArrayRef = 1u << kBitArrayRef;
+
+ static constexpr uint32_t kMaskWide = kFlagWide | kFlagNarrow;
+ static constexpr uint32_t kMaskType = kFlagFp | kFlagCore | kFlagRef;
+ static constexpr uint32_t kMaskWord = kFlagLowWord | kFlagHighWord;
+ static constexpr uint32_t kMaskArrayWide = kFlagArrayWide | kFlagArrayNarrow;
+ static constexpr uint32_t kMaskArrayType = kFlagArrayFp | kFlagArrayCore | kFlagArrayRef;
+ static constexpr uint32_t kMaskWideAndType = kMaskWide | kMaskType;
+ static constexpr uint32_t kMaskArrayWideAndType = kMaskArrayWide | kMaskArrayType;
+
+ static constexpr size_t kArrayTypeShift = kBitArrayWide - kBitWide;
+ static_assert(kArrayTypeShift == kBitArrayNarrow - kBitNarrow, "shift mismatch");
+ static_assert(kArrayTypeShift == kBitArrayFp - kBitFp, "shift mismatch");
+ static_assert(kArrayTypeShift == kBitArrayCore - kBitCore, "shift mismatch");
+ static_assert(kArrayTypeShift == kBitArrayRef - kBitRef, "shift mismatch");
+ static_assert((kMaskWide << kArrayTypeShift) == kMaskArrayWide, "shift mismatch");
+ static_assert((kMaskType << kArrayTypeShift) == kMaskArrayType, "shift mismatch");
+ static_assert((kMaskWideAndType << kArrayTypeShift) == kMaskArrayWideAndType, "shift mismatch");
+
+ static constexpr uint32_t kMaskArrayDepth = static_cast<uint32_t>(-1) << kBitArrayDepthStart;
+ static constexpr uint32_t kMaskNonArray = ~(kMaskArrayWideAndType | kMaskArrayDepth);
+
+ // The maximum representable array depth. If we exceed the maximum (which can happen
+ // only with an absurd nested array type in a dex file which would presumably cause
+ // OOM while being resolved), we can report false conflicts.
+ static constexpr uint32_t kMaxArrayDepth = static_cast<uint32_t>(-1) >> kBitArrayDepthStart;
+
+ explicit Type(uint32_t raw_bits) : raw_bits_(raw_bits) { }
+
+ bool IsBitSet(uint32_t flag) const {
+ return (raw_bits_ & flag) != 0u;
+ }
+
+ void SetBits(uint32_t flags) {
+ raw_bits_ |= flags;
+ }
+
+ bool MergeBits(Type src_type, uint32_t mask) {
+ uint32_t new_bits = raw_bits_ | (src_type.raw_bits_ & mask);
+ if (new_bits != raw_bits_) {
+ raw_bits_ = new_bits;
+ return true;
+ }
+ return false;
+ }
+
+ uint32_t raw_bits_;
+ };
+
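+  // Method signature used for INVOKE and RETURN type propagation. For non-static
+  // methods, param_types[0] is the implicit "this" reference.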
+ struct MethodSignature {
+ Type return_type;
+ size_t num_params;
+ Type* param_types;
+ };
+
+ struct SplitSRegData {
+ int32_t current_mod_s_reg;
+ int32_t* starting_mod_s_reg; // Indexed by BasicBlock::id.
+ int32_t* ending_mod_s_reg; // Indexed by BasicBlock::id.
+
+ // NOTE: Before AddPseudoPhis(), def_phi_blocks_ marks the blocks
+ // with check-casts and the block with the original SSA reg.
+ // After AddPseudoPhis(), it marks blocks with pseudo-phis.
+ ArenaBitVector* def_phi_blocks_; // Indexed by BasicBlock::id.
+ };
+
+ class CheckCastData : public DeletableArenaObject<kArenaAllocMisc> {
+ public:
+ CheckCastData(MIRGraph* mir_graph, ScopedArenaAllocator* alloc);
+
+ size_t NumSRegs() const {
+ return num_sregs_;
+ }
+
+ void AddCheckCast(MIR* check_cast, Type type);
+ void AddPseudoPhis();
+ void InitializeCheckCastSRegs(Type* sregs) const;
+ void MergeCheckCastConflicts(Type* sregs) const;
+ void MarkPseudoPhiBlocks(uint64_t* bb_df_attrs) const;
+
+ void Start(BasicBlock* bb);
+ bool ProcessPseudoPhis(BasicBlock* bb, Type* sregs);
+ void ProcessCheckCast(MIR* mir);
+
+ SplitSRegData* GetSplitSRegData(int32_t s_reg);
+
+ private:
+ BasicBlock* FindDefBlock(MIR* check_cast);
+ BasicBlock* FindTopologicallyEarliestPredecessor(BasicBlock* bb);
+ bool IsSRegLiveAtStart(BasicBlock* bb, int v_reg, int32_t s_reg);
+
+ MIRGraph* const mir_graph_;
+ ScopedArenaAllocator* const alloc_;
+ const size_t num_blocks_;
+ size_t num_sregs_;
+
+ // Map check-cast mir to special sreg and type.
+ struct CheckCastMapValue {
+ int32_t modified_s_reg;
+ Type type;
+ };
+ ScopedArenaSafeMap<MIR*, CheckCastMapValue> check_cast_map_;
+ ScopedArenaSafeMap<int32_t, SplitSRegData> split_sreg_data_;
+ };
+
+ static Type FieldType(const DexFile* dex_file, uint32_t field_idx);
+ static Type* PrepareIFieldTypes(const DexFile* dex_file, MIRGraph* mir_graph,
+ ScopedArenaAllocator* alloc);
+ static Type* PrepareSFieldTypes(const DexFile* dex_file, MIRGraph* mir_graph,
+ ScopedArenaAllocator* alloc);
+ static MethodSignature Signature(const DexFile* dex_file, uint32_t method_idx, bool is_static,
+ ScopedArenaAllocator* alloc);
+ static MethodSignature* PrepareSignatures(const DexFile* dex_file, MIRGraph* mir_graph,
+ ScopedArenaAllocator* alloc);
+ static CheckCastData* InitializeCheckCastData(MIRGraph* mir_graph, ScopedArenaAllocator* alloc);
+
+ void InitializeSRegs();
+
+ int32_t ModifiedSReg(int32_t s_reg);
+ int32_t PhiInputModifiedSReg(int32_t s_reg, BasicBlock* bb, size_t pred_idx);
+
+ bool UpdateSRegFromLowWordType(int32_t mod_s_reg, Type low_word_type);
+
+ MIRGraph* const mir_graph_;
+ CompilationUnit* const cu_;
+
+  // Type inference also propagates types backwards, but this must not happen across
+  // a check-cast. So we effectively split an SSA reg in two at each check-cast and
+  // keep track of the two types separately.
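+  //
+  // For example, after
+  //     check-cast v0, [I
+  // uses of v0's SSA reg below the check-cast see the narrowed type [I, while uses
+  // above it keep the previously inferred type.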
+ std::unique_ptr<CheckCastData> check_cast_data_;
+
+ size_t num_sregs_; // Number of SSA regs or modified SSA regs, see check-cast.
+ const Type* const ifields_; // Indexed by MIR::meta::ifield_lowering_info.
+ const Type* const sfields_; // Indexed by MIR::meta::sfield_lowering_info.
+ const MethodSignature* const signatures_; // Indexed by MIR::meta::method_lowering_info.
+ const MethodSignature current_method_signature_;
+ Type* const sregs_; // Indexed by SSA reg or modified SSA reg, see check-cast.
+ uint64_t* const bb_df_attrs_; // Indexed by BasicBlock::id.
+
+ friend class TypeInferenceTest;
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_DEX_TYPE_INFERENCE_H_
diff --git a/compiler/dex/type_inference_test.cc b/compiler/dex/type_inference_test.cc
new file mode 100644
index 0000000..eaa2bfa
--- /dev/null
+++ b/compiler/dex/type_inference_test.cc
@@ -0,0 +1,2044 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "base/logging.h"
+#include "compiler_ir.h"
+#include "dataflow_iterator-inl.h"
+#include "dex_flags.h"
+#include "dex/mir_field_info.h"
+#include "dex/mir_graph.h"
+#include "driver/dex_compilation_unit.h"
+#include "gtest/gtest.h"
+#include "type_inference.h"
+#include "utils/test_dex_file_builder.h"
+
+namespace art {
+
+class TypeInferenceTest : public testing::Test {
+ protected:
+ struct TypeDef {
+ const char* descriptor;
+ };
+
+ struct FieldDef {
+ const char* class_descriptor;
+ const char* type;
+ const char* name;
+ };
+
+ struct MethodDef {
+ const char* class_descriptor;
+ const char* signature;
+ const char* name;
+ InvokeType type;
+ };
+
+ struct BBDef {
+ static constexpr size_t kMaxSuccessors = 4;
+ static constexpr size_t kMaxPredecessors = 4;
+
+ BBType type;
+ size_t num_successors;
+    BasicBlockId successors[kMaxSuccessors];
+ size_t num_predecessors;
+ BasicBlockId predecessors[kMaxPredecessors];
+ };
+
+ struct MIRDef {
+ static constexpr size_t kMaxSsaDefs = 2;
+ static constexpr size_t kMaxSsaUses = 4;
+
+ BasicBlockId bbid;
+ Instruction::Code opcode;
+ int64_t value;
+ uint32_t metadata;
+ size_t num_uses;
+ int32_t uses[kMaxSsaUses];
+ size_t num_defs;
+ int32_t defs[kMaxSsaDefs];
+ };
+
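+  // Each DEF_* macro below expands to a MIRDef aggregate initializer in field order:
+  // { bbid, opcode, value, metadata, num_uses, { uses }, num_defs, { defs } }.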
+#define DEF_SUCC0() \
+ 0u, { }
+#define DEF_SUCC1(s1) \
+ 1u, { s1 }
+#define DEF_SUCC2(s1, s2) \
+ 2u, { s1, s2 }
+#define DEF_SUCC3(s1, s2, s3) \
+ 3u, { s1, s2, s3 }
+#define DEF_SUCC4(s1, s2, s3, s4) \
+ 4u, { s1, s2, s3, s4 }
+#define DEF_PRED0() \
+ 0u, { }
+#define DEF_PRED1(p1) \
+ 1u, { p1 }
+#define DEF_PRED2(p1, p2) \
+ 2u, { p1, p2 }
+#define DEF_PRED3(p1, p2, p3) \
+ 3u, { p1, p2, p3 }
+#define DEF_PRED4(p1, p2, p3, p4) \
+ 4u, { p1, p2, p3, p4 }
+#define DEF_BB(type, succ, pred) \
+ { type, succ, pred }
+
+#define DEF_CONST(bb, opcode, reg, value) \
+ { bb, opcode, value, 0u, 0, { }, 1, { reg } }
+#define DEF_CONST_WIDE(bb, opcode, reg, value) \
+ { bb, opcode, value, 0u, 0, { }, 2, { reg, reg + 1 } }
+#define DEF_CONST_STRING(bb, opcode, reg, index) \
+ { bb, opcode, index, 0u, 0, { }, 1, { reg } }
+#define DEF_IGET(bb, opcode, reg, obj, field_info) \
+ { bb, opcode, 0u, field_info, 1, { obj }, 1, { reg } }
+#define DEF_IGET_WIDE(bb, opcode, reg, obj, field_info) \
+ { bb, opcode, 0u, field_info, 1, { obj }, 2, { reg, reg + 1 } }
+#define DEF_IPUT(bb, opcode, reg, obj, field_info) \
+ { bb, opcode, 0u, field_info, 2, { reg, obj }, 0, { } }
+#define DEF_IPUT_WIDE(bb, opcode, reg, obj, field_info) \
+ { bb, opcode, 0u, field_info, 3, { reg, reg + 1, obj }, 0, { } }
+#define DEF_SGET(bb, opcode, reg, field_info) \
+ { bb, opcode, 0u, field_info, 0, { }, 1, { reg } }
+#define DEF_SGET_WIDE(bb, opcode, reg, field_info) \
+ { bb, opcode, 0u, field_info, 0, { }, 2, { reg, reg + 1 } }
+#define DEF_SPUT(bb, opcode, reg, field_info) \
+ { bb, opcode, 0u, field_info, 1, { reg }, 0, { } }
+#define DEF_SPUT_WIDE(bb, opcode, reg, field_info) \
+ { bb, opcode, 0u, field_info, 2, { reg, reg + 1 }, 0, { } }
+#define DEF_AGET(bb, opcode, reg, obj, idx) \
+ { bb, opcode, 0u, 0u, 2, { obj, idx }, 1, { reg } }
+#define DEF_AGET_WIDE(bb, opcode, reg, obj, idx) \
+ { bb, opcode, 0u, 0u, 2, { obj, idx }, 2, { reg, reg + 1 } }
+#define DEF_APUT(bb, opcode, reg, obj, idx) \
+ { bb, opcode, 0u, 0u, 3, { reg, obj, idx }, 0, { } }
+#define DEF_APUT_WIDE(bb, opcode, reg, obj, idx) \
+ { bb, opcode, 0u, 0u, 4, { reg, reg + 1, obj, idx }, 0, { } }
+#define DEF_INVOKE0(bb, opcode, method_idx) \
+ { bb, opcode, 0u, method_idx, 0, { }, 0, { } }
+#define DEF_INVOKE1(bb, opcode, reg, method_idx) \
+ { bb, opcode, 0u, method_idx, 1, { reg }, 0, { } }
+#define DEF_INVOKE2(bb, opcode, reg1, reg2, method_idx) \
+ { bb, opcode, 0u, method_idx, 2, { reg1, reg2 }, 0, { } }
+#define DEF_IFZ(bb, opcode, reg) \
+ { bb, opcode, 0u, 0u, 1, { reg }, 0, { } }
+#define DEF_MOVE(bb, opcode, reg, src) \
+ { bb, opcode, 0u, 0u, 1, { src }, 1, { reg } }
+#define DEF_MOVE_WIDE(bb, opcode, reg, src) \
+ { bb, opcode, 0u, 0u, 2, { src, src + 1 }, 2, { reg, reg + 1 } }
+#define DEF_PHI2(bb, reg, src1, src2) \
+ { bb, static_cast<Instruction::Code>(kMirOpPhi), 0, 0u, 2u, { src1, src2 }, 1, { reg } }
+#define DEF_BINOP(bb, opcode, result, src1, src2) \
+ { bb, opcode, 0u, 0u, 2, { src1, src2 }, 1, { result } }
+#define DEF_UNOP(bb, opcode, result, src) DEF_MOVE(bb, opcode, result, src)
+#define DEF_NULOP(bb, opcode, result) DEF_CONST(bb, opcode, result, 0)
+#define DEF_NULOP_WIDE(bb, opcode, result) DEF_CONST_WIDE(bb, opcode, result, 0)
+#define DEF_CHECK_CAST(bb, opcode, reg, type) \
+ { bb, opcode, 0, type, 1, { reg }, 0, { } }
+#define DEF_NEW_ARRAY(bb, opcode, reg, length, type) \
+ { bb, opcode, 0, type, 1, { length }, 1, { reg } }
+
+ void AddTypes(const TypeDef* defs, size_t count) {
+ for (size_t i = 0; i != count; ++i) {
+ const TypeDef* def = &defs[i];
+ dex_file_builder_.AddType(def->descriptor);
+ }
+ }
+
+ template <size_t count>
+ void PrepareTypes(const TypeDef (&defs)[count]) {
+ type_defs_ = defs;
+ type_count_ = count;
+ AddTypes(defs, count);
+ }
+
+ void AddFields(const FieldDef* defs, size_t count) {
+ for (size_t i = 0; i != count; ++i) {
+ const FieldDef* def = &defs[i];
+ dex_file_builder_.AddField(def->class_descriptor, def->type, def->name);
+ }
+ }
+
+ template <size_t count>
+ void PrepareIFields(const FieldDef (&defs)[count]) {
+ ifield_defs_ = defs;
+ ifield_count_ = count;
+ AddFields(defs, count);
+ }
+
+ template <size_t count>
+ void PrepareSFields(const FieldDef (&defs)[count]) {
+ sfield_defs_ = defs;
+ sfield_count_ = count;
+ AddFields(defs, count);
+ }
+
+ void AddMethods(const MethodDef* defs, size_t count) {
+ for (size_t i = 0; i != count; ++i) {
+ const MethodDef* def = &defs[i];
+ dex_file_builder_.AddMethod(def->class_descriptor, def->signature, def->name);
+ }
+ }
+
+ template <size_t count>
+ void PrepareMethods(const MethodDef (&defs)[count]) {
+ method_defs_ = defs;
+ method_count_ = count;
+ AddMethods(defs, count);
+ }
+
+ DexMemAccessType AccessTypeForDescriptor(const char* descriptor) {
+ switch (descriptor[0]) {
+ case 'I':
+ case 'F':
+ return kDexMemAccessWord;
+ case 'J':
+ case 'D':
+ return kDexMemAccessWide;
+ case '[':
+ case 'L':
+ return kDexMemAccessObject;
+ case 'Z':
+ return kDexMemAccessBoolean;
+ case 'B':
+ return kDexMemAccessByte;
+ case 'C':
+ return kDexMemAccessChar;
+ case 'S':
+ return kDexMemAccessShort;
+ default:
+ LOG(FATAL) << "Bad descriptor: " << descriptor;
+ UNREACHABLE();
+ }
+ }
+
+ size_t CountIns(const std::string& test_method_signature, bool is_static) {
+ const char* sig = test_method_signature.c_str();
+ CHECK_EQ(sig[0], '(');
+ ++sig;
+ size_t result = is_static ? 0u : 1u;
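+    // The non-static case starts at 1 for the implicit "this" argument. Wide types
+    // (J, D) take two vregs, everything else one; the loops below skip past array
+    // ('[') prefixes and full class descriptors ("L...;").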
+ while (*sig != ')') {
+ result += (AccessTypeForDescriptor(sig) == kDexMemAccessWide) ? 2u : 1u;
+ while (*sig == '[') {
+ ++sig;
+ }
+ if (*sig == 'L') {
+ do {
+ ++sig;
+ CHECK(*sig != '\0' && *sig != ')');
+ } while (*sig != ';');
+ }
+ ++sig;
+ }
+ return result;
+ }
+
+ void BuildDexFile(const std::string& test_method_signature, bool is_static) {
+ dex_file_builder_.AddMethod(kClassName, test_method_signature, kMethodName);
+ dex_file_ = dex_file_builder_.Build(kDexLocation);
+ cu_.dex_file = dex_file_.get();
+ cu_.method_idx = dex_file_builder_.GetMethodIdx(kClassName, test_method_signature, kMethodName);
+ cu_.access_flags = is_static ? kAccStatic : 0u;
+ cu_.mir_graph->m_units_.push_back(new (cu_.mir_graph->arena_) DexCompilationUnit(
+ &cu_, cu_.class_loader, cu_.class_linker, *cu_.dex_file, nullptr /* code_item not used */,
+ 0u /* class_def_idx not used */, 0u /* method_index not used */,
+ cu_.access_flags, nullptr /* verified_method not used */));
+ cu_.mir_graph->current_method_ = 0u;
+ code_item_ = static_cast<DexFile::CodeItem*>(
+ cu_.arena.Alloc(sizeof(DexFile::CodeItem), kArenaAllocMisc));
+
+ code_item_->ins_size_ = CountIns(test_method_signature, is_static);
+ code_item_->registers_size_ = kLocalVRs + code_item_->ins_size_;
+ cu_.mir_graph->current_code_item_ = code_item_;
+ cu_.mir_graph->num_ssa_regs_ = kMaxSsaRegs;
+
+ cu_.mir_graph->ifield_lowering_infos_.clear();
+ cu_.mir_graph->ifield_lowering_infos_.reserve(ifield_count_);
+ for (size_t i = 0u; i != ifield_count_; ++i) {
+ const FieldDef* def = &ifield_defs_[i];
+ uint32_t field_idx =
+ dex_file_builder_.GetFieldIdx(def->class_descriptor, def->type, def->name);
+ MirIFieldLoweringInfo field_info(field_idx, AccessTypeForDescriptor(def->type), false);
+ field_info.declaring_dex_file_ = cu_.dex_file;
+ field_info.declaring_field_idx_ = field_idx;
+ cu_.mir_graph->ifield_lowering_infos_.push_back(field_info);
+ }
+
+ cu_.mir_graph->sfield_lowering_infos_.clear();
+ cu_.mir_graph->sfield_lowering_infos_.reserve(sfield_count_);
+ for (size_t i = 0u; i != sfield_count_; ++i) {
+ const FieldDef* def = &sfield_defs_[i];
+ uint32_t field_idx =
+ dex_file_builder_.GetFieldIdx(def->class_descriptor, def->type, def->name);
+ MirSFieldLoweringInfo field_info(field_idx, AccessTypeForDescriptor(def->type));
+ field_info.declaring_dex_file_ = cu_.dex_file;
+ field_info.declaring_field_idx_ = field_idx;
+ cu_.mir_graph->sfield_lowering_infos_.push_back(field_info);
+ }
+
+ cu_.mir_graph->method_lowering_infos_.clear();
+    cu_.mir_graph->method_lowering_infos_.reserve(method_count_);
+ for (size_t i = 0u; i != method_count_; ++i) {
+ const MethodDef* def = &method_defs_[i];
+ uint32_t method_idx =
+ dex_file_builder_.GetMethodIdx(def->class_descriptor, def->signature, def->name);
+ MirMethodLoweringInfo method_info(method_idx, def->type, false);
+ method_info.declaring_dex_file_ = cu_.dex_file;
+ method_info.declaring_method_idx_ = method_idx;
+ cu_.mir_graph->method_lowering_infos_.push_back(method_info);
+ }
+ }
+
+ void DoPrepareBasicBlocks(const BBDef* defs, size_t count) {
+ cu_.mir_graph->block_id_map_.clear();
+ cu_.mir_graph->block_list_.clear();
+ ASSERT_LT(3u, count); // null, entry, exit and at least one bytecode block.
+ ASSERT_EQ(kNullBlock, defs[0].type);
+ ASSERT_EQ(kEntryBlock, defs[1].type);
+ ASSERT_EQ(kExitBlock, defs[2].type);
+ for (size_t i = 0u; i != count; ++i) {
+ const BBDef* def = &defs[i];
+ BasicBlock* bb = cu_.mir_graph->CreateNewBB(def->type);
+ if (def->num_successors <= 2) {
+ bb->successor_block_list_type = kNotUsed;
+ bb->fall_through = (def->num_successors >= 1) ? def->successors[0] : 0u;
+ bb->taken = (def->num_successors >= 2) ? def->successors[1] : 0u;
+ } else {
+ bb->successor_block_list_type = kPackedSwitch;
+ bb->fall_through = 0u;
+ bb->taken = 0u;
+ bb->successor_blocks.reserve(def->num_successors);
+ for (size_t j = 0u; j != def->num_successors; ++j) {
+ SuccessorBlockInfo* successor_block_info =
+ static_cast<SuccessorBlockInfo*>(cu_.arena.Alloc(sizeof(SuccessorBlockInfo),
+ kArenaAllocSuccessor));
+          successor_block_info->block = def->successors[j];
+          successor_block_info->key = 0u;  // Not used by the type inference test.
+ bb->successor_blocks.push_back(successor_block_info);
+ }
+ }
+ bb->predecessors.assign(def->predecessors, def->predecessors + def->num_predecessors);
+ if (def->type == kDalvikByteCode || def->type == kEntryBlock || def->type == kExitBlock) {
+ bb->data_flow_info = static_cast<BasicBlockDataFlow*>(
+ cu_.arena.Alloc(sizeof(BasicBlockDataFlow), kArenaAllocDFInfo));
+ bb->data_flow_info->live_in_v = live_in_v_;
+ }
+ }
+ ASSERT_EQ(count, cu_.mir_graph->block_list_.size());
+ cu_.mir_graph->entry_block_ = cu_.mir_graph->block_list_[1];
+ ASSERT_EQ(kEntryBlock, cu_.mir_graph->entry_block_->block_type);
+ cu_.mir_graph->exit_block_ = cu_.mir_graph->block_list_[2];
+ ASSERT_EQ(kExitBlock, cu_.mir_graph->exit_block_->block_type);
+ }
+
+ template <size_t count>
+ void PrepareBasicBlocks(const BBDef (&defs)[count]) {
+ DoPrepareBasicBlocks(defs, count);
+ }
+
+ void PrepareSingleBlock() {
+ static const BBDef bbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(3)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(1)),
+ };
+ PrepareBasicBlocks(bbs);
+ }
+
+ void PrepareDiamond() {
+ static const BBDef bbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(6)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 5), DEF_PRED1(1)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(4, 5)),
+ };
+ PrepareBasicBlocks(bbs);
+ }
+
+ void PrepareLoop() {
+ static const BBDef bbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(5)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 4), DEF_PRED2(3, 4)), // "taken" loops to self.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(4)),
+ };
+ PrepareBasicBlocks(bbs);
+ }
+
+ void DoPrepareMIRs(const MIRDef* defs, size_t count) {
+ mir_count_ = count;
+ mirs_ = cu_.arena.AllocArray<MIR>(count, kArenaAllocMIR);
+ ssa_reps_.resize(count);
+ for (size_t i = 0u; i != count; ++i) {
+ const MIRDef* def = &defs[i];
+ MIR* mir = &mirs_[i];
+ ASSERT_LT(def->bbid, cu_.mir_graph->block_list_.size());
+ BasicBlock* bb = cu_.mir_graph->block_list_[def->bbid];
+ bb->AppendMIR(mir);
+ mir->dalvikInsn.opcode = def->opcode;
+ mir->dalvikInsn.vB = static_cast<int32_t>(def->value);
+ mir->dalvikInsn.vB_wide = def->value;
+ if (IsInstructionIGetOrIPut(def->opcode)) {
+ ASSERT_LT(def->metadata, cu_.mir_graph->ifield_lowering_infos_.size());
+ mir->meta.ifield_lowering_info = def->metadata;
+ ASSERT_EQ(cu_.mir_graph->ifield_lowering_infos_[def->metadata].MemAccessType(),
+ IGetOrIPutMemAccessType(def->opcode));
+ cu_.mir_graph->merged_df_flags_ |= DF_IFIELD;
+ } else if (IsInstructionSGetOrSPut(def->opcode)) {
+ ASSERT_LT(def->metadata, cu_.mir_graph->sfield_lowering_infos_.size());
+ mir->meta.sfield_lowering_info = def->metadata;
+ ASSERT_EQ(cu_.mir_graph->sfield_lowering_infos_[def->metadata].MemAccessType(),
+ SGetOrSPutMemAccessType(def->opcode));
+ cu_.mir_graph->merged_df_flags_ |= DF_SFIELD;
+ } else if (IsInstructionInvoke(def->opcode)) {
+ ASSERT_LT(def->metadata, cu_.mir_graph->method_lowering_infos_.size());
+ mir->meta.method_lowering_info = def->metadata;
+ mir->dalvikInsn.vA = def->num_uses;
+ cu_.mir_graph->merged_df_flags_ |= DF_FORMAT_35C;
+ } else if (def->opcode == static_cast<Instruction::Code>(kMirOpPhi)) {
+ mir->meta.phi_incoming =
+ allocator_->AllocArray<BasicBlockId>(def->num_uses, kArenaAllocDFInfo);
+ ASSERT_EQ(def->num_uses, bb->predecessors.size());
+ std::copy(bb->predecessors.begin(), bb->predecessors.end(), mir->meta.phi_incoming);
+ } else if (def->opcode == Instruction::CHECK_CAST) {
+ ASSERT_LT(def->metadata, type_count_);
+ mir->dalvikInsn.vB = dex_file_builder_.GetTypeIdx(type_defs_[def->metadata].descriptor);
+ cu_.mir_graph->merged_df_flags_ |= DF_CHK_CAST;
+ } else if (def->opcode == Instruction::NEW_ARRAY) {
+ ASSERT_LT(def->metadata, type_count_);
+ mir->dalvikInsn.vC = dex_file_builder_.GetTypeIdx(type_defs_[def->metadata].descriptor);
+ }
+      mir->ssa_rep = &ssa_reps_[i];
+      mir->ssa_rep->num_uses = def->num_uses;
+      mir->ssa_rep->uses = const_cast<int32_t*>(def->uses);  // Not modified by type inference.
+      mir->ssa_rep->num_defs = def->num_defs;
+      mir->ssa_rep->defs = const_cast<int32_t*>(def->defs);  // Not modified by type inference.
+      mir->offset = i;  // Used only for debug output.
+      mir->optimization_flags = 0u;
+ }
+ code_item_->insns_size_in_code_units_ = 2u * count;
+ }
+
+ template <size_t count>
+ void PrepareMIRs(const MIRDef (&defs)[count]) {
+ DoPrepareMIRs(defs, count);
+ }
+
+ // BasicBlockDataFlow::vreg_to_ssa_map_exit is used only for check-casts.
+ void AllocEndingVRegToSRegMaps() {
+ AllNodesIterator iterator(cu_.mir_graph.get());
+ for (BasicBlock* bb = iterator.Next(); bb != nullptr; bb = iterator.Next()) {
+ if (bb->data_flow_info != nullptr) {
+ if (bb->data_flow_info->vreg_to_ssa_map_exit == nullptr) {
+ size_t num_vregs = code_item_->registers_size_;
+ bb->data_flow_info->vreg_to_ssa_map_exit = static_cast<int32_t*>(
+ cu_.arena.AllocArray<int32_t>(num_vregs, kArenaAllocDFInfo));
+ std::fill_n(bb->data_flow_info->vreg_to_ssa_map_exit, num_vregs, INVALID_SREG);
+ }
+ }
+ }
+ }
+
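+  // Records that the given vreg holds the given sreg at the exit of each listed block.
+  // The check-cast handling uses these exit maps to find the sreg live at a check-cast.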
+ template <size_t count>
+ void MapVRegToSReg(int vreg, int32_t sreg, const BasicBlockId (&bb_ids)[count]) {
+ AllocEndingVRegToSRegMaps();
+ for (BasicBlockId bb_id : bb_ids) {
+ BasicBlock* bb = cu_.mir_graph->GetBasicBlock(bb_id);
+ CHECK(bb != nullptr);
+ CHECK(bb->data_flow_info != nullptr);
+ CHECK(bb->data_flow_info->vreg_to_ssa_map_exit != nullptr);
+ bb->data_flow_info->vreg_to_ssa_map_exit[vreg] = sreg;
+ }
+ }
+
+ void PerformTypeInference() {
+ cu_.mir_graph->SSATransformationStart();
+ cu_.mir_graph->ComputeDFSOrders();
+ cu_.mir_graph->ComputeDominators();
+ cu_.mir_graph->ComputeTopologicalSortOrder();
+ cu_.mir_graph->SSATransformationEnd();
+ ASSERT_TRUE(type_inference_ == nullptr);
+ type_inference_.reset(new (allocator_.get()) TypeInference(cu_.mir_graph.get(),
+ allocator_.get()));
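+    // Visit blocks in pre-order, repeating the pass as long as Apply() reports
+    // changes, i.e. until type propagation reaches a fixed point.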
+ RepeatingPreOrderDfsIterator iter(cu_.mir_graph.get());
+ bool changed = false;
+ for (BasicBlock* bb = iter.Next(changed); bb != nullptr; bb = iter.Next(changed)) {
+ changed = type_inference_->Apply(bb);
+ }
+ type_inference_->Finish();
+ }
+
+ TypeInferenceTest()
+ : pool_(),
+ cu_(&pool_, kRuntimeISA, nullptr, nullptr),
+ mir_count_(0u),
+ mirs_(nullptr),
+ code_item_(nullptr),
+ ssa_reps_(),
+ allocator_(),
+ live_in_v_(new (&cu_.arena) ArenaBitVector(&cu_.arena, kMaxSsaRegs, false, kBitMapMisc)),
+ type_defs_(nullptr),
+ type_count_(0u),
+ ifield_defs_(nullptr),
+ ifield_count_(0u),
+ sfield_defs_(nullptr),
+ sfield_count_(0u),
+ method_defs_(nullptr),
+ method_count_(0u),
+ dex_file_builder_(),
+ dex_file_(nullptr) {
+ cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
+ allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
+ // Bind all possible sregs to live vregs for test purposes.
+ live_in_v_->SetInitialBits(kMaxSsaRegs);
+ cu_.mir_graph->reg_location_ = static_cast<RegLocation*>(cu_.arena.Alloc(
+ kMaxSsaRegs * sizeof(cu_.mir_graph->reg_location_[0]), kArenaAllocRegAlloc));
+ cu_.mir_graph->method_sreg_ = kMaxSsaRegs - 1u;
+ cu_.mir_graph->reg_location_[cu_.mir_graph->GetMethodSReg()].location = kLocCompilerTemp;
+ cu_.mir_graph->ssa_base_vregs_.reserve(kMaxSsaRegs);
+ cu_.mir_graph->ssa_subscripts_.reserve(kMaxSsaRegs);
+    for (uint32_t i = 0u; i != kMaxSsaRegs; ++i) {
+ cu_.mir_graph->ssa_base_vregs_.push_back(i);
+ cu_.mir_graph->ssa_subscripts_.push_back(0);
+ }
+ }
+
+ enum ExpectFlags : uint32_t {
+ kExpectWide = 0x0001u,
+ kExpectNarrow = 0x0002u,
+ kExpectFp = 0x0004u,
+ kExpectCore = 0x0008u,
+ kExpectRef = 0x0010u,
+ kExpectArrayWide = 0x0020u,
+ kExpectArrayNarrow = 0x0040u,
+ kExpectArrayFp = 0x0080u,
+ kExpectArrayCore = 0x0100u,
+ kExpectArrayRef = 0x0200u,
+ kExpectNull = 0x0400u,
+ kExpectHigh = 0x0800u, // Reserved for ExpectSRegType().
+ };
+
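+  // Expected type of an sreg: its array nesting depth plus a mask of ExpectFlags.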
+ struct SRegExpectation {
+ uint32_t array_depth;
+ uint32_t flags;
+ };
+
+ void ExpectSRegType(int s_reg, const SRegExpectation& expectation, bool check_loc = true) {
+ uint32_t flags = expectation.flags;
+ uint32_t array_depth = expectation.array_depth;
+ TypeInference::Type type = type_inference_->sregs_[s_reg];
+
+ if (check_loc) {
+ RegLocation loc = cu_.mir_graph->reg_location_[s_reg];
+ EXPECT_EQ((flags & kExpectWide) != 0u, loc.wide) << s_reg;
+ EXPECT_EQ((flags & kExpectFp) != 0u, loc.fp) << s_reg;
+ EXPECT_EQ((flags & kExpectCore) != 0u, loc.core) << s_reg;
+ EXPECT_EQ((flags & kExpectRef) != 0u, loc.ref) << s_reg;
+ EXPECT_EQ((flags & kExpectHigh) != 0u, loc.high_word) << s_reg;
+ }
+
+ EXPECT_EQ((flags & kExpectWide) != 0u, type.Wide()) << s_reg;
+ EXPECT_EQ((flags & kExpectNarrow) != 0u, type.Narrow()) << s_reg;
+ EXPECT_EQ((flags & kExpectFp) != 0u, type.Fp()) << s_reg;
+ EXPECT_EQ((flags & kExpectCore) != 0u, type.Core()) << s_reg;
+ EXPECT_EQ((flags & kExpectRef) != 0u, type.Ref()) << s_reg;
+ EXPECT_EQ((flags & kExpectHigh) == 0u, type.LowWord()) << s_reg;
+ EXPECT_EQ((flags & kExpectHigh) != 0u, type.HighWord()) << s_reg;
+
+ if ((flags & kExpectRef) != 0u) {
+ EXPECT_EQ((flags & kExpectNull) != 0u, !type.NonNull()) << s_reg;
+ } else {
+ // Null should be checked only for references.
+ ASSERT_EQ((flags & kExpectNull), 0u);
+ }
+
+ ASSERT_EQ(array_depth, type.ArrayDepth()) << s_reg;
+ if (array_depth != 0u) {
+ ASSERT_NE((flags & kExpectRef), 0u);
+ TypeInference::Type nested_type = type.NestedType();
+ EXPECT_EQ((flags & kExpectArrayWide) != 0u, nested_type.Wide()) << s_reg;
+ EXPECT_EQ((flags & kExpectArrayNarrow) != 0u, nested_type.Narrow()) << s_reg;
+ EXPECT_EQ((flags & kExpectArrayFp) != 0u, nested_type.Fp()) << s_reg;
+ EXPECT_EQ((flags & kExpectArrayCore) != 0u, nested_type.Core()) << s_reg;
+ EXPECT_EQ((flags & kExpectArrayRef) != 0u, nested_type.Ref()) << s_reg;
+ }
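+    // For a wide low word, also check that the next sreg is typed as its high word.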
+ if (!type.Narrow() && type.LowWord() &&
+ (expectation.flags & (kExpectWide | kExpectNarrow | kExpectHigh)) == kExpectWide) {
+ SRegExpectation high_expectation = { array_depth, flags | kExpectHigh };
+ ExpectSRegType(s_reg + 1, high_expectation);
+ }
+ }
+
+ void ExpectCore(int s_reg, bool core) {
+ EXPECT_EQ(core, type_inference_->sregs_[s_reg].Core());
+ }
+
+ void ExpectRef(int s_reg, bool ref) {
+ EXPECT_EQ(ref, type_inference_->sregs_[s_reg].Ref());
+ }
+
+ void ExpectArrayDepth(int s_reg, uint32_t array_depth) {
+ EXPECT_EQ(array_depth, type_inference_->sregs_[s_reg].ArrayDepth());
+ }
+
+ static constexpr size_t kMaxSsaRegs = 16384u;
+ static constexpr uint16_t kLocalVRs = 1000u;
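+  // Tests use vregs [0, kLocalVRs) as locals; argument vregs start at kLocalVRs
+  // (see the "thiz" constants in the IGet/IPut tests).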
+
+ static constexpr const char* kDexLocation = "TypeInferenceDexFile;";
+ static constexpr const char* kClassName = "LTypeInferenceTest;";
+ static constexpr const char* kMethodName = "test";
+
+ ArenaPool pool_;
+ CompilationUnit cu_;
+ size_t mir_count_;
+ MIR* mirs_;
+ DexFile::CodeItem* code_item_;
+ std::vector<SSARepresentation> ssa_reps_;
+ std::unique_ptr<ScopedArenaAllocator> allocator_;
+ std::unique_ptr<TypeInference> type_inference_;
+ ArenaBitVector* live_in_v_;
+
+ const TypeDef* type_defs_;
+ size_t type_count_;
+ const FieldDef* ifield_defs_;
+ size_t ifield_count_;
+ const FieldDef* sfield_defs_;
+ size_t sfield_count_;
+ const MethodDef* method_defs_;
+ size_t method_count_;
+
+ TestDexFileBuilder dex_file_builder_;
+ std::unique_ptr<const DexFile> dex_file_;
+};
+
+TEST_F(TypeInferenceTest, IGet) {
+ static const FieldDef ifields[] = {
+ { kClassName, "B", "byteField" },
+ { kClassName, "C", "charField" },
+ { kClassName, "D", "doubleField" },
+ { kClassName, "F", "floatField" },
+ { kClassName, "I", "intField" },
+ { kClassName, "J", "longField" },
+ { kClassName, "S", "shortField" },
+ { kClassName, "Z", "booleanField" },
+ { kClassName, "Ljava/lang/Object;", "objectField" },
+ { kClassName, "[Ljava/lang/Object;", "objectArrayField" },
+ };
+ constexpr uint32_t thiz = kLocalVRs;
+ static const MIRDef mirs[] = {
+ DEF_IGET(3u, Instruction::IGET_BYTE, 0u, thiz, 0u),
+ DEF_IGET(3u, Instruction::IGET_CHAR, 1u, thiz, 1u),
+ DEF_IGET_WIDE(3u, Instruction::IGET_WIDE, 2u, thiz, 2u),
+ DEF_IGET(3u, Instruction::IGET, 4u, thiz, 3u),
+ DEF_IGET(3u, Instruction::IGET, 5u, thiz, 4u),
+ DEF_IGET_WIDE(3u, Instruction::IGET_WIDE, 6u, thiz, 5u),
+ DEF_IGET(3u, Instruction::IGET_SHORT, 8u, thiz, 6u),
+ DEF_IGET(3u, Instruction::IGET_BOOLEAN, 9u, thiz, 7u),
+ DEF_IGET(3u, Instruction::IGET_OBJECT, 10u, thiz, 8u),
+ DEF_IGET(3u, Instruction::IGET_OBJECT, 11u, thiz, 9u),
+ };
+
+ PrepareIFields(ifields);
+ BuildDexFile("()V", false);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
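+  // One expectation per IGET, in MIR order: byte/char/short/boolean and int are
+  // narrow core, float is narrow fp, double and long are wide, objects are narrow
+  // refs, and the object array adds array flags.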
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectFp | kExpectWide },
+ { 0u, kExpectFp | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectWide },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ };
+ static_assert(arraysize(expectations) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(expectations); ++i) {
+ EXPECT_EQ(mirs[i].opcode, mirs_[i].dalvikInsn.opcode);
+ ASSERT_LE(1u, mirs_[i].ssa_rep->num_defs);
+ ExpectSRegType(mirs_[i].ssa_rep->defs[0], expectations[i]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, SGet) {
+ static const FieldDef sfields[] = {
+ { kClassName, "B", "staticByteField" },
+ { kClassName, "C", "staticCharField" },
+ { kClassName, "D", "staticDoubleField" },
+ { kClassName, "F", "staticFloatField" },
+ { kClassName, "I", "staticIntField" },
+ { kClassName, "J", "staticLongField" },
+ { kClassName, "S", "staticShortField" },
+ { kClassName, "Z", "staticBooleanField" },
+ { kClassName, "Ljava/lang/Object;", "staticObjectField" },
+ { kClassName, "[Ljava/lang/Object;", "staticObjectArrayField" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_SGET(3u, Instruction::SGET_BYTE, 0u, 0u),
+ DEF_SGET(3u, Instruction::SGET_CHAR, 1u, 1u),
+ DEF_SGET_WIDE(3u, Instruction::SGET_WIDE, 2u, 2u),
+ DEF_SGET(3u, Instruction::SGET, 4u, 3u),
+ DEF_SGET(3u, Instruction::SGET, 5u, 4u),
+ DEF_SGET_WIDE(3u, Instruction::SGET_WIDE, 6u, 5u),
+ DEF_SGET(3u, Instruction::SGET_SHORT, 8u, 6u),
+ DEF_SGET(3u, Instruction::SGET_BOOLEAN, 9u, 7u),
+ DEF_SGET(3u, Instruction::SGET_OBJECT, 10u, 8u),
+ DEF_SGET(3u, Instruction::SGET_OBJECT, 11u, 9u),
+ };
+
+ PrepareSFields(sfields);
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectFp | kExpectWide },
+ { 0u, kExpectFp | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectWide },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ };
+ static_assert(arraysize(expectations) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(expectations); ++i) {
+ EXPECT_EQ(mirs[i].opcode, mirs_[i].dalvikInsn.opcode);
+ ASSERT_LE(1u, mirs_[i].ssa_rep->num_defs);
+ ExpectSRegType(mirs_[i].ssa_rep->defs[0], expectations[i]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, IPut) {
+ static const FieldDef ifields[] = {
+ { kClassName, "B", "byteField" },
+ { kClassName, "C", "charField" },
+ { kClassName, "D", "doubleField" },
+ { kClassName, "F", "floatField" },
+ { kClassName, "I", "intField" },
+ { kClassName, "J", "longField" },
+ { kClassName, "S", "shortField" },
+ { kClassName, "Z", "booleanField" },
+ { kClassName, "Ljava/lang/Object;", "objectField" },
+ { kClassName, "[Ljava/lang/Object;", "objectArrayField" },
+ };
+ constexpr uint32_t thiz = kLocalVRs;
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_IPUT(3u, Instruction::IPUT_BYTE, 0u, thiz, 0u),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0),
+ DEF_IPUT(3u, Instruction::IPUT_CHAR, 1u, thiz, 1u),
+ DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 2u, 0),
+ DEF_IPUT_WIDE(3u, Instruction::IPUT_WIDE, 2u, thiz, 2u),
+ DEF_CONST(3u, Instruction::CONST, 4u, 0),
+ DEF_IPUT(3u, Instruction::IPUT, 4u, thiz, 3u),
+ DEF_CONST(3u, Instruction::CONST, 5u, 0),
+ DEF_IPUT(3u, Instruction::IPUT, 5u, thiz, 4u),
+ DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 6u, 0),
+ DEF_IPUT_WIDE(3u, Instruction::IPUT_WIDE, 6u, thiz, 5u),
+ DEF_CONST(3u, Instruction::CONST, 8u, 0),
+ DEF_IPUT(3u, Instruction::IPUT_SHORT, 8u, thiz, 6u),
+ DEF_CONST(3u, Instruction::CONST, 9u, 0),
+ DEF_IPUT(3u, Instruction::IPUT_BOOLEAN, 9u, thiz, 7u),
+ DEF_CONST(3u, Instruction::CONST, 10u, 0),
+ DEF_IPUT(3u, Instruction::IPUT_OBJECT, 10u, thiz, 8u),
+ DEF_CONST(3u, Instruction::CONST, 11u, 0),
+ DEF_IPUT(3u, Instruction::IPUT_OBJECT, 11u, thiz, 9u),
+ };
+
+ PrepareIFields(ifields);
+ BuildDexFile("()V", false);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ // One expectation for every 2 MIRs.
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectFp | kExpectWide },
+ { 0u, kExpectFp | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectWide },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow | kExpectNull },
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ };
+ static_assert(2 * arraysize(expectations) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(expectations); ++i) {
+ EXPECT_EQ(mirs[2 * i].opcode, mirs_[2 * i].dalvikInsn.opcode);
+ EXPECT_EQ(mirs[2 * i + 1].opcode, mirs_[2 * i + 1].dalvikInsn.opcode);
+ ASSERT_LE(1u, mirs_[2 * i].ssa_rep->num_defs);
+ ExpectSRegType(mirs_[2 * i].ssa_rep->defs[0], expectations[i]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, SPut) {
+ static const FieldDef sfields[] = {
+ { kClassName, "B", "staticByteField" },
+ { kClassName, "C", "staticCharField" },
+ { kClassName, "D", "staticDoubleField" },
+ { kClassName, "F", "staticFloatField" },
+ { kClassName, "I", "staticIntField" },
+ { kClassName, "J", "staticLongField" },
+ { kClassName, "S", "staticShortField" },
+ { kClassName, "Z", "staticBooleanField" },
+ { kClassName, "Ljava/lang/Object;", "staticObjectField" },
+ { kClassName, "[Ljava/lang/Object;", "staticObjectArrayField" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_SPUT(3u, Instruction::SPUT_BYTE, 0u, 0u),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0),
+ DEF_SPUT(3u, Instruction::SPUT_CHAR, 1u, 1u),
+ DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 2u, 0),
+ DEF_SPUT_WIDE(3u, Instruction::SPUT_WIDE, 2u, 2u),
+ DEF_CONST(3u, Instruction::CONST, 4u, 0),
+ DEF_SPUT(3u, Instruction::SPUT, 4u, 3u),
+ DEF_CONST(3u, Instruction::CONST, 5u, 0),
+ DEF_SPUT(3u, Instruction::SPUT, 5u, 4u),
+ DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 6u, 0),
+ DEF_SPUT_WIDE(3u, Instruction::SPUT_WIDE, 6u, 5u),
+ DEF_CONST(3u, Instruction::CONST, 8u, 0),
+ DEF_SPUT(3u, Instruction::SPUT_SHORT, 8u, 6u),
+ DEF_CONST(3u, Instruction::CONST, 9u, 0),
+ DEF_SPUT(3u, Instruction::SPUT_BOOLEAN, 9u, 7u),
+ DEF_CONST(3u, Instruction::CONST, 10u, 0),
+ DEF_SPUT(3u, Instruction::SPUT_OBJECT, 10u, 8u),
+ DEF_CONST(3u, Instruction::CONST, 11u, 0),
+ DEF_SPUT(3u, Instruction::SPUT_OBJECT, 11u, 9u),
+ };
+
+ PrepareSFields(sfields);
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ // One expectation for every 2 MIRs.
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectFp | kExpectWide },
+ { 0u, kExpectFp | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectWide },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow | kExpectNull },
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ };
+ static_assert(2 * arraysize(expectations) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(expectations); ++i) {
+ EXPECT_EQ(mirs[2 * i].opcode, mirs_[2 * i].dalvikInsn.opcode);
+ EXPECT_EQ(mirs[2 * i + 1].opcode, mirs_[2 * i + 1].dalvikInsn.opcode);
+ ASSERT_LE(1u, mirs_[2 * i].ssa_rep->num_defs);
+ ExpectSRegType(mirs_[2 * i].ssa_rep->defs[0], expectations[i]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, MethodReturnType) {
+ static const MethodDef methods[] = {
+ { kClassName, "()B", "byteFoo", kStatic },
+ { kClassName, "()C", "charFoo", kStatic },
+ { kClassName, "()D", "doubleFoo", kStatic },
+ { kClassName, "()F", "floatFoo", kStatic },
+ { kClassName, "()I", "intFoo", kStatic },
+ { kClassName, "()J", "longFoo", kStatic },
+ { kClassName, "()S", "shortFoo", kStatic },
+ { kClassName, "()Z", "booleanFoo", kStatic },
+ { kClassName, "()Ljava/lang/Object;", "objectFoo", kStatic },
+ { kClassName, "()[Ljava/lang/Object;", "objectArrayFoo", kStatic },
+ };
+ static const MIRDef mirs[] = {
+ DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 0u),
+ DEF_NULOP(3u, Instruction::MOVE_RESULT, 0u),
+ DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 1u),
+ DEF_NULOP(3u, Instruction::MOVE_RESULT, 1u),
+ DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 2u),
+ DEF_NULOP_WIDE(3u, Instruction::MOVE_RESULT_WIDE, 2u),
+ DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 3u),
+ DEF_NULOP(3u, Instruction::MOVE_RESULT, 4u),
+ DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 4u),
+ DEF_NULOP(3u, Instruction::MOVE_RESULT, 5u),
+ DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 5u),
+ DEF_NULOP_WIDE(3u, Instruction::MOVE_RESULT_WIDE, 6u),
+ DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 6u),
+ DEF_NULOP(3u, Instruction::MOVE_RESULT, 8u),
+ DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 7u),
+ DEF_NULOP(3u, Instruction::MOVE_RESULT, 9u),
+ DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 8u),
+ DEF_NULOP(3u, Instruction::MOVE_RESULT_OBJECT, 10u),
+ DEF_INVOKE0(3u, Instruction::INVOKE_STATIC, 9u),
+ DEF_NULOP(3u, Instruction::MOVE_RESULT_OBJECT, 11u),
+ };
+
+ PrepareMethods(methods);
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ // One expectation for every 2 MIRs.
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectFp | kExpectWide },
+ { 0u, kExpectFp | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectWide },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ };
+ static_assert(2 * arraysize(expectations) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(expectations); ++i) {
+ EXPECT_EQ(mirs[2 * i].opcode, mirs_[2 * i].dalvikInsn.opcode);
+ EXPECT_EQ(mirs[2 * i + 1].opcode, mirs_[2 * i + 1].dalvikInsn.opcode);
+ ASSERT_LE(1u, mirs_[2 * i + 1].ssa_rep->num_defs);
+ ExpectSRegType(mirs_[2 * i + 1].ssa_rep->defs[0], expectations[i]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, MethodArgType) {
+ static const MethodDef methods[] = {
+ { kClassName, "(B)V", "fooByte", kStatic },
+ { kClassName, "(C)V", "fooChar", kStatic },
+ { kClassName, "(D)V", "fooDouble", kStatic },
+ { kClassName, "(F)V", "fooFloat", kStatic },
+ { kClassName, "(I)V", "fooInt", kStatic },
+ { kClassName, "(J)V", "fooLong", kStatic },
+ { kClassName, "(S)V", "fooShort", kStatic },
+ { kClassName, "(Z)V", "fooBoolean", kStatic },
+ { kClassName, "(Ljava/lang/Object;)V", "fooObject", kStatic },
+ { kClassName, "([Ljava/lang/Object;)V", "fooObjectArray", kStatic },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 0u, 0u),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0),
+ DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 1u, 1u),
+ DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 2u, 0),
+ DEF_INVOKE2(3u, Instruction::INVOKE_STATIC, 2u, 3u, 2u),
+ DEF_CONST(3u, Instruction::CONST, 4u, 0),
+ DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 4u, 3u),
+ DEF_CONST(3u, Instruction::CONST, 5u, 0),
+ DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 5u, 4u),
+ DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 6u, 0),
+ DEF_INVOKE2(3u, Instruction::INVOKE_STATIC, 6u, 7u, 5u),
+ DEF_CONST(3u, Instruction::CONST, 8u, 0),
+ DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 8u, 6u),
+ DEF_CONST(3u, Instruction::CONST, 9u, 0),
+ DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 9u, 7u),
+ DEF_CONST(3u, Instruction::CONST, 10u, 0),
+ DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 10u, 8u),
+ DEF_CONST(3u, Instruction::CONST, 11u, 0),
+ DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 11u, 9u),
+ };
+
+ PrepareMethods(methods);
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ // One expectation for every 2 MIRs.
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectFp | kExpectWide },
+ { 0u, kExpectFp | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectWide },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow | kExpectNull },
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ };
+ static_assert(2 * arraysize(expectations) == arraysize(mirs), "array size mismatch");
+ for (size_t i = 0; i != arraysize(expectations); ++i) {
+ EXPECT_EQ(mirs[2 * i].opcode, mirs_[2 * i].dalvikInsn.opcode);
+ EXPECT_EQ(mirs[2 * i + 1].opcode, mirs_[2 * i + 1].dalvikInsn.opcode);
+ ASSERT_LE(1u, mirs_[2 * i].ssa_rep->num_defs);
+ ExpectSRegType(mirs_[2 * i].ssa_rep->defs[0], expectations[i]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, APut1) {
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0), // Object[] array
+ DEF_CONST(3u, Instruction::CONST, 1u, 0), // value; can't even determine whether core or fp.
+ DEF_CONST(3u, Instruction::CONST, 2u, 0), // index
+ DEF_APUT(3u, Instruction::APUT, 1u, 0u, 2u),
+ };
+
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayNarrow },
+ { 0u, kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, APut2) {
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0), // Object[] array
+ DEF_CONST(3u, Instruction::CONST, 1u, 0), // Object[] value
+ DEF_CONST(3u, Instruction::CONST, 2u, 0), // index
+ DEF_APUT(3u, Instruction::APUT_OBJECT, 1u, 0u, 2u),
+ };
+
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectRef | kExpectNarrow | kExpectNull },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, APut3) {
+ static const MIRDef mirs[] = {
+ // Either array1 or array2 could be Object[][] but there is no way to tell from the bytecode.
+ DEF_CONST(3u, Instruction::CONST, 0u, 0), // Object[] array1
+ DEF_CONST(3u, Instruction::CONST, 1u, 0), // Object[] array2
+ DEF_CONST(3u, Instruction::CONST, 2u, 0), // index
+ DEF_APUT(3u, Instruction::APUT_OBJECT, 0u, 1u, 2u),
+ DEF_APUT(3u, Instruction::APUT_OBJECT, 1u, 0u, 2u),
+ };
+
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, APut4) {
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0), // index
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u), // Object[] array
+ DEF_CONST(3u, Instruction::CONST, 3u, 0), // value; can't even determine whether core or fp.
+ DEF_APUT(3u, Instruction::APUT, 3u, 2u, 1u),
+ };
+
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayNarrow },
+ { 0u, kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, APut5) {
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0), // index
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u), // Object[] array
+ DEF_CONST(3u, Instruction::CONST, 3u, 0), // Object[] value
+ DEF_APUT(3u, Instruction::APUT_OBJECT, 3u, 2u, 1u),
+ };
+
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectRef | kExpectNarrow | kExpectNull },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, APut6) {
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0), // index
+ // Either array1 or array2 could be Object[][] but there is no way to tell from the bytecode.
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u), // Object[] array1
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 3u, 0u, 1u), // Object[] array2
+ DEF_APUT(3u, Instruction::APUT_OBJECT, 2u, 3u, 1u),
+ DEF_APUT(3u, Instruction::APUT_OBJECT, 3u, 2u, 1u),
+ };
+
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, TwoNullObjectArraysInLoop) {
+ static const MIRDef mirs[] = {
+ // void foo() {
+ // Object[] array1 = ((Object[])null)[0];
+ // Object[] array2 = ((Object[])null)[0];
+ // for (int i = 0; i != 3; ++i) {
+ // Object[] a1 = null; // One of these could be Object[][] but not both.
+ // Object[] a2 = null; // But they will be deduced as Object[].
+ // try { a1[0] = a2; } catch (Throwable ignored) { }
+ // try { a2[0] = a1; } catch (Throwable ignored) { }
+ // array1 = a1;
+ // array2 = a2;
+ // }
+ // }
+ //
+ // Omitting the try-catch:
+ DEF_CONST(3u, Instruction::CONST, 0u, 0), // null
+ DEF_CONST(3u, Instruction::CONST, 1u, 0), // index
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u), // array1
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 3u, 0u, 1u), // array2
+ DEF_PHI2(4u, 4u, 2u, 8u), // ? + [L -> [? gives [L (see array-length below)
+ DEF_PHI2(4u, 5u, 3u, 9u), // ? + [L -> ? gives ?
+ DEF_AGET(4u, Instruction::AGET_OBJECT, 6u, 0u, 1u), // a1
+ DEF_AGET(4u, Instruction::AGET_OBJECT, 7u, 0u, 1u), // a2
+ DEF_APUT(4u, Instruction::APUT_OBJECT, 6u, 7u, 1u),
+ DEF_APUT(4u, Instruction::APUT_OBJECT, 7u, 6u, 1u),
+ DEF_MOVE(4u, Instruction::MOVE_OBJECT, 8u, 6u),
+ DEF_MOVE(4u, Instruction::MOVE_OBJECT, 9u, 7u),
+ DEF_UNOP(5u, Instruction::ARRAY_LENGTH, 10u, 4u),
+ };
+
+ BuildDexFile("()V", true);
+ PrepareLoop();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, ArrayArrayFloat) {
+ static const MethodDef methods[] = {
+ { kClassName, "(F)V", "fooFloat", kStatic },
+ };
+ static const MIRDef mirs[] = {
+ // void foo() {
+ // try {
+ // float[][][] aaaf = null;
+ // float[][] array = aaaf[0]; // Make sure array is treated as properly typed.
+ // array[0][0] = 0.0f; // const + aget-object[1] + aput
+ // fooFloat(array[0][0]); // aget-object[2] + aget + invoke
+ // // invoke: signature => input is F.
+ // // aget: output is F => base is [F (precise)
+ // // aget-object[2]: output is [F => base is [[F (precise)
+ // // aput: unknown input type => base is [?
+ // // aget-object[1]: base is [[F => result is L or [F, merge with [? => result is [F
+ // // aput (again): base is [F => result is F
+ // // const: F determined by the aput reprocessing.
+ // } catch (Throwable ignored) {
+ // }
+ // }
+ //
+ // Omitting the try-catch:
+ DEF_CONST(3u, Instruction::CONST, 0u, 0), // 0
+ DEF_CONST(3u, Instruction::CONST, 1u, 0), // aaaf
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 1u, 0u), // array = aaaf[0]
+ DEF_CONST(3u, Instruction::CONST, 3u, 0), // 0.0f
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 4u, 2u, 0u), // array[0]
+ DEF_APUT(3u, Instruction::APUT, 3u, 4u, 0u), // array[0][0] = 0.0f
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 5u, 2u, 0u), // array[0]
+ DEF_AGET(3u, Instruction::AGET, 6u, 5u, 0u), // array[0][0]
+ DEF_INVOKE1(3u, Instruction::INVOKE_STATIC, 6u, 0u), // fooFloat(array[0][0])
+ };
+
+ PrepareMethods(methods);
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 2u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+ { 0u, kExpectFp | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+ { 0u, kExpectFp | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, CheckCast1) {
+ static const TypeDef types[] = {
+ { "[I" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0),
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),
+ DEF_CHECK_CAST(4u, Instruction::CHECK_CAST, 2u, 0u),
+ DEF_CHECK_CAST(5u, Instruction::CHECK_CAST, 2u, 0u),
+ // Pseudo-phi from [I and [I into L infers only L but not [.
+ DEF_MOVE(6u, Instruction::MOVE_OBJECT, 3u, 2u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ static const BasicBlockId v0_def_blocks[] = { 3u, 4u, 5u, 6u };
+ MapVRegToSReg(2, 2, v0_def_blocks);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, CheckCast2) {
+ static const TypeDef types[] = {
+ { "[I" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0),
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),
+ DEF_CHECK_CAST(4u, Instruction::CHECK_CAST, 2u, 0u),
+ DEF_CHECK_CAST(5u, Instruction::CHECK_CAST, 2u, 0u),
+ // Pseudo-phi from [I and [I into [? infers [I.
+ DEF_MOVE(6u, Instruction::MOVE_OBJECT, 3u, 2u),
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 4u, 2u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ static const BasicBlockId v0_def_blocks[] = { 3u, 4u, 5u, 6u };
+ MapVRegToSReg(2, 2, v0_def_blocks);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, CheckCast3) {
+ static const TypeDef types[] = {
+ { "[I" },
+ { "[F" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0),
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),
+ DEF_CHECK_CAST(4u, Instruction::CHECK_CAST, 2u, 0u),
+ DEF_CHECK_CAST(5u, Instruction::CHECK_CAST, 2u, 1u),
+ // Pseudo-phi from [I and [F into L correctly leaves it as L.
+ DEF_MOVE(6u, Instruction::MOVE_OBJECT, 3u, 2u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ static const BasicBlockId v0_def_blocks[] = { 3u, 4u, 5u, 6u };
+ MapVRegToSReg(2, 2, v0_def_blocks);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, CheckCastConflict1) {
+ static const TypeDef types[] = {
+ { "[I" },
+ { "[F" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0),
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),
+ DEF_CHECK_CAST(4u, Instruction::CHECK_CAST, 2u, 0u),
+ DEF_CHECK_CAST(5u, Instruction::CHECK_CAST, 2u, 1u),
+ // Pseudo-phi from [I and [F into [? infers conflict [I/[F.
+ DEF_MOVE(6u, Instruction::MOVE_OBJECT, 3u, 2u),
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 4u, 2u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ static const BasicBlockId v0_def_blocks[] = { 3u, 4u, 5u, 6u };
+ MapVRegToSReg(2, 2, v0_def_blocks);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg], false);
+ }
+  // The type conflict in the array element type wasn't propagated to any SSA reg.
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, CheckCastConflict2) {
+ static const TypeDef types[] = {
+ { "[I" },
+ { "[F" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0),
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u),
+ DEF_CHECK_CAST(4u, Instruction::CHECK_CAST, 2u, 0u),
+ DEF_CHECK_CAST(5u, Instruction::CHECK_CAST, 2u, 1u),
+ // Pseudo-phi from [I and [F into [? infers conflict [I/[F.
+ DEF_MOVE(6u, Instruction::MOVE_OBJECT, 3u, 2u),
+ DEF_AGET(6u, Instruction::AGET, 4u, 2u, 1u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ static const BasicBlockId v0_def_blocks[] = { 3u, 4u, 5u, 6u };
+ MapVRegToSReg(2, 2, v0_def_blocks);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectFp | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg], false);
+ }
+ // Type conflict in an SSA reg, register promotion disabled.
+ EXPECT_NE(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, Phi1) {
+ static const TypeDef types[] = {
+ { "[I" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 100),
+ DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
+ DEF_NEW_ARRAY(5u, Instruction::NEW_ARRAY, 2u, 0u, 0u),
+ // Phi from [I and [I infers only L but not [.
+ DEF_PHI2(6u, 3u, 1u, 2u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, Phi2) {
+ static const TypeDef types[] = {
+ { "[F" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 100),
+ DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
+ DEF_NEW_ARRAY(5u, Instruction::NEW_ARRAY, 2u, 0u, 0u),
+ // Phi from [F and [F into [? infers [F.
+ DEF_PHI2(6u, 3u, 1u, 2u),
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 4u, 3u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, Phi3) {
+ static const TypeDef types[] = {
+ { "[I" },
+ { "[F" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 100),
+ DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
+ DEF_NEW_ARRAY(5u, Instruction::NEW_ARRAY, 2u, 0u, 1u),
+ // Phi from [I and [F infers L.
+ DEF_PHI2(6u, 3u, 1u, 2u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+ { 0u, kExpectRef | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, Phi4) {
+ static const TypeDef types[] = {
+ { "[I" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 100),
+ DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
+ DEF_CONST(5u, Instruction::CONST, 2u, 0),
+ // Pseudo-phi from [I and null infers L.
+ DEF_PHI2(6u, 3u, 1u, 2u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+ { 0u, kExpectRef | kExpectNarrow | kExpectNull },
+ { 0u, kExpectRef | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, PhiConflict1) {
+ static const TypeDef types[] = {
+ { "[I" },
+ { "[F" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 100),
+ DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
+ DEF_NEW_ARRAY(5u, Instruction::NEW_ARRAY, 2u, 0u, 1u),
+ // Pseudo-phi from [I and [F into [? infers conflict [I/[F (then propagated upwards).
+ DEF_PHI2(6u, 3u, 1u, 2u),
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 4u, 3u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg], false);
+ }
+ // The type conflict in array element wasn't propagated to an SSA reg.
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, PhiConflict2) {
+ static const TypeDef types[] = {
+ { "[I" },
+ { "[F" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 100),
+ DEF_NEW_ARRAY(4u, Instruction::NEW_ARRAY, 1u, 0u, 0u),
+ DEF_NEW_ARRAY(5u, Instruction::NEW_ARRAY, 2u, 0u, 1u),
+ // Pseudo-phi from [I and [F into [? infers conflict [I/[F (then propagated upwards).
+ DEF_PHI2(6u, 3u, 1u, 2u),
+ DEF_AGET(6u, Instruction::AGET, 4u, 3u, 0u),
+ };
+ PrepareTypes(types);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectFp | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg], false);
+ }
+ // Type conflict in an SSA reg, register promotion disabled.
+ EXPECT_NE(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, Wide1) {
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0), // index
+ DEF_AGET(3u, Instruction::AGET_OBJECT, 2u, 0u, 1u), // long[]
+ DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 3u, 0), // long
+ DEF_APUT_WIDE(3u, Instruction::APUT_WIDE, 3u, 2u, 1u),
+ { 3u, Instruction::RETURN_OBJECT, 0, 0u, 1u, { 2u }, 0u, { } },
+ };
+
+ BuildDexFile("()[J", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayWide },
+ { 0u, kExpectCore | kExpectWide },
+ // NOTE: High word checked implicitly for sreg = 3.
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg], false);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, WideSizeConflict1) {
+ static const MIRDef mirs[] = {
+ DEF_CONST_WIDE(3u, Instruction::CONST_WIDE, 0u, 0),
+ DEF_MOVE(3u, Instruction::MOVE, 2u, 0u),
+ };
+
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectNarrow | kExpectWide },
+ { 0u, kExpectNarrow | kExpectWide },
+ };
+ ExpectSRegType(0u, expectations[0], false);
+ ExpectSRegType(2u, expectations[1], false);
+ EXPECT_TRUE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, ArrayLongLength) {
+ static const FieldDef sfields[] = {
+ { kClassName, "[J", "arrayLongField" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(4u, Instruction::CONST, 0u, 0),
+ DEF_SGET(5u, Instruction::SGET_OBJECT, 1u, 0u),
+ DEF_PHI2(6u, 2u, 0u, 1u),
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 3u, 2u),
+ DEF_SGET(6u, Instruction::SGET_OBJECT, 4u, 0u),
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 5u, 4u),
+ };
+
+ PrepareSFields(sfields);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayCore | kExpectArrayWide },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayWide },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayWide },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayWide },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, ArrayArrayObjectLength) {
+ static const FieldDef sfields[] = {
+ { kClassName, "[[Ljava/lang/Object;", "arrayLongField" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(4u, Instruction::CONST, 0u, 0),
+ DEF_SGET(5u, Instruction::SGET_OBJECT, 1u, 0u),
+ DEF_PHI2(6u, 2u, 0u, 1u),
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 3u, 2u),
+ DEF_SGET(6u, Instruction::SGET_OBJECT, 4u, 0u),
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 5u, 4u),
+ };
+
+ PrepareSFields(sfields);
+ BuildDexFile("()V", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull | kExpectArrayRef | kExpectArrayNarrow },
+ { 2u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 2u, kExpectRef | kExpectNarrow | kExpectArrayRef | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, SGetAdd0SPut) {
+ static const FieldDef sfields[] = {
+ { kClassName, "I", "staticIntField" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_SGET(3u, Instruction::SGET, 0u, 0u),
+ DEF_UNOP(3u, Instruction::ADD_INT_LIT8, 1u, 0u), // +0
+ DEF_SPUT(3u, Instruction::SPUT, 1u, 0u),
+ };
+
+ PrepareSFields(sfields);
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, MoveObjectNull) {
+ static const MethodDef methods[] = {
+ { kClassName, "([I[D)V", "foo", kStatic },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_MOVE(3u, Instruction::MOVE_OBJECT, 1u, 0u),
+ DEF_INVOKE2(3u, Instruction::INVOKE_STATIC, 0u, 1u, 0u),
+ };
+
+ PrepareMethods(methods);
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectation = {
+ 1u,
+ kExpectRef | kExpectNarrow | kExpectNull |
+ kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow | kExpectArrayWide
+ };
+ ExpectSRegType(0u, expectation);
+ ExpectSRegType(1u, expectation);
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, MoveNull1) {
+ static const MethodDef methods[] = {
+ { kClassName, "([I[D)V", "foo", kStatic },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_MOVE(3u, Instruction::MOVE, 1u, 0u),
+ DEF_INVOKE2(3u, Instruction::INVOKE_STATIC, 0u, 1u, 0u),
+ };
+
+ PrepareMethods(methods);
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectation = {
+ 1u,
+ kExpectCore | kExpectRef | kExpectFp | kExpectNarrow | kExpectNull |
+ kExpectArrayCore | kExpectArrayFp | kExpectArrayNarrow | kExpectArrayWide
+ };
+ ExpectSRegType(0u, expectation);
+ ExpectSRegType(1u, expectation);
+ // Type conflict using move instead of move-object for null, register promotion disabled.
+ EXPECT_NE(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, MoveNull2) {
+ static const FieldDef sfields[] = {
+ { kClassName, "[F", "staticArrayArrayFloatField" },
+ { kClassName, "[I", "staticArrayIntField" },
+ { kClassName, "[[I", "staticArrayArrayIntField" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(4u, Instruction::CONST, 0u, 0),
+ DEF_MOVE(4u, Instruction::MOVE_OBJECT, 1u, 0u),
+ DEF_MOVE(4u, Instruction::MOVE_OBJECT, 2u, 1u),
+ DEF_SGET(5u, Instruction::SGET_OBJECT, 3u, 0u),
+ DEF_SGET(5u, Instruction::SGET_OBJECT, 4u, 1u),
+ DEF_SGET(5u, Instruction::SGET_OBJECT, 5u, 2u),
+ DEF_PHI2(6u, 6u, 0u, 3u),
+ DEF_PHI2(6u, 7u, 1u, 4u),
+ DEF_PHI2(6u, 8u, 2u, 5u),
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 9u, 6u),
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 10u, 7u),
+ DEF_UNOP(6u, Instruction::ARRAY_LENGTH, 11u, 8u),
+ { 6u, Instruction::RETURN_OBJECT, 0, 0u, 1u, { 8u }, 0u, { } },
+ };
+
+ PrepareSFields(sfields);
+ BuildDexFile("()[[I", true);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull |
+ kExpectArrayCore | kExpectArrayFp | kExpectArrayRef | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull |
+ kExpectArrayCore | kExpectArrayFp | kExpectArrayRef | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectNull |
+ kExpectArrayCore | kExpectArrayFp | kExpectArrayRef | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+ { 2u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayFp | kExpectArrayNarrow },
+ { 1u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+ { 2u, kExpectRef | kExpectNarrow | kExpectArrayCore | kExpectArrayNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ // Type conflict in array type not propagated to actual register.
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, ReuseNull1) {
+ static const FieldDef sfields[] = {
+ { kClassName, "[I", "staticArrayLongField" },
+ { kClassName, "[[F", "staticArrayArrayFloatField" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_SPUT(3u, Instruction::SPUT_OBJECT, 0u, 0u),
+ DEF_SPUT(3u, Instruction::SPUT_OBJECT, 0u, 1u),
+ };
+
+ PrepareSFields(sfields);
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectation = {
+ 1u,
+ kExpectRef | kExpectNarrow | kExpectNull |
+ kExpectArrayCore | kExpectArrayRef | kExpectArrayFp | kExpectArrayNarrow
+ };
+ ExpectSRegType(0u, expectation);
+ // Type conflict in array type not propagated to actual register.
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, ReuseNull2) {
+ static const FieldDef sfields[] = {
+ { kClassName, "[J", "staticArrayLongField" },
+ { kClassName, "[[F", "staticArrayArrayFloatField" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_CONST(3u, Instruction::CONST, 0u, 0),
+ DEF_SPUT(3u, Instruction::SPUT_OBJECT, 0u, 0u),
+ DEF_SPUT(3u, Instruction::SPUT_OBJECT, 0u, 1u),
+ };
+
+ PrepareSFields(sfields);
+ BuildDexFile("()V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectation = {
+ 1u,
+ kExpectRef | kExpectNarrow | kExpectNull |
+ kExpectArrayCore | kExpectArrayRef | kExpectArrayFp | kExpectArrayNarrow | kExpectArrayWide
+ };
+ ExpectSRegType(0u, expectation);
+ // Type conflict in array type not propagated to actual register.
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, ArgIsNonNull) {
+ constexpr uint32_t thiz = kLocalVRs;
+ static const MIRDef mirs[] = {
+ DEF_MOVE(3u, Instruction::MOVE_OBJECT, 0u, thiz),
+ };
+
+ BuildDexFile("(Ljava/lang/Object;)V", true);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectation = {
+ 0u,
+ kExpectRef | kExpectNarrow
+ };
+ ExpectSRegType(0u, expectation);
+ // No type conflict; register promotion remains enabled.
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+TEST_F(TypeInferenceTest, IfCc) {
+ static const FieldDef sfields[] = {
+ { kClassName, "I", "intField" },
+ };
+ static const MIRDef mirs[] = {
+ DEF_SGET(3u, Instruction::SGET, 0u, 0u),
+ DEF_CONST(3u, Instruction::CONST, 1u, 0u),
+ { 3u, Instruction::IF_EQ, 0, 0u, 2, { 0u, 1u }, 0, { } },
+ };
+
+ PrepareSFields(sfields);
+ BuildDexFile("()V", false);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ PerformTypeInference();
+
+ ASSERT_EQ(arraysize(mirs), mir_count_);
+ static const SRegExpectation expectations[] = {
+ { 0u, kExpectCore | kExpectNarrow },
+ { 0u, kExpectCore | kExpectNarrow },
+ };
+ for (int32_t sreg = 0; sreg != arraysize(expectations); ++sreg) {
+ ExpectSRegType(sreg, expectations[sreg]);
+ }
+ EXPECT_EQ(cu_.disable_opt & (1u << kPromoteRegs), 0u);
+ EXPECT_FALSE(cu_.mir_graph->PuntToInterpreter());
+}
+
+} // namespace art
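
Each test above asserts on the kPromoteRegs bit of cu_.disable_opt. A minimal standalone sketch of that bitmask convention (the bit position below is hypothetical; ART defines the real enum elsewhere):

#include <cstdint>

enum OptControlBits { kPromoteRegs = 3 };  // hypothetical bit position; the real enum lives in ART

// A type conflict on an SSA register sets the bit; the tests then assert on it.
uint32_t DisablePromotion(uint32_t disable_opt) {
  return disable_opt | (1u << kPromoteRegs);
}

bool IsPromotionDisabled(uint32_t disable_opt) {
  return (disable_opt & (1u << kPromoteRegs)) != 0u;
}
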
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index a4df00e..c1d5cb7 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -46,7 +46,7 @@
}
bool VerificationResults::ProcessVerifiedMethod(verifier::MethodVerifier* method_verifier) {
- DCHECK(method_verifier != NULL);
+ DCHECK(method_verifier != nullptr);
MethodReference ref = method_verifier->GetMethodReference();
bool compile = IsCandidateForCompilation(ref, method_verifier->GetAccessFlags());
const VerifiedMethod* verified_method = VerifiedMethod::Create(method_verifier, compile);
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 977757f..e788261 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -64,6 +64,9 @@
if (method_verifier->HasCheckCasts()) {
verified_method->GenerateSafeCastSet(method_verifier);
}
+
+ verified_method->SetStringInitPcRegMap(method_verifier->GetStringInitPcRegMap());
+
return verified_method.release();
}
@@ -166,7 +169,7 @@
}
}
} else {
- DCHECK(i >= 65536 || reg_bitmap == NULL);
+ DCHECK(i >= 65536 || reg_bitmap == nullptr);
}
}
}
@@ -283,7 +286,7 @@
}
mirror::ArtMethod* abstract_method = method_verifier->GetDexCache()->GetResolvedMethod(
is_range ? inst->VRegB_3rc() : inst->VRegB_35c());
- if (abstract_method == NULL) {
+ if (abstract_method == nullptr) {
// If the method is not found in the cache this means that it was never found
// by ResolveMethodAndCheckAccess() called when verifying invoke_*.
continue;
diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h
index 437ae52..242e3df 100644
--- a/compiler/dex/verified_method.h
+++ b/compiler/dex/verified_method.h
@@ -59,7 +59,7 @@
return safe_cast_set_;
}
- // Returns the devirtualization target method, or nullptr if none.
+ // Returns the devirtualization target method, or null if none.
const MethodReference* GetDevirtTarget(uint32_t dex_pc) const;
// Returns the dequicken field / method for a quick invoke / field get. Returns null if there is
@@ -75,6 +75,13 @@
return has_verification_failures_;
}
+ void SetStringInitPcRegMap(const SafeMap<uint32_t, std::set<uint32_t>>& string_init_pc_reg_map) {
+ string_init_pc_reg_map_ = string_init_pc_reg_map;
+ }
+ const SafeMap<uint32_t, std::set<uint32_t>>& GetStringInitPcRegMap() const {
+ return string_init_pc_reg_map_;
+ }
+
private:
VerifiedMethod() = default;
@@ -114,6 +121,10 @@
SafeCastSet safe_cast_set_;
bool has_verification_failures_;
+
+ // Copy of the verifier-generated mapping from dex PCs of string init invocations
+ // to the set of other registers that the receiver has been copied into.
+ SafeMap<uint32_t, std::set<uint32_t>> string_init_pc_reg_map_;
};
} // namespace art
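
The map stored above records, per dex PC of a string-init invocation, the registers into which the receiver has been copied. A minimal sketch of how a later pass might walk it, with std::map/std::set standing in for ART's SafeMap and a caller-supplied visitor:

#include <cstdint>
#include <functional>
#include <map>
#include <set>

using StringInitPcRegMap = std::map<uint32_t, std::set<uint32_t>>;

// For each string-init invocation, visit every register that aliases the
// receiver so a later pass can rewrite all of them consistently.
void ForEachReceiverAlias(const StringInitPcRegMap& map,
                          const std::function<void(uint32_t dex_pc, uint32_t vreg)>& visit) {
  for (const auto& entry : map) {
    for (uint32_t vreg : entry.second) {
      visit(entry.first, vreg);
    }
  }
}
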
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
index 2b78e38..948ba7b 100644
--- a/compiler/dex/vreg_analysis.cc
+++ b/compiler/dex/vreg_analysis.cc
@@ -23,400 +23,6 @@
namespace art {
-bool MIRGraph::SetFp(int index, bool is_fp) {
- bool change = false;
- if (is_fp && !reg_location_[index].fp) {
- reg_location_[index].fp = true;
- reg_location_[index].defined = true;
- change = true;
- }
- return change;
-}
-
-bool MIRGraph::SetFp(int index) {
- bool change = false;
- if (!reg_location_[index].fp) {
- reg_location_[index].fp = true;
- reg_location_[index].defined = true;
- change = true;
- }
- return change;
-}
-
-bool MIRGraph::SetCore(int index, bool is_core) {
- bool change = false;
- if (is_core && !reg_location_[index].defined) {
- reg_location_[index].core = true;
- reg_location_[index].defined = true;
- change = true;
- }
- return change;
-}
-
-bool MIRGraph::SetCore(int index) {
- bool change = false;
- if (!reg_location_[index].defined) {
- reg_location_[index].core = true;
- reg_location_[index].defined = true;
- change = true;
- }
- return change;
-}
-
-bool MIRGraph::SetRef(int index, bool is_ref) {
- bool change = false;
- if (is_ref && !reg_location_[index].defined) {
- reg_location_[index].ref = true;
- reg_location_[index].defined = true;
- change = true;
- }
- return change;
-}
-
-bool MIRGraph::SetRef(int index) {
- bool change = false;
- if (!reg_location_[index].defined) {
- reg_location_[index].ref = true;
- reg_location_[index].defined = true;
- change = true;
- }
- return change;
-}
-
-bool MIRGraph::SetWide(int index, bool is_wide) {
- bool change = false;
- if (is_wide && !reg_location_[index].wide) {
- reg_location_[index].wide = true;
- change = true;
- }
- return change;
-}
-
-bool MIRGraph::SetWide(int index) {
- bool change = false;
- if (!reg_location_[index].wide) {
- reg_location_[index].wide = true;
- change = true;
- }
- return change;
-}
-
-bool MIRGraph::SetHigh(int index, bool is_high) {
- bool change = false;
- if (is_high && !reg_location_[index].high_word) {
- reg_location_[index].high_word = true;
- change = true;
- }
- return change;
-}
-
-bool MIRGraph::SetHigh(int index) {
- bool change = false;
- if (!reg_location_[index].high_word) {
- reg_location_[index].high_word = true;
- change = true;
- }
- return change;
-}
-
-
-/*
- * Infer types and sizes. We don't need to track change on sizes,
- * as it doesn't propagate. We're guaranteed at least one pass through
- * the cfg.
- */
-bool MIRGraph::InferTypeAndSize(BasicBlock* bb, MIR* mir, bool changed) {
- SSARepresentation *ssa_rep = mir->ssa_rep;
-
- /*
- * The dex bytecode definition does not explicitly outlaw the definition of the same
- * virtual register to be used in both a 32-bit and 64-bit pair context. However, dx
- * does not generate this pattern (at least recently). Further, in the next revision of
- * dex, we will forbid this. To support the few cases in the wild, detect this pattern
- * and punt to the interpreter.
- */
- bool type_mismatch = false;
-
- if (ssa_rep) {
- uint64_t attrs = GetDataFlowAttributes(mir);
- const int* uses = ssa_rep->uses;
- const int* defs = ssa_rep->defs;
-
- // Handle defs
- if (attrs & DF_DA) {
- if (attrs & DF_CORE_A) {
- changed |= SetCore(defs[0]);
- }
- if (attrs & DF_REF_A) {
- changed |= SetRef(defs[0]);
- }
- if (attrs & DF_A_WIDE) {
- reg_location_[defs[0]].wide = true;
- reg_location_[defs[1]].wide = true;
- reg_location_[defs[1]].high_word = true;
- DCHECK_EQ(SRegToVReg(defs[0])+1,
- SRegToVReg(defs[1]));
- }
- }
-
-
- // Handles uses
- int next = 0;
- if (attrs & DF_UA) {
- if (attrs & DF_CORE_A) {
- changed |= SetCore(uses[next]);
- }
- if (attrs & DF_REF_A) {
- changed |= SetRef(uses[next]);
- }
- if (attrs & DF_A_WIDE) {
- reg_location_[uses[next]].wide = true;
- reg_location_[uses[next + 1]].wide = true;
- reg_location_[uses[next + 1]].high_word = true;
- DCHECK_EQ(SRegToVReg(uses[next])+1,
- SRegToVReg(uses[next + 1]));
- next += 2;
- } else {
- type_mismatch |= reg_location_[uses[next]].wide;
- next++;
- }
- }
- if (attrs & DF_UB) {
- if (attrs & DF_CORE_B) {
- changed |= SetCore(uses[next]);
- }
- if (attrs & DF_REF_B) {
- changed |= SetRef(uses[next]);
- }
- if (attrs & DF_B_WIDE) {
- reg_location_[uses[next]].wide = true;
- reg_location_[uses[next + 1]].wide = true;
- reg_location_[uses[next + 1]].high_word = true;
- DCHECK_EQ(SRegToVReg(uses[next])+1,
- SRegToVReg(uses[next + 1]));
- next += 2;
- } else {
- type_mismatch |= reg_location_[uses[next]].wide;
- next++;
- }
- }
- if (attrs & DF_UC) {
- if (attrs & DF_CORE_C) {
- changed |= SetCore(uses[next]);
- }
- if (attrs & DF_REF_C) {
- changed |= SetRef(uses[next]);
- }
- if (attrs & DF_C_WIDE) {
- reg_location_[uses[next]].wide = true;
- reg_location_[uses[next + 1]].wide = true;
- reg_location_[uses[next + 1]].high_word = true;
- DCHECK_EQ(SRegToVReg(uses[next])+1,
- SRegToVReg(uses[next + 1]));
- } else {
- type_mismatch |= reg_location_[uses[next]].wide;
- }
- }
-
- // Special-case return handling
- if ((mir->dalvikInsn.opcode == Instruction::RETURN) ||
- (mir->dalvikInsn.opcode == Instruction::RETURN_WIDE) ||
- (mir->dalvikInsn.opcode == Instruction::RETURN_OBJECT)) {
- switch (cu_->shorty[0]) {
- case 'I':
- type_mismatch |= reg_location_[uses[0]].wide;
- changed |= SetCore(uses[0]);
- break;
- case 'J':
- changed |= SetCore(uses[0]);
- changed |= SetCore(uses[1]);
- reg_location_[uses[0]].wide = true;
- reg_location_[uses[1]].wide = true;
- reg_location_[uses[1]].high_word = true;
- break;
- case 'F':
- type_mismatch |= reg_location_[uses[0]].wide;
- changed |= SetFp(uses[0]);
- break;
- case 'D':
- changed |= SetFp(uses[0]);
- changed |= SetFp(uses[1]);
- reg_location_[uses[0]].wide = true;
- reg_location_[uses[1]].wide = true;
- reg_location_[uses[1]].high_word = true;
- break;
- case 'L':
- type_mismatch |= reg_location_[uses[0]].wide;
- changed |= SetRef(uses[0]);
- break;
- default: break;
- }
- }
-
- // Special-case handling for format 35c/3rc invokes
- Instruction::Code opcode = mir->dalvikInsn.opcode;
- int flags = MIR::DecodedInstruction::IsPseudoMirOp(opcode) ?
- 0 : mir->dalvikInsn.FlagsOf();
- if ((flags & Instruction::kInvoke) &&
- (attrs & (DF_FORMAT_35C | DF_FORMAT_3RC))) {
- DCHECK_EQ(next, 0);
- const auto& lowering_info = GetMethodLoweringInfo(mir);
- const char* shorty = GetShortyFromMethodReference(lowering_info.GetTargetMethod());
- // Handle result type if floating point
- if ((shorty[0] == 'F') || (shorty[0] == 'D')) {
- MIR* move_result_mir = FindMoveResult(bb, mir);
- // Result might not be used at all, so no move-result
- if (move_result_mir && (move_result_mir->dalvikInsn.opcode !=
- Instruction::MOVE_RESULT_OBJECT)) {
- SSARepresentation* tgt_rep = move_result_mir->ssa_rep;
- DCHECK(tgt_rep != NULL);
- tgt_rep->fp_def[0] = true;
- changed |= SetFp(tgt_rep->defs[0]);
- if (shorty[0] == 'D') {
- tgt_rep->fp_def[1] = true;
- changed |= SetFp(tgt_rep->defs[1]);
- }
- }
- }
- int num_uses = mir->dalvikInsn.vA;
- // If this is a non-static invoke, mark implicit "this"
- if (!IsInstructionInvokeStatic(mir->dalvikInsn.opcode)) {
- reg_location_[uses[next]].defined = true;
- reg_location_[uses[next]].ref = true;
- type_mismatch |= reg_location_[uses[next]].wide;
- next++;
- }
- uint32_t cpos = 1;
- if (strlen(shorty) > 1) {
- for (int i = next; i < num_uses;) {
- DCHECK_LT(cpos, strlen(shorty));
- switch (shorty[cpos++]) {
- case 'D':
- ssa_rep->fp_use[i] = true;
- ssa_rep->fp_use[i+1] = true;
- reg_location_[uses[i]].wide = true;
- reg_location_[uses[i+1]].wide = true;
- reg_location_[uses[i+1]].high_word = true;
- DCHECK_EQ(SRegToVReg(uses[i])+1, SRegToVReg(uses[i+1]));
- i++;
- break;
- case 'J':
- reg_location_[uses[i]].wide = true;
- reg_location_[uses[i+1]].wide = true;
- reg_location_[uses[i+1]].high_word = true;
- DCHECK_EQ(SRegToVReg(uses[i])+1, SRegToVReg(uses[i+1]));
- changed |= SetCore(uses[i]);
- i++;
- break;
- case 'F':
- type_mismatch |= reg_location_[uses[i]].wide;
- ssa_rep->fp_use[i] = true;
- break;
- case 'L':
- type_mismatch |= reg_location_[uses[i]].wide;
- changed |= SetRef(uses[i]);
- break;
- default:
- type_mismatch |= reg_location_[uses[i]].wide;
- changed |= SetCore(uses[i]);
- break;
- }
- i++;
- }
- }
- }
-
- for (int i = 0; ssa_rep->fp_use && i< ssa_rep->num_uses; i++) {
- if (ssa_rep->fp_use[i]) {
- changed |= SetFp(uses[i]);
- }
- }
- for (int i = 0; ssa_rep->fp_def && i< ssa_rep->num_defs; i++) {
- if (ssa_rep->fp_def[i]) {
- changed |= SetFp(defs[i]);
- }
- }
- // Special-case handling for moves & Phi
- if (attrs & (DF_IS_MOVE | DF_NULL_TRANSFER_N)) {
- /*
- * If any of our inputs or outputs is defined, set all.
- * Some ugliness related to Phi nodes and wide values.
- * The Phi set will include all low words or all high
- * words, so we have to treat them specially.
- */
- bool is_phi = (static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi);
- RegLocation rl_temp = reg_location_[defs[0]];
- bool defined_fp = rl_temp.defined && rl_temp.fp;
- bool defined_core = rl_temp.defined && rl_temp.core;
- bool defined_ref = rl_temp.defined && rl_temp.ref;
- bool is_wide = rl_temp.wide || ((attrs & DF_A_WIDE) != 0);
- bool is_high = is_phi && rl_temp.wide && rl_temp.high_word;
- for (int i = 0; i < ssa_rep->num_uses; i++) {
- rl_temp = reg_location_[uses[i]];
- defined_fp |= rl_temp.defined && rl_temp.fp;
- defined_core |= rl_temp.defined && rl_temp.core;
- defined_ref |= rl_temp.defined && rl_temp.ref;
- is_wide |= rl_temp.wide;
- is_high |= is_phi && rl_temp.wide && rl_temp.high_word;
- }
- /*
- * We don't normally expect to see a Dalvik register definition used both as a
- * floating point and core value, though technically it could happen with constants.
- * Until we have proper typing, detect this situation and disable register promotion
- * (which relies on the distinction between core a fp usages).
- */
- if ((defined_fp && (defined_core | defined_ref)) &&
- ((cu_->disable_opt & (1 << kPromoteRegs)) == 0)) {
- LOG(WARNING) << PrettyMethod(cu_->method_idx, *cu_->dex_file)
- << " op at block " << bb->id
- << " has both fp and core/ref uses for same def.";
- cu_->disable_opt |= (1 << kPromoteRegs);
- }
- changed |= SetFp(defs[0], defined_fp);
- changed |= SetCore(defs[0], defined_core);
- changed |= SetRef(defs[0], defined_ref);
- changed |= SetWide(defs[0], is_wide);
- changed |= SetHigh(defs[0], is_high);
- if (attrs & DF_A_WIDE) {
- changed |= SetWide(defs[1]);
- changed |= SetHigh(defs[1]);
- }
-
- bool has_ins = (GetNumOfInVRs() > 0);
-
- for (int i = 0; i < ssa_rep->num_uses; i++) {
- if (has_ins && IsInVReg(uses[i])) {
- // NB: The SSA name for the first def of an in-reg will be the same as
- // the reg's actual name.
- if (!reg_location_[uses[i]].fp && defined_fp) {
- // If we were about to infer that this first def of an in-reg is a float
- // when it wasn't previously (because float/int is set during SSA initialization),
- // do not allow this to happen.
- continue;
- }
- }
- changed |= SetFp(uses[i], defined_fp);
- changed |= SetCore(uses[i], defined_core);
- changed |= SetRef(uses[i], defined_ref);
- changed |= SetWide(uses[i], is_wide);
- changed |= SetHigh(uses[i], is_high);
- }
- if (attrs & DF_A_WIDE) {
- DCHECK_EQ(ssa_rep->num_uses, 2);
- changed |= SetWide(uses[1]);
- changed |= SetHigh(uses[1]);
- }
- }
- }
- if (type_mismatch) {
- LOG(WARNING) << "Deprecated dex type mismatch, interpreting "
- << PrettyMethod(cu_->method_idx, *cu_->dex_file);
- LOG(INFO) << "@ 0x" << std::hex << mir->offset;
- SetPuntToInterpreter(true);
- }
- return changed;
-}
-
static const char* storage_name[] = {" Frame ", "PhysReg", " CompilerTemp "};
void MIRGraph::DumpRegLocTable(RegLocation* table, int count) {
@@ -446,66 +52,12 @@
loc[i] = fresh_loc;
loc[i].s_reg_low = i;
loc[i].is_const = false; // Constants will be marked by constant propagation pass later.
- loc[i].wide = false;
}
- /* Treat Method* as a normal reference */
- int method_sreg = GetMethodSReg();
- loc[method_sreg].ref = true;
- loc[method_sreg].location = kLocCompilerTemp;
- loc[method_sreg].defined = true;
+ /* Mark the location of ArtMethod* as temporary */
+ loc[GetMethodSReg()].location = kLocCompilerTemp;
reg_location_ = loc;
-
- int num_regs = GetNumOfCodeVRs();
-
- /* Add types of incoming arguments based on signature */
- int num_ins = GetNumOfInVRs();
- if (num_ins > 0) {
- int s_reg = num_regs - num_ins;
- if ((cu_->access_flags & kAccStatic) == 0) {
- // For non-static, skip past "this"
- reg_location_[s_reg].defined = true;
- reg_location_[s_reg].ref = true;
- s_reg++;
- }
- const char* shorty = cu_->shorty;
- int shorty_len = strlen(shorty);
- for (int i = 1; i < shorty_len; i++) {
- switch (shorty[i]) {
- case 'D':
- reg_location_[s_reg].wide = true;
- reg_location_[s_reg+1].high_word = true;
- reg_location_[s_reg+1].fp = true;
- DCHECK_EQ(SRegToVReg(s_reg)+1, SRegToVReg(s_reg+1));
- reg_location_[s_reg].fp = true;
- reg_location_[s_reg].defined = true;
- s_reg++;
- break;
- case 'J':
- reg_location_[s_reg].wide = true;
- reg_location_[s_reg+1].high_word = true;
- DCHECK_EQ(SRegToVReg(s_reg)+1, SRegToVReg(s_reg+1));
- reg_location_[s_reg].core = true;
- reg_location_[s_reg].defined = true;
- s_reg++;
- break;
- case 'F':
- reg_location_[s_reg].fp = true;
- reg_location_[s_reg].defined = true;
- break;
- case 'L':
- reg_location_[s_reg].ref = true;
- reg_location_[s_reg].defined = true;
- break;
- default:
- reg_location_[s_reg].core = true;
- reg_location_[s_reg].defined = true;
- break;
- }
- s_reg++;
- }
- }
}
/*
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index b4d4695..e54cbf6 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -79,7 +79,7 @@
}
if (UNLIKELY(resolved_field->IsStatic() != is_static)) {
// ClassLinker can return a field of the wrong kind directly from the DexCache.
- // Silently return nullptr on such incompatible class change.
+ // Silently return null on such incompatible class change.
return nullptr;
}
return resolved_field;
@@ -127,34 +127,67 @@
return std::make_pair(fast_get, fast_put);
}
-inline std::pair<bool, bool> CompilerDriver::IsFastStaticField(
- mirror::DexCache* dex_cache, mirror::Class* referrer_class,
- ArtField* resolved_field, uint16_t field_idx, uint32_t* storage_index) {
- DCHECK(resolved_field->IsStatic());
+template <typename ArtMember>
+inline bool CompilerDriver::CanAccessResolvedMember(mirror::Class* referrer_class ATTRIBUTE_UNUSED,
+ mirror::Class* access_to ATTRIBUTE_UNUSED,
+ ArtMember* member ATTRIBUTE_UNUSED,
+ mirror::DexCache* dex_cache ATTRIBUTE_UNUSED,
+ uint32_t member_idx ATTRIBUTE_UNUSED) {
+ // Not defined for ArtMember types other than ArtField or mirror::ArtMethod.
+ UNREACHABLE();
+}
+
+template <>
+inline bool CompilerDriver::CanAccessResolvedMember<ArtField>(mirror::Class* referrer_class,
+ mirror::Class* access_to,
+ ArtField* field,
+ mirror::DexCache* dex_cache,
+ uint32_t field_idx) {
+ return referrer_class->CanAccessResolvedField(access_to, field, dex_cache, field_idx);
+}
+
+template <>
+inline bool CompilerDriver::CanAccessResolvedMember<mirror::ArtMethod>(
+ mirror::Class* referrer_class,
+ mirror::Class* access_to,
+ mirror::ArtMethod* method,
+ mirror::DexCache* dex_cache,
+ uint32_t method_idx) {
+ return referrer_class->CanAccessResolvedMethod(access_to, method, dex_cache, method_idx);
+}
+
+template <typename ArtMember>
+inline std::pair<bool, bool> CompilerDriver::IsClassOfStaticMemberAvailableToReferrer(
+ mirror::DexCache* dex_cache,
+ mirror::Class* referrer_class,
+ ArtMember* resolved_member,
+ uint16_t member_idx,
+ uint32_t* storage_index) {
+ DCHECK(resolved_member->IsStatic());
if (LIKELY(referrer_class != nullptr)) {
- mirror::Class* fields_class = resolved_field->GetDeclaringClass();
- if (fields_class == referrer_class) {
- *storage_index = fields_class->GetDexTypeIndex();
+ mirror::Class* members_class = resolved_member->GetDeclaringClass();
+ if (members_class == referrer_class) {
+ *storage_index = members_class->GetDexTypeIndex();
return std::make_pair(true, true);
}
- if (referrer_class->CanAccessResolvedField(fields_class, resolved_field,
- dex_cache, field_idx)) {
- // We have the resolved field, we must make it into a index for the referrer
+ if (CanAccessResolvedMember<ArtMember>(
+ referrer_class, members_class, resolved_member, dex_cache, member_idx)) {
+ // We have the resolved member; we must make it into an index for the referrer
// in its static storage (which may fail if it doesn't have a slot for it)
// TODO: for images we can elide the static storage base null check
// if we know there's a non-null entry in the image
const DexFile* dex_file = dex_cache->GetDexFile();
uint32_t storage_idx = DexFile::kDexNoIndex;
- if (LIKELY(fields_class->GetDexCache() == dex_cache)) {
- // common case where the dex cache of both the referrer and the field are the same,
+ if (LIKELY(members_class->GetDexCache() == dex_cache)) {
+ // common case where the referrer and the member share the same dex cache,
// no need to search the dex file
- storage_idx = fields_class->GetDexTypeIndex();
+ storage_idx = members_class->GetDexTypeIndex();
} else {
- // Search dex file for localized ssb index, may fail if field's class is a parent
+ // Search dex file for localized ssb index, may fail if member's class is a parent
// of the class mentioned in the dex file and there is no dex cache entry.
std::string temp;
const DexFile::StringId* string_id =
- dex_file->FindStringId(resolved_field->GetDeclaringClass()->GetDescriptor(&temp));
+ dex_file->FindStringId(resolved_member->GetDeclaringClass()->GetDescriptor(&temp));
if (string_id != nullptr) {
const DexFile::TypeId* type_id =
dex_file->FindTypeId(dex_file->GetIndexForStringId(*string_id));
@@ -166,7 +199,7 @@
}
if (storage_idx != DexFile::kDexNoIndex) {
*storage_index = storage_idx;
- return std::make_pair(true, !resolved_field->IsFinal());
+ return std::make_pair(true, !resolved_member->IsFinal());
}
}
}
@@ -175,6 +208,23 @@
return std::make_pair(false, false);
}
+inline std::pair<bool, bool> CompilerDriver::IsFastStaticField(
+ mirror::DexCache* dex_cache, mirror::Class* referrer_class,
+ ArtField* resolved_field, uint16_t field_idx, uint32_t* storage_index) {
+ return IsClassOfStaticMemberAvailableToReferrer(
+ dex_cache, referrer_class, resolved_field, field_idx, storage_index);
+}
+
+inline bool CompilerDriver::IsClassOfStaticMethodAvailableToReferrer(
+ mirror::DexCache* dex_cache, mirror::Class* referrer_class,
+ mirror::ArtMethod* resolved_method, uint16_t method_idx, uint32_t* storage_index) {
+ std::pair<bool, bool> result = IsClassOfStaticMemberAvailableToReferrer(
+ dex_cache, referrer_class, resolved_method, method_idx, storage_index);
+ // Only the first member of `result` is meaningful, as there is no
+ // "write access" to a method.
+ return result.first;
+}
+
inline bool CompilerDriver::IsStaticFieldInReferrerClass(mirror::Class* referrer_class,
ArtField* resolved_field) {
DCHECK(resolved_field->IsStatic());
@@ -206,7 +256,7 @@
}
if (check_incompatible_class_change &&
UNLIKELY(resolved_method->CheckIncompatibleClassChange(invoke_type))) {
- // Silently return nullptr on incompatible class change.
+ // Silently return null on incompatible class change.
return nullptr;
}
return resolved_method;
@@ -302,7 +352,7 @@
target_dex_cache, class_loader,
NullHandle<mirror::ArtMethod>(), kVirtual);
}
- CHECK(called_method != NULL);
+ CHECK(called_method != nullptr);
CHECK(!called_method->IsAbstract());
int stats_flags = kFlagMethodResolved;
GetCodeAndMethodForDirectCall(/*out*/invoke_type,
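
The CanAccessResolvedMember helpers above rely on full specialization of a function template: an unreachable primary template plus one specialization per supported member kind. A self-contained sketch of the pattern with toy types (not ART's):

#include <cassert>

struct Field {};
struct Method {};

template <typename Member>
bool CanAccessMember(Member*) {
  assert(false && "only Field and Method are supported");  // stands in for UNREACHABLE()
  return false;
}

template <>
bool CanAccessMember<Field>(Field*) {
  return true;  // would forward to the field-specific access check
}

template <>
bool CanAccessMember<Method>(Method*) {
  return true;  // would forward to the method-specific access check
}
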
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 1832647..5dc93ce 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -40,6 +40,7 @@
#include "dex/verification_results.h"
#include "dex/verified_method.h"
#include "dex/quick/dex_file_method_inliner.h"
+#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "driver/compiler_options.h"
#include "elf_writer_quick.h"
#include "jni_internal.h"
@@ -73,11 +74,11 @@
static constexpr bool kTimeCompileMethod = !kIsDebugBuild;
-// Whether to produce 64-bit ELF files for 64-bit targets. Leave this off for now.
-static constexpr bool kProduce64BitELFFiles = false;
+// Whether to produce 64-bit ELF files for 64-bit targets.
+static constexpr bool kProduce64BitELFFiles = true;
-// Whether classes-to-compile is only applied to the boot image, or, when given, too all
-// compilations.
+// Whether classes-to-compile and methods-to-compile are applied only to the boot image or,
+// when given, to all compilations.
static constexpr bool kRestrictCompilationFiltersToImage = true;
static double Percentage(size_t x, size_t y) {
@@ -349,6 +350,7 @@
const InstructionSetFeatures* instruction_set_features,
bool image, std::unordered_set<std::string>* image_classes,
std::unordered_set<std::string>* compiled_classes,
+ std::unordered_set<std::string>* compiled_methods,
size_t thread_count, bool dump_stats, bool dump_passes,
const std::string& dump_cfg_file_name, CumulativeLogger* timer,
int swap_fd, const std::string& profile_file)
@@ -369,6 +371,7 @@
image_(image),
image_classes_(image_classes),
classes_to_compile_(compiled_classes),
+ methods_to_compile_(compiled_methods),
had_hard_verifier_failure_(false),
thread_count_(thread_count),
stats_(new AOTCompilationStats),
@@ -493,7 +496,8 @@
const std::vector<const DexFile*>& dex_files,
TimingLogger* timings) {
DCHECK(!Runtime::Current()->IsStarted());
- std::unique_ptr<ThreadPool> thread_pool(new ThreadPool("Compiler driver thread pool", thread_count_ - 1));
+ std::unique_ptr<ThreadPool> thread_pool(
+ new ThreadPool("Compiler driver thread pool", thread_count_ - 1));
VLOG(compiler) << "Before precompile " << GetMemoryUsageString(false);
PreCompile(class_loader, dex_files, thread_pool.get(), timings);
Compile(class_loader, dex_files, thread_pool.get(), timings);
@@ -670,6 +674,19 @@
return classes_to_compile_->find(descriptor) != classes_to_compile_->end();
}
+bool CompilerDriver::IsMethodToCompile(const MethodReference& method_ref) const {
+ if (kRestrictCompilationFiltersToImage && !IsImage()) {
+ return true;
+ }
+
+ if (methods_to_compile_ == nullptr) {
+ return true;
+ }
+
+ std::string tmp = PrettyMethod(method_ref.dex_method_index, *method_ref.dex_file, true);
+ return methods_to_compile_->find(tmp) != methods_to_compile_->end();
+}
+
static void ResolveExceptionsForMethod(MutableHandle<mirror::ArtMethod> method_handle,
std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -1224,7 +1241,7 @@
mirror::Class* referrer_class;
mirror::DexCache* dex_cache;
{
- StackHandleScope<3> hs(soa.Self());
+ StackHandleScope<2> hs(soa.Self());
Handle<mirror::DexCache> dex_cache_handle(
hs.NewHandle(mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile())));
Handle<mirror::ClassLoader> class_loader_handle(
@@ -2086,7 +2103,8 @@
VLOG(compiler) << "Compile: " << GetMemoryUsageString(false);
}
-void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, size_t class_def_index) {
+void CompilerDriver::CompileClass(const ParallelCompilationManager* manager,
+ size_t class_def_index) {
ATRACE_CALL();
const DexFile& dex_file = *manager->GetDexFile();
const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
@@ -2232,9 +2250,11 @@
// Basic checks, e.g., not <clinit>.
verification_results_->IsCandidateForCompilation(method_ref, access_flags) &&
// Did not fail to create VerifiedMethod metadata.
- has_verified_method;
+ has_verified_method &&
+ // Is eligible for compilation by the methods-to-compile filter.
+ IsMethodToCompile(method_ref);
if (compile) {
- // NOTE: if compiler declines to compile this method, it will return nullptr.
+ // NOTE: if the compiler declines to compile this method, it will return null.
compiled_method = compiler_->Compile(code_item, access_flags, invoke_type, class_def_idx,
method_idx, class_loader, dex_file);
}
@@ -2466,4 +2486,16 @@
return oss.str();
}
+bool CompilerDriver::IsStringTypeIndex(uint16_t type_index, const DexFile* dex_file) {
+ const char* type = dex_file->GetTypeDescriptor(dex_file->GetTypeId(type_index));
+ return strcmp(type, "Ljava/lang/String;") == 0;
+}
+
+bool CompilerDriver::IsStringInit(uint32_t method_index, const DexFile* dex_file, int32_t* offset) {
+ DexFileMethodInliner* inliner = GetMethodInlinerMap()->GetMethodInliner(dex_file);
+ size_t pointer_size = InstructionSetPointerSize(GetInstructionSet());
+ *offset = inliner->GetOffsetForStringInit(method_index, pointer_size);
+ return inliner->IsStringInitMethodIndex(method_index);
+}
+
} // namespace art
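
The IsMethodToCompile() logic added above reduces to a null-set-means-no-filter lookup; a minimal sketch under that assumption (the signature string stands in for PrettyMethod() output):

#include <string>
#include <unordered_set>

bool ShouldCompile(const std::unordered_set<std::string>* methods_to_compile,
                   const std::string& pretty_method_signature) {
  if (methods_to_compile == nullptr) {
    return true;  // No methods-to-compile list configured: every method is eligible.
  }
  return methods_to_compile->count(pretty_method_signature) != 0;
}
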
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index ce13a17..2b0985a 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -94,7 +94,7 @@
// Create a compiler targeting the requested "instruction_set".
// "image" should be true if image specific optimizations should be
// enabled. "image_classes" lets the compiler know what classes it
- // can assume will be in the image, with nullptr implying all available
+ // can assume will be in the image, with null implying all available
// classes.
explicit CompilerDriver(const CompilerOptions* compiler_options,
VerificationResults* verification_results,
@@ -104,6 +104,7 @@
const InstructionSetFeatures* instruction_set_features,
bool image, std::unordered_set<std::string>* image_classes,
std::unordered_set<std::string>* compiled_classes,
+ std::unordered_set<std::string>* compiled_methods,
size_t thread_count, bool dump_stats, bool dump_passes,
const std::string& dump_cfg_file_name,
CumulativeLogger* timer, int swap_fd,
@@ -227,7 +228,7 @@
mirror::ClassLoader* GetClassLoader(ScopedObjectAccess& soa, const DexCompilationUnit* mUnit)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Resolve compiling method's class. Returns nullptr on failure.
+ // Resolve compiling method's class. Returns null on failure.
mirror::Class* ResolveCompilingMethodsClass(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit)
@@ -239,7 +240,7 @@
const DexCompilationUnit* mUnit)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Resolve a field. Returns nullptr on failure, including incompatible class change.
+ // Resolve a field. Returns null on failure, including incompatible class change.
// NOTE: Unlike ClassLinker's ResolveField(), this method enforces is_static.
ArtField* ResolveField(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
@@ -280,6 +281,18 @@
ArtField* resolved_field, uint16_t field_idx, uint32_t* storage_index)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Return whether the declaring class of `resolved_method` is
+ // available to `referrer_class`. If this is true, compute the type
+ // index of the declaring class in the referrer's dex file and
+ // return it through the out argument `storage_index`; otherwise
+ // return DexFile::kDexNoIndex through `storage_index`.
+ bool IsClassOfStaticMethodAvailableToReferrer(mirror::DexCache* dex_cache,
+ mirror::Class* referrer_class,
+ mirror::ArtMethod* resolved_method,
+ uint16_t method_idx,
+ uint32_t* storage_index)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Is the static field in the referrer's class?
bool IsStaticFieldInReferrerClass(mirror::Class* referrer_class, ArtField* resolved_field)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -289,7 +302,7 @@
ArtField* resolved_field)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Resolve a method. Returns nullptr on failure, including incompatible class change.
+ // Resolve a method. Returns null on failure, including incompatible class change.
mirror::ArtMethod* ResolveMethod(
ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
@@ -428,6 +441,9 @@
// Checks whether the provided class should be compiled, i.e., is in classes_to_compile_.
bool IsClassToCompile(const char* descriptor) const;
+ // Checks whether the provided method should be compiled, i.e., is in methods_to_compile_.
+ bool IsMethodToCompile(const MethodReference& method_ref) const;
+
void RecordClassStatus(ClassReference ref, mirror::Class::Status status)
LOCKS_EXCLUDED(compiled_classes_lock_);
@@ -450,11 +466,41 @@
// Get memory usage during compilation.
std::string GetMemoryUsageString(bool extended) const;
+ bool IsStringTypeIndex(uint16_t type_index, const DexFile* dex_file);
+ bool IsStringInit(uint32_t method_index, const DexFile* dex_file, int32_t* offset);
+
void SetHadHardVerifierFailure() {
had_hard_verifier_failure_ = true;
}
private:
+ // Return whether the declaring class of `resolved_member` is
+ // available to `referrer_class` for read or write access using two
+ // Boolean values returned as a pair. If is true at least for read
+ // access, compute the type index of the declaring class in the
+ // referrer's dex file and return it through the out argument
+ // `storage_index`; otherwise return DexFile::kDexNoIndex through
+ // `storage_index`.
+ template <typename ArtMember>
+ std::pair<bool, bool> IsClassOfStaticMemberAvailableToReferrer(mirror::DexCache* dex_cache,
+ mirror::Class* referrer_class,
+ ArtMember* resolved_member,
+ uint16_t member_idx,
+ uint32_t* storage_index)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Can `referrer_class` access the resolved `member`?
+ // Dispatch call to mirror::Class::CanAccessResolvedField or
+ // mirror::Class::CanAccessResolvedMethod depending on the type of
+ // ArtMember.
+ template <typename ArtMember>
+ static bool CanAccessResolvedMember(mirror::Class* referrer_class,
+ mirror::Class* access_to,
+ ArtMember* member,
+ mirror::DexCache* dex_cache,
+ uint32_t member_idx)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// These flags are internal to CompilerDriver for collecting INVOKE resolution statistics.
// The only external contract is that unresolved method has flags 0 and resolved non-0.
enum {
@@ -588,15 +634,20 @@
const bool image_;
// If image_ is true, specifies the classes that will be included in
- // the image. Note if image_classes_ is nullptr, all classes are
+ // the image. Note if image_classes_ is null, all classes are
// included in the image.
std::unique_ptr<std::unordered_set<std::string>> image_classes_;
- // Specifies the classes that will be compiled. Note that if classes_to_compile_ is nullptr,
+ // Specifies the classes that will be compiled. Note that if classes_to_compile_ is null,
// all classes are eligible for compilation (duplication filters etc. will still apply).
// This option may be restricted to the boot image, depending on a flag in the implementation.
std::unique_ptr<std::unordered_set<std::string>> classes_to_compile_;
+ // Specifies the methods that will be compiled. Note that if methods_to_compile_ is null,
+ // all methods are eligible for compilation (compilation filters etc. will still apply).
+ // This option may be restricted to the boot image, depending on a flag in the implementation.
+ std::unique_ptr<std::unordered_set<std::string>> methods_to_compile_;
+
bool had_hard_verifier_failure_;
size_t thread_count_;
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index e78ff90..5085f32 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -56,20 +56,20 @@
CHECK(started);
env_ = Thread::Current()->GetJniEnv();
class_ = env_->FindClass(class_name);
- CHECK(class_ != NULL) << "Class not found: " << class_name;
+ CHECK(class_ != nullptr) << "Class not found: " << class_name;
if (is_virtual) {
mid_ = env_->GetMethodID(class_, method, signature);
} else {
mid_ = env_->GetStaticMethodID(class_, method, signature);
}
- CHECK(mid_ != NULL) << "Method not found: " << class_name << "." << method << signature;
+ CHECK(mid_ != nullptr) << "Method not found: " << class_name << "." << method << signature;
}
void MakeAllExecutable(jobject class_loader) {
const std::vector<const DexFile*> class_path = GetDexFiles(class_loader);
for (size_t i = 0; i != class_path.size(); ++i) {
const DexFile* dex_file = class_path[i];
- CHECK(dex_file != NULL);
+ CHECK(dex_file != nullptr);
MakeDexFileExecutable(class_loader, *dex_file);
}
}
@@ -84,7 +84,7 @@
Handle<mirror::ClassLoader> loader(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader)));
mirror::Class* c = class_linker->FindClass(soa.Self(), descriptor, loader);
- CHECK(c != NULL);
+ CHECK(c != nullptr);
for (size_t j = 0; j < c->NumDirectMethods(); j++) {
MakeExecutable(c->GetDirectMethod(j));
}
@@ -101,39 +101,38 @@
// Disabled due to 10 second runtime on host
TEST_F(CompilerDriverTest, DISABLED_LARGE_CompileDexLibCore) {
- CompileAll(NULL);
+ CompileAll(nullptr);
// All libcore references should resolve
ScopedObjectAccess soa(Thread::Current());
- ASSERT_TRUE(java_lang_dex_file_ != NULL);
+ ASSERT_TRUE(java_lang_dex_file_ != nullptr);
const DexFile& dex = *java_lang_dex_file_;
mirror::DexCache* dex_cache = class_linker_->FindDexCache(dex);
EXPECT_EQ(dex.NumStringIds(), dex_cache->NumStrings());
for (size_t i = 0; i < dex_cache->NumStrings(); i++) {
const mirror::String* string = dex_cache->GetResolvedString(i);
- EXPECT_TRUE(string != NULL) << "string_idx=" << i;
+ EXPECT_TRUE(string != nullptr) << "string_idx=" << i;
}
EXPECT_EQ(dex.NumTypeIds(), dex_cache->NumResolvedTypes());
for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
mirror::Class* type = dex_cache->GetResolvedType(i);
- EXPECT_TRUE(type != NULL) << "type_idx=" << i
+ EXPECT_TRUE(type != nullptr) << "type_idx=" << i
<< " " << dex.GetTypeDescriptor(dex.GetTypeId(i));
}
EXPECT_EQ(dex.NumMethodIds(), dex_cache->NumResolvedMethods());
for (size_t i = 0; i < dex_cache->NumResolvedMethods(); i++) {
mirror::ArtMethod* method = dex_cache->GetResolvedMethod(i);
- EXPECT_TRUE(method != NULL) << "method_idx=" << i
+ EXPECT_TRUE(method != nullptr) << "method_idx=" << i
<< " " << dex.GetMethodDeclaringClassDescriptor(dex.GetMethodId(i))
<< " " << dex.GetMethodName(dex.GetMethodId(i));
- EXPECT_TRUE(method->GetEntryPointFromQuickCompiledCode() != NULL) << "method_idx=" << i
- << " "
- << dex.GetMethodDeclaringClassDescriptor(dex.GetMethodId(i))
- << " " << dex.GetMethodName(dex.GetMethodId(i));
+ EXPECT_TRUE(method->GetEntryPointFromQuickCompiledCode() != nullptr) << "method_idx=" << i
+ << " " << dex.GetMethodDeclaringClassDescriptor(dex.GetMethodId(i)) << " "
+ << dex.GetMethodName(dex.GetMethodId(i));
}
EXPECT_EQ(dex.NumFieldIds(), dex_cache->NumResolvedFields());
for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) {
ArtField* field = Runtime::Current()->GetClassLinker()->GetResolvedField(i, dex_cache);
- EXPECT_TRUE(field != NULL) << "field_idx=" << i
+ EXPECT_TRUE(field != nullptr) << "field_idx=" << i
<< " " << dex.GetFieldDeclaringClassDescriptor(dex.GetFieldId(i))
<< " " << dex.GetFieldName(dex.GetFieldId(i));
}
@@ -153,14 +152,14 @@
CompileDirectMethod(NullHandle<mirror::ClassLoader>(), "java.lang.Object", "<init>", "()V");
class_loader = LoadDex("AbstractMethod");
}
- ASSERT_TRUE(class_loader != NULL);
+ ASSERT_TRUE(class_loader != nullptr);
EnsureCompiled(class_loader, "AbstractClass", "foo", "()V", true);
// Create a jobj_ of ConcreteClass, NOT AbstractClass.
jclass c_class = env_->FindClass("ConcreteClass");
jmethodID constructor = env_->GetMethodID(c_class, "<init>", "()V");
jobject jobj_ = env_->NewObject(c_class, constructor);
- ASSERT_TRUE(jobj_ != NULL);
+ ASSERT_TRUE(jobj_ != nullptr);
// Force non-virtual call to AbstractClass foo, will throw AbstractMethodError exception.
env_->CallNonvirtualVoidMethod(jobj_, class_, mid_);
@@ -175,6 +174,60 @@
}
}
+class CompilerDriverMethodsTest : public CompilerDriverTest {
+ protected:
+ std::unordered_set<std::string>* GetCompiledMethods() OVERRIDE {
+ return new std::unordered_set<std::string>({
+ "byte StaticLeafMethods.identity(byte)",
+ "int StaticLeafMethods.sum(int, int, int)",
+ "double StaticLeafMethods.sum(double, double, double, double)"
+ });
+ }
+};
+
+TEST_F(CompilerDriverMethodsTest, Selection) {
+ Thread* self = Thread::Current();
+ jobject class_loader;
+ {
+ ScopedObjectAccess soa(self);
+ class_loader = LoadDex("StaticLeafMethods");
+ }
+ ASSERT_NE(class_loader, nullptr);
+
+ // Need to enable dex-file writability. Methods rejected for compilation will run through the
+ // dex-to-dex compiler.
+ for (const DexFile* dex_file : GetDexFiles(class_loader)) {
+ ASSERT_TRUE(dex_file->EnableWrite());
+ }
+
+ CompileAll(class_loader);
+
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ StackHandleScope<1> hs(self);
+ ScopedObjectAccess soa(self);
+ Handle<mirror::ClassLoader> h_loader(hs.NewHandle(
+ reinterpret_cast<mirror::ClassLoader*>(self->DecodeJObject(class_loader))));
+ mirror::Class* klass = class_linker->FindClass(self, "LStaticLeafMethods;", h_loader);
+ ASSERT_NE(klass, nullptr);
+
+ std::unique_ptr<std::unordered_set<std::string>> expected(GetCompiledMethods());
+
+ for (int32_t i = 0; static_cast<uint32_t>(i) < klass->NumDirectMethods(); i++) {
+ mirror::ArtMethod* m = klass->GetDirectMethod(i);
+ std::string name = PrettyMethod(m, true);
+ const void* code =
+ m->GetEntryPointFromQuickCompiledCodePtrSize(InstructionSetPointerSize(kRuntimeISA));
+ ASSERT_NE(code, nullptr);
+ if (expected->find(name) != expected->end()) {
+ expected->erase(name);
+ EXPECT_FALSE(class_linker->IsQuickToInterpreterBridge(code));
+ } else {
+ EXPECT_TRUE(class_linker->IsQuickToInterpreterBridge(code));
+ }
+ }
+ EXPECT_TRUE(expected->empty());
+}
+
// TODO: need check-cast test (when stub complete & we can throw/catch)
} // namespace art
diff --git a/compiler/driver/dex_compilation_unit.h b/compiler/driver/dex_compilation_unit.h
index 03ae489..3983006 100644
--- a/compiler/driver/dex_compilation_unit.h
+++ b/compiler/driver/dex_compilation_unit.h
@@ -21,6 +21,7 @@
#include "dex_file.h"
#include "jni.h"
+#include "base/arena_object.h"
namespace art {
namespace mirror {
@@ -31,7 +32,7 @@
struct CompilationUnit;
class VerifiedMethod;
-class DexCompilationUnit {
+class DexCompilationUnit : public DeletableArenaObject<kArenaAllocMisc> {
public:
explicit DexCompilationUnit(CompilationUnit* cu);
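
Deriving DexCompilationUnit from DeletableArenaObject lets callers place it on a compiler arena instead of the global heap. A simplified sketch of that pattern, assuming only that the base class overloads operator new to draw from a bump-pointer arena; Arena and ArenaObjectSketch here are illustrative stand-ins, not ART's classes:

#include <cstddef>
#include <cstdint>
#include <vector>

// Simplified stand-in for an arena allocator: bump-style allocation,
// with all memory released at once when the arena is destroyed.
class Arena {
 public:
  void* Alloc(size_t bytes) {
    blocks_.emplace_back(bytes);
    return blocks_.back().data();
  }
 private:
  std::vector<std::vector<uint8_t>> blocks_;
};

// Objects deriving from this base are placed on the arena via operator new.
// The real ART base class also tags allocations with a kind
// (e.g. kArenaAllocMisc) for accounting; that is omitted here.
class ArenaObjectSketch {
 public:
  static void* operator new(size_t size, Arena* arena) {
    return arena->Alloc(size);
  }
  // Matching placement delete (called only if a constructor throws).
  static void operator delete(void*, Arena*) {}
  // Regular delete is a no-op: the arena owns all the memory.
  static void operator delete(void*) {}
};

class UnitSketch : public ArenaObjectSketch {
 public:
  explicit UnitSketch(int id) : id_(id) {}
  int id() const { return id_; }
 private:
  int id_;
};

int main() {
  Arena arena;
  UnitSketch* unit = new (&arena) UnitSketch(42);
  return unit->id() == 42 ? 0 : 1;
}
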
diff --git a/compiler/dwarf/dwarf_constants.h b/compiler/dwarf/dwarf_constants.h
index 61a44cd..3b570e5 100644
--- a/compiler/dwarf/dwarf_constants.h
+++ b/compiler/dwarf/dwarf_constants.h
@@ -680,6 +680,14 @@
DW_EH_PE_aligned = 0x50,
};
+enum CFIFormat : uint8_t {
+ // This is the original format as defined by the specification.
+ // It is used for the .debug_frame section.
+ DW_DEBUG_FRAME_FORMAT,
+ // Slightly modified format used for the .eh_frame section.
+ DW_EH_FRAME_FORMAT
+};
+
} // namespace dwarf
} // namespace art
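
The new CFIFormat enum changes only two fields of the emitted CFI, as the headers.h hunk later in this diff shows. The first is the 32-bit CIE id: 0 in .eh_frame versus 0xFFFFFFFF in .debug_frame. A minimal compile-time sketch of that distinguisher (the names are illustrative):

#include <cstdint>

// In .eh_frame a CIE is identified by id 0; in .debug_frame (DWARF proper)
// the 32-bit CIE id is 0xFFFFFFFF. This mirrors the branch added to
// WriteDebugFrameCIE later in this diff.
enum CFIFormatSketch : uint8_t { kDebugFrame, kEhFrame };

constexpr uint32_t CieId(CFIFormatSketch format) {
  return (format == kEhFrame) ? 0u : 0xFFFFFFFFu;
}

static_assert(CieId(kEhFrame) == 0u, ".eh_frame CIEs use id 0");
static_assert(CieId(kDebugFrame) == 0xFFFFFFFFu, ".debug_frame CIEs use id ~0");

int main() { return 0; }
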
diff --git a/compiler/dwarf/dwarf_test.cc b/compiler/dwarf/dwarf_test.cc
index edba00a..4971f0e 100644
--- a/compiler/dwarf/dwarf_test.cc
+++ b/compiler/dwarf/dwarf_test.cc
@@ -26,6 +26,8 @@
namespace art {
namespace dwarf {
+constexpr CFIFormat kCFIFormat = DW_DEBUG_FRAME_FORMAT;
+
// Run the tests only on host since we need objdump.
#ifndef HAVE_ANDROID_OS
@@ -120,30 +122,30 @@
DW_CHECK_NEXT("DW_CFA_restore: r5 (ebp)");
DebugFrameOpCodeWriter<> initial_opcodes;
- WriteEhFrameCIE(is64bit, DW_EH_PE_absptr, Reg(is64bit ? 16 : 8),
- initial_opcodes, &eh_frame_data_);
- std::vector<uintptr_t> eh_frame_patches;
+ WriteDebugFrameCIE(is64bit, DW_EH_PE_absptr, Reg(is64bit ? 16 : 8),
+ initial_opcodes, kCFIFormat, &debug_frame_data_);
+ std::vector<uintptr_t> debug_frame_patches;
std::vector<uintptr_t> expected_patches { 28 }; // NOLINT
- WriteEhFrameFDE(is64bit, 0, 0x01000000, 0x01000000, opcodes.data(),
- &eh_frame_data_, &eh_frame_patches);
+ WriteDebugFrameFDE(is64bit, 0, 0x01000000, 0x01000000, opcodes.data(),
+ kCFIFormat, &debug_frame_data_, &debug_frame_patches);
- EXPECT_EQ(expected_patches, eh_frame_patches);
+ EXPECT_EQ(expected_patches, debug_frame_patches);
CheckObjdumpOutput(is64bit, "-W");
}
TEST_F(DwarfTest, DebugFrame64) {
constexpr bool is64bit = true;
DebugFrameOpCodeWriter<> initial_opcodes;
- WriteEhFrameCIE(is64bit, DW_EH_PE_absptr, Reg(16),
- initial_opcodes, &eh_frame_data_);
+ WriteDebugFrameCIE(is64bit, DW_EH_PE_absptr, Reg(16),
+ initial_opcodes, kCFIFormat, &debug_frame_data_);
DebugFrameOpCodeWriter<> opcodes;
- std::vector<uintptr_t> eh_frame_patches;
+ std::vector<uintptr_t> debug_frame_patches;
std::vector<uintptr_t> expected_patches { 32 }; // NOLINT
- WriteEhFrameFDE(is64bit, 0, 0x0100000000000000, 0x0200000000000000,
- opcodes.data(), &eh_frame_data_, &eh_frame_patches);
+ WriteDebugFrameFDE(is64bit, 0, 0x0100000000000000, 0x0200000000000000,
+ opcodes.data(), kCFIFormat, &debug_frame_data_, &debug_frame_patches);
DW_CHECK("FDE cie=00000000 pc=100000000000000..300000000000000");
- EXPECT_EQ(expected_patches, eh_frame_patches);
+ EXPECT_EQ(expected_patches, debug_frame_patches);
CheckObjdumpOutput(is64bit, "-W");
}
@@ -173,11 +175,11 @@
DW_CHECK_NEXT("DW_CFA_offset: r14 (r14)");
DW_CHECK_NEXT("DW_CFA_offset: r15 (r15)");
DebugFrameOpCodeWriter<> initial_opcodes;
- WriteEhFrameCIE(is64bit, DW_EH_PE_absptr, Reg(16),
- initial_opcodes, &eh_frame_data_);
- std::vector<uintptr_t> eh_frame_patches;
- WriteEhFrameFDE(is64bit, 0, 0x0100000000000000, 0x0200000000000000,
- opcodes.data(), &eh_frame_data_, &eh_frame_patches);
+ WriteDebugFrameCIE(is64bit, DW_EH_PE_absptr, Reg(16),
+ initial_opcodes, kCFIFormat, &debug_frame_data_);
+ std::vector<uintptr_t> debug_frame_patches;
+ WriteDebugFrameFDE(is64bit, 0, 0x0100000000000000, 0x0200000000000000,
+ opcodes.data(), kCFIFormat, &debug_frame_data_, &debug_frame_patches);
CheckObjdumpOutput(is64bit, "-W");
}
diff --git a/compiler/dwarf/dwarf_test.h b/compiler/dwarf/dwarf_test.h
index d31cfa5..3afb5ea 100644
--- a/compiler/dwarf/dwarf_test.h
+++ b/compiler/dwarf/dwarf_test.h
@@ -56,47 +56,42 @@
}
// Pretty-print the generated DWARF data using objdump.
- template<typename Elf_Word, typename Elf_Sword, typename Elf_Addr, typename Elf_Dyn,
- typename Elf_Sym, typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr>
- std::vector<std::string> Objdump(bool is64bit, const char* args) {
+ template<typename ElfTypes>
+ std::vector<std::string> Objdump(const char* args) {
// Write simple elf file with just the DWARF sections.
+ InstructionSet isa = (sizeof(typename ElfTypes::Addr) == 8) ? kX86_64 : kX86;
class NoCode : public CodeOutput {
- virtual void SetCodeOffset(size_t) { }
- virtual bool Write(OutputStream*) { return true; }
- } code;
- ScratchFile file;
- InstructionSet isa = is64bit ? kX86_64 : kX86;
- ElfBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
- Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr> builder(
- &code, file.GetFile(), isa, 0, 0, 0, 0, 0, 0, false, false);
- typedef ElfRawSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> Section;
- Section debug_info(".debug_info", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
- Section debug_abbrev(".debug_abbrev", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
- Section debug_str(".debug_str", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
- Section debug_line(".debug_line", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
- Section eh_frame(".eh_frame", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, 4, 0);
+ bool Write(OutputStream*) OVERRIDE { return true; } // NOLINT
+ } no_code;
+ ElfBuilder<ElfTypes> builder(isa, 0, &no_code, 0, &no_code, 0);
+ typedef typename ElfBuilder<ElfTypes>::RawSection RawSection;
+ RawSection debug_info(".debug_info", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
+ RawSection debug_abbrev(".debug_abbrev", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
+ RawSection debug_str(".debug_str", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
+ RawSection debug_line(".debug_line", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
+ RawSection debug_frame(".debug_frame", SHT_PROGBITS, 0, nullptr, 0, 8, 0);
if (!debug_info_data_.empty()) {
debug_info.SetBuffer(debug_info_data_);
- builder.RegisterRawSection(&debug_info);
+ builder.RegisterSection(&debug_info);
}
if (!debug_abbrev_data_.empty()) {
debug_abbrev.SetBuffer(debug_abbrev_data_);
- builder.RegisterRawSection(&debug_abbrev);
+ builder.RegisterSection(&debug_abbrev);
}
if (!debug_str_data_.empty()) {
debug_str.SetBuffer(debug_str_data_);
- builder.RegisterRawSection(&debug_str);
+ builder.RegisterSection(&debug_str);
}
if (!debug_line_data_.empty()) {
debug_line.SetBuffer(debug_line_data_);
- builder.RegisterRawSection(&debug_line);
+ builder.RegisterSection(&debug_line);
}
- if (!eh_frame_data_.empty()) {
- eh_frame.SetBuffer(eh_frame_data_);
- builder.RegisterRawSection(&eh_frame);
+ if (!debug_frame_data_.empty()) {
+ debug_frame.SetBuffer(debug_frame_data_);
+ builder.RegisterSection(&debug_frame);
}
- builder.Init();
- builder.Write();
+ ScratchFile file;
+ builder.Write(file.GetFile());
// Read the elf file back using objdump.
std::vector<std::string> lines;
@@ -125,11 +120,9 @@
std::vector<std::string> Objdump(bool is64bit, const char* args) {
if (is64bit) {
- return Objdump<Elf64_Word, Elf64_Sword, Elf64_Addr, Elf64_Dyn,
- Elf64_Sym, Elf64_Ehdr, Elf64_Phdr, Elf64_Shdr>(is64bit, args);
+ return Objdump<ElfTypes64>(args);
} else {
- return Objdump<Elf32_Word, Elf32_Sword, Elf32_Addr, Elf32_Dyn,
- Elf32_Sym, Elf32_Ehdr, Elf32_Phdr, Elf32_Shdr>(is64bit, args);
+ return Objdump<ElfTypes32>(args);
}
}
@@ -174,7 +167,7 @@
}
// Buffers which are going to be assembled into the ELF file and passed to objdump.
- std::vector<uint8_t> eh_frame_data_;
+ std::vector<uint8_t> debug_frame_data_;
std::vector<uint8_t> debug_info_data_;
std::vector<uint8_t> debug_abbrev_data_;
std::vector<uint8_t> debug_str_data_;
diff --git a/compiler/dwarf/headers.h b/compiler/dwarf/headers.h
index 9f64766..ad315ee 100644
--- a/compiler/dwarf/headers.h
+++ b/compiler/dwarf/headers.h
@@ -35,17 +35,18 @@
// and compilers are expected *not* to use it by default.
// In particular, it is not related to machine architecture.
-// Write common information entry (CIE) to .eh_frame section.
+// Write common information entry (CIE) to .debug_frame or .eh_frame section.
template<typename Allocator>
-void WriteEhFrameCIE(bool is64bit,
- ExceptionHeaderValueApplication address_type,
- Reg return_address_register,
- const DebugFrameOpCodeWriter<Allocator>& opcodes,
- std::vector<uint8_t>* eh_frame) {
- Writer<> writer(eh_frame);
+void WriteDebugFrameCIE(bool is64bit,
+ ExceptionHeaderValueApplication address_type,
+ Reg return_address_register,
+ const DebugFrameOpCodeWriter<Allocator>& opcodes,
+ CFIFormat format,
+ std::vector<uint8_t>* debug_frame) {
+ Writer<> writer(debug_frame);
size_t cie_header_start_ = writer.data()->size();
writer.PushUint32(0); // Length placeholder.
- writer.PushUint32(0); // CIE id.
+ writer.PushUint32((format == DW_EH_FRAME_FORMAT) ? 0 : 0xFFFFFFFF); // CIE id.
writer.PushUint8(1); // Version.
writer.PushString("zR");
writer.PushUleb128(DebugFrameOpCodeWriter<Allocator>::kCodeAlignmentFactor);
@@ -62,20 +63,26 @@
writer.UpdateUint32(cie_header_start_, writer.data()->size() - cie_header_start_ - 4);
}
-// Write frame description entry (FDE) to .eh_frame section.
+// Write frame description entry (FDE) to .debug_frame or .eh_frame section.
template<typename Allocator>
-void WriteEhFrameFDE(bool is64bit, size_t cie_offset,
- uint64_t initial_address, uint64_t address_range,
- const std::vector<uint8_t, Allocator>* opcodes,
- std::vector<uint8_t>* eh_frame,
- std::vector<uintptr_t>* eh_frame_patches) {
- Writer<> writer(eh_frame);
+void WriteDebugFrameFDE(bool is64bit, size_t cie_offset,
+ uint64_t initial_address, uint64_t address_range,
+ const std::vector<uint8_t, Allocator>* opcodes,
+ CFIFormat format,
+ std::vector<uint8_t>* debug_frame,
+ std::vector<uintptr_t>* debug_frame_patches) {
+ Writer<> writer(debug_frame);
size_t fde_header_start = writer.data()->size();
writer.PushUint32(0); // Length placeholder.
- uint32_t cie_pointer = writer.data()->size() - cie_offset;
- writer.PushUint32(cie_pointer);
+ if (format == DW_EH_FRAME_FORMAT) {
+ uint32_t cie_pointer = writer.data()->size() - cie_offset;
+ writer.PushUint32(cie_pointer);
+ } else {
+ uint32_t cie_pointer = cie_offset;
+ writer.PushUint32(cie_pointer);
+ }
// Relocate initial_address, but not address_range (it is size).
- eh_frame_patches->push_back(writer.data()->size());
+ debug_frame_patches->push_back(writer.data()->size());
if (is64bit) {
writer.PushUint64(initial_address);
writer.PushUint64(address_range);
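
The second format-dependent field is the FDE's CIE pointer written above: .eh_frame stores the distance from the pointer field back to its CIE, while .debug_frame stores the CIE's plain section offset. A standalone sketch of both encodings, with hypothetical offsets:

#include <cstddef>
#include <cstdint>

enum FormatSketch { kDebugFrameFmt, kEhFrameFmt };

// pointer_field_offset: offset of the CIE-pointer field within the section.
// cie_offset: offset of the CIE within the section.
uint32_t CiePointer(FormatSketch format,
                    size_t pointer_field_offset,
                    size_t cie_offset) {
  if (format == kEhFrameFmt) {
    // Relative: distance from this field back to the start of the CIE.
    return static_cast<uint32_t>(pointer_field_offset - cie_offset);
  }
  // Absolute: the CIE's offset within .debug_frame.
  return static_cast<uint32_t>(cie_offset);
}

int main() {
  // A CIE at offset 0 and an FDE whose pointer field sits at offset 20:
  return (CiePointer(kEhFrameFmt, 20, 0) == 20u &&
          CiePointer(kDebugFrameFmt, 20, 0) == 0u) ? 0 : 1;
}
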
diff --git a/compiler/dwarf/register.h b/compiler/dwarf/register.h
index fa666df..7045237 100644
--- a/compiler/dwarf/register.h
+++ b/compiler/dwarf/register.h
@@ -33,6 +33,7 @@
// There are ways around this in DWARF but they are complex.
// It would be much simpler to always spill whole D registers.
// Arm64 mapping is correct since we already do this there.
+ // libunwind might struggle with the new mapping as well.
static Reg ArmCore(int num) { return Reg(num); }
static Reg ArmFp(int num) { return Reg(64 + num); } // S0–S31.
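
The mapping above keeps core registers at their natural DWARF numbers and shifts the single-precision S registers past slot 64. A compile-time sketch of that numbering, using simplified stand-ins for dwarf::Reg:

// Minimal sketch of the register numbering quoted above: core registers
// keep their numbers, S0-S31 map to DWARF registers 64..95.
struct RegSketch {
  int num;
};
constexpr RegSketch ArmCoreSketch(int num) { return RegSketch{num}; }
constexpr RegSketch ArmFpSketch(int num) { return RegSketch{64 + num}; }

static_assert(ArmCoreSketch(13).num == 13, "r13 (sp) keeps its core number");
static_assert(ArmFpSketch(31).num == 95, "S31 maps to DWARF register 95");

int main() { return 0; }
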
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index 323c933..972bd08 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2014 The Android Open Source Project
+ * Copyright (C) 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,563 +17,500 @@
#ifndef ART_COMPILER_ELF_BUILDER_H_
#define ART_COMPILER_ELF_BUILDER_H_
+#include <vector>
+
#include "arch/instruction_set.h"
-#include "base/stl_util.h"
-#include "base/value_object.h"
+#include "base/unix_file/fd_file.h"
#include "buffered_output_stream.h"
#include "elf_utils.h"
#include "file_output_stream.h"
namespace art {
-template <typename Elf_Word, typename Elf_Sword, typename Elf_Shdr>
-class ElfSectionBuilder : public ValueObject {
- public:
- ElfSectionBuilder(const std::string& sec_name, Elf_Word type, Elf_Word flags,
- const ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> *link, Elf_Word info,
- Elf_Word align, Elf_Word entsize)
- : section_index_(0), name_(sec_name), link_(link) {
- memset(§ion_, 0, sizeof(section_));
- section_.sh_type = type;
- section_.sh_flags = flags;
- section_.sh_info = info;
- section_.sh_addralign = align;
- section_.sh_entsize = entsize;
- }
- ElfSectionBuilder(const ElfSectionBuilder&) = default;
-
- ~ElfSectionBuilder() {}
-
- Elf_Word GetLink() const {
- return (link_ != nullptr) ? link_->section_index_ : 0;
- }
-
- const Elf_Shdr* GetSection() const {
- return §ion_;
- }
-
- Elf_Shdr* GetSection() {
- return §ion_;
- }
-
- Elf_Word GetSectionIndex() const {
- return section_index_;
- }
-
- void SetSectionIndex(Elf_Word section_index) {
- section_index_ = section_index;
- }
-
- const std::string& GetName() const {
- return name_;
- }
-
- private:
- Elf_Shdr section_;
- Elf_Word section_index_;
- const std::string name_;
- const ElfSectionBuilder* const link_;
-};
-
-template <typename Elf_Word, typename Elf_Sword, typename Elf_Dyn, typename Elf_Shdr>
-class ElfDynamicBuilder FINAL : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> {
- public:
- void AddDynamicTag(Elf_Sword tag, Elf_Word d_un) {
- if (tag == DT_NULL) {
- return;
- }
- dynamics_.push_back({nullptr, tag, d_un});
- }
-
- void AddDynamicTag(Elf_Sword tag, Elf_Word d_un,
- const ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>* section) {
- if (tag == DT_NULL) {
- return;
- }
- dynamics_.push_back({section, tag, d_un});
- }
-
- ElfDynamicBuilder(const std::string& sec_name,
- ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> *link)
- : ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>(sec_name, SHT_DYNAMIC, SHF_ALLOC | SHF_ALLOC,
- link, 0, kPageSize, sizeof(Elf_Dyn)) {}
- ~ElfDynamicBuilder() {}
-
- Elf_Word GetSize() const {
- // Add 1 for the DT_NULL, 1 for DT_STRSZ, and 1 for DT_SONAME. All of
- // these must be added when we actually put the file together because
- // their values are very dependent on state.
- return dynamics_.size() + 3;
- }
-
- // Create the actual dynamic vector. strsz should be the size of the .dynstr
- // table and soname_off should be the offset of the soname in .dynstr.
- // Since niether can be found prior to final layout we will wait until here
- // to add them.
- std::vector<Elf_Dyn> GetDynamics(Elf_Word strsz, Elf_Word soname) const {
- std::vector<Elf_Dyn> ret;
- for (auto it = dynamics_.cbegin(); it != dynamics_.cend(); ++it) {
- if (it->section_ != nullptr) {
- // We are adding an address relative to a section.
- ret.push_back(
- {it->tag_, {it->off_ + it->section_->GetSection()->sh_addr}});
- } else {
- ret.push_back({it->tag_, {it->off_}});
- }
- }
- ret.push_back({DT_STRSZ, {strsz}});
- ret.push_back({DT_SONAME, {soname}});
- ret.push_back({DT_NULL, {0}});
- return ret;
- }
-
- private:
- struct ElfDynamicState {
- const ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>* section_;
- Elf_Sword tag_;
- Elf_Word off_;
- };
- std::vector<ElfDynamicState> dynamics_;
-};
-
-template <typename Elf_Word, typename Elf_Sword, typename Elf_Shdr>
-class ElfRawSectionBuilder FINAL : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> {
- public:
- ElfRawSectionBuilder(const std::string& sec_name, Elf_Word type, Elf_Word flags,
- const ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>* link, Elf_Word info,
- Elf_Word align, Elf_Word entsize)
- : ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>(sec_name, type, flags, link, info, align,
- entsize) {
- }
- ElfRawSectionBuilder(const ElfRawSectionBuilder&) = default;
-
- ~ElfRawSectionBuilder() {}
-
- std::vector<uint8_t>* GetBuffer() {
- return &buf_;
- }
-
- void SetBuffer(const std::vector<uint8_t>& buf) {
- buf_ = buf;
- }
-
- private:
- std::vector<uint8_t> buf_;
-};
-
-template <typename Elf_Word, typename Elf_Sword, typename Elf_Shdr>
-class ElfOatSectionBuilder FINAL : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> {
- public:
- ElfOatSectionBuilder(const std::string& sec_name, Elf_Word size, Elf_Word offset,
- Elf_Word type, Elf_Word flags)
- : ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>(sec_name, type, flags, nullptr, 0, kPageSize,
- 0),
- offset_(offset), size_(size) {
- }
-
- ~ElfOatSectionBuilder() {}
-
- Elf_Word GetOffset() const {
- return offset_;
- }
-
- Elf_Word GetSize() const {
- return size_;
- }
-
- private:
- // Offset of the content within the file.
- Elf_Word offset_;
- // Size of the content within the file.
- Elf_Word size_;
-};
-
-static inline constexpr uint8_t MakeStInfo(uint8_t binding, uint8_t type) {
- return ((binding) << 4) + ((type) & 0xf);
-}
-
-// from bionic
-static inline unsigned elfhash(const char *_name) {
- const unsigned char *name = (const unsigned char *) _name;
- unsigned h = 0, g;
-
- while (*name) {
- h = (h << 4) + *name++;
- g = h & 0xf0000000;
- h ^= g;
- h ^= g >> 24;
- }
- return h;
-}
-
-template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr, typename Elf_Sym,
- typename Elf_Shdr>
-class ElfSymtabBuilder FINAL : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> {
- public:
- // Add a symbol with given name to this symtab. The symbol refers to
- // 'relative_addr' within the given section and has the given attributes.
- void AddSymbol(const std::string& name,
- const ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>* section,
- Elf_Addr addr,
- bool is_relative,
- Elf_Word size,
- uint8_t binding,
- uint8_t type,
- uint8_t other = 0) {
- CHECK(section);
- ElfSymtabBuilder::ElfSymbolState state {name, section, addr, size, is_relative,
- MakeStInfo(binding, type), other, 0};
- symbols_.push_back(state);
- }
-
- ElfSymtabBuilder(const std::string& sec_name, Elf_Word type,
- const std::string& str_name, Elf_Word str_type, bool alloc)
- : ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>(sec_name, type, ((alloc) ? SHF_ALLOC : 0U),
- &strtab_, 0, sizeof(Elf_Word),
- sizeof(Elf_Sym)), str_name_(str_name),
- str_type_(str_type),
- strtab_(str_name,
- str_type,
- ((alloc) ? SHF_ALLOC : 0U),
- nullptr, 0, 1, 1) {
- }
-
- ~ElfSymtabBuilder() {}
-
- std::vector<Elf_Word> GenerateHashContents() const {
- // Here is how The ELF hash table works.
- // There are 3 arrays to worry about.
- // * The symbol table where the symbol information is.
- // * The bucket array which is an array of indexes into the symtab and chain.
- // * The chain array which is also an array of indexes into the symtab and chain.
- //
- // Lets say the state is something like this.
- // +--------+ +--------+ +-----------+
- // | symtab | | bucket | | chain |
- // | null | | 1 | | STN_UNDEF |
- // | <sym1> | | 4 | | 2 |
- // | <sym2> | | | | 5 |
- // | <sym3> | | | | STN_UNDEF |
- // | <sym4> | | | | 3 |
- // | <sym5> | | | | STN_UNDEF |
- // +--------+ +--------+ +-----------+
- //
- // The lookup process (in python psudocode) is
- //
- // def GetSym(name):
- // # NB STN_UNDEF == 0
- // indx = bucket[elfhash(name) % num_buckets]
- // while indx != STN_UNDEF:
- // if GetSymbolName(symtab[indx]) == name:
- // return symtab[indx]
- // indx = chain[indx]
- // return SYMBOL_NOT_FOUND
- //
- // Between bucket and chain arrays every symtab index must be present exactly
- // once (except for STN_UNDEF, which must be present 1 + num_bucket times).
-
- // Select number of buckets.
- // This is essentially arbitrary.
- Elf_Word nbuckets;
- Elf_Word chain_size = GetSize();
- if (symbols_.size() < 8) {
- nbuckets = 2;
- } else if (symbols_.size() < 32) {
- nbuckets = 4;
- } else if (symbols_.size() < 256) {
- nbuckets = 16;
- } else {
- // Have about 32 ids per bucket.
- nbuckets = RoundUp(symbols_.size()/32, 2);
- }
- std::vector<Elf_Word> hash;
- hash.push_back(nbuckets);
- hash.push_back(chain_size);
- uint32_t bucket_offset = hash.size();
- uint32_t chain_offset = bucket_offset + nbuckets;
- hash.resize(hash.size() + nbuckets + chain_size, 0);
-
- Elf_Word* buckets = hash.data() + bucket_offset;
- Elf_Word* chain = hash.data() + chain_offset;
-
- // Set up the actual hash table.
- for (Elf_Word i = 0; i < symbols_.size(); i++) {
- // Add 1 since we need to have the null symbol that is not in the symbols
- // list.
- Elf_Word index = i + 1;
- Elf_Word hash_val = static_cast<Elf_Word>(elfhash(symbols_[i].name_.c_str())) % nbuckets;
- if (buckets[hash_val] == 0) {
- buckets[hash_val] = index;
- } else {
- hash_val = buckets[hash_val];
- CHECK_LT(hash_val, chain_size);
- while (chain[hash_val] != 0) {
- hash_val = chain[hash_val];
- CHECK_LT(hash_val, chain_size);
- }
- chain[hash_val] = index;
- // Check for loops. Works because if this is non-empty then there must be
- // another cell which already contains the same symbol index as this one,
- // which means some symbol has more then one name, which isn't allowed.
- CHECK_EQ(chain[index], static_cast<Elf_Word>(0));
- }
- }
-
- return hash;
- }
-
- std::string GenerateStrtab() {
- std::string tab;
- tab += '\0';
- for (auto it = symbols_.begin(); it != symbols_.end(); ++it) {
- it->name_idx_ = tab.size();
- tab += it->name_;
- tab += '\0';
- }
- strtab_.GetSection()->sh_size = tab.size();
- return tab;
- }
-
- std::vector<Elf_Sym> GenerateSymtab() {
- std::vector<Elf_Sym> ret;
- Elf_Sym undef_sym;
- memset(&undef_sym, 0, sizeof(undef_sym));
- undef_sym.st_shndx = SHN_UNDEF;
- ret.push_back(undef_sym);
-
- for (auto it = symbols_.cbegin(); it != symbols_.cend(); ++it) {
- Elf_Sym sym;
- memset(&sym, 0, sizeof(sym));
- sym.st_name = it->name_idx_;
- if (it->is_relative_) {
- sym.st_value = it->addr_ + it->section_->GetSection()->sh_offset;
- } else {
- sym.st_value = it->addr_;
- }
- sym.st_size = it->size_;
- sym.st_other = it->other_;
- sym.st_shndx = it->section_->GetSectionIndex();
- sym.st_info = it->info_;
-
- ret.push_back(sym);
- }
- return ret;
- }
-
- Elf_Word GetSize() const {
- // 1 is for the implicit NULL symbol.
- return symbols_.size() + 1;
- }
-
- ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>* GetStrTab() {
- return &strtab_;
- }
-
- private:
- struct ElfSymbolState {
- const std::string name_;
- const ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>* section_;
- Elf_Addr addr_;
- Elf_Word size_;
- bool is_relative_;
- uint8_t info_;
- uint8_t other_;
- // Used during Write() to temporarially hold name index in the strtab.
- Elf_Word name_idx_;
- };
-
- // Information for the strsym for dynstr sections.
- const std::string str_name_;
- Elf_Word str_type_;
- // The symbols in the same order they will be in the symbol table.
- std::vector<ElfSymbolState> symbols_;
- ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> strtab_;
-};
-
-template <typename Elf_Word>
-class ElfFilePiece {
- public:
- virtual ~ElfFilePiece() {}
-
- virtual bool Write(File* elf_file) {
- if (static_cast<off_t>(offset_) != lseek(elf_file->Fd(), offset_, SEEK_SET)) {
- PLOG(ERROR) << "Failed to seek to " << GetDescription() << " offset " << offset_ << " for "
- << elf_file->GetPath();
- return false;
- }
-
- return DoActualWrite(elf_file);
- }
-
- static bool Compare(ElfFilePiece* a, ElfFilePiece* b) {
- return a->offset_ < b->offset_;
- }
-
- protected:
- explicit ElfFilePiece(Elf_Word offset) : offset_(offset) {}
-
- Elf_Word GetOffset() const {
- return offset_;
- }
-
- virtual const char* GetDescription() const = 0;
- virtual bool DoActualWrite(File* elf_file) = 0;
-
- private:
- const Elf_Word offset_;
-
- DISALLOW_COPY_AND_ASSIGN(ElfFilePiece);
-};
-
-template <typename Elf_Word>
-class ElfFileMemoryPiece FINAL : public ElfFilePiece<Elf_Word> {
- public:
- ElfFileMemoryPiece(const std::string& name, Elf_Word offset, const void* data, Elf_Word size)
- : ElfFilePiece<Elf_Word>(offset), dbg_name_(name), data_(data), size_(size) {}
-
- protected:
- bool DoActualWrite(File* elf_file) OVERRIDE {
- DCHECK(data_ != nullptr || size_ == 0U) << dbg_name_ << " " << size_;
-
- if (!elf_file->WriteFully(data_, size_)) {
- PLOG(ERROR) << "Failed to write " << dbg_name_ << " for " << elf_file->GetPath();
- return false;
- }
-
- return true;
- }
-
- const char* GetDescription() const OVERRIDE {
- return dbg_name_.c_str();
- }
-
- private:
- const std::string& dbg_name_;
- const void *data_;
- Elf_Word size_;
-};
-
class CodeOutput {
public:
- virtual void SetCodeOffset(size_t offset) = 0;
virtual bool Write(OutputStream* out) = 0;
virtual ~CodeOutput() {}
};
-template <typename Elf_Word>
-class ElfFileRodataPiece FINAL : public ElfFilePiece<Elf_Word> {
- public:
- ElfFileRodataPiece(Elf_Word offset, CodeOutput* output) : ElfFilePiece<Elf_Word>(offset),
- output_(output) {}
-
- protected:
- bool DoActualWrite(File* elf_file) OVERRIDE {
- output_->SetCodeOffset(this->GetOffset());
- std::unique_ptr<BufferedOutputStream> output_stream(
- new BufferedOutputStream(new FileOutputStream(elf_file)));
- if (!output_->Write(output_stream.get())) {
- PLOG(ERROR) << "Failed to write .rodata and .text for " << elf_file->GetPath();
- return false;
- }
-
- return true;
- }
-
- const char* GetDescription() const OVERRIDE {
- return ".rodata";
- }
-
- private:
- CodeOutput* const output_;
-
- DISALLOW_COPY_AND_ASSIGN(ElfFileRodataPiece);
-};
-
-template <typename Elf_Word>
-class ElfFileOatTextPiece FINAL : public ElfFilePiece<Elf_Word> {
- public:
- ElfFileOatTextPiece(Elf_Word offset, CodeOutput* output) : ElfFilePiece<Elf_Word>(offset),
- output_(output) {}
-
- protected:
- bool DoActualWrite(File* elf_file ATTRIBUTE_UNUSED) OVERRIDE {
- // All data is written by the ElfFileRodataPiece right now, as the oat writer writes in one
- // piece. This is for future flexibility.
- UNUSED(output_);
- return true;
- }
-
- const char* GetDescription() const OVERRIDE {
- return ".text";
- }
-
- private:
- CodeOutput* const output_;
-
- DISALLOW_COPY_AND_ASSIGN(ElfFileOatTextPiece);
-};
-
-template <typename Elf_Word>
-static bool WriteOutFile(const std::vector<ElfFilePiece<Elf_Word>*>& pieces, File* elf_file) {
- // TODO It would be nice if this checked for overlap.
- for (auto it = pieces.begin(); it != pieces.end(); ++it) {
- if (!(*it)->Write(elf_file)) {
- return false;
- }
- }
- return true;
-}
-
-template <typename Elf_Word, typename Elf_Shdr>
-static inline constexpr Elf_Word NextOffset(const Elf_Shdr& cur, const Elf_Shdr& prev) {
- return RoundUp(prev.sh_size + prev.sh_offset, cur.sh_addralign);
-}
-
-template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr, typename Elf_Dyn,
- typename Elf_Sym, typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr>
+// Writes ELF file.
+// The main complication is that the sections often want to reference
+// each other. We solve this by writing the ELF file in two stages:
+// * Sections are asked about their size, and overall layout is calculated.
+// * Sections do the actual writes which may use offsets of other sections.
+template <typename ElfTypes>
class ElfBuilder FINAL {
public:
- ElfBuilder(CodeOutput* oat_writer,
- File* elf_file,
- InstructionSet isa,
- Elf_Word rodata_relative_offset,
- Elf_Word rodata_size,
- Elf_Word text_relative_offset,
- Elf_Word text_size,
- Elf_Word bss_relative_offset,
- Elf_Word bss_size,
- const bool add_symbols,
- bool debug = false)
- : oat_writer_(oat_writer),
- elf_file_(elf_file),
- add_symbols_(add_symbols),
- debug_logging_(debug),
- text_builder_(".text", text_size, text_relative_offset, SHT_PROGBITS,
- SHF_ALLOC | SHF_EXECINSTR),
- rodata_builder_(".rodata", rodata_size, rodata_relative_offset, SHT_PROGBITS, SHF_ALLOC),
- bss_builder_(".bss", bss_size, bss_relative_offset, SHT_NOBITS, SHF_ALLOC),
- dynsym_builder_(".dynsym", SHT_DYNSYM, ".dynstr", SHT_STRTAB, true),
- symtab_builder_(".symtab", SHT_SYMTAB, ".strtab", SHT_STRTAB, false),
- hash_builder_(".hash", SHT_HASH, SHF_ALLOC, &dynsym_builder_, 0, sizeof(Elf_Word),
- sizeof(Elf_Word)),
- dynamic_builder_(".dynamic", &dynsym_builder_),
- shstrtab_builder_(".shstrtab", SHT_STRTAB, 0, NULL, 0, 1, 1) {
- SetupEhdr();
- SetupDynamic();
- SetupRequiredSymbols();
- SetISA(isa);
+ using Elf_Addr = typename ElfTypes::Addr;
+ using Elf_Off = typename ElfTypes::Off;
+ using Elf_Word = typename ElfTypes::Word;
+ using Elf_Sword = typename ElfTypes::Sword;
+ using Elf_Ehdr = typename ElfTypes::Ehdr;
+ using Elf_Shdr = typename ElfTypes::Shdr;
+ using Elf_Sym = typename ElfTypes::Sym;
+ using Elf_Phdr = typename ElfTypes::Phdr;
+ using Elf_Dyn = typename ElfTypes::Dyn;
+
+ // Base class of all sections.
+ class Section {
+ public:
+ Section(const std::string& name, Elf_Word type, Elf_Word flags,
+ const Section* link, Elf_Word info, Elf_Word align, Elf_Word entsize)
+ : header_(new Elf_Shdr()), section_index_(0), name_(name), link_(link) {
+ header_->sh_type = type;
+ header_->sh_flags = flags;
+ header_->sh_info = info;
+ header_->sh_addralign = align;
+ header_->sh_entsize = entsize;
+ }
+ virtual ~Section() {}
+
+ // Returns the size of the content of this section. It is used to
+ // calculate file offsets of all sections before doing any writes.
+ virtual Elf_Word GetSize() const = 0;
+
+ // Write the content of this section to the given file.
+ // This must write exactly the number of bytes returned by GetSize().
+ // Offsets of all sections are known when this method is called.
+ virtual bool Write(File* elf_file) = 0;
+
+ Elf_Word GetLink() const {
+ return (link_ != nullptr) ? link_->GetSectionIndex() : 0;
+ }
+
+ const Elf_Shdr* GetHeader() const {
+ return header_.get();
+ }
+
+ Elf_Shdr* GetHeader() {
+ return header_.get();
+ }
+
+ Elf_Word GetSectionIndex() const {
+ DCHECK_NE(section_index_, 0u);
+ return section_index_;
+ }
+
+ void SetSectionIndex(Elf_Word section_index) {
+ section_index_ = section_index;
+ }
+
+ const std::string& GetName() const {
+ return name_;
+ }
+
+ private:
+ // Elf_Shdr is somewhat large so allocate it on the heap.
+ // Otherwise we get in trouble with stack frame sizes.
+ std::unique_ptr<Elf_Shdr> header_;
+ Elf_Word section_index_;
+ const std::string name_;
+ const Section* const link_;
+
+ DISALLOW_COPY_AND_ASSIGN(Section);
+ };
+
+ // Writer of .dynamic section.
+ class DynamicSection FINAL : public Section {
+ public:
+ void AddDynamicTag(Elf_Sword tag, Elf_Word value, const Section* section) {
+ DCHECK_NE(tag, static_cast<Elf_Sword>(DT_NULL));
+ dynamics_.push_back({tag, value, section});
+ }
+
+ DynamicSection(const std::string& name, Section* link)
+ : Section(name, SHT_DYNAMIC, SHF_ALLOC,
+ link, 0, kPageSize, sizeof(Elf_Dyn)) {}
+
+ Elf_Word GetSize() const OVERRIDE {
+ return (dynamics_.size() + 1 /* DT_NULL */) * sizeof(Elf_Dyn);
+ }
+
+ bool Write(File* elf_file) OVERRIDE {
+ std::vector<Elf_Dyn> buffer;
+ buffer.reserve(dynamics_.size() + 1u);
+ for (const ElfDynamicState& it : dynamics_) {
+ if (it.section_ != nullptr) {
+ // We are adding an address relative to a section.
+ buffer.push_back(
+ {it.tag_, {it.value_ + it.section_->GetHeader()->sh_addr}});
+ } else {
+ buffer.push_back({it.tag_, {it.value_}});
+ }
+ }
+ buffer.push_back({DT_NULL, {0}});
+ return WriteArray(elf_file, buffer.data(), buffer.size());
+ }
+
+ private:
+ struct ElfDynamicState {
+ Elf_Sword tag_;
+ Elf_Word value_;
+ const Section* section_;
+ };
+ std::vector<ElfDynamicState> dynamics_;
+ };
+
+ using PatchFn = void (*)(const std::vector<uintptr_t>& patch_locations,
+ Elf_Addr buffer_address,
+ Elf_Addr base_address,
+ std::vector<uint8_t>* buffer);
+
+ // Section with content based on simple memory buffer.
+ // The buffer can be optionally patched before writing.
+ class RawSection FINAL : public Section {
+ public:
+ RawSection(const std::string& name, Elf_Word type, Elf_Word flags,
+ const Section* link, Elf_Word info, Elf_Word align, Elf_Word entsize,
+ PatchFn patch = nullptr, const Section* patch_base_section = nullptr)
+ : Section(name, type, flags, link, info, align, entsize),
+ patched_(false), patch_(patch), patch_base_section_(patch_base_section) {
+ }
+
+ Elf_Word GetSize() const OVERRIDE {
+ return buffer_.size();
+ }
+
+ bool Write(File* elf_file) OVERRIDE {
+ if (!patch_locations_.empty()) {
+ DCHECK(!patched_); // Do not patch twice.
+ DCHECK(patch_ != nullptr);
+ DCHECK(patch_base_section_ != nullptr);
+ patch_(patch_locations_,
+ this->GetHeader()->sh_addr,
+ patch_base_section_->GetHeader()->sh_addr,
+ &buffer_);
+ patched_ = true;
+ }
+ return WriteArray(elf_file, buffer_.data(), buffer_.size());
+ }
+
+ bool IsEmpty() const {
+ return buffer_.size() == 0;
+ }
+
+ std::vector<uint8_t>* GetBuffer() {
+ return &buffer_;
+ }
+
+ void SetBuffer(const std::vector<uint8_t>& buffer) {
+ buffer_ = buffer;
+ }
+
+ std::vector<uintptr_t>* GetPatchLocations() {
+ return &patch_locations_;
+ }
+
+ private:
+ std::vector<uint8_t> buffer_;
+ std::vector<uintptr_t> patch_locations_;
+ bool patched_;
+ // User-provided function to do the actual patching.
+ PatchFn patch_;
+ // The section that we patch against (usually .text).
+ const Section* patch_base_section_;
+ };
+
+ // Writer of .rodata section or .text section.
+ // The write is done lazily using the provided CodeOutput.
+ class OatSection FINAL : public Section {
+ public:
+ OatSection(const std::string& name, Elf_Word type, Elf_Word flags,
+ const Section* link, Elf_Word info, Elf_Word align,
+ Elf_Word entsize, Elf_Word size, CodeOutput* code_output)
+ : Section(name, type, flags, link, info, align, entsize),
+ size_(size), code_output_(code_output) {
+ }
+
+ Elf_Word GetSize() const OVERRIDE {
+ return size_;
+ }
+
+ bool Write(File* elf_file) OVERRIDE {
+ // The BufferedOutputStream class contains the buffer as field,
+ // therefore it is too big to allocate on the stack.
+ std::unique_ptr<BufferedOutputStream> output_stream(
+ new BufferedOutputStream(new FileOutputStream(elf_file)));
+ return code_output_->Write(output_stream.get());
+ }
+
+ private:
+ Elf_Word size_;
+ CodeOutput* code_output_;
+ };
+
+ // Writer of .bss section.
+ class NoBitsSection FINAL : public Section {
+ public:
+ NoBitsSection(const std::string& name, Elf_Word size)
+ : Section(name, SHT_NOBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0),
+ size_(size) {
+ }
+
+ Elf_Word GetSize() const OVERRIDE {
+ return size_;
+ }
+
+ bool Write(File* elf_file ATTRIBUTE_UNUSED) OVERRIDE {
+ LOG(ERROR) << "This section should not be written to the ELF file";
+ return false;
+ }
+
+ private:
+ Elf_Word size_;
+ };
+
+ // Writer of .dynstr .strtab and .shstrtab sections.
+ class StrtabSection FINAL : public Section {
+ public:
+ StrtabSection(const std::string& name, Elf_Word flags)
+ : Section(name, SHT_STRTAB, flags, nullptr, 0, 1, 1) {
+ buffer_.reserve(4 * KB);
+ // The first entry of strtab must be the empty string.
+ buffer_ += '\0';
+ }
+
+ Elf_Word AddName(const std::string& name) {
+ Elf_Word offset = buffer_.size();
+ buffer_ += name;
+ buffer_ += '\0';
+ return offset;
+ }
+
+ Elf_Word GetSize() const OVERRIDE {
+ return buffer_.size();
+ }
+
+ bool Write(File* elf_file) OVERRIDE {
+ return WriteArray(elf_file, buffer_.data(), buffer_.size());
+ }
+
+ private:
+ std::string buffer_;
+ };
+
+ class HashSection;
+
+ // Writer of .dynsym and .symtab sections.
+ class SymtabSection FINAL : public Section {
+ public:
+ // Add a symbol with given name to this symtab. The symbol refers to
+ // 'relative_addr' within the given section and has the given attributes.
+ void AddSymbol(const std::string& name, const Section* section,
+ Elf_Addr addr, bool is_relative, Elf_Word size,
+ uint8_t binding, uint8_t type, uint8_t other = 0) {
+ CHECK(section != nullptr);
+ Elf_Word name_idx = strtab_->AddName(name);
+ symbols_.push_back({ name, section, addr, size, is_relative,
+ MakeStInfo(binding, type), other, name_idx });
+ }
+
+ SymtabSection(const std::string& name, Elf_Word type, Elf_Word flags,
+ StrtabSection* strtab)
+ : Section(name, type, flags, strtab, 0, sizeof(Elf_Word), sizeof(Elf_Sym)),
+ strtab_(strtab) {
+ }
+
+ bool IsEmpty() const {
+ return symbols_.empty();
+ }
+
+ Elf_Word GetSize() const OVERRIDE {
+ return (1 /* NULL */ + symbols_.size()) * sizeof(Elf_Sym);
+ }
+
+ bool Write(File* elf_file) OVERRIDE {
+ std::vector<Elf_Sym> buffer;
+ buffer.reserve(1u + symbols_.size());
+ buffer.push_back(Elf_Sym()); // NULL.
+ for (const ElfSymbolState& it : symbols_) {
+ Elf_Sym sym = Elf_Sym();
+ sym.st_name = it.name_idx_;
+ if (it.is_relative_) {
+ sym.st_value = it.addr_ + it.section_->GetHeader()->sh_addr;
+ } else {
+ sym.st_value = it.addr_;
+ }
+ sym.st_size = it.size_;
+ sym.st_other = it.other_;
+ sym.st_shndx = it.section_->GetSectionIndex();
+ sym.st_info = it.info_;
+ buffer.push_back(sym);
+ }
+ return WriteArray(elf_file, buffer.data(), buffer.size());
+ }
+
+ private:
+ struct ElfSymbolState {
+ const std::string name_;
+ const Section* section_;
+ Elf_Addr addr_;
+ Elf_Word size_;
+ bool is_relative_;
+ uint8_t info_;
+ uint8_t other_;
+ Elf_Word name_idx_; // index in the strtab.
+ };
+
+ static inline constexpr uint8_t MakeStInfo(uint8_t binding, uint8_t type) {
+ return ((binding) << 4) + ((type) & 0xf);
+ }
+
+ // The symbols in the same order they will be in the symbol table.
+ std::vector<ElfSymbolState> symbols_;
+ StrtabSection* strtab_;
+
+ friend class HashSection;
+ };
+
+ // TODO: Consider removing.
+ // We use it only for the dynsym section which has only 5 symbols.
+ // We do not use it for symtab, and we probably do not have to
+ // since we use those symbols only to print backtraces.
+ class HashSection FINAL : public Section {
+ public:
+ HashSection(const std::string& name, Elf_Word flags, SymtabSection* symtab)
+ : Section(name, SHT_HASH, flags, symtab,
+ 0, sizeof(Elf_Word), sizeof(Elf_Word)),
+ symtab_(symtab) {
+ }
+
+ Elf_Word GetSize() const OVERRIDE {
+ Elf_Word nbuckets = GetNumBuckets();
+ Elf_Word chain_size = symtab_->symbols_.size() + 1 /* NULL */;
+ return (2 /* header */ + nbuckets + chain_size) * sizeof(Elf_Word);
+ }
+
+ bool Write(File* const elf_file) OVERRIDE {
+ // Here is how the ELF hash table works.
+ // There are 3 arrays to worry about.
+ // * The symbol table where the symbol information is.
+ // * The bucket array which is an array of indexes into the symtab and chain.
+ // * The chain array which is also an array of indexes into the symtab and chain.
+ //
+ // Let's say the state is something like this.
+ // +--------+ +--------+ +-----------+
+ // | symtab | | bucket | | chain |
+ // | null | | 1 | | STN_UNDEF |
+ // | <sym1> | | 4 | | 2 |
+ // | <sym2> | | | | 5 |
+ // | <sym3> | | | | STN_UNDEF |
+ // | <sym4> | | | | 3 |
+ // | <sym5> | | | | STN_UNDEF |
+ // +--------+ +--------+ +-----------+
+ //
+ // The lookup process (in Python pseudocode) is
+ //
+ // def GetSym(name):
+ // # NB STN_UNDEF == 0
+ // indx = bucket[elfhash(name) % num_buckets]
+ // while indx != STN_UNDEF:
+ // if GetSymbolName(symtab[indx]) == name:
+ // return symtab[indx]
+ // indx = chain[indx]
+ // return SYMBOL_NOT_FOUND
+ //
+ // Between bucket and chain arrays every symtab index must be present exactly
+ // once (except for STN_UNDEF, which must be present 1 + num_buckets times).
+ const auto& symbols = symtab_->symbols_;
+ // Select number of buckets.
+ // This is essentially arbitrary.
+ Elf_Word nbuckets = GetNumBuckets();
+ // 1 is for the implicit NULL symbol.
+ Elf_Word chain_size = (symbols.size() + 1);
+ std::vector<Elf_Word> hash;
+ hash.push_back(nbuckets);
+ hash.push_back(chain_size);
+ uint32_t bucket_offset = hash.size();
+ uint32_t chain_offset = bucket_offset + nbuckets;
+ hash.resize(hash.size() + nbuckets + chain_size, 0);
+
+ Elf_Word* buckets = hash.data() + bucket_offset;
+ Elf_Word* chain = hash.data() + chain_offset;
+
+ // Set up the actual hash table.
+ for (Elf_Word i = 0; i < symbols.size(); i++) {
+ // Add 1 since we need to have the null symbol that is not in the symbols
+ // list.
+ Elf_Word index = i + 1;
+ Elf_Word hash_val = static_cast<Elf_Word>(elfhash(symbols[i].name_.c_str())) % nbuckets;
+ if (buckets[hash_val] == 0) {
+ buckets[hash_val] = index;
+ } else {
+ hash_val = buckets[hash_val];
+ CHECK_LT(hash_val, chain_size);
+ while (chain[hash_val] != 0) {
+ hash_val = chain[hash_val];
+ CHECK_LT(hash_val, chain_size);
+ }
+ chain[hash_val] = index;
+ // Check for loops. Works because if this is non-empty then there must be
+ // another cell which already contains the same symbol index as this one,
+ // which means some symbol has more than one name, which isn't allowed.
+ CHECK_EQ(chain[index], static_cast<Elf_Word>(0));
+ }
+ }
+ return WriteArray(elf_file, hash.data(), hash.size());
+ }
+
+ private:
+ Elf_Word GetNumBuckets() const {
+ const auto& symbols = symtab_->symbols_;
+ if (symbols.size() < 8) {
+ return 2;
+ } else if (symbols.size() < 32) {
+ return 4;
+ } else if (symbols.size() < 256) {
+ return 16;
+ } else {
+ // Have about 32 ids per bucket.
+ return RoundUp(symbols.size()/32, 2);
+ }
+ }
+
+ // from bionic
+ static inline unsigned elfhash(const char *_name) {
+ const unsigned char *name = (const unsigned char *) _name;
+ unsigned h = 0, g;
+
+ while (*name) {
+ h = (h << 4) + *name++;
+ g = h & 0xf0000000;
+ h ^= g;
+ h ^= g >> 24;
+ }
+ return h;
+ }
+
+ SymtabSection* symtab_;
+
+ DISALLOW_COPY_AND_ASSIGN(HashSection);
+ };
+
+ ElfBuilder(InstructionSet isa,
+ Elf_Word rodata_size, CodeOutput* rodata_writer,
+ Elf_Word text_size, CodeOutput* text_writer,
+ Elf_Word bss_size)
+ : isa_(isa),
+ dynstr_(".dynstr", SHF_ALLOC),
+ dynsym_(".dynsym", SHT_DYNSYM, SHF_ALLOC, &dynstr_),
+ hash_(".hash", SHF_ALLOC, &dynsym_),
+ rodata_(".rodata", SHT_PROGBITS, SHF_ALLOC,
+ nullptr, 0, kPageSize, 0, rodata_size, rodata_writer),
+ text_(".text", SHT_PROGBITS, SHF_ALLOC | SHF_EXECINSTR,
+ nullptr, 0, kPageSize, 0, text_size, text_writer),
+ bss_(".bss", bss_size),
+ dynamic_(".dynamic", &dynsym_),
+ strtab_(".strtab", 0),
+ symtab_(".symtab", SHT_SYMTAB, 0, &strtab_),
+ shstrtab_(".shstrtab", 0) {
}
~ElfBuilder() {}
- const ElfOatSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>& GetTextBuilder() const {
- return text_builder_;
- }
+ OatSection* GetText() { return &text_; }
+ SymtabSection* GetSymtab() { return &symtab_; }
- ElfSymtabBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Sym, Elf_Shdr>* GetSymtabBuilder() {
- return &symtab_builder_;
- }
-
- bool Init() {
+ bool Write(File* elf_file) {
// Since the .text section of an oat file contains relative references to .rodata
// and (optionally) .bss, we keep these 2 or 3 sections together. This creates
// a non-traditional layout where the .bss section is mapped independently of the
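
The builder's header comment describes the two-stage protocol: first every section reports its size so the overall layout can be computed, then each section writes exactly those bytes at its assigned offset. A self-contained sketch of that protocol, with SectionSketch as a simplified stand-in for ElfBuilder::Section and an in-memory byte vector in place of the output File:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

class SectionSketch {
 public:
  virtual ~SectionSketch() {}
  // Stage 1 contract: report the exact number of bytes Write() will emit.
  virtual size_t GetSize() const = 0;
  // Stage 2 contract: write exactly GetSize() bytes at the assigned offset.
  virtual void Write(std::vector<uint8_t>* file, size_t offset) const = 0;
  size_t offset = 0;
};

static size_t RoundUpTo(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

class BlobSection : public SectionSketch {
 public:
  explicit BlobSection(std::vector<uint8_t> data) : data_(std::move(data)) {}
  size_t GetSize() const override { return data_.size(); }
  void Write(std::vector<uint8_t>* file, size_t off) const override {
    std::copy(data_.begin(), data_.end(), file->begin() + off);
  }
 private:
  std::vector<uint8_t> data_;
};

int main() {
  BlobSection a({1, 2, 3});
  BlobSection b({4, 5});
  std::vector<SectionSketch*> sections = {&a, &b};
  // Stage 1: layout. Every section learns its offset before any write,
  // so later writes may reference other sections' addresses.
  size_t file_size = 0;
  for (SectionSketch* s : sections) {
    s->offset = RoundUpTo(file_size, 4);  // 4-byte alignment for the sketch.
    file_size = s->offset + s->GetSize();
  }
  // Stage 2: the actual writes.
  std::vector<uint8_t> file(file_size, 0);
  for (SectionSketch* s : sections) {
    s->Write(&file, s->offset);
  }
  return file[4] == 4 ? 0 : 1;  // b starts at the rounded-up offset 4.
}
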
@@ -584,11 +521,12 @@
// | Elf_Ehdr |
// +-------------------------+
// | Elf_Phdr PHDR |
- // | Elf_Phdr LOAD R | .dynsym .dynstr .hash .eh_frame .eh_frame_hdr .rodata
+ // | Elf_Phdr LOAD R | .dynsym .dynstr .hash .rodata
// | Elf_Phdr LOAD R X | .text
// | Elf_Phdr LOAD RW | .bss (Optional)
// | Elf_Phdr LOAD RW | .dynamic
// | Elf_Phdr DYNAMIC | .dynamic
+ // | Elf_Phdr LOAD R | .eh_frame .eh_frame_hdr
// | Elf_Phdr EH_FRAME R | .eh_frame_hdr
// +-------------------------+
// | .dynsym |
@@ -600,25 +538,10 @@
// | Elf_Sym oatbsslastword | (Optional)
// +-------------------------+
// | .dynstr |
- // | \0 |
- // | oatdata\0 |
- // | oatexec\0 |
- // | oatlastword\0 |
- // | boot.oat\0 |
+ // | names for .dynsym |
// +-------------------------+
// | .hash |
- // | Elf_Word nbucket = b |
- // | Elf_Word nchain = c |
- // | Elf_Word bucket[0] |
- // | ... |
- // | Elf_Word bucket[b - 1] |
- // | Elf_Word chain[0] |
- // | ... |
- // | Elf_Word chain[c - 1] |
- // +-------------------------+
- // | .eh_frame | (Optional)
- // +-------------------------+
- // | .eh_frame_hdr | (Optional)
+ // | hashtable for dynsym |
// +-------------------------+
// | .rodata |
// | oatdata..oatexec-4 |
@@ -627,38 +550,23 @@
// | oatexec..oatlastword |
// +-------------------------+
// | .dynamic |
- // | Elf_Dyn DT_SONAME |
// | Elf_Dyn DT_HASH |
+ // | Elf_Dyn DT_STRTAB |
// | Elf_Dyn DT_SYMTAB |
// | Elf_Dyn DT_SYMENT |
- // | Elf_Dyn DT_STRTAB |
// | Elf_Dyn DT_STRSZ |
+ // | Elf_Dyn DT_SONAME |
// | Elf_Dyn DT_NULL |
// +-------------------------+ (Optional)
- // | .strtab | (Optional)
- // | program symbol names | (Optional)
- // +-------------------------+ (Optional)
// | .symtab | (Optional)
// | program symbols | (Optional)
- // +-------------------------+
- // | .shstrtab |
- // | \0 |
- // | .dynamic\0 |
- // | .dynsym\0 |
- // | .dynstr\0 |
- // | .hash\0 |
- // | .rodata\0 |
- // | .text\0 |
- // | .bss\0 | (Optional)
- // | .shstrtab\0 |
- // | .symtab\0 | (Optional)
- // | .strtab\0 | (Optional)
- // | .eh_frame\0 | (Optional)
- // | .eh_frame_hdr\0 | (Optional)
- // | .debug_info\0 | (Optional)
- // | .debug_abbrev\0 | (Optional)
- // | .debug_str\0 | (Optional)
- // | .debug_line\0 | (Optional)
+ // +-------------------------+ (Optional)
+ // | .strtab | (Optional)
+ // | names for .symtab | (Optional)
+ // +-------------------------+ (Optional)
+ // | .eh_frame | (Optional)
+ // +-------------------------+ (Optional)
+ // | .eh_frame_hdr | (Optional)
// +-------------------------+ (Optional)
// | .debug_info | (Optional)
// +-------------------------+ (Optional)
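
The .dynamic block above lists the tags in the order the rewritten builder emits them. A sketch of assembling that minimal array with the <elf.h> types; every address and size below is a placeholder, not a real oat-file value:

#include <elf.h>
#include <vector>

// A minimal .dynamic payload in the order shown in the layout comment:
// DT_HASH, DT_STRTAB, DT_SYMTAB, DT_SYMENT, DT_STRSZ, DT_SONAME, DT_NULL.
std::vector<Elf32_Dyn> BuildDynamicSketch() {
  std::vector<Elf32_Dyn> dyn;
  dyn.push_back({DT_HASH,   {0x1000}});  // Address of .hash.
  dyn.push_back({DT_STRTAB, {0x2000}});  // Address of .dynstr.
  dyn.push_back({DT_SYMTAB, {0x3000}});  // Address of .dynsym.
  dyn.push_back({DT_SYMENT, {static_cast<Elf32_Word>(sizeof(Elf32_Sym))}});
  dyn.push_back({DT_STRSZ,  {0x100}});   // Size of .dynstr in bytes.
  dyn.push_back({DT_SONAME, {0x10}});    // Offset of the soname in .dynstr.
  dyn.push_back({DT_NULL,   {0}});       // Terminator, always last.
  return dyn;
}

int main() {
  return BuildDynamicSketch().size() == 7 ? 0 : 1;
}
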
@@ -667,8 +575,11 @@
// | .debug_str | (Optional)
// +-------------------------+ (Optional)
// | .debug_line | (Optional)
- // +-------------------------+ (Optional)
- // | Elf_Shdr NULL |
+ // +-------------------------+
+ // | .shstrtab |
+ // | names of sections |
+ // +-------------------------+
+ // | Elf_Shdr null |
// | Elf_Shdr .dynsym |
// | Elf_Shdr .dynstr |
// | Elf_Shdr .hash |
@@ -676,553 +587,266 @@
// | Elf_Shdr .text |
// | Elf_Shdr .bss | (Optional)
// | Elf_Shdr .dynamic |
- // | Elf_Shdr .shstrtab |
+ // | Elf_Shdr .symtab | (Optional)
+ // | Elf_Shdr .strtab | (Optional)
// | Elf_Shdr .eh_frame | (Optional)
// | Elf_Shdr .eh_frame_hdr | (Optional)
// | Elf_Shdr .debug_info | (Optional)
// | Elf_Shdr .debug_abbrev | (Optional)
// | Elf_Shdr .debug_str | (Optional)
// | Elf_Shdr .debug_line | (Optional)
+ // | Elf_Shdr .oat_patches | (Optional)
+ // | Elf_Shdr .shstrtab |
// +-------------------------+
+ constexpr bool debug_logging_ = false;
- if (fatal_error_) {
- return false;
+ // Create a list of all sections that we want to write.
+ // This is the order in which they will be written.
+ std::vector<Section*> sections;
+ sections.push_back(&dynsym_);
+ sections.push_back(&dynstr_);
+ sections.push_back(&hash_);
+ sections.push_back(&rodata_);
+ sections.push_back(&text_);
+ if (bss_.GetSize() != 0u) {
+ sections.push_back(&bss_);
}
- // Step 1. Figure out all the offsets.
-
- if (debug_logging_) {
- LOG(INFO) << "phdr_offset=" << PHDR_OFFSET << std::hex << " " << PHDR_OFFSET;
- LOG(INFO) << "phdr_size=" << PHDR_SIZE << std::hex << " " << PHDR_SIZE;
+ sections.push_back(&dynamic_);
+ if (!symtab_.IsEmpty()) {
+ sections.push_back(&symtab_);
+ sections.push_back(&strtab_);
+ }
+ for (Section* section : other_sections_) {
+ sections.push_back(section);
+ }
+ sections.push_back(&shstrtab_);
+ for (size_t i = 0; i < sections.size(); i++) {
+ // The first section index is 1. Index 0 is reserved for NULL.
+ // Section index is used for relative symbols and for section links.
+ sections[i]->SetSectionIndex(i + 1);
+ // Add section name to .shstrtab.
+ Elf_Word name_offset = shstrtab_.AddName(sections[i]->GetName());
+ sections[i]->GetHeader()->sh_name = name_offset;
}
- memset(&program_headers_, 0, sizeof(program_headers_));
- program_headers_[PH_PHDR].p_type = PT_PHDR;
- program_headers_[PH_PHDR].p_offset = PHDR_OFFSET;
- program_headers_[PH_PHDR].p_vaddr = PHDR_OFFSET;
- program_headers_[PH_PHDR].p_paddr = PHDR_OFFSET;
- program_headers_[PH_PHDR].p_filesz = sizeof(program_headers_);
- program_headers_[PH_PHDR].p_memsz = sizeof(program_headers_);
- program_headers_[PH_PHDR].p_flags = PF_R;
- program_headers_[PH_PHDR].p_align = sizeof(Elf_Word);
+ // The running program does not have access to section headers
+ // and the loader is not supposed to use them either.
+ // The .dynamic section therefore replicates some of the layout
+ // information, like the address and size of .rodata and .text.
+ // It also contains other metadata like the SONAME.
+ // The .dynamic section is found using the PT_DYNAMIC program header.
+ BuildDynsymSection();
+ BuildDynamicSection(elf_file->GetPath());
- program_headers_[PH_LOAD_R__].p_type = PT_LOAD;
- program_headers_[PH_LOAD_R__].p_offset = 0;
- program_headers_[PH_LOAD_R__].p_vaddr = 0;
- program_headers_[PH_LOAD_R__].p_paddr = 0;
- program_headers_[PH_LOAD_R__].p_flags = PF_R;
+ // We do not know the number of headers until the final stages of write.
+ // It is easiest to just reserve a fixed amount of space for them.
+ constexpr size_t kMaxProgramHeaders = 8;
+ constexpr size_t kProgramHeadersOffset = sizeof(Elf_Ehdr);
+ constexpr size_t kProgramHeadersSize = sizeof(Elf_Phdr) * kMaxProgramHeaders;
- program_headers_[PH_LOAD_R_X].p_type = PT_LOAD;
- program_headers_[PH_LOAD_R_X].p_flags = PF_R | PF_X;
-
- program_headers_[PH_LOAD_RW_BSS].p_type = PT_LOAD;
- program_headers_[PH_LOAD_RW_BSS].p_flags = PF_R | PF_W;
-
- program_headers_[PH_LOAD_RW_DYNAMIC].p_type = PT_LOAD;
- program_headers_[PH_LOAD_RW_DYNAMIC].p_flags = PF_R | PF_W;
-
- program_headers_[PH_DYNAMIC].p_type = PT_DYNAMIC;
- program_headers_[PH_DYNAMIC].p_flags = PF_R | PF_W;
-
- program_headers_[PH_EH_FRAME_HDR].p_type = PT_NULL;
- program_headers_[PH_EH_FRAME_HDR].p_flags = PF_R;
-
- // Get the dynstr string.
- dynstr_ = dynsym_builder_.GenerateStrtab();
-
- // Add the SONAME to the dynstr.
- dynstr_soname_offset_ = dynstr_.size();
- std::string file_name(elf_file_->GetPath());
- size_t directory_separator_pos = file_name.rfind('/');
- if (directory_separator_pos != std::string::npos) {
- file_name = file_name.substr(directory_separator_pos + 1);
- }
- dynstr_ += file_name;
- dynstr_ += '\0';
- if (debug_logging_) {
- LOG(INFO) << "dynstr size (bytes) =" << dynstr_.size()
- << std::hex << " " << dynstr_.size();
- LOG(INFO) << "dynsym size (elements)=" << dynsym_builder_.GetSize()
- << std::hex << " " << dynsym_builder_.GetSize();
- }
-
- // Get the section header string table.
- shstrtab_ += '\0';
-
- // Setup sym_undef
- memset(&null_hdr_, 0, sizeof(null_hdr_));
- null_hdr_.sh_type = SHT_NULL;
- null_hdr_.sh_link = SHN_UNDEF;
- section_ptrs_.push_back(&null_hdr_);
-
- section_index_ = 1;
-
- // setup .dynsym
- section_ptrs_.push_back(dynsym_builder_.GetSection());
- AssignSectionStr(&dynsym_builder_, &shstrtab_);
- dynsym_builder_.SetSectionIndex(section_index_);
- section_index_++;
-
- // Setup .dynstr
- section_ptrs_.push_back(dynsym_builder_.GetStrTab()->GetSection());
- AssignSectionStr(dynsym_builder_.GetStrTab(), &shstrtab_);
- dynsym_builder_.GetStrTab()->SetSectionIndex(section_index_);
- section_index_++;
-
- // Setup .hash
- section_ptrs_.push_back(hash_builder_.GetSection());
- AssignSectionStr(&hash_builder_, &shstrtab_);
- hash_builder_.SetSectionIndex(section_index_);
- section_index_++;
-
- // Setup .rodata
- section_ptrs_.push_back(rodata_builder_.GetSection());
- AssignSectionStr(&rodata_builder_, &shstrtab_);
- rodata_builder_.SetSectionIndex(section_index_);
- section_index_++;
-
- // Setup .text
- section_ptrs_.push_back(text_builder_.GetSection());
- AssignSectionStr(&text_builder_, &shstrtab_);
- text_builder_.SetSectionIndex(section_index_);
- section_index_++;
-
- // Setup .bss
- if (bss_builder_.GetSize() != 0u) {
- section_ptrs_.push_back(bss_builder_.GetSection());
- AssignSectionStr(&bss_builder_, &shstrtab_);
- bss_builder_.SetSectionIndex(section_index_);
- section_index_++;
- }
-
- // Setup .dynamic
- section_ptrs_.push_back(dynamic_builder_.GetSection());
- AssignSectionStr(&dynamic_builder_, &shstrtab_);
- dynamic_builder_.SetSectionIndex(section_index_);
- section_index_++;
-
- // Fill in the hash section.
- hash_ = dynsym_builder_.GenerateHashContents();
-
- if (debug_logging_) {
- LOG(INFO) << ".hash size (bytes)=" << hash_.size() * sizeof(Elf_Word)
- << std::hex << " " << hash_.size() * sizeof(Elf_Word);
- }
-
- Elf_Word base_offset = sizeof(Elf_Ehdr) + sizeof(program_headers_);
-
- // Get the layout in the sections.
- //
- // Get the layout of the dynsym section.
- dynsym_builder_.GetSection()->sh_offset =
- RoundUp(base_offset, dynsym_builder_.GetSection()->sh_addralign);
- dynsym_builder_.GetSection()->sh_addr = dynsym_builder_.GetSection()->sh_offset;
- dynsym_builder_.GetSection()->sh_size = dynsym_builder_.GetSize() * sizeof(Elf_Sym);
- dynsym_builder_.GetSection()->sh_link = dynsym_builder_.GetLink();
-
- // Get the layout of the dynstr section.
- dynsym_builder_.GetStrTab()->GetSection()->sh_offset =
- NextOffset<Elf_Word, Elf_Shdr>(*dynsym_builder_.GetStrTab()->GetSection(),
- *dynsym_builder_.GetSection());
- dynsym_builder_.GetStrTab()->GetSection()->sh_addr =
- dynsym_builder_.GetStrTab()->GetSection()->sh_offset;
- dynsym_builder_.GetStrTab()->GetSection()->sh_size = dynstr_.size();
- dynsym_builder_.GetStrTab()->GetSection()->sh_link = dynsym_builder_.GetStrTab()->GetLink();
-
- // Get the layout of the hash section
- hash_builder_.GetSection()->sh_offset =
- NextOffset<Elf_Word, Elf_Shdr>(*hash_builder_.GetSection(),
- *dynsym_builder_.GetStrTab()->GetSection());
- hash_builder_.GetSection()->sh_addr = hash_builder_.GetSection()->sh_offset;
- hash_builder_.GetSection()->sh_size = hash_.size() * sizeof(Elf_Word);
- hash_builder_.GetSection()->sh_link = hash_builder_.GetLink();
-
- // Get the layout of the extra sections with SHF_ALLOC flag.
- // This will deal with .eh_frame and .eh_frame_hdr.
- // .eh_frame contains relative pointers to .text which we
- // want to fixup between the calls to Init() and Write().
- // Therefore we handle those sections here as opposed to Write().
- // It also has the nice side effect of including .eh_frame
- // with the rest of LOAD_R segment. It must come before .rodata
- // because .rodata and .text must be next to each other.
- Elf_Shdr* prev = hash_builder_.GetSection();
- for (auto* it : other_builders_) {
- if ((it->GetSection()->sh_flags & SHF_ALLOC) != 0) {
- it->GetSection()->sh_offset = NextOffset<Elf_Word, Elf_Shdr>(*it->GetSection(), *prev);
- it->GetSection()->sh_addr = it->GetSection()->sh_offset;
- it->GetSection()->sh_size = it->GetBuffer()->size();
- it->GetSection()->sh_link = it->GetLink();
- prev = it->GetSection();
+ // Layout of all sections - determine the final file offsets and addresses.
+ // This must be done after we have built all sections and know their size.
+ Elf_Off file_offset = kProgramHeadersOffset + kProgramHeadersSize;
+ Elf_Addr load_address = file_offset;
+ std::vector<Elf_Shdr> section_headers;
+ section_headers.reserve(1u + sections.size());
+ section_headers.push_back(Elf_Shdr()); // NULL at index 0.
+ for (auto* section : sections) {
+ Elf_Shdr* header = section->GetHeader();
+ Elf_Off alignment = header->sh_addralign > 0 ? header->sh_addralign : 1;
+ header->sh_size = section->GetSize();
+ header->sh_link = section->GetLink();
+ // Allocate memory for the section in the file.
+ if (header->sh_type != SHT_NOBITS) {
+ header->sh_offset = RoundUp(file_offset, alignment);
+ file_offset = header->sh_offset + header->sh_size;
}
+ // Allocate memory for the section during program execution.
+ if ((header->sh_flags & SHF_ALLOC) != 0) {
+ header->sh_addr = RoundUp(load_address, alignment);
+ load_address = header->sh_addr + header->sh_size;
+ }
+ if (debug_logging_) {
+ LOG(INFO) << "Section " << section->GetName() << ":" << std::hex
+ << " offset=0x" << header->sh_offset
+ << " addr=0x" << header->sh_addr
+ << " size=0x" << header->sh_size;
+ }
+ // Collect section headers into a contiguous array for convenience.
+ section_headers.push_back(*header);
}
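// [Illustrative aside, not part of the patch] The layout loop above reduces to
// RoundUp-based offset arithmetic. A minimal sketch, with hypothetical sizes
// and assuming power-of-two alignments (which art::RoundUp requires):
inline size_t RoundUpPow2(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);  // e.g. RoundUpPow2(0x74, 0x1000) == 0x1000.
}
// With file_offset = 0x74 (just past the headers) and .rodata aligned to 0x1000:
//   sh_offset   = RoundUpPow2(0x74, 0x1000) = 0x1000
//   file_offset = sh_offset + sh_size        // the next section starts here.
// SHT_NOBITS sections such as .bss advance only load_address, never file_offset.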
- // If the sections exist, check that they have been handled.
- const auto* eh_frame = FindRawSection(".eh_frame");
+ Elf_Off section_headers_offset = RoundUp(file_offset, sizeof(Elf_Word));
+
+ // Create program headers now that we know the layout of the whole file.
+ // Each segment contains one or more sections which are mapped together.
+ // Not all sections are mapped during the execution of the program.
+ // PT_LOAD does the mapping. Other PT_* types allow the program to locate
+ // interesting parts of memory and their addresses overlap with PT_LOAD.
+ std::vector<Elf_Phdr> program_headers;
+ program_headers.push_back(MakeProgramHeader(PT_PHDR, PF_R,
+ kProgramHeadersOffset, kProgramHeadersSize, sizeof(Elf_Word)));
+ // Create the main LOAD R segment which spans all sections up to .rodata.
+ const Elf_Shdr* rodata = rodata_.GetHeader();
+ program_headers.push_back(MakeProgramHeader(PT_LOAD, PF_R,
+ 0, rodata->sh_offset + rodata->sh_size, rodata->sh_addralign));
+ program_headers.push_back(MakeProgramHeader(PT_LOAD, PF_R | PF_X, text_));
+ if (bss_.GetHeader()->sh_size != 0u) {
+ program_headers.push_back(MakeProgramHeader(PT_LOAD, PF_R | PF_W, bss_));
+ }
+ program_headers.push_back(MakeProgramHeader(PT_LOAD, PF_R | PF_W, dynamic_));
+ program_headers.push_back(MakeProgramHeader(PT_DYNAMIC, PF_R | PF_W, dynamic_));
+ const Section* eh_frame = FindSection(".eh_frame");
if (eh_frame != nullptr) {
- DCHECK_NE(eh_frame->GetSection()->sh_offset, 0u);
- }
- const auto* eh_frame_hdr = FindRawSection(".eh_frame_hdr");
- if (eh_frame_hdr != nullptr) {
- DCHECK_NE(eh_frame_hdr->GetSection()->sh_offset, 0u);
- }
-
- // Get the layout of the rodata section.
- rodata_builder_.GetSection()->sh_offset =
- NextOffset<Elf_Word, Elf_Shdr>(*rodata_builder_.GetSection(), *prev);
- rodata_builder_.GetSection()->sh_addr = rodata_builder_.GetSection()->sh_offset;
- rodata_builder_.GetSection()->sh_size = rodata_builder_.GetSize();
- rodata_builder_.GetSection()->sh_link = rodata_builder_.GetLink();
-
- // Get the layout of the text section.
- text_builder_.GetSection()->sh_offset =
- NextOffset<Elf_Word, Elf_Shdr>(*text_builder_.GetSection(),
- *rodata_builder_.GetSection());
- text_builder_.GetSection()->sh_addr = text_builder_.GetSection()->sh_offset;
- text_builder_.GetSection()->sh_size = text_builder_.GetSize();
- text_builder_.GetSection()->sh_link = text_builder_.GetLink();
- CHECK_ALIGNED(rodata_builder_.GetSection()->sh_offset +
- rodata_builder_.GetSection()->sh_size, kPageSize);
-
- // Get the layout of the .bss section.
- bss_builder_.GetSection()->sh_offset =
- NextOffset<Elf_Word, Elf_Shdr>(*bss_builder_.GetSection(),
- *text_builder_.GetSection());
- bss_builder_.GetSection()->sh_addr = bss_builder_.GetSection()->sh_offset;
- bss_builder_.GetSection()->sh_size = bss_builder_.GetSize();
- bss_builder_.GetSection()->sh_link = bss_builder_.GetLink();
-
- // Get the layout of the dynamic section.
- CHECK(IsAlignedParam(bss_builder_.GetSection()->sh_offset,
- dynamic_builder_.GetSection()->sh_addralign));
- dynamic_builder_.GetSection()->sh_offset = bss_builder_.GetSection()->sh_offset;
- dynamic_builder_.GetSection()->sh_addr =
- NextOffset<Elf_Word, Elf_Shdr>(*dynamic_builder_.GetSection(), *bss_builder_.GetSection());
- dynamic_builder_.GetSection()->sh_size = dynamic_builder_.GetSize() * sizeof(Elf_Dyn);
- dynamic_builder_.GetSection()->sh_link = dynamic_builder_.GetLink();
-
- if (debug_logging_) {
- LOG(INFO) << "dynsym off=" << dynsym_builder_.GetSection()->sh_offset
- << " dynsym size=" << dynsym_builder_.GetSection()->sh_size;
- LOG(INFO) << "dynstr off=" << dynsym_builder_.GetStrTab()->GetSection()->sh_offset
- << " dynstr size=" << dynsym_builder_.GetStrTab()->GetSection()->sh_size;
- LOG(INFO) << "hash off=" << hash_builder_.GetSection()->sh_offset
- << " hash size=" << hash_builder_.GetSection()->sh_size;
- LOG(INFO) << "rodata off=" << rodata_builder_.GetSection()->sh_offset
- << " rodata size=" << rodata_builder_.GetSection()->sh_size;
- LOG(INFO) << "text off=" << text_builder_.GetSection()->sh_offset
- << " text size=" << text_builder_.GetSection()->sh_size;
- LOG(INFO) << "dynamic off=" << dynamic_builder_.GetSection()->sh_offset
- << " dynamic size=" << dynamic_builder_.GetSection()->sh_size;
- }
-
- return true;
- }
-
- bool Write() {
- std::vector<ElfFilePiece<Elf_Word>*> pieces;
- Elf_Shdr* prev = dynamic_builder_.GetSection();
- std::string strtab;
-
- if (IncludingDebugSymbols()) {
- // Setup .symtab
- section_ptrs_.push_back(symtab_builder_.GetSection());
- AssignSectionStr(&symtab_builder_, &shstrtab_);
- symtab_builder_.SetSectionIndex(section_index_);
- section_index_++;
-
- // Setup .strtab
- section_ptrs_.push_back(symtab_builder_.GetStrTab()->GetSection());
- AssignSectionStr(symtab_builder_.GetStrTab(), &shstrtab_);
- symtab_builder_.GetStrTab()->SetSectionIndex(section_index_);
- section_index_++;
-
- strtab = symtab_builder_.GenerateStrtab();
- if (debug_logging_) {
- LOG(INFO) << "strtab size (bytes) =" << strtab.size()
- << std::hex << " " << strtab.size();
- LOG(INFO) << "symtab size (elements) =" << symtab_builder_.GetSize()
- << std::hex << " " << symtab_builder_.GetSize();
+ program_headers.push_back(MakeProgramHeader(PT_LOAD, PF_R, *eh_frame));
+ const Section* eh_frame_hdr = FindSection(".eh_frame_hdr");
+ if (eh_frame_hdr != nullptr) {
+ // Check layout: eh_frame is before eh_frame_hdr and there is no gap.
+ CHECK_LE(eh_frame->GetHeader()->sh_offset, eh_frame_hdr->GetHeader()->sh_offset);
+ CHECK_EQ(eh_frame->GetHeader()->sh_offset + eh_frame->GetHeader()->sh_size,
+ eh_frame_hdr->GetHeader()->sh_offset);
+ // Extend the PT_LOAD of .eh_frame to include the .eh_frame_hdr as well.
+ program_headers.back().p_filesz += eh_frame_hdr->GetHeader()->sh_size;
+ program_headers.back().p_memsz += eh_frame_hdr->GetHeader()->sh_size;
+ program_headers.push_back(MakeProgramHeader(PT_GNU_EH_FRAME, PF_R, *eh_frame_hdr));
}
}
+ CHECK_LE(program_headers.size(), kMaxProgramHeaders);
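// [Editor's note] For a typical oat file the code above thus emits this
// program header table (the .bss and .eh_frame entries only when present):
//   PT_PHDR           R     the program header table itself
//   PT_LOAD           R     file start up to and including .rodata
//   PT_LOAD           R X   .text
//   PT_LOAD           R W   .bss   (p_filesz = 0; zero-filled at run time)
//   PT_LOAD           R W   .dynamic
//   PT_DYNAMIC        R W   .dynamic again, so the loader can locate it
//   PT_LOAD           R     .eh_frame (with .eh_frame_hdr appended)
//   PT_GNU_EH_FRAME   R     .eh_frame_hdr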
- // Setup all the other sections.
- for (auto* builder : other_builders_) {
- section_ptrs_.push_back(builder->GetSection());
- AssignSectionStr(builder, &shstrtab_);
- builder->SetSectionIndex(section_index_);
- section_index_++;
- }
+ // Create the main ELF header.
+ Elf_Ehdr elf_header = MakeElfHeader(isa_);
+ elf_header.e_phoff = kProgramHeadersOffset;
+ elf_header.e_shoff = section_headers_offset;
+ elf_header.e_phnum = program_headers.size();
+ elf_header.e_shnum = section_headers.size();
+ elf_header.e_shstrndx = shstrtab_.GetSectionIndex();
- // Setup shstrtab
- section_ptrs_.push_back(shstrtab_builder_.GetSection());
- AssignSectionStr(&shstrtab_builder_, &shstrtab_);
- shstrtab_builder_.SetSectionIndex(section_index_);
- section_index_++;
-
- if (debug_logging_) {
- LOG(INFO) << ".shstrtab size (bytes) =" << shstrtab_.size()
- << std::hex << " " << shstrtab_.size();
- LOG(INFO) << "section list size (elements)=" << section_ptrs_.size()
- << std::hex << " " << section_ptrs_.size();
- }
-
- if (IncludingDebugSymbols()) {
- // Get the layout of the symtab section.
- symtab_builder_.GetSection()->sh_offset =
- NextOffset<Elf_Word, Elf_Shdr>(*symtab_builder_.GetSection(),
- *dynamic_builder_.GetSection());
- symtab_builder_.GetSection()->sh_addr = 0;
- // Add to leave space for the null symbol.
- symtab_builder_.GetSection()->sh_size = symtab_builder_.GetSize() * sizeof(Elf_Sym);
- symtab_builder_.GetSection()->sh_link = symtab_builder_.GetLink();
-
- // Get the layout of the dynstr section.
- symtab_builder_.GetStrTab()->GetSection()->sh_offset =
- NextOffset<Elf_Word, Elf_Shdr>(*symtab_builder_.GetStrTab()->GetSection(),
- *symtab_builder_.GetSection());
- symtab_builder_.GetStrTab()->GetSection()->sh_addr = 0;
- symtab_builder_.GetStrTab()->GetSection()->sh_size = strtab.size();
- symtab_builder_.GetStrTab()->GetSection()->sh_link = symtab_builder_.GetStrTab()->GetLink();
-
- prev = symtab_builder_.GetStrTab()->GetSection();
- if (debug_logging_) {
- LOG(INFO) << "symtab off=" << symtab_builder_.GetSection()->sh_offset
- << " symtab size=" << symtab_builder_.GetSection()->sh_size;
- LOG(INFO) << "strtab off=" << symtab_builder_.GetStrTab()->GetSection()->sh_offset
- << " strtab size=" << symtab_builder_.GetStrTab()->GetSection()->sh_size;
- }
- }
-
- // Get the layout of the extra sections without SHF_ALLOC flag.
- // (This will deal with the debug sections if they are there)
- for (auto* it : other_builders_) {
- if ((it->GetSection()->sh_flags & SHF_ALLOC) == 0) {
- it->GetSection()->sh_offset = NextOffset<Elf_Word, Elf_Shdr>(*it->GetSection(), *prev);
- it->GetSection()->sh_addr = 0;
- it->GetSection()->sh_size = it->GetBuffer()->size();
- it->GetSection()->sh_link = it->GetLink();
-
- // We postpone adding an ElfFilePiece to keep the order in "pieces."
-
- prev = it->GetSection();
- if (debug_logging_) {
- LOG(INFO) << it->GetName() << " off=" << it->GetSection()->sh_offset
- << " size=" << it->GetSection()->sh_size;
- }
- }
- }
-
- // Get the layout of the shstrtab section
- shstrtab_builder_.GetSection()->sh_offset =
- NextOffset<Elf_Word, Elf_Shdr>(*shstrtab_builder_.GetSection(), *prev);
- shstrtab_builder_.GetSection()->sh_addr = 0;
- shstrtab_builder_.GetSection()->sh_size = shstrtab_.size();
- shstrtab_builder_.GetSection()->sh_link = shstrtab_builder_.GetLink();
- if (debug_logging_) {
- LOG(INFO) << "shstrtab off=" << shstrtab_builder_.GetSection()->sh_offset
- << " shstrtab size=" << shstrtab_builder_.GetSection()->sh_size;
- }
-
- // The section list comes after come after.
- Elf_Word sections_offset = RoundUp(
- shstrtab_builder_.GetSection()->sh_offset + shstrtab_builder_.GetSection()->sh_size,
- sizeof(Elf_Word));
-
- // Setup the actual symbol arrays.
- std::vector<Elf_Sym> dynsym = dynsym_builder_.GenerateSymtab();
- CHECK_EQ(dynsym.size() * sizeof(Elf_Sym), dynsym_builder_.GetSection()->sh_size);
- std::vector<Elf_Sym> symtab;
- if (IncludingDebugSymbols()) {
- symtab = symtab_builder_.GenerateSymtab();
- CHECK_EQ(symtab.size() * sizeof(Elf_Sym), symtab_builder_.GetSection()->sh_size);
- }
-
- // Setup the dynamic section.
- // This will add the 2 values we cannot know until now time, namely the size
- // and the soname_offset.
- std::vector<Elf_Dyn> dynamic = dynamic_builder_.GetDynamics(dynstr_.size(),
- dynstr_soname_offset_);
- CHECK_EQ(dynamic.size() * sizeof(Elf_Dyn), dynamic_builder_.GetSection()->sh_size);
-
- // Finish setup of the program headers now that we know the layout of the
- // whole file.
- Elf_Word load_r_size =
- rodata_builder_.GetSection()->sh_offset + rodata_builder_.GetSection()->sh_size;
- program_headers_[PH_LOAD_R__].p_filesz = load_r_size;
- program_headers_[PH_LOAD_R__].p_memsz = load_r_size;
- program_headers_[PH_LOAD_R__].p_align = rodata_builder_.GetSection()->sh_addralign;
-
- Elf_Word load_rx_size = text_builder_.GetSection()->sh_size;
- program_headers_[PH_LOAD_R_X].p_offset = text_builder_.GetSection()->sh_offset;
- program_headers_[PH_LOAD_R_X].p_vaddr = text_builder_.GetSection()->sh_offset;
- program_headers_[PH_LOAD_R_X].p_paddr = text_builder_.GetSection()->sh_offset;
- program_headers_[PH_LOAD_R_X].p_filesz = load_rx_size;
- program_headers_[PH_LOAD_R_X].p_memsz = load_rx_size;
- program_headers_[PH_LOAD_R_X].p_align = text_builder_.GetSection()->sh_addralign;
-
- program_headers_[PH_LOAD_RW_BSS].p_offset = bss_builder_.GetSection()->sh_offset;
- program_headers_[PH_LOAD_RW_BSS].p_vaddr = bss_builder_.GetSection()->sh_offset;
- program_headers_[PH_LOAD_RW_BSS].p_paddr = bss_builder_.GetSection()->sh_offset;
- program_headers_[PH_LOAD_RW_BSS].p_filesz = 0;
- program_headers_[PH_LOAD_RW_BSS].p_memsz = bss_builder_.GetSection()->sh_size;
- program_headers_[PH_LOAD_RW_BSS].p_align = bss_builder_.GetSection()->sh_addralign;
-
- program_headers_[PH_LOAD_RW_DYNAMIC].p_offset = dynamic_builder_.GetSection()->sh_offset;
- program_headers_[PH_LOAD_RW_DYNAMIC].p_vaddr = dynamic_builder_.GetSection()->sh_addr;
- program_headers_[PH_LOAD_RW_DYNAMIC].p_paddr = dynamic_builder_.GetSection()->sh_addr;
- program_headers_[PH_LOAD_RW_DYNAMIC].p_filesz = dynamic_builder_.GetSection()->sh_size;
- program_headers_[PH_LOAD_RW_DYNAMIC].p_memsz = dynamic_builder_.GetSection()->sh_size;
- program_headers_[PH_LOAD_RW_DYNAMIC].p_align = dynamic_builder_.GetSection()->sh_addralign;
-
- program_headers_[PH_DYNAMIC].p_offset = dynamic_builder_.GetSection()->sh_offset;
- program_headers_[PH_DYNAMIC].p_vaddr = dynamic_builder_.GetSection()->sh_addr;
- program_headers_[PH_DYNAMIC].p_paddr = dynamic_builder_.GetSection()->sh_addr;
- program_headers_[PH_DYNAMIC].p_filesz = dynamic_builder_.GetSection()->sh_size;
- program_headers_[PH_DYNAMIC].p_memsz = dynamic_builder_.GetSection()->sh_size;
- program_headers_[PH_DYNAMIC].p_align = dynamic_builder_.GetSection()->sh_addralign;
-
- const auto* eh_frame_hdr = FindRawSection(".eh_frame_hdr");
- if (eh_frame_hdr != nullptr) {
- const auto* eh_frame = FindRawSection(".eh_frame");
- // Check layout:
- // 1) eh_frame is before eh_frame_hdr.
- // 2) There's no gap.
- CHECK(eh_frame != nullptr);
- CHECK_LE(eh_frame->GetSection()->sh_offset, eh_frame_hdr->GetSection()->sh_offset);
- CHECK_EQ(eh_frame->GetSection()->sh_offset + eh_frame->GetSection()->sh_size,
- eh_frame_hdr->GetSection()->sh_offset);
-
- program_headers_[PH_EH_FRAME_HDR].p_type = PT_GNU_EH_FRAME;
- program_headers_[PH_EH_FRAME_HDR].p_offset = eh_frame_hdr->GetSection()->sh_offset;
- program_headers_[PH_EH_FRAME_HDR].p_vaddr = eh_frame_hdr->GetSection()->sh_addr;
- program_headers_[PH_EH_FRAME_HDR].p_paddr = eh_frame_hdr->GetSection()->sh_addr;
- program_headers_[PH_EH_FRAME_HDR].p_filesz = eh_frame_hdr->GetSection()->sh_size;
- program_headers_[PH_EH_FRAME_HDR].p_memsz = eh_frame_hdr->GetSection()->sh_size;
- program_headers_[PH_EH_FRAME_HDR].p_align = eh_frame_hdr->GetSection()->sh_addralign;
- }
-
- // Finish setup of the Ehdr values.
- elf_header_.e_phoff = PHDR_OFFSET;
- elf_header_.e_shoff = sections_offset;
- elf_header_.e_phnum = (bss_builder_.GetSection()->sh_size != 0u) ? PH_NUM : PH_NUM - 1;
- elf_header_.e_shnum = section_ptrs_.size();
- elf_header_.e_shstrndx = shstrtab_builder_.GetSectionIndex();
-
- // Add the rest of the pieces to the list.
- pieces.push_back(new ElfFileMemoryPiece<Elf_Word>("Elf Header", 0, &elf_header_,
- sizeof(elf_header_)));
- if (bss_builder_.GetSection()->sh_size != 0u) {
- pieces.push_back(new ElfFileMemoryPiece<Elf_Word>("Program headers", PHDR_OFFSET,
- &program_headers_[0],
- elf_header_.e_phnum * sizeof(Elf_Phdr)));
- } else {
- // Skip PH_LOAD_RW_BSS.
- Elf_Word part1_size = PH_LOAD_RW_BSS * sizeof(Elf_Phdr);
- Elf_Word part2_size = (PH_NUM - PH_LOAD_RW_BSS - 1) * sizeof(Elf_Phdr);
- CHECK_EQ(part1_size + part2_size, elf_header_.e_phnum * sizeof(Elf_Phdr));
- pieces.push_back(new ElfFileMemoryPiece<Elf_Word>("Program headers", PHDR_OFFSET,
- &program_headers_[0], part1_size));
- pieces.push_back(new ElfFileMemoryPiece<Elf_Word>("Program headers part 2",
- PHDR_OFFSET + part1_size,
- &program_headers_[PH_LOAD_RW_BSS + 1],
- part2_size));
- }
- pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".dynamic",
- dynamic_builder_.GetSection()->sh_offset,
- dynamic.data(),
- dynamic_builder_.GetSection()->sh_size));
- pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".dynsym", dynsym_builder_.GetSection()->sh_offset,
- dynsym.data(),
- dynsym.size() * sizeof(Elf_Sym)));
- pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".dynstr",
- dynsym_builder_.GetStrTab()->GetSection()->sh_offset,
- dynstr_.c_str(), dynstr_.size()));
- pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".hash", hash_builder_.GetSection()->sh_offset,
- hash_.data(),
- hash_.size() * sizeof(Elf_Word)));
- pieces.push_back(new ElfFileRodataPiece<Elf_Word>(rodata_builder_.GetSection()->sh_offset,
- oat_writer_));
- pieces.push_back(new ElfFileOatTextPiece<Elf_Word>(text_builder_.GetSection()->sh_offset,
- oat_writer_));
- if (IncludingDebugSymbols()) {
- pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".symtab",
- symtab_builder_.GetSection()->sh_offset,
- symtab.data(),
- symtab.size() * sizeof(Elf_Sym)));
- pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".strtab",
- symtab_builder_.GetStrTab()->GetSection()->sh_offset,
- strtab.c_str(), strtab.size()));
- }
- pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".shstrtab",
- shstrtab_builder_.GetSection()->sh_offset,
- &shstrtab_[0], shstrtab_.size()));
- for (uint32_t i = 0; i < section_ptrs_.size(); ++i) {
- // Just add all the sections in induvidually since they are all over the
- // place on the heap/stack.
- Elf_Word cur_off = sections_offset + i * sizeof(Elf_Shdr);
- pieces.push_back(new ElfFileMemoryPiece<Elf_Word>("section table piece", cur_off,
- section_ptrs_[i], sizeof(Elf_Shdr)));
- }
-
- // Postponed debug info.
- for (auto* it : other_builders_) {
- pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(it->GetName(), it->GetSection()->sh_offset,
- it->GetBuffer()->data(),
- it->GetBuffer()->size()));
- }
-
- if (!WriteOutFile(pieces)) {
- LOG(ERROR) << "Unable to write to file " << elf_file_->GetPath();
-
- STLDeleteElements(&pieces); // Have to manually clean pieces.
+ // Write all headers and section content to the file.
+ // Depending on the implementation of each Section::Write, these
+ // may be plain memory copies or more elaborate operations.
+ if (!WriteArray(elf_file, &elf_header, 1)) {
+ LOG(INFO) << "Failed to write the ELF header";
return false;
}
-
- STLDeleteElements(&pieces); // Have to manually clean pieces.
+ if (!WriteArray(elf_file, program_headers.data(), program_headers.size())) {
+ LOG(INFO) << "Failed to write the program headers";
+ return false;
+ }
+ for (Section* section : sections) {
+ const Elf_Shdr* header = section->GetHeader();
+ if (header->sh_type != SHT_NOBITS) {
+ if (!SeekTo(elf_file, header->sh_offset) || !section->Write(elf_file)) {
+ LOG(INFO) << "Failed to write section " << section->GetName();
+ return false;
+ }
+ Elf_Word current_offset = lseek(elf_file->Fd(), 0, SEEK_CUR);
+ CHECK_EQ(current_offset, header->sh_offset + header->sh_size)
+ << "The number of bytes written does not match GetSize()";
+ }
+ }
+ if (!SeekTo(elf_file, section_headers_offset) ||
+ !WriteArray(elf_file, section_headers.data(), section_headers.size())) {
+ LOG(INFO) << "Failed to write the section headers";
+ return false;
+ }
return true;
}
- // Adds the given raw section to the builder. It does not take ownership.
- void RegisterRawSection(ElfRawSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>* bld) {
- other_builders_.push_back(bld);
+ // Adds the given section to the builder. It does not take ownership.
+ void RegisterSection(Section* section) {
+ other_sections_.push_back(section);
}
- const ElfRawSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>*
- FindRawSection(const char* name) {
- for (const auto* other_builder : other_builders_) {
- if (other_builder->GetName() == name) {
- return other_builder;
+ const Section* FindSection(const char* name) {
+ for (const auto* section : other_sections_) {
+ if (section->GetName() == name) {
+ return section;
}
}
return nullptr;
}
private:
- void SetISA(InstructionSet isa) {
+ static bool SeekTo(File* elf_file, Elf_Word offset) {
+ DCHECK_LE(lseek(elf_file->Fd(), 0, SEEK_CUR), static_cast<off_t>(offset))
+ << "Seeking backwards";
+ if (static_cast<off_t>(offset) != lseek(elf_file->Fd(), offset, SEEK_SET)) {
+ PLOG(ERROR) << "Failed to seek in file " << elf_file->GetPath();
+ return false;
+ }
+ return true;
+ }
+
+ template<typename T>
+ static bool WriteArray(File* elf_file, const T* data, size_t count) {
+ DCHECK(data != nullptr);
+ if (!elf_file->WriteFully(data, count * sizeof(T))) {
+ PLOG(ERROR) << "Failed to write to file " << elf_file->GetPath();
+ return false;
+ }
+ return true;
+ }
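// [Editor's note] WriteArray writes the raw in-memory representation, so it
// is only appropriate for trivially copyable types such as Elf_Ehdr,
// Elf_Phdr, and Elf_Shdr. A hypothetical use:
//   Elf_Shdr null_header = Elf_Shdr();      // zero-initialized.
//   WriteArray(elf_file, &null_header, 1);  // writes sizeof(Elf_Shdr) bytes.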
+
+ // Helper - create segment header based on memory range.
+ static Elf_Phdr MakeProgramHeader(Elf_Word type, Elf_Word flags,
+ Elf_Off offset, Elf_Word size, Elf_Word align) {
+ Elf_Phdr phdr = Elf_Phdr();
+ phdr.p_type = type;
+ phdr.p_flags = flags;
+ phdr.p_offset = offset;
+ phdr.p_vaddr = offset;
+ phdr.p_paddr = offset;
+ phdr.p_filesz = size;
+ phdr.p_memsz = size;
+ phdr.p_align = align;
+ return phdr;
+ }
+
+ // Helper - create segment header based on section header.
+ static Elf_Phdr MakeProgramHeader(Elf_Word type, Elf_Word flags,
+ const Section& section) {
+ const Elf_Shdr* shdr = section.GetHeader();
+ // Only run-time allocated sections should be in segment headers.
+ CHECK_NE(shdr->sh_flags & SHF_ALLOC, 0u);
+ Elf_Phdr phdr = Elf_Phdr();
+ phdr.p_type = type;
+ phdr.p_flags = flags;
+ phdr.p_offset = shdr->sh_offset;
+ phdr.p_vaddr = shdr->sh_addr;
+ phdr.p_paddr = shdr->sh_addr;
+ phdr.p_filesz = shdr->sh_type != SHT_NOBITS ? shdr->sh_size : 0u;
+ phdr.p_memsz = shdr->sh_size;
+ phdr.p_align = shdr->sh_addralign;
+ return phdr;
+ }
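// [Editor's note] The SHT_NOBITS special case above is what makes .bss work:
// a .bss section with sh_size = 0x2000 yields p_filesz = 0 (no bytes stored
// in the file) but p_memsz = 0x2000 (zero-filled pages mapped at run time).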
+
+ static Elf_Ehdr MakeElfHeader(InstructionSet isa) {
+ Elf_Ehdr elf_header = Elf_Ehdr();
switch (isa) {
case kArm:
// Fall through.
case kThumb2: {
- elf_header_.e_machine = EM_ARM;
- elf_header_.e_flags = EF_ARM_EABI_VER5;
+ elf_header.e_machine = EM_ARM;
+ elf_header.e_flags = EF_ARM_EABI_VER5;
break;
}
case kArm64: {
- elf_header_.e_machine = EM_AARCH64;
- elf_header_.e_flags = 0;
+ elf_header.e_machine = EM_AARCH64;
+ elf_header.e_flags = 0;
break;
}
case kX86: {
- elf_header_.e_machine = EM_386;
- elf_header_.e_flags = 0;
+ elf_header.e_machine = EM_386;
+ elf_header.e_flags = 0;
break;
}
case kX86_64: {
- elf_header_.e_machine = EM_X86_64;
- elf_header_.e_flags = 0;
+ elf_header.e_machine = EM_X86_64;
+ elf_header.e_flags = 0;
break;
}
case kMips: {
- elf_header_.e_machine = EM_MIPS;
- elf_header_.e_flags = (EF_MIPS_NOREORDER |
+ elf_header.e_machine = EM_MIPS;
+ elf_header.e_flags = (EF_MIPS_NOREORDER |
EF_MIPS_PIC |
EF_MIPS_CPIC |
EF_MIPS_ABI_O32 |
@@ -1230,148 +854,82 @@
break;
}
case kMips64: {
- elf_header_.e_machine = EM_MIPS;
- elf_header_.e_flags = (EF_MIPS_NOREORDER |
+ elf_header.e_machine = EM_MIPS;
+ elf_header.e_flags = (EF_MIPS_NOREORDER |
EF_MIPS_PIC |
EF_MIPS_CPIC |
EF_MIPS_ARCH_64R6);
break;
}
- default: {
- fatal_error_ = true;
- LOG(FATAL) << "Unknown instruction set: " << isa;
- break;
+ case kNone: {
+ LOG(FATAL) << "No instruction set";
}
}
- }
- void SetupEhdr() {
- memset(&elf_header_, 0, sizeof(elf_header_));
- elf_header_.e_ident[EI_MAG0] = ELFMAG0;
- elf_header_.e_ident[EI_MAG1] = ELFMAG1;
- elf_header_.e_ident[EI_MAG2] = ELFMAG2;
- elf_header_.e_ident[EI_MAG3] = ELFMAG3;
- elf_header_.e_ident[EI_CLASS] = (sizeof(Elf_Addr) == sizeof(Elf32_Addr))
+ elf_header.e_ident[EI_MAG0] = ELFMAG0;
+ elf_header.e_ident[EI_MAG1] = ELFMAG1;
+ elf_header.e_ident[EI_MAG2] = ELFMAG2;
+ elf_header.e_ident[EI_MAG3] = ELFMAG3;
+ elf_header.e_ident[EI_CLASS] = (sizeof(Elf_Addr) == sizeof(Elf32_Addr))
? ELFCLASS32 : ELFCLASS64;
- elf_header_.e_ident[EI_DATA] = ELFDATA2LSB;
- elf_header_.e_ident[EI_VERSION] = EV_CURRENT;
- elf_header_.e_ident[EI_OSABI] = ELFOSABI_LINUX;
- elf_header_.e_ident[EI_ABIVERSION] = 0;
- elf_header_.e_type = ET_DYN;
- elf_header_.e_version = 1;
- elf_header_.e_entry = 0;
- elf_header_.e_ehsize = sizeof(Elf_Ehdr);
- elf_header_.e_phentsize = sizeof(Elf_Phdr);
- elf_header_.e_shentsize = sizeof(Elf_Shdr);
- elf_header_.e_phoff = sizeof(Elf_Ehdr);
+ elf_header.e_ident[EI_DATA] = ELFDATA2LSB;
+ elf_header.e_ident[EI_VERSION] = EV_CURRENT;
+ elf_header.e_ident[EI_OSABI] = ELFOSABI_LINUX;
+ elf_header.e_ident[EI_ABIVERSION] = 0;
+ elf_header.e_type = ET_DYN;
+ elf_header.e_version = 1;
+ elf_header.e_entry = 0;
+ elf_header.e_ehsize = sizeof(Elf_Ehdr);
+ elf_header.e_phentsize = sizeof(Elf_Phdr);
+ elf_header.e_shentsize = sizeof(Elf_Shdr);
+ elf_header.e_phoff = sizeof(Elf_Ehdr);
+ return elf_header;
}
- // Sets up a bunch of the required Dynamic Section entries.
- // Namely it will initialize all the mandatory ones that it can.
- // Specifically:
- // DT_HASH
- // DT_STRTAB
- // DT_SYMTAB
- // DT_SYMENT
- //
- // Some such as DT_SONAME, DT_STRSZ and DT_NULL will be put in later.
- void SetupDynamic() {
- dynamic_builder_.AddDynamicTag(DT_HASH, 0, &hash_builder_);
- dynamic_builder_.AddDynamicTag(DT_STRTAB, 0, dynsym_builder_.GetStrTab());
- dynamic_builder_.AddDynamicTag(DT_SYMTAB, 0, &dynsym_builder_);
- dynamic_builder_.AddDynamicTag(DT_SYMENT, sizeof(Elf_Sym));
+ void BuildDynamicSection(const std::string& elf_file_path) {
+ std::string soname(elf_file_path);
+ size_t directory_separator_pos = soname.rfind('/');
+ if (directory_separator_pos != std::string::npos) {
+ soname = soname.substr(directory_separator_pos + 1);
+ }
+ // NB: We must add the name before adding DT_STRSZ.
+ Elf_Word soname_offset = dynstr_.AddName(soname);
+
+ dynamic_.AddDynamicTag(DT_HASH, 0, &hash_);
+ dynamic_.AddDynamicTag(DT_STRTAB, 0, &dynstr_);
+ dynamic_.AddDynamicTag(DT_SYMTAB, 0, &dynsym_);
+ dynamic_.AddDynamicTag(DT_SYMENT, sizeof(Elf_Sym), nullptr);
+ dynamic_.AddDynamicTag(DT_STRSZ, dynstr_.GetSize(), nullptr);
+ dynamic_.AddDynamicTag(DT_SONAME, soname_offset, nullptr);
}
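// [Editor's note] The ordering matters because GetSize() is sampled when
// AddDynamicTag(DT_STRSZ, ...) runs: adding the soname to .dynstr afterwards
// would leave DT_STRSZ undercounting the string table, and the DT_SONAME
// offset would then point past the reported end of .dynstr.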
- // Sets up the basic dynamic symbols that are needed, namely all those we
- // can know already.
- //
- // Specifically adds:
- // oatdata
- // oatexec
- // oatlastword
- void SetupRequiredSymbols() {
- dynsym_builder_.AddSymbol("oatdata", &rodata_builder_, 0, true,
- rodata_builder_.GetSize(), STB_GLOBAL, STT_OBJECT);
- dynsym_builder_.AddSymbol("oatexec", &text_builder_, 0, true,
- text_builder_.GetSize(), STB_GLOBAL, STT_OBJECT);
- dynsym_builder_.AddSymbol("oatlastword", &text_builder_, text_builder_.GetSize() - 4,
- true, 4, STB_GLOBAL, STT_OBJECT);
- if (bss_builder_.GetSize() != 0u) {
- dynsym_builder_.AddSymbol("oatbss", &bss_builder_, 0, true,
- bss_builder_.GetSize(), STB_GLOBAL, STT_OBJECT);
- dynsym_builder_.AddSymbol("oatbsslastword", &bss_builder_, bss_builder_.GetSize() - 4,
- true, 4, STB_GLOBAL, STT_OBJECT);
+ void BuildDynsymSection() {
+ dynsym_.AddSymbol("oatdata", &rodata_, 0, true,
+ rodata_.GetSize(), STB_GLOBAL, STT_OBJECT);
+ dynsym_.AddSymbol("oatexec", &text_, 0, true,
+ text_.GetSize(), STB_GLOBAL, STT_OBJECT);
+ dynsym_.AddSymbol("oatlastword", &text_, text_.GetSize() - 4,
+ true, 4, STB_GLOBAL, STT_OBJECT);
+ if (bss_.GetSize() != 0u) {
+ dynsym_.AddSymbol("oatbss", &bss_, 0, true,
+ bss_.GetSize(), STB_GLOBAL, STT_OBJECT);
+ dynsym_.AddSymbol("oatbsslastword", &bss_, bss_.GetSize() - 4,
+ true, 4, STB_GLOBAL, STT_OBJECT);
}
}
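// [Illustrative aside, assuming an oat file opened via the dynamic linker]
// A consumer could locate the OAT data through these symbols; the file name
// below is hypothetical:
//   void* handle = dlopen("/data/app/example.odex", RTLD_NOW);
//   auto* oatdata = reinterpret_cast<const uint8_t*>(dlsym(handle, "oatdata"));
// "oatlastword" spans the final 4 bytes of the code, so the end of the
// executable region can be computed without parsing the OAT headers.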
- void AssignSectionStr(ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>* builder,
- std::string* strtab) {
- builder->GetSection()->sh_name = strtab->size();
- *strtab += builder->GetName();
- *strtab += '\0';
- if (debug_logging_) {
- LOG(INFO) << "adding section name \"" << builder->GetName() << "\" "
- << "to shstrtab at offset " << builder->GetSection()->sh_name;
- }
- }
-
-
- // Write each of the pieces out to the file.
- bool WriteOutFile(const std::vector<ElfFilePiece<Elf_Word>*>& pieces) {
- for (auto it = pieces.begin(); it != pieces.end(); ++it) {
- if (!(*it)->Write(elf_file_)) {
- return false;
- }
- }
- return true;
- }
-
- bool IncludingDebugSymbols() const {
- return add_symbols_ && symtab_builder_.GetSize() > 1;
- }
-
- CodeOutput* const oat_writer_;
- File* const elf_file_;
- const bool add_symbols_;
- const bool debug_logging_;
-
- bool fatal_error_ = false;
-
- // What phdr is.
- static const uint32_t PHDR_OFFSET = sizeof(Elf_Ehdr);
- enum : uint8_t {
- PH_PHDR = 0,
- PH_LOAD_R__ = 1,
- PH_LOAD_R_X = 2,
- PH_LOAD_RW_BSS = 3,
- PH_LOAD_RW_DYNAMIC = 4,
- PH_DYNAMIC = 5,
- PH_EH_FRAME_HDR = 6,
- PH_NUM = 7,
- };
- static const uint32_t PHDR_SIZE = sizeof(Elf_Phdr) * PH_NUM;
- Elf_Phdr program_headers_[PH_NUM];
-
- Elf_Ehdr elf_header_;
-
- Elf_Shdr null_hdr_;
- std::string shstrtab_;
- // The index of the current section being built. The first being 1.
- uint32_t section_index_;
- std::string dynstr_;
- uint32_t dynstr_soname_offset_;
- std::vector<const Elf_Shdr*> section_ptrs_;
- std::vector<Elf_Word> hash_;
-
- ElfOatSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> text_builder_;
- ElfOatSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> rodata_builder_;
- ElfOatSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> bss_builder_;
- ElfSymtabBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Sym, Elf_Shdr> dynsym_builder_;
- ElfSymtabBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Sym, Elf_Shdr> symtab_builder_;
- ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> hash_builder_;
- ElfDynamicBuilder<Elf_Word, Elf_Sword, Elf_Dyn, Elf_Shdr> dynamic_builder_;
- ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> shstrtab_builder_;
- std::vector<ElfRawSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>*> other_builders_;
+ InstructionSet isa_;
+ StrtabSection dynstr_;
+ SymtabSection dynsym_;
+ HashSection hash_;
+ OatSection rodata_;
+ OatSection text_;
+ NoBitsSection bss_;
+ DynamicSection dynamic_;
+ StrtabSection strtab_;
+ SymtabSection symtab_;
+ std::vector<Section*> other_sections_;
+ StrtabSection shstrtab_;
DISALLOW_COPY_AND_ASSIGN(ElfBuilder);
};
diff --git a/compiler/elf_writer.cc b/compiler/elf_writer.cc
index 47402f3..f75638d 100644
--- a/compiler/elf_writer.cc
+++ b/compiler/elf_writer.cc
@@ -39,16 +39,17 @@
}
void ElfWriter::GetOatElfInformation(File* file,
- size_t& oat_loaded_size,
- size_t& oat_data_offset) {
+ size_t* oat_loaded_size,
+ size_t* oat_data_offset) {
std::string error_msg;
std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file, false, false, &error_msg));
CHECK(elf_file.get() != nullptr) << error_msg;
- oat_loaded_size = elf_file->GetLoadedSize();
- CHECK_NE(0U, oat_loaded_size);
- oat_data_offset = GetOatDataAddress(elf_file.get());
- CHECK_NE(0U, oat_data_offset);
+ bool success = elf_file->GetLoadedSize(oat_loaded_size, &error_msg);
+ CHECK(success) << error_msg;
+ CHECK_NE(0U, *oat_loaded_size);
+ *oat_data_offset = GetOatDataAddress(elf_file.get());
+ CHECK_NE(0U, *oat_data_offset);
}
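// [Editor's note] Call sites now pass explicit out-pointers, e.g.:
//   size_t oat_loaded_size;
//   size_t oat_data_offset;
//   ElfWriter::GetOatElfInformation(file, &oat_loaded_size, &oat_data_offset);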
bool ElfWriter::Fixup(File* file, uintptr_t oat_data_begin) {
diff --git a/compiler/elf_writer.h b/compiler/elf_writer.h
index 033c1f8..8e13b51 100644
--- a/compiler/elf_writer.h
+++ b/compiler/elf_writer.h
@@ -38,8 +38,8 @@
// Looks up information about location of oat file in elf file container.
// Used for ImageWriter to perform memory layout.
static void GetOatElfInformation(File* file,
- size_t& oat_loaded_size,
- size_t& oat_data_offset);
+ size_t* oat_loaded_size,
+ size_t* oat_data_offset);
// Returns runtime oat_data runtime address for an opened ElfFile.
static uintptr_t GetOatDataAddress(ElfFile* elf_file);
diff --git a/compiler/elf_writer_debug.cc b/compiler/elf_writer_debug.cc
index 09bb6bc..f4df6c1 100644
--- a/compiler/elf_writer_debug.cc
+++ b/compiler/elf_writer_debug.cc
@@ -18,6 +18,7 @@
#include <unordered_set>
+#include "base/casts.h"
#include "compiled_method.h"
#include "driver/compiler_driver.h"
#include "dex_file-inl.h"
@@ -28,9 +29,10 @@
namespace art {
namespace dwarf {
-static void WriteEhFrameCIE(InstructionSet isa,
- ExceptionHeaderValueApplication addr_type,
- std::vector<uint8_t>* eh_frame) {
+static void WriteDebugFrameCIE(InstructionSet isa,
+ ExceptionHeaderValueApplication addr_type,
+ CFIFormat format,
+ std::vector<uint8_t>* eh_frame) {
// Scratch registers should be marked as undefined. This tells the
// debugger that its value in the previous frame is not recoverable.
bool is64bit = Is64BitInstructionSet(isa);
@@ -56,7 +58,8 @@
}
}
auto return_reg = Reg::ArmCore(14); // R14(LR).
- WriteEhFrameCIE(is64bit, addr_type, return_reg, opcodes, eh_frame);
+ WriteDebugFrameCIE(is64bit, addr_type, return_reg,
+ opcodes, format, eh_frame);
return;
}
case kArm64: {
@@ -79,7 +82,8 @@
}
}
auto return_reg = Reg::Arm64Core(30); // R30(LR).
- WriteEhFrameCIE(is64bit, addr_type, return_reg, opcodes, eh_frame);
+ WriteDebugFrameCIE(is64bit, addr_type, return_reg,
+ opcodes, format, eh_frame);
return;
}
case kMips:
@@ -95,10 +99,13 @@
}
}
auto return_reg = Reg::MipsCore(31); // R31(RA).
- WriteEhFrameCIE(is64bit, addr_type, return_reg, opcodes, eh_frame);
+ WriteDebugFrameCIE(is64bit, addr_type, return_reg,
+ opcodes, format, eh_frame);
return;
}
case kX86: {
+ // FIXME: Add fp registers once libunwind adds support for them. Bug: 20491296
+ constexpr bool generate_opcodes_for_x86_fp = false;
DebugFrameOpCodeWriter<> opcodes;
opcodes.DefCFA(Reg::X86Core(4), 4); // R4(ESP).
opcodes.Offset(Reg::X86Core(8), -4); // R8(EIP).
@@ -113,11 +120,14 @@
}
}
// fp registers.
- for (int reg = 0; reg < 8; reg++) {
- opcodes.Undefined(Reg::X86Fp(reg));
+ if (generate_opcodes_for_x86_fp) {
+ for (int reg = 0; reg < 8; reg++) {
+ opcodes.Undefined(Reg::X86Fp(reg));
+ }
}
auto return_reg = Reg::X86Core(8); // R8(EIP).
- WriteEhFrameCIE(is64bit, addr_type, return_reg, opcodes, eh_frame);
+ WriteDebugFrameCIE(is64bit, addr_type, return_reg,
+ opcodes, format, eh_frame);
return;
}
case kX86_64: {
@@ -143,7 +153,8 @@
}
}
auto return_reg = Reg::X86_64Core(16); // R16(RIP).
- WriteEhFrameCIE(is64bit, addr_type, return_reg, opcodes, eh_frame);
+ WriteDebugFrameCIE(is64bit, addr_type, return_reg,
+ opcodes, format, eh_frame);
return;
}
case kNone:
@@ -153,38 +164,62 @@
UNREACHABLE();
}
-void WriteEhFrame(const CompilerDriver* compiler,
- const OatWriter* oat_writer,
- ExceptionHeaderValueApplication address_type,
- std::vector<uint8_t>* eh_frame,
- std::vector<uintptr_t>* eh_frame_patches,
- std::vector<uint8_t>* eh_frame_hdr) {
+void WriteCFISection(const CompilerDriver* compiler,
+ const OatWriter* oat_writer,
+ ExceptionHeaderValueApplication address_type,
+ CFIFormat format,
+ std::vector<uint8_t>* debug_frame,
+ std::vector<uintptr_t>* debug_frame_patches,
+ std::vector<uint8_t>* eh_frame_hdr,
+ std::vector<uintptr_t>* eh_frame_hdr_patches) {
const auto& method_infos = oat_writer->GetMethodDebugInfo();
const InstructionSet isa = compiler->GetInstructionSet();
- // Write .eh_frame section.
- size_t cie_offset = eh_frame->size();
- WriteEhFrameCIE(isa, address_type, eh_frame);
+ // Write .eh_frame/.debug_frame section.
+ std::map<uint32_t, size_t> address_to_fde_offset_map;
+ size_t cie_offset = debug_frame->size();
+ WriteDebugFrameCIE(isa, address_type, format, debug_frame);
for (const OatWriter::DebugInfo& mi : method_infos) {
- const SwapVector<uint8_t>* opcodes = mi.compiled_method_->GetCFIInfo();
- if (opcodes != nullptr) {
- WriteEhFrameFDE(Is64BitInstructionSet(isa), cie_offset,
- mi.low_pc_, mi.high_pc_ - mi.low_pc_,
- opcodes, eh_frame, eh_frame_patches);
+ if (!mi.deduped_) { // Only one FDE per unique address.
+ const SwapVector<uint8_t>* opcodes = mi.compiled_method_->GetCFIInfo();
+ if (opcodes != nullptr) {
+ address_to_fde_offset_map.emplace(mi.low_pc_, debug_frame->size());
+ WriteDebugFrameFDE(Is64BitInstructionSet(isa), cie_offset,
+ mi.low_pc_, mi.high_pc_ - mi.low_pc_,
+ opcodes, format, debug_frame, debug_frame_patches);
+ }
}
}
- // Write .eh_frame_hdr section.
- Writer<> header(eh_frame_hdr);
- header.PushUint8(1); // Version.
- header.PushUint8(DW_EH_PE_pcrel | DW_EH_PE_sdata4); // Encoding of .eh_frame pointer.
- header.PushUint8(DW_EH_PE_omit); // Encoding of binary search table size.
- header.PushUint8(DW_EH_PE_omit); // Encoding of binary search table addresses.
- // .eh_frame pointer - .eh_frame_hdr section is after .eh_frame section, and need to encode
- // relative to this location as libunwind doesn't honor datarel for eh_frame_hdr correctly.
- header.PushInt32(-static_cast<int32_t>(eh_frame->size() + 4U));
- // Omit binary search table size (number of entries).
- // Omit binary search table.
+ if (format == DW_EH_FRAME_FORMAT) {
+ // Write .eh_frame_hdr section.
+ Writer<> header(eh_frame_hdr);
+ header.PushUint8(1); // Version.
+ // Encoding of .eh_frame pointer - libunwind does not honor datarel here,
+ // so we have to use pcrel which means relative to the pointer's location.
+ header.PushUint8(DW_EH_PE_pcrel | DW_EH_PE_sdata4);
+ // Encoding of binary search table size.
+ header.PushUint8(DW_EH_PE_udata4);
+ // Encoding of binary search table addresses - libunwind supports only this
+ // specific combination, which means relative to the start of .eh_frame_hdr.
+ header.PushUint8(DW_EH_PE_datarel | DW_EH_PE_sdata4);
+ // .eh_frame pointer - .eh_frame_hdr section is after .eh_frame section
+ const int32_t relative_eh_frame_begin = -static_cast<int32_t>(debug_frame->size());
+ header.PushInt32(relative_eh_frame_begin - 4U);
+ // Binary search table size (number of entries).
+ header.PushUint32(dchecked_integral_cast<uint32_t>(address_to_fde_offset_map.size()));
+ // Binary search table.
+ for (const auto& address_to_fde_offset : address_to_fde_offset_map) {
+ uint32_t code_address = address_to_fde_offset.first;
+ int32_t fde_address = dchecked_integral_cast<int32_t>(address_to_fde_offset.second);
+ eh_frame_hdr_patches->push_back(header.data()->size());
+ header.PushUint32(code_address);
+ // We know the exact layout (eh_frame is immediately before eh_frame_hdr)
+ // and the data is relative to the start of the eh_frame_hdr,
+ // so patching isn't necessary (in contrast to the code address above).
+ header.PushInt32(relative_eh_frame_begin + fde_address);
+ }
+ }
}
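// [Editor's note] The resulting .eh_frame_hdr therefore has this layout:
//   uint8_t  version          = 1
//   uint8_t  eh_frame_ptr_enc = DW_EH_PE_pcrel | DW_EH_PE_sdata4
//   uint8_t  fde_count_enc    = DW_EH_PE_udata4
//   uint8_t  table_enc        = DW_EH_PE_datarel | DW_EH_PE_sdata4
//   int32_t  eh_frame_ptr       // pc-relative: -(size of .eh_frame) - 4
//   uint32_t fde_count
//   struct { uint32_t code_addr; int32_t fde_addr; } table[fde_count];
// The table comes out sorted by code_addr for free, since std::map iterates
// in ascending key order.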
/*
diff --git a/compiler/elf_writer_debug.h b/compiler/elf_writer_debug.h
index 5bf4841..69f7e0d 100644
--- a/compiler/elf_writer_debug.h
+++ b/compiler/elf_writer_debug.h
@@ -25,12 +25,14 @@
namespace art {
namespace dwarf {
-void WriteEhFrame(const CompilerDriver* compiler,
- const OatWriter* oat_writer,
- ExceptionHeaderValueApplication address_type,
- std::vector<uint8_t>* eh_frame,
- std::vector<uintptr_t>* eh_frame_patches,
- std::vector<uint8_t>* eh_frame_hdr);
+void WriteCFISection(const CompilerDriver* compiler,
+ const OatWriter* oat_writer,
+ ExceptionHeaderValueApplication address_type,
+ CFIFormat format,
+ std::vector<uint8_t>* debug_frame,
+ std::vector<uintptr_t>* debug_frame_patches,
+ std::vector<uint8_t>* eh_frame_hdr,
+ std::vector<uintptr_t>* eh_frame_hdr_patches);
void WriteDebugSections(const CompilerDriver* compiler,
const OatWriter* oat_writer,
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index 44c14a0..96dd7ca 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -21,7 +21,6 @@
#include "base/logging.h"
#include "base/unix_file/fd_file.h"
-#include "buffered_output_stream.h"
#include "compiled_method.h"
#include "dex_file-inl.h"
#include "driver/compiler_driver.h"
@@ -30,7 +29,6 @@
#include "elf_file.h"
#include "elf_utils.h"
#include "elf_writer_debug.h"
-#include "file_output_stream.h"
#include "globals.h"
#include "leb128.h"
#include "oat.h"
@@ -39,48 +37,33 @@
namespace art {
-template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
- typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
- typename Elf_Phdr, typename Elf_Shdr>
-bool ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
- Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>::Create(File* elf_file,
- OatWriter* oat_writer,
- const std::vector<const DexFile*>& dex_files,
- const std::string& android_root,
- bool is_host,
- const CompilerDriver& driver) {
+// .eh_frame and .debug_frame are almost identical.
+// Aside from minor formatting differences, the main distinction is that
+// .eh_frame is allocated within the running program because it is used by
+// C++ exception handling (which we do not use, so we can choose either).
+// C++ compilers generally tend to use .eh_frame because if they sometimes
+// need it, they might as well always emit it.
+constexpr dwarf::CFIFormat kCFIFormat = dwarf::DW_EH_FRAME_FORMAT;
+
+template <typename ElfTypes>
+bool ElfWriterQuick<ElfTypes>::Create(File* elf_file,
+ OatWriter* oat_writer,
+ const std::vector<const DexFile*>& dex_files,
+ const std::string& android_root,
+ bool is_host,
+ const CompilerDriver& driver) {
ElfWriterQuick elf_writer(driver, elf_file);
return elf_writer.Write(oat_writer, dex_files, android_root, is_host);
}
-class OatWriterWrapper FINAL : public CodeOutput {
- public:
- explicit OatWriterWrapper(OatWriter* oat_writer) : oat_writer_(oat_writer) {}
-
- void SetCodeOffset(size_t offset) {
- oat_writer_->SetOatDataOffset(offset);
- }
- bool Write(OutputStream* out) OVERRIDE {
- return oat_writer_->Write(out);
- }
- private:
- OatWriter* const oat_writer_;
-};
-
-template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
- typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
- typename Elf_Phdr, typename Elf_Shdr>
-static void WriteDebugSymbols(ElfBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
- Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>* builder,
- OatWriter* oat_writer);
+template <typename ElfTypes>
+static void WriteDebugSymbols(ElfBuilder<ElfTypes>* builder, OatWriter* oat_writer);
// Encode patch locations in .oat_patches format.
-template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
- typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
- typename Elf_Phdr, typename Elf_Shdr>
-void ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn, Elf_Sym, Elf_Ehdr,
- Elf_Phdr, Elf_Shdr>::EncodeOatPatches(const OatWriter::PatchLocationsMap& sections,
- std::vector<uint8_t>* buffer) {
+template <typename ElfTypes>
+void ElfWriterQuick<ElfTypes>::EncodeOatPatches(
+ const OatWriter::PatchLocationsMap& sections,
+ std::vector<uint8_t>* buffer) {
for (const auto& section : sections) {
const std::string& name = section.first;
std::vector<uintptr_t>* locations = section.second.get();
@@ -108,135 +91,159 @@
buffer->push_back(0); // End of sections.
}
-template<typename AddressType, bool SubtractPatchLocation = false>
-static void PatchAddresses(const std::vector<uintptr_t>* patch_locations,
- AddressType delta, std::vector<uint8_t>* buffer) {
- // Addresses in .debug_* sections are unaligned.
- typedef __attribute__((__aligned__(1))) AddressType UnalignedAddressType;
- if (patch_locations != nullptr) {
- for (uintptr_t patch_location : *patch_locations) {
- *reinterpret_cast<UnalignedAddressType*>(buffer->data() + patch_location) +=
- delta - (SubtractPatchLocation ? patch_location : 0);
+class RodataWriter FINAL : public CodeOutput {
+ public:
+ explicit RodataWriter(OatWriter* oat_writer) : oat_writer_(oat_writer) {}
+
+ bool Write(OutputStream* out) OVERRIDE {
+ return oat_writer_->WriteRodata(out);
+ }
+
+ private:
+ OatWriter* oat_writer_;
+};
+
+class TextWriter FINAL : public CodeOutput {
+ public:
+ explicit TextWriter(OatWriter* oat_writer) : oat_writer_(oat_writer) {}
+
+ bool Write(OutputStream* out) OVERRIDE {
+ return oat_writer_->WriteCode(out);
+ }
+
+ private:
+ OatWriter* oat_writer_;
+};
+
+enum PatchResult {
+ kAbsoluteAddress, // Absolute memory location.
+ kPointerRelativeAddress, // Offset relative to the location of the pointer.
+ kSectionRelativeAddress, // Offset relative to start of containing section.
+};
+
+// Patch memory addresses within a buffer.
+// It assumes that the unpatched addresses are offsets relative to base_address.
+// (which generally means method's low_pc relative to the start of .text)
+template <typename Elf_Addr, typename Address, PatchResult kPatchResult>
+static void Patch(const std::vector<uintptr_t>& patch_locations,
+ Elf_Addr buffer_address, Elf_Addr base_address,
+ std::vector<uint8_t>* buffer) {
+ for (uintptr_t location : patch_locations) {
+ typedef __attribute__((__aligned__(1))) Address UnalignedAddress;
+ auto* to_patch = reinterpret_cast<UnalignedAddress*>(buffer->data() + location);
+ switch (kPatchResult) {
+ case kAbsoluteAddress:
+ *to_patch = (base_address + *to_patch);
+ break;
+ case kPointerRelativeAddress:
+ *to_patch = (base_address + *to_patch) - (buffer_address + location);
+ break;
+ case kSectionRelativeAddress:
+ *to_patch = (base_address + *to_patch) - buffer_address;
+ break;
}
}
}
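// [Editor's note] Concrete arithmetic for the three modes, taking a stored
// offset of 0x40 at patch location 0x10, base_address (.text) = 0x3000 and
// buffer_address = 0x5000:
//   kAbsoluteAddress:        0x3000 + 0x40                     = 0x3040
//   kPointerRelativeAddress: (0x3000 + 0x40) - (0x5000 + 0x10) = -0x1fd0
//   kSectionRelativeAddress: (0x3000 + 0x40) - 0x5000          = -0x1fc0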
-template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
- typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
- typename Elf_Phdr, typename Elf_Shdr>
-bool ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
- Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>::Write(OatWriter* oat_writer,
- const std::vector<const DexFile*>& dex_files_unused ATTRIBUTE_UNUSED,
- const std::string& android_root_unused ATTRIBUTE_UNUSED,
- bool is_host_unused ATTRIBUTE_UNUSED) {
- constexpr bool debug = false;
- const OatHeader& oat_header = oat_writer->GetOatHeader();
- Elf_Word oat_data_size = oat_header.GetExecutableOffset();
- uint32_t oat_exec_size = oat_writer->GetSize() - oat_data_size;
- uint32_t oat_bss_size = oat_writer->GetBssSize();
+template <typename ElfTypes>
+bool ElfWriterQuick<ElfTypes>::Write(
+ OatWriter* oat_writer,
+ const std::vector<const DexFile*>& dex_files_unused ATTRIBUTE_UNUSED,
+ const std::string& android_root_unused ATTRIBUTE_UNUSED,
+ bool is_host_unused ATTRIBUTE_UNUSED) {
+ using Elf_Addr = typename ElfTypes::Addr;
+ const InstructionSet isa = compiler_driver_->GetInstructionSet();
- OatWriterWrapper wrapper(oat_writer);
+ // Setup the builder with the main OAT sections (.rodata .text .bss).
+ const size_t rodata_size = oat_writer->GetOatHeader().GetExecutableOffset();
+ const size_t text_size = oat_writer->GetSize() - rodata_size;
+ const size_t bss_size = oat_writer->GetBssSize();
+ RodataWriter rodata_writer(oat_writer);
+ TextWriter text_writer(oat_writer);
+ std::unique_ptr<ElfBuilder<ElfTypes>> builder(new ElfBuilder<ElfTypes>(
+ isa, rodata_size, &rodata_writer, text_size, &text_writer, bss_size));
- std::unique_ptr<ElfBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
- Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr> > builder(
- new ElfBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
- Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>(
- &wrapper,
- elf_file_,
- compiler_driver_->GetInstructionSet(),
- 0,
- oat_data_size,
- oat_data_size,
- oat_exec_size,
- RoundUp(oat_data_size + oat_exec_size, kPageSize),
- oat_bss_size,
- compiler_driver_->GetCompilerOptions().GetIncludeDebugSymbols(),
- debug));
-
- InstructionSet isa = compiler_driver_->GetInstructionSet();
- int alignment = GetInstructionSetPointerSize(isa);
- typedef ElfRawSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> RawSection;
- RawSection eh_frame(".eh_frame", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, alignment, 0);
- RawSection eh_frame_hdr(".eh_frame_hdr", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, 4, 0);
- RawSection debug_info(".debug_info", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
+ // Add debug sections.
+ // They are stack-allocated here (in the same scope as the builder),
+ // but they are registered with the builder only if they are used.
+ using RawSection = typename ElfBuilder<ElfTypes>::RawSection;
+ const auto* text = builder->GetText();
+ const bool is64bit = Is64BitInstructionSet(isa);
+ const int pointer_size = GetInstructionSetPointerSize(isa);
+ RawSection eh_frame(".eh_frame", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0,
+ is64bit ? Patch<Elf_Addr, uint64_t, kPointerRelativeAddress> :
+ Patch<Elf_Addr, uint32_t, kPointerRelativeAddress>,
+ text);
+ RawSection eh_frame_hdr(".eh_frame_hdr", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, 4, 0,
+ Patch<Elf_Addr, uint32_t, kSectionRelativeAddress>, text);
+ RawSection debug_frame(".debug_frame", SHT_PROGBITS, 0, nullptr, 0, pointer_size, 0,
+ is64bit ? Patch<Elf_Addr, uint64_t, kAbsoluteAddress> :
+ Patch<Elf_Addr, uint32_t, kAbsoluteAddress>,
+ text);
+ RawSection debug_info(".debug_info", SHT_PROGBITS, 0, nullptr, 0, 1, 0,
+ Patch<Elf_Addr, uint32_t, kAbsoluteAddress>, text);
RawSection debug_abbrev(".debug_abbrev", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
RawSection debug_str(".debug_str", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
- RawSection debug_line(".debug_line", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
- RawSection oat_patches(".oat_patches", SHT_OAT_PATCH, 0, NULL, 0, 1, 0);
-
- // Do not add to .oat_patches since we will make the addresses relative.
- std::vector<uintptr_t> eh_frame_patches;
- if (compiler_driver_->GetCompilerOptions().GetIncludeCFI() &&
- !oat_writer->GetMethodDebugInfo().empty()) {
- dwarf::WriteEhFrame(compiler_driver_, oat_writer,
- dwarf::DW_EH_PE_pcrel,
- eh_frame.GetBuffer(), &eh_frame_patches,
- eh_frame_hdr.GetBuffer());
- builder->RegisterRawSection(&eh_frame);
- builder->RegisterRawSection(&eh_frame_hdr);
+ RawSection debug_line(".debug_line", SHT_PROGBITS, 0, nullptr, 0, 1, 0,
+ Patch<Elf_Addr, uint32_t, kAbsoluteAddress>, text);
+ if (!oat_writer->GetMethodDebugInfo().empty()) {
+ if (compiler_driver_->GetCompilerOptions().GetIncludeCFI()) {
+ if (kCFIFormat == dwarf::DW_EH_FRAME_FORMAT) {
+ dwarf::WriteCFISection(
+ compiler_driver_, oat_writer,
+ dwarf::DW_EH_PE_pcrel, kCFIFormat,
+ eh_frame.GetBuffer(), eh_frame.GetPatchLocations(),
+ eh_frame_hdr.GetBuffer(), eh_frame_hdr.GetPatchLocations());
+ builder->RegisterSection(&eh_frame);
+ builder->RegisterSection(&eh_frame_hdr);
+ } else {
+ DCHECK(kCFIFormat == dwarf::DW_DEBUG_FRAME_FORMAT);
+ dwarf::WriteCFISection(
+ compiler_driver_, oat_writer,
+ dwarf::DW_EH_PE_absptr, kCFIFormat,
+ debug_frame.GetBuffer(), debug_frame.GetPatchLocations(),
+ nullptr, nullptr);
+ builder->RegisterSection(&debug_frame);
+ *oat_writer->GetAbsolutePatchLocationsFor(".debug_frame") =
+ *debug_frame.GetPatchLocations();
+ }
+ }
+ if (compiler_driver_->GetCompilerOptions().GetIncludeDebugSymbols()) {
+ // Add methods to .symtab.
+ WriteDebugSymbols(builder.get(), oat_writer);
+ // Generate DWARF .debug_* sections.
+ dwarf::WriteDebugSections(
+ compiler_driver_, oat_writer,
+ debug_info.GetBuffer(), debug_info.GetPatchLocations(),
+ debug_abbrev.GetBuffer(),
+ debug_str.GetBuffer(),
+ debug_line.GetBuffer(), debug_line.GetPatchLocations());
+ builder->RegisterSection(&debug_info);
+ builder->RegisterSection(&debug_abbrev);
+ builder->RegisterSection(&debug_str);
+ builder->RegisterSection(&debug_line);
+ *oat_writer->GetAbsolutePatchLocationsFor(".debug_info") =
+ *debug_info.GetPatchLocations();
+ *oat_writer->GetAbsolutePatchLocationsFor(".debug_line") =
+ *debug_line.GetPatchLocations();
+ }
}
- // Must be done after .eh_frame is created since it is used in the Elf layout.
- if (!builder->Init()) {
- return false;
- }
-
- std::vector<uintptr_t>* debug_info_patches = nullptr;
- std::vector<uintptr_t>* debug_line_patches = nullptr;
- if (compiler_driver_->GetCompilerOptions().GetIncludeDebugSymbols() &&
- !oat_writer->GetMethodDebugInfo().empty()) {
- // Add methods to .symtab.
- WriteDebugSymbols(builder.get(), oat_writer);
- // Generate DWARF .debug_* sections.
- debug_info_patches = oat_writer->GetAbsolutePatchLocationsFor(".debug_info");
- debug_line_patches = oat_writer->GetAbsolutePatchLocationsFor(".debug_line");
- dwarf::WriteDebugSections(compiler_driver_, oat_writer,
- debug_info.GetBuffer(), debug_info_patches,
- debug_abbrev.GetBuffer(),
- debug_str.GetBuffer(),
- debug_line.GetBuffer(), debug_line_patches);
- builder->RegisterRawSection(&debug_info);
- builder->RegisterRawSection(&debug_abbrev);
- builder->RegisterRawSection(&debug_str);
- builder->RegisterRawSection(&debug_line);
- }
-
+ // Add relocation section.
+ RawSection oat_patches(".oat_patches", SHT_OAT_PATCH, 0, nullptr, 0, 1, 0);
if (compiler_driver_->GetCompilerOptions().GetIncludePatchInformation() ||
// ElfWriter::Fixup will be called regardless and it needs to be able
// to patch debug sections so we have to include patches for them.
compiler_driver_->GetCompilerOptions().GetIncludeDebugSymbols()) {
EncodeOatPatches(oat_writer->GetAbsolutePatchLocations(), oat_patches.GetBuffer());
- builder->RegisterRawSection(&oat_patches);
+ builder->RegisterSection(&oat_patches);
}
- // We know where .text and .eh_frame will be located, so patch the addresses.
- Elf_Addr text_addr = builder->GetTextBuilder().GetSection()->sh_addr;
- // TODO: Simplify once we use Elf64 - we can use Elf_Addr instead of branching.
- if (Is64BitInstructionSet(compiler_driver_->GetInstructionSet())) {
- // relative_address = (text_addr + address) - (eh_frame_addr + patch_location);
- PatchAddresses<uint64_t, true>(&eh_frame_patches,
- text_addr - eh_frame.GetSection()->sh_addr, eh_frame.GetBuffer());
- PatchAddresses<uint64_t>(debug_info_patches, text_addr, debug_info.GetBuffer());
- PatchAddresses<uint64_t>(debug_line_patches, text_addr, debug_line.GetBuffer());
- } else {
- // relative_address = (text_addr + address) - (eh_frame_addr + patch_location);
- PatchAddresses<uint32_t, true>(&eh_frame_patches,
- text_addr - eh_frame.GetSection()->sh_addr, eh_frame.GetBuffer());
- PatchAddresses<uint32_t>(debug_info_patches, text_addr, debug_info.GetBuffer());
- PatchAddresses<uint32_t>(debug_line_patches, text_addr, debug_line.GetBuffer());
- }
-
- return builder->Write();
+ return builder->Write(elf_file_);
}
-template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
- typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
- typename Elf_Phdr, typename Elf_Shdr>
-// Do not inline to avoid Clang stack frame problems. b/18738594
-NO_INLINE
-static void WriteDebugSymbols(ElfBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
- Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>* builder,
- OatWriter* oat_writer) {
+template <typename ElfTypes>
+static void WriteDebugSymbols(ElfBuilder<ElfTypes>* builder, OatWriter* oat_writer) {
const std::vector<OatWriter::DebugInfo>& method_info = oat_writer->GetMethodDebugInfo();
// Find all addresses (low_pc) which contain deduped methods.
@@ -248,9 +255,11 @@
}
}
- ElfSymtabBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Sym, Elf_Shdr>* symtab =
- builder->GetSymtabBuilder();
+ auto* symtab = builder->GetSymtab();
for (auto it = method_info.begin(); it != method_info.end(); ++it) {
+ if (it->deduped_) {
+ continue; // Add symbol only for the first instance.
+ }
std::string name = PrettyMethod(it->dex_method_index_, *it->dex_file_, true);
if (deduped_addresses.find(it->low_pc_) != deduped_addresses.end()) {
name += " [DEDUPED]";
@@ -259,22 +268,20 @@
uint32_t low_pc = it->low_pc_;
// Add in code delta, e.g., thumb bit 0 for Thumb2 code.
low_pc += it->compiled_method_->CodeDelta();
- symtab->AddSymbol(name, &builder->GetTextBuilder(), low_pc,
+ symtab->AddSymbol(name, builder->GetText(), low_pc,
true, it->high_pc_ - it->low_pc_, STB_GLOBAL, STT_FUNC);
// Conforming to aaelf, add $t mapping symbol to indicate start of a sequence of thumb2
// instructions, so that disassembler tools can correctly disassemble.
if (it->compiled_method_->GetInstructionSet() == kThumb2) {
- symtab->AddSymbol("$t", &builder->GetTextBuilder(), it->low_pc_ & ~1, true,
+ symtab->AddSymbol("$t", builder->GetText(), it->low_pc_ & ~1, true,
0, STB_LOCAL, STT_NOTYPE);
}
}
}
// Explicit instantiations
-template class ElfWriterQuick<Elf32_Word, Elf32_Sword, Elf32_Addr, Elf32_Dyn,
- Elf32_Sym, Elf32_Ehdr, Elf32_Phdr, Elf32_Shdr>;
-template class ElfWriterQuick<Elf64_Word, Elf64_Sword, Elf64_Addr, Elf64_Dyn,
- Elf64_Sym, Elf64_Ehdr, Elf64_Phdr, Elf64_Shdr>;
+template class ElfWriterQuick<ElfTypes32>;
+template class ElfWriterQuick<ElfTypes64>;
} // namespace art
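
The refactoring above collapses the eight ELF type template parameters into a single ElfTypes traits parameter, which also carries the address width — this is what lets the writer emit 64-bit DWARF addresses on 64-bit targets without branching on the instruction set, as the removed TODO anticipated. A minimal sketch of the traits-bundle pattern, assuming ElfTypes32/ElfTypes64 group the standard <elf.h> types roughly like this:

    #include <elf.h>

    // Hypothetical traits bundles in the spirit of ElfTypes32/ElfTypes64:
    // each groups the ELF data types for one word size.
    struct SketchElfTypes32 {
      using Word = Elf32_Word;
      using Addr = Elf32_Addr;
      using Sym  = Elf32_Sym;
      using Ehdr = Elf32_Ehdr;
    };

    struct SketchElfTypes64 {
      using Word = Elf64_Word;
      using Addr = Elf64_Addr;
      using Sym  = Elf64_Sym;
      using Ehdr = Elf64_Ehdr;
    };

    // One template parameter instead of eight, as in ElfWriterQuick<ElfTypes>.
    template <typename ElfTypes>
    class SketchWriter {
      // Addr is 4 or 8 bytes depending on the chosen traits, so address-sized
      // values (e.g. DWARF addresses) take the right width automatically.
      typename ElfTypes::Addr text_addr_ = 0;
    };

    // Explicit instantiations mirror the ones at the end of elf_writer_quick.cc.
    template class SketchWriter<SketchElfTypes32>;
    template class SketchWriter<SketchElfTypes64>;
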
diff --git a/compiler/elf_writer_quick.h b/compiler/elf_writer_quick.h
index 811beb4..955b568 100644
--- a/compiler/elf_writer_quick.h
+++ b/compiler/elf_writer_quick.h
@@ -23,9 +23,7 @@
namespace art {
-template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
- typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
- typename Elf_Phdr, typename Elf_Shdr>
+template <typename ElfTypes>
class ElfWriterQuick FINAL : public ElfWriter {
public:
// Write an ELF file. Returns true on success, false on failure.
@@ -57,10 +55,8 @@
};
// Explicitly instantiated in elf_writer_quick.cc
-typedef ElfWriterQuick<Elf32_Word, Elf32_Sword, Elf32_Addr, Elf32_Dyn,
- Elf32_Sym, Elf32_Ehdr, Elf32_Phdr, Elf32_Shdr> ElfWriterQuick32;
-typedef ElfWriterQuick<Elf64_Word, Elf64_Sword, Elf64_Addr, Elf64_Dyn,
- Elf64_Sym, Elf64_Ehdr, Elf64_Phdr, Elf64_Shdr> ElfWriterQuick64;
+typedef ElfWriterQuick<ElfTypes32> ElfWriterQuick32;
+typedef ElfWriterQuick<ElfTypes64> ElfWriterQuick64;
} // namespace art
diff --git a/compiler/elf_writer_test.cc b/compiler/elf_writer_test.cc
index 3e5ad7b..08523d8 100644
--- a/compiler/elf_writer_test.cc
+++ b/compiler/elf_writer_test.cc
@@ -55,12 +55,12 @@
LOG(INFO) << "elf_filename=" << elf_filename;
UnreserveImageSpace();
- void* dl_oatdata = NULL;
- void* dl_oatexec = NULL;
- void* dl_oatlastword = NULL;
+ void* dl_oatdata = nullptr;
+ void* dl_oatexec = nullptr;
+ void* dl_oatlastword = nullptr;
std::unique_ptr<File> file(OS::OpenFileForReading(elf_filename.c_str()));
- ASSERT_TRUE(file.get() != NULL);
+ ASSERT_TRUE(file.get() != nullptr);
{
std::string error_msg;
std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(), false, false, &error_msg));
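
The NULL-to-nullptr conversions in these tests (and throughout the change) are not purely cosmetic: NULL is an integer constant, so it participates in integer overload resolution, while nullptr has its own pointer-only type. A small self-contained illustration, not taken from the patch:

    #include <cstddef>
    #include <iostream>

    void Take(int) { std::cout << "int overload\n"; }
    void Take(const char*) { std::cout << "pointer overload\n"; }

    int main() {
      // Take(NULL) would pick the int overload (or be ambiguous, depending on
      // how the implementation defines NULL) - almost never what was intended.
      Take(nullptr);  // always selects the pointer overload
      return 0;
    }
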
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 8016831..eaf3489 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -68,7 +68,7 @@
// TODO: compile_pic should be a test argument.
{
{
- jobject class_loader = NULL;
+ jobject class_loader = nullptr;
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
TimingLogger timings("ImageTest::WriteRead", false, false);
TimingLogger::ScopedTiming t("CompileAll", &timings);
@@ -92,7 +92,7 @@
}
// Work around a bug where mcld::Linker::emit closes oat_file, by reopening it as dup_oat.

std::unique_ptr<File> dup_oat(OS::OpenFileReadWrite(oat_file.GetFilename().c_str()));
- ASSERT_TRUE(dup_oat.get() != NULL);
+ ASSERT_TRUE(dup_oat.get() != nullptr);
{
bool success_image =
@@ -107,7 +107,7 @@
{
std::unique_ptr<File> file(OS::OpenFileForReading(image_file.GetFilename().c_str()));
- ASSERT_TRUE(file.get() != NULL);
+ ASSERT_TRUE(file.get() != nullptr);
ImageHeader image_header;
ASSERT_EQ(file->ReadFully(&image_header, sizeof(image_header)), true);
ASSERT_TRUE(image_header.IsValid());
@@ -118,12 +118,12 @@
ASSERT_TRUE(!heap->GetContinuousSpaces().empty());
gc::space::ContinuousSpace* space = heap->GetNonMovingSpace();
ASSERT_FALSE(space->IsImageSpace());
- ASSERT_TRUE(space != NULL);
+ ASSERT_TRUE(space != nullptr);
ASSERT_TRUE(space->IsMallocSpace());
ASSERT_GE(sizeof(image_header) + space->Size(), static_cast<size_t>(file->GetLength()));
}
- ASSERT_TRUE(compiler_driver_->GetImageClasses() != NULL);
+ ASSERT_TRUE(compiler_driver_->GetImageClasses() != nullptr);
std::unordered_set<std::string> image_classes(*compiler_driver_->GetImageClasses());
// Need to delete the compiler since it has worker threads which are attached to runtime.
@@ -137,7 +137,7 @@
writer.reset(nullptr);
runtime_.reset();
- java_lang_dex_file_ = NULL;
+ java_lang_dex_file_ = nullptr;
MemMap::Init();
std::unique_ptr<const DexFile> dex(LoadExpectSingleDexFile(GetLibCoreDexFileName().c_str()));
@@ -145,7 +145,7 @@
RuntimeOptions options;
std::string image("-Ximage:");
image.append(image_location.GetFilename());
- options.push_back(std::make_pair(image.c_str(), reinterpret_cast<void*>(NULL)));
+ options.push_back(std::make_pair(image.c_str(), static_cast<void*>(nullptr)));
// By default the compiler this creates will not include patch information.
options.push_back(std::make_pair("-Xnorelocate", nullptr));
@@ -158,7 +158,7 @@
// give it away now and then switch to a more manageable ScopedObjectAccess.
Thread::Current()->TransitionFromRunnableToSuspended(kNative);
ScopedObjectAccess soa(Thread::Current());
- ASSERT_TRUE(runtime_.get() != NULL);
+ ASSERT_TRUE(runtime_.get() != nullptr);
class_linker_ = runtime_->GetClassLinker();
gc::Heap* heap = Runtime::Current()->GetHeap();
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index a99ef34..195949b 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -89,7 +89,12 @@
Thread::Current()->TransitionFromSuspendedToRunnable();
PruneNonImageClasses(); // Remove junk
ComputeLazyFieldsForImageClasses(); // Add useful information
- ProcessStrings();
+
+ // Calling this can in theory fill in some resolved strings. However, in practice it seems to
+ // never resolve any.
+ if (kComputeEagerResolvedStrings) {
+ ComputeEagerResolvedStrings();
+ }
Thread::Current()->TransitionFromRunnableToSuspended(kNative);
}
gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -129,7 +134,7 @@
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_filename.c_str()));
- if (oat_file.get() == NULL) {
+ if (oat_file.get() == nullptr) {
PLOG(ERROR) << "Failed to open oat file " << oat_filename << " for " << oat_location;
return false;
}
@@ -161,7 +166,7 @@
size_t oat_loaded_size = 0;
size_t oat_data_offset = 0;
- ElfWriter::GetOatElfInformation(oat_file.get(), oat_loaded_size, oat_data_offset);
+ ElfWriter::GetOatElfInformation(oat_file.get(), &oat_loaded_size, &oat_data_offset);
Thread::Current()->TransitionFromSuspendedToRunnable();
CreateHeader(oat_loaded_size, oat_data_offset);
@@ -180,7 +185,7 @@
std::unique_ptr<File> image_file(OS::CreateEmptyFile(image_filename.c_str()));
ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
- if (image_file.get() == NULL) {
+ if (image_file.get() == nullptr) {
LOG(ERROR) << "Failed to open image file " << image_filename;
return false;
}
@@ -519,7 +524,7 @@
void ImageWriter::ComputeLazyFieldsForImageClasses() {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- class_linker->VisitClassesWithoutClassesLock(ComputeLazyFieldsForClassesVisitor, NULL);
+ class_linker->VisitClassesWithoutClassesLock(ComputeLazyFieldsForClassesVisitor, nullptr);
}
bool ImageWriter::ComputeLazyFieldsForClassesVisitor(Class* c, void* /*arg*/) {
@@ -529,14 +534,6 @@
return true;
}
-// Count the number of strings in the heap and put the result in arg as a size_t pointer.
-static void CountStringsCallback(Object* obj, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (obj->GetClass()->IsStringClass()) {
- ++*reinterpret_cast<size_t*>(arg);
- }
-}
-
// Collect all the java.lang.String in the heap and put them in the output strings_ array.
class StringCollector {
public:
@@ -566,99 +563,19 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::String* lhs_s = lhs.AsMirrorPtr();
mirror::String* rhs_s = rhs.AsMirrorPtr();
- uint16_t* lhs_begin = lhs_s->GetCharArray()->GetData() + lhs_s->GetOffset();
- uint16_t* rhs_begin = rhs_s->GetCharArray()->GetData() + rhs_s->GetOffset();
+ uint16_t* lhs_begin = lhs_s->GetValue();
+ uint16_t* rhs_begin = rhs_s->GetValue();
return std::lexicographical_compare(lhs_begin, lhs_begin + lhs_s->GetLength(),
rhs_begin, rhs_begin + rhs_s->GetLength());
}
};
-static bool IsPrefix(mirror::String* pref, mirror::String* full)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (pref->GetLength() > full->GetLength()) {
- return false;
- }
- uint16_t* pref_begin = pref->GetCharArray()->GetData() + pref->GetOffset();
- uint16_t* full_begin = full->GetCharArray()->GetData() + full->GetOffset();
- return std::equal(pref_begin, pref_begin + pref->GetLength(), full_begin);
-}
-
-void ImageWriter::ProcessStrings() {
- size_t total_strings = 0;
- gc::Heap* heap = Runtime::Current()->GetHeap();
- ClassLinker* cl = Runtime::Current()->GetClassLinker();
- // Count the strings.
- heap->VisitObjects(CountStringsCallback, &total_strings);
- Thread* self = Thread::Current();
- StackHandleScope<1> hs(self);
- auto strings = hs.NewHandle(cl->AllocStringArray(self, total_strings));
- StringCollector string_collector(strings, 0U);
- // Read strings into the array.
- heap->VisitObjects(StringCollector::Callback, &string_collector);
- // Some strings could have gotten freed if AllocStringArray caused a GC.
- CHECK_LE(string_collector.GetIndex(), total_strings);
- total_strings = string_collector.GetIndex();
- auto* strings_begin = reinterpret_cast<mirror::HeapReference<mirror::String>*>(
- strings->GetRawData(sizeof(mirror::HeapReference<mirror::String>), 0));
- std::sort(strings_begin, strings_begin + total_strings, LexicographicalStringComparator());
- // Characters of strings which are non equal prefix of another string (not the same string).
- // We don't count the savings from equal strings since these would get interned later anyways.
- size_t prefix_saved_chars = 0;
- // Count characters needed for the strings.
- size_t num_chars = 0u;
- mirror::String* prev_s = nullptr;
- for (size_t idx = 0; idx != total_strings; ++idx) {
- mirror::String* s = strings->GetWithoutChecks(idx);
- size_t length = s->GetLength();
- num_chars += length;
- if (prev_s != nullptr && IsPrefix(prev_s, s)) {
- size_t prev_length = prev_s->GetLength();
- num_chars -= prev_length;
- if (prev_length != length) {
- prefix_saved_chars += prev_length;
- }
- }
- prev_s = s;
- }
- // Create character array, copy characters and point the strings there.
- mirror::CharArray* array = mirror::CharArray::Alloc(self, num_chars);
- string_data_array_ = array;
- uint16_t* array_data = array->GetData();
- size_t pos = 0u;
- prev_s = nullptr;
- for (size_t idx = 0; idx != total_strings; ++idx) {
- mirror::String* s = strings->GetWithoutChecks(idx);
- uint16_t* s_data = s->GetCharArray()->GetData() + s->GetOffset();
- int32_t s_length = s->GetLength();
- int32_t prefix_length = 0u;
- if (idx != 0u && IsPrefix(prev_s, s)) {
- prefix_length = prev_s->GetLength();
- }
- memcpy(array_data + pos, s_data + prefix_length, (s_length - prefix_length) * sizeof(*s_data));
- s->SetOffset(pos - prefix_length);
- s->SetArray(array);
- pos += s_length - prefix_length;
- prev_s = s;
- }
- CHECK_EQ(pos, num_chars);
-
- if (kIsDebugBuild || VLOG_IS_ON(compiler)) {
- LOG(INFO) << "Total # image strings=" << total_strings << " combined length="
- << num_chars << " prefix saved chars=" << prefix_saved_chars;
- }
- // Calling this can in theory fill in some resolved strings. However, in practice it seems to
- // never resolve any.
- if (kComputeEagerResolvedStrings) {
- ComputeEagerResolvedStrings();
- }
-}
-
void ImageWriter::ComputeEagerResolvedStringsCallback(Object* obj, void* arg ATTRIBUTE_UNUSED) {
if (!obj->GetClass()->IsStringClass()) {
return;
}
mirror::String* string = obj->AsString();
- const uint16_t* utf16_string = string->GetCharArray()->GetData() + string->GetOffset();
+ const uint16_t* utf16_string = string->GetValue();
size_t utf16_length = static_cast<size_t>(string->GetLength());
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ReaderMutexLock mu(Thread::Current(), *class_linker->DexLock());
@@ -675,7 +592,7 @@
if (string_id != nullptr) {
// This string occurs in this dex file, assign the dex cache entry.
uint32_t string_idx = dex_file.GetIndexForStringId(*string_id);
- if (dex_cache->GetResolvedString(string_idx) == NULL) {
+ if (dex_cache->GetResolvedString(string_idx) == nullptr) {
dex_cache->SetResolvedString(string_idx, string);
}
}
@@ -697,7 +614,7 @@
};
void ImageWriter::PruneNonImageClasses() {
- if (compiler_driver_.GetImageClasses() == NULL) {
+ if (compiler_driver_.GetImageClasses() == nullptr) {
return;
}
Runtime* runtime = Runtime::Current();
@@ -712,7 +629,7 @@
// Remove the undesired classes from the class roots.
for (const std::string& it : non_image_classes) {
- bool result = class_linker->RemoveClass(it.c_str(), NULL);
+ bool result = class_linker->RemoveClass(it.c_str(), nullptr);
DCHECK(result);
}
@@ -724,13 +641,13 @@
DexCache* dex_cache = class_linker->GetDexCache(idx);
for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
Class* klass = dex_cache->GetResolvedType(i);
- if (klass != NULL && !IsImageClass(klass)) {
- dex_cache->SetResolvedType(i, NULL);
+ if (klass != nullptr && !IsImageClass(klass)) {
+ dex_cache->SetResolvedType(i, nullptr);
}
}
for (size_t i = 0; i < dex_cache->NumResolvedMethods(); i++) {
ArtMethod* method = dex_cache->GetResolvedMethod(i);
- if (method != NULL && !IsImageClass(method->GetDeclaringClass())) {
+ if (method != nullptr && !IsImageClass(method->GetDeclaringClass())) {
dex_cache->SetResolvedMethod(i, resolution_method);
}
}
@@ -777,14 +694,14 @@
void ImageWriter::DumpImageClasses() {
auto image_classes = compiler_driver_.GetImageClasses();
- CHECK(image_classes != NULL);
+ CHECK(image_classes != nullptr);
for (const std::string& image_class : *image_classes) {
LOG(INFO) << " " << image_class;
}
}
void ImageWriter::CalculateObjectBinSlots(Object* obj) {
- DCHECK(obj != NULL);
+ DCHECK(obj != nullptr);
// if it is a string, we want to intern it if its not interned.
if (obj->GetClass()->IsStringClass()) {
// we must be an interned string that was forward referenced and already assigned
@@ -856,7 +773,7 @@
image_roots->Set<false>(ImageHeader::kDexCaches, dex_caches.Get());
image_roots->Set<false>(ImageHeader::kClassRoots, class_linker->GetClassRoots());
for (int i = 0; i < ImageHeader::kImageRootsMax; i++) {
- CHECK(image_roots->Get(i) != NULL);
+ CHECK(image_roots->Get(i) != nullptr);
}
return image_roots.Get();
}
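
For context on the deletion above: the removed ProcessStrings pass sorted every heap string lexicographically and packed the characters into one shared backing array, so that a string extending its predecessor reused the predecessor's characters as a prefix. A standalone sketch of that packing idea, simplified to std::string where the real pass worked on mirror::String and a uint16_t CharArray:

    #include <algorithm>
    #include <iostream>
    #include <string>
    #include <vector>

    struct PackedString { size_t offset; size_t length; };

    int main() {
      std::vector<std::string> strings = {"barely", "bar", "foo", "food"};
      std::sort(strings.begin(), strings.end());  // prefixes become adjacent

      std::string buffer;  // the real pass filled a shared uint16_t CharArray
      std::vector<PackedString> packed;
      const std::string* prev = nullptr;
      for (const std::string& s : strings) {
        size_t shared = 0;
        if (prev != nullptr && s.size() >= prev->size() &&
            std::equal(prev->begin(), prev->end(), s.begin())) {
          shared = prev->size();  // s extends prev: reuse prev's characters
        }
        packed.push_back({buffer.size() - shared, s.size()});
        buffer.append(s, shared, std::string::npos);
        prev = &s;
      }
      // {"bar", "barely", "foo", "food"} stores 10 chars instead of 16.
      std::cout << "chars stored: " << buffer.size() << "\n";
      return 0;
    }
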
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index a2d99ee..c0cffa5 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -220,9 +220,6 @@
static void ComputeEagerResolvedStringsCallback(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Combine string char arrays.
- void ProcessStrings() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
// Remove unwanted classes from various roots.
void PruneNonImageClasses() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static bool NonImageClassesVisitor(mirror::Class* c, void* arg)
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 9ff7ab8..7c400ee 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -62,7 +62,7 @@
JitCompiler::JitCompiler() : total_time_(0) {
auto* pass_manager_options = new PassManagerOptions;
- pass_manager_options->SetDisablePassList("GVN,DCE");
+ pass_manager_options->SetDisablePassList("GVN,DCE,GVNCleanup");
compiler_options_.reset(new CompilerOptions(
CompilerOptions::kDefaultCompilerFilter,
CompilerOptions::kDefaultHugeMethodThreshold,
@@ -94,7 +94,7 @@
compiler_driver_.reset(new CompilerDriver(
compiler_options_.get(), verification_results_.get(), method_inliner_map_.get(),
Compiler::kQuick, instruction_set, instruction_set_features_.get(), false,
- nullptr, nullptr, 1, false, true,
+ nullptr, nullptr, nullptr, 1, false, true,
std::string(), cumulative_logger_.get(), -1, std::string()));
// Disable dedupe so we can remove compiled methods.
compiler_driver_->SetDedupeEnabled(false);
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
index 0876499..d9a5ac6 100644
--- a/compiler/jit/jit_compiler.h
+++ b/compiler/jit/jit_compiler.h
@@ -67,10 +67,11 @@
const uint8_t* mapping_table, const uint8_t* vmap_table, const uint8_t* gc_map);
bool MakeExecutable(CompiledMethod* compiled_method, mirror::ArtMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ DISALLOW_COPY_AND_ASSIGN(JitCompiler);
};
} // namespace jit
-
} // namespace art
#endif // ART_COMPILER_JIT_JIT_COMPILER_H_
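
DISALLOW_COPY_AND_ASSIGN, newly added to JitCompiler above, is the usual macro for deleting a class's copy operations. A minimal equivalent (the exact expansion in ART's macros.h may differ slightly):

    // Sketch of the DISALLOW_COPY_AND_ASSIGN(TypeName) idiom.
    #define DISALLOW_COPY_AND_ASSIGN(TypeName) \
      TypeName(const TypeName&) = delete;      \
      void operator=(const TypeName&) = delete

    class NonCopyable {
     public:
      NonCopyable() = default;

     private:
      DISALLOW_COPY_AND_ASSIGN(NonCopyable);
    };

    int main() {
      NonCopyable a;
      // NonCopyable b = a;  // would not compile: copy constructor deleted
      return 0;
    }
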
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index d25acc7..436fc0c 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -47,7 +47,7 @@
return new x86_64::X86_64ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
- return NULL;
+ return nullptr;
}
}
@@ -122,7 +122,7 @@
return new x86_64::X86_64JniCallingConvention(is_static, is_synchronized, shorty);
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
- return NULL;
+ return nullptr;
}
}
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 2402ea5..a06303d 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -138,7 +138,8 @@
FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
// Check handle scope offset is within frame
CHECK_LT(handle_scope_offset.Uint32Value(), frame_size);
- // TODO: Insert the read barrier for this load.
+    // Note: this LoadRef() already includes the heap poisoning negation, but
+    // not the read barrier; the barrier is handled below.
__ LoadRef(main_jni_conv->InterproceduralScratchRegister(),
mr_conv->MethodRegister(), mirror::ArtMethod::DeclaringClassOffset());
__ VerifyObject(main_jni_conv->InterproceduralScratchRegister(), false);
@@ -152,9 +153,9 @@
// References need placing in handle scope and the entry value passing
if (ref_param) {
// Compute handle scope entry, note null is placed in the handle scope but its boxed value
- // must be NULL
+ // must be null.
FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
- // Check handle scope offset is within frame and doesn't run into the saved segment state
+ // Check handle scope offset is within frame and doesn't run into the saved segment state.
CHECK_LT(handle_scope_offset.Uint32Value(), frame_size);
CHECK_NE(handle_scope_offset.Uint32Value(),
main_jni_conv->SavedLocalReferenceCookieOffset().Uint32Value());
@@ -189,6 +190,49 @@
size_t current_out_arg_size = main_out_arg_size;
__ IncreaseFrameSize(main_out_arg_size);
+ // Call the read barrier for the declaring class loaded from the method for a static call.
+ // Note that we always have outgoing param space available for at least two params.
+ if (kUseReadBarrier && is_static) {
+ ThreadOffset<4> read_barrier32 = QUICK_ENTRYPOINT_OFFSET(4, pReadBarrierJni);
+ ThreadOffset<8> read_barrier64 = QUICK_ENTRYPOINT_OFFSET(8, pReadBarrierJni);
+ main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
+ main_jni_conv->Next(); // Skip JNIEnv.
+ FrameOffset class_handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
+ main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
+ // Pass the handle for the class as the first argument.
+ if (main_jni_conv->IsCurrentParamOnStack()) {
+ FrameOffset out_off = main_jni_conv->CurrentParamStackOffset();
+ __ CreateHandleScopeEntry(out_off, class_handle_scope_offset,
+ mr_conv->InterproceduralScratchRegister(),
+ false);
+ } else {
+ ManagedRegister out_reg = main_jni_conv->CurrentParamRegister();
+ __ CreateHandleScopeEntry(out_reg, class_handle_scope_offset,
+ ManagedRegister::NoRegister(), false);
+ }
+ main_jni_conv->Next();
+ // Pass the current thread as the second argument and call.
+ if (main_jni_conv->IsCurrentParamInRegister()) {
+ __ GetCurrentThread(main_jni_conv->CurrentParamRegister());
+ if (is_64_bit_target) {
+ __ Call(main_jni_conv->CurrentParamRegister(), Offset(read_barrier64),
+ main_jni_conv->InterproceduralScratchRegister());
+ } else {
+ __ Call(main_jni_conv->CurrentParamRegister(), Offset(read_barrier32),
+ main_jni_conv->InterproceduralScratchRegister());
+ }
+ } else {
+ __ GetCurrentThread(main_jni_conv->CurrentParamStackOffset(),
+ main_jni_conv->InterproceduralScratchRegister());
+ if (is_64_bit_target) {
+ __ CallFromThread64(read_barrier64, main_jni_conv->InterproceduralScratchRegister());
+ } else {
+ __ CallFromThread32(read_barrier32, main_jni_conv->InterproceduralScratchRegister());
+ }
+ }
+ main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size)); // Reset.
+ }
+
// 6. Call into appropriate JniMethodStart passing Thread* so that transition out of Runnable
// can occur. The result is the saved JNI local state that is restored by the exit call. We
// abuse the JNI calling convention here, that is guaranteed to support passing 2 pointer
@@ -243,9 +287,9 @@
// 7. Iterate over arguments placing values from managed calling convention in
// to the convention required for a native call (shuffling). For references
// place an index/pointer to the reference after checking whether it is
- // NULL (which must be encoded as NULL).
+ // null (which must be encoded as null).
// Note: we do this prior to materializing the JNIEnv* and static's jclass to
- // give as many free registers for the shuffle as possible
+ // give as many free registers for the shuffle as possible.
mr_conv->ResetIterator(FrameOffset(frame_size + main_out_arg_size));
uint32_t args_count = 0;
while (mr_conv->HasNext()) {
@@ -451,7 +495,7 @@
ArrayRef<const LinkerPatch>());
}
-// Copy a single parameter from the managed to the JNI calling convention
+// Copy a single parameter from the managed to the JNI calling convention.
static void CopyParameter(Assembler* jni_asm,
ManagedRuntimeCallingConvention* mr_conv,
JniCallingConvention* jni_conv,
@@ -469,7 +513,7 @@
} else {
CHECK(jni_conv->IsCurrentParamOnStack());
}
- // References need placing in handle scope and the entry address passing
+ // References need placing in handle scope and the entry address passing.
if (ref_param) {
null_allowed = mr_conv->IsCurrentArgPossiblyNull();
// Compute handle scope offset. Note null is placed in the handle scope but the jobject
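
The read-barrier block added before step 6 above exists because, as the updated comment notes, the initial LoadRef of the declaring class skips the read barrier; for static methods the class reference sitting in the handle scope must therefore be processed by the pReadBarrierJni entrypoint before native code can observe it. A compilable sketch of the effective control flow — all types and the entrypoint stand-in are illustrative, not ART's real signatures:

    struct Thread {};
    struct Object {};

    // Stand-in for the pReadBarrierJni entrypoint: handed the address of the
    // handle-scope slot holding the declaring class plus the current thread,
    // it may repoint the slot at the to-space copy of the class. No-op here.
    void ReadBarrierJni(Object** class_slot, Thread* self) {
      (void)class_slot;
      (void)self;
    }

    // What the generated stub effectively does for a static native method.
    void PrepareStaticJniCall(Object** class_slot, Thread* self) {
      // The declaring class was already stored into the handle scope; run the
      // read barrier on that slot before JniMethodStart and the native call.
      ReadBarrierJni(class_slot, self);
    }

    int main() {
      Thread thread;
      Object klass;
      Object* slot = &klass;
      PrepareStaticJniCall(&slot, &thread);
      return 0;
    }
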
diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h
index 70630f3..1f7500a 100644
--- a/compiler/linker/relative_patcher_test.h
+++ b/compiler/linker/relative_patcher_test.h
@@ -45,7 +45,7 @@
inliner_map_(),
driver_(&compiler_options_, &verification_results_, &inliner_map_,
Compiler::kQuick, instruction_set, nullptr,
- false, nullptr, nullptr, 1u,
+ false, nullptr, nullptr, nullptr, 1u,
false, false, "", nullptr, -1, ""),
error_msg_(),
instruction_set_(instruction_set),
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 989b04f..a871a82 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -93,8 +93,8 @@
verification_results_.get(),
method_inliner_map_.get(),
compiler_kind, insn_set,
- insn_features.get(), false, nullptr, nullptr, 2, true,
- true, "", timer_.get(), -1, ""));
+ insn_features.get(), false, nullptr, nullptr, nullptr,
+ 2, true, true, "", timer_.get(), -1, ""));
jobject class_loader = nullptr;
if (kCompile) {
TimingLogger timings2("OatTest::WriteRead", false, false);
@@ -176,7 +176,7 @@
EXPECT_EQ(72U, sizeof(OatHeader));
EXPECT_EQ(4U, sizeof(OatMethodOffsets));
EXPECT_EQ(28U, sizeof(OatQuickMethodHeader));
- EXPECT_EQ(92 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints));
+ EXPECT_EQ(112 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints));
}
TEST_F(OatTest, OatHeaderIsValid) {
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 5abd204..15b4017 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -73,7 +73,7 @@
image_file_location_oat_begin_(image_file_location_oat_begin),
image_patch_delta_(image_patch_delta),
key_value_store_(key_value_store),
- oat_header_(NULL),
+ oat_header_(nullptr),
size_dex_file_alignment_(0),
size_executable_offset_alignment_(0),
size_oat_header_(0),
@@ -326,7 +326,7 @@
ClassReference class_ref(dex_file_, class_def_index_);
CompiledClass* compiled_class = writer_->compiler_driver_->GetCompiledClass(class_ref);
mirror::Class::Status status;
- if (compiled_class != NULL) {
+ if (compiled_class != nullptr) {
status = compiled_class->GetStatus();
} else if (writer_->compiler_driver_->GetVerificationResults()->IsClassRejected(class_ref)) {
status = mirror::Class::kStatusError;
@@ -473,7 +473,7 @@
ClassReference class_ref(dex_file_, class_def_index_);
CompiledClass* compiled_class = compiler_driver->GetCompiledClass(class_ref);
mirror::Class::Status status;
- if (compiled_class != NULL) {
+ if (compiled_class != nullptr) {
status = compiled_class->GetStatus();
} else if (compiler_driver->GetVerificationResults()->IsClassRejected(class_ref)) {
status = mirror::Class::kStatusError;
@@ -690,7 +690,7 @@
OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
- if (compiled_method != NULL) { // ie. not an abstract method
+ if (compiled_method != nullptr) { // i.e., not an abstract method
size_t file_offset = file_offset_;
OutputStream* out = out_;
@@ -893,7 +893,7 @@
OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
- if (compiled_method != NULL) { // ie. not an abstract method
+ if (compiled_method != nullptr) { // i.e., not an abstract method
size_t file_offset = file_offset_;
OutputStream* out = out_;
@@ -940,7 +940,7 @@
}
const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
const uint8_t* class_data = dex_file->GetClassData(class_def);
- if (class_data != NULL) { // ie not an empty class, such as a marker interface
+ if (class_data != nullptr) { // i.e., not an empty class, such as a marker interface
ClassDataItemIterator it(*dex_file, class_data);
while (it.HasNextStaticField()) {
it.Next();
@@ -987,7 +987,7 @@
// create the OatDexFiles
for (size_t i = 0; i != dex_files_->size(); ++i) {
const DexFile* dex_file = (*dex_files_)[i];
- CHECK(dex_file != NULL);
+ CHECK(dex_file != nullptr);
OatDexFile* oat_dex_file = new OatDexFile(offset, *dex_file);
oat_dex_files_.push_back(oat_dex_file);
offset += oat_dex_file->SizeOf();
@@ -1112,13 +1112,14 @@
return offset;
}
-bool OatWriter::Write(OutputStream* out) {
+bool OatWriter::WriteRodata(OutputStream* out) {
const off_t raw_file_offset = out->Seek(0, kSeekCurrent);
if (raw_file_offset == (off_t) -1) {
LOG(ERROR) << "Failed to get file offset in " << out->GetLocation();
return false;
}
const size_t file_offset = static_cast<size_t>(raw_file_offset);
+ oat_data_offset_ = file_offset;
// Reserve space for header. It will be written last - after updating the checksum.
size_t header_size = oat_header_->GetHeaderSize();
@@ -1146,6 +1147,27 @@
return false;
}
+ // Write padding.
+ off_t new_offset = out->Seek(size_executable_offset_alignment_, kSeekCurrent);
+ relative_offset += size_executable_offset_alignment_;
+ DCHECK_EQ(relative_offset, oat_header_->GetExecutableOffset());
+ size_t expected_file_offset = file_offset + relative_offset;
+ if (static_cast<uint32_t>(new_offset) != expected_file_offset) {
+ PLOG(ERROR) << "Failed to seek to oat code section. Actual: " << new_offset
+ << " Expected: " << expected_file_offset << " File: " << out->GetLocation();
+ return false;
+ }
+ DCHECK_OFFSET();
+
+ return true;
+}
+
+bool OatWriter::WriteCode(OutputStream* out) {
+ size_t header_size = oat_header_->GetHeaderSize();
+ const size_t file_offset = oat_data_offset_;
+ size_t relative_offset = oat_header_->GetExecutableOffset();
+ DCHECK_OFFSET();
+
relative_offset = WriteCode(out, file_offset, relative_offset);
if (relative_offset == 0) {
LOG(ERROR) << "Failed to write oat code to " << out->GetLocation();
@@ -1215,7 +1237,7 @@
PLOG(ERROR) << "Failed to seek to oat header position in " << out->GetLocation();
return false;
}
- DCHECK_EQ(raw_file_offset, out->Seek(0, kSeekCurrent));
+ DCHECK_EQ(file_offset, static_cast<size_t>(out->Seek(0, kSeekCurrent)));
if (!out->WriteFully(oat_header_, header_size)) {
PLOG(ERROR) << "Failed to write oat header to " << out->GetLocation();
return false;
@@ -1290,16 +1312,6 @@
}
size_t OatWriter::WriteCode(OutputStream* out, const size_t file_offset, size_t relative_offset) {
- off_t new_offset = out->Seek(size_executable_offset_alignment_, kSeekCurrent);
- relative_offset += size_executable_offset_alignment_;
- DCHECK_EQ(relative_offset, oat_header_->GetExecutableOffset());
- size_t expected_file_offset = file_offset + relative_offset;
- if (static_cast<uint32_t>(new_offset) != expected_file_offset) {
- PLOG(ERROR) << "Failed to seek to oat code section. Actual: " << new_offset
- << " Expected: " << expected_file_offset << " File: " << out->GetLocation();
- return 0;
- }
- DCHECK_OFFSET();
if (compiler_driver_->IsImage()) {
InstructionSet instruction_set = compiler_driver_->GetInstructionSet();
@@ -1471,13 +1483,13 @@
oat_method_offsets_offset_from_oat_class += sizeof(method_bitmap_size_);
oat_method_offsets_offset_from_oat_class += method_bitmap_size_;
} else {
- method_bitmap_ = NULL;
+ method_bitmap_ = nullptr;
method_bitmap_size_ = 0;
}
for (size_t i = 0; i < num_methods; i++) {
CompiledMethod* compiled_method = compiled_methods_[i];
- if (compiled_method == NULL) {
+ if (compiled_method == nullptr) {
oat_method_offsets_offsets_from_oat_class_[i] = 0;
} else {
oat_method_offsets_offsets_from_oat_class_[i] = oat_method_offsets_offset_from_oat_class;
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index cc2b39a..6f1b4ec 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -118,11 +118,8 @@
return it.first->second.get();
}
- void SetOatDataOffset(size_t oat_data_offset) {
- oat_data_offset_ = oat_data_offset;
- }
-
- bool Write(OutputStream* out);
+ bool WriteRodata(OutputStream* out);
+ bool WriteCode(OutputStream* out);
~OatWriter();
@@ -235,13 +232,13 @@
// used to validate file position when writing.
size_t offset_;
- // CompiledMethods for each class_def_method_index, or NULL if no method is available.
+ // CompiledMethods for each class_def_method_index, or null if no method is available.
std::vector<CompiledMethod*> compiled_methods_;
// Offset from OatClass::offset_ to the OatMethodOffsets for the
// class_def_method_index. If 0, it means the corresponding
// CompiledMethod entry in OatClass::compiled_methods_ should be
- // NULL and that the OatClass::type_ should be kOatClassBitmap.
+ // null and that the OatClass::type_ should be kOatClassBitmap.
std::vector<uint32_t> oat_method_offsets_offsets_from_oat_class_;
// data to write
@@ -258,12 +255,12 @@
// OatClassType::type_ is kOatClassBitmap, a set bit indicates the
// method has an OatMethodOffsets in methods_offsets_, otherwise
// the entry was omitted to save space. If OatClassType::type_ is
- // not is kOatClassBitmap, the bitmap will be NULL.
+ // not kOatClassBitmap, the bitmap will be null.
BitVector* method_bitmap_;
// OatMethodOffsets and OatMethodHeaders for each CompiledMethod
// present in the OatClass. Note that some may be missing if
- // OatClass::compiled_methods_ contains NULL values (and
+ // OatClass::compiled_methods_ contains null values (and
// oat_method_offsets_offsets_from_oat_class_ should contain 0
// values in this case).
std::vector<OatMethodOffsets> method_offsets_;
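
Replacing Write/SetOatDataOffset with the WriteRodata/WriteCode pair gives the ELF writer one call per output section: WriteRodata records the oat data offset itself, and WriteCode resumes at the executable offset stored in the oat header during the first phase. A self-contained sketch of the resulting call pattern (the types below are mocks standing in for art::OatWriter and art::OutputStream):

    #include <iostream>

    struct OutputStream {};  // stand-in for art::OutputStream

    struct MockOatWriter {   // stand-in for art::OatWriter
      bool WriteRodata(OutputStream*) {  // header, dex files, class data, ...
        std::cout << ".rodata contents written\n";
        return true;
      }
      bool WriteCode(OutputStream*) {    // compiled code at the executable offset
        std::cout << ".text contents written\n";
        return true;
      }
    };

    int main() {
      MockOatWriter writer;
      OutputStream out;
      // The caller now drives the two phases in order, instead of calling a
      // single Write() preceded by SetOatDataOffset().
      bool ok = writer.WriteRodata(&out) && writer.WriteCode(&out);
      return ok ? 0 : 1;
    }
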
diff --git a/compiler/optimizing/boolean_simplifier.cc b/compiler/optimizing/boolean_simplifier.cc
index 06328f2..8100a29 100644
--- a/compiler/optimizing/boolean_simplifier.cc
+++ b/compiler/optimizing/boolean_simplifier.cc
@@ -18,6 +18,26 @@
namespace art {
+void HBooleanSimplifier::TryRemovingNegatedCondition(HBasicBlock* block) {
+ DCHECK(block->EndsWithIf());
+
+ // Check if the condition is a Boolean negation.
+ HIf* if_instruction = block->GetLastInstruction()->AsIf();
+ HInstruction* boolean_not = if_instruction->InputAt(0);
+ if (!boolean_not->IsBooleanNot()) {
+ return;
+ }
+
+ // Make BooleanNot's input the condition of the If and swap branches.
+ if_instruction->ReplaceInput(boolean_not->InputAt(0), 0);
+ block->SwapSuccessors();
+
+ // Remove the BooleanNot if it is now unused.
+ if (!boolean_not->HasUses()) {
+ boolean_not->GetBlock()->RemoveInstruction(boolean_not);
+ }
+}
+
// Returns true if 'block1' and 'block2' are empty, merge into the same single
// successor and the successor can only be reached from them.
static bool BlocksDoMergeTogether(HBasicBlock* block1, HBasicBlock* block2) {
@@ -72,61 +92,75 @@
return graph->GetIntConstant(0);
}
} else {
- // General case when 'cond' is another instruction of type boolean.
- DCHECK_EQ(cond->GetType(), Primitive::Type::kPrimBoolean);
+ // General case when 'cond' is another instruction of type boolean,
+ // as verified by SSAChecker.
return new (allocator) HBooleanNot(cond);
}
}
+void HBooleanSimplifier::TryRemovingBooleanSelection(HBasicBlock* block) {
+ DCHECK(block->EndsWithIf());
+
+ // Find elements of the pattern.
+ HIf* if_instruction = block->GetLastInstruction()->AsIf();
+ HBasicBlock* true_block = if_instruction->IfTrueSuccessor();
+ HBasicBlock* false_block = if_instruction->IfFalseSuccessor();
+ if (!BlocksDoMergeTogether(true_block, false_block)) {
+ return;
+ }
+ HBasicBlock* merge_block = true_block->GetSuccessors().Get(0);
+ if (!merge_block->HasSinglePhi()) {
+ return;
+ }
+ HPhi* phi = merge_block->GetFirstPhi()->AsPhi();
+ HInstruction* true_value = phi->InputAt(merge_block->GetPredecessorIndexOf(true_block));
+ HInstruction* false_value = phi->InputAt(merge_block->GetPredecessorIndexOf(false_block));
+
+ // Check if the selection negates/preserves the value of the condition and
+ // if so, generate a suitable replacement instruction.
+ HInstruction* if_condition = if_instruction->InputAt(0);
+ HInstruction* replacement;
+ if (NegatesCondition(true_value, false_value)) {
+ replacement = GetOppositeCondition(if_condition);
+ if (replacement->GetBlock() == nullptr) {
+ block->InsertInstructionBefore(replacement, if_instruction);
+ }
+ } else if (PreservesCondition(true_value, false_value)) {
+ replacement = if_condition;
+ } else {
+ return;
+ }
+
+ // Replace the selection outcome with the new instruction.
+ phi->ReplaceWith(replacement);
+ merge_block->RemovePhi(phi);
+
+ // Delete the true branch and merge the resulting chain of blocks
+ // 'block->false_block->merge_block' into one.
+ true_block->DisconnectAndDelete();
+ block->MergeWith(false_block);
+ block->MergeWith(merge_block);
+
+ // Remove the original condition if it is now unused.
+ if (!if_condition->HasUses()) {
+ if_condition->GetBlock()->RemoveInstructionOrPhi(if_condition);
+ }
+}
+
void HBooleanSimplifier::Run() {
// Iterate in post order in the unlikely case that removing one occurrence of
- // the pattern empties a branch block of another occurrence. Otherwise the
- // order does not matter.
+ // the selection pattern empties a branch block of another occurrence.
+ // Otherwise the order does not matter.
for (HPostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
if (!block->EndsWithIf()) continue;
- // Find elements of the pattern.
- HIf* if_instruction = block->GetLastInstruction()->AsIf();
- HBasicBlock* true_block = if_instruction->IfTrueSuccessor();
- HBasicBlock* false_block = if_instruction->IfFalseSuccessor();
- if (!BlocksDoMergeTogether(true_block, false_block)) {
- continue;
- }
- HBasicBlock* merge_block = true_block->GetSuccessors().Get(0);
- if (!merge_block->HasSinglePhi()) {
- continue;
- }
- HPhi* phi = merge_block->GetFirstPhi()->AsPhi();
- HInstruction* true_value = phi->InputAt(merge_block->GetPredecessorIndexOf(true_block));
- HInstruction* false_value = phi->InputAt(merge_block->GetPredecessorIndexOf(false_block));
+ // If condition is negated, remove the negation and swap the branches.
+ TryRemovingNegatedCondition(block);
- // Check if the selection negates/preserves the value of the condition and
- // if so, generate a suitable replacement instruction.
- HInstruction* if_condition = if_instruction->InputAt(0);
- HInstruction* replacement;
- if (NegatesCondition(true_value, false_value)) {
- replacement = GetOppositeCondition(if_condition);
- if (replacement->GetBlock() == nullptr) {
- block->InsertInstructionBefore(replacement, if_instruction);
- }
- } else if (PreservesCondition(true_value, false_value)) {
- replacement = if_condition;
- } else {
- continue;
- }
-
- // Replace the selection outcome with the new instruction.
- phi->ReplaceWith(replacement);
- merge_block->RemovePhi(phi);
-
- // Link the start/end blocks and remove empty branches.
- graph_->MergeEmptyBranches(block, merge_block);
-
- // Remove the original condition if it is now unused.
- if (!if_condition->HasUses()) {
- if_condition->GetBlock()->RemoveInstruction(if_condition);
- }
+ // If this is a boolean-selection diamond pattern, replace its result with
+ // the condition value (or its negation) and simplify the graph.
+ TryRemovingBooleanSelection(block);
}
}
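
At the source level, the two rewrites that Run() now delegates to correspond to the following shapes (sketched in C++ for concreteness; the pass actually operates on the HGraph IR built from dex code):

    // Pattern handled by TryRemovingBooleanSelection: a diamond selecting the
    // constants 1/0 to materialize a condition...
    bool SelectionBefore(int x, int y) {
      bool b;
      if (x > y) {
        b = true;
      } else {
        b = false;
      }
      return b;
    }
    // ...collapses to the condition itself.
    bool SelectionAfter(int x, int y) {
      return x > y;
    }

    // Pattern handled by TryRemovingNegatedCondition: an If fed by a BooleanNot...
    int NegationBefore(bool flag) {
      if (!flag) {
        return 1;
      }
      return 2;
    }
    // ...becomes an If on the original value with the successors swapped.
    int NegationAfter(bool flag) {
      if (flag) {
        return 2;
      }
      return 1;
    }
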
diff --git a/compiler/optimizing/boolean_simplifier.h b/compiler/optimizing/boolean_simplifier.h
index a88733e..733ebaa 100644
--- a/compiler/optimizing/boolean_simplifier.h
+++ b/compiler/optimizing/boolean_simplifier.h
@@ -14,11 +14,15 @@
* limitations under the License.
*/
-// This optimization recognizes a common pattern where a boolean value is
-// either cast to an integer or negated by selecting from zero/one integer
-// constants with an If statement. Because boolean values are internally
-// represented as zero/one, we can safely replace the pattern with a suitable
-// condition instruction.
+// This optimization recognizes two common patterns:
+// (a) Boolean selection: Casting a boolean to an integer or negating it is
+// carried out with an If statement selecting from zero/one integer
+// constants. Because Boolean values are represented as zero/one, the
+// pattern can be replaced with the condition instruction itself or its
+// negation, depending on the layout.
+// (b) Negated condition: Instruction simplifier may replace an If's condition
+// with a boolean value. If this value is the result of a Boolean negation,
+// the true/false branches can be swapped and negation removed.
// Example: Negating a boolean value
// B1:
@@ -66,6 +70,9 @@
static constexpr const char* kBooleanSimplifierPassName = "boolean_simplifier";
private:
+ void TryRemovingNegatedCondition(HBasicBlock* block);
+ void TryRemovingBooleanSelection(HBasicBlock* block);
+
DISALLOW_COPY_AND_ASSIGN(HBooleanSimplifier);
};
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index 6511120..b2b5496 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -246,6 +246,148 @@
int32_t constant_;
};
+// Collect array access data for a loop.
+// TODO: make it work for multiple arrays inside the loop.
+class ArrayAccessInsideLoopFinder : public ValueObject {
+ public:
+ explicit ArrayAccessInsideLoopFinder(HInstruction* induction_variable)
+ : induction_variable_(induction_variable),
+ found_array_length_(nullptr),
+ offset_low_(INT_MAX),
+ offset_high_(INT_MIN) {
+ Run();
+ }
+
+ HArrayLength* GetFoundArrayLength() const { return found_array_length_; }
+ bool HasFoundArrayLength() const { return found_array_length_ != nullptr; }
+ int32_t GetOffsetLow() const { return offset_low_; }
+ int32_t GetOffsetHigh() const { return offset_high_; }
+
+ // Returns true if `block`, which must be inside loop_info, may exit the loop.
+ // Always returns false for loop_info's loop header.
+ static bool EarlyExit(HBasicBlock* block, HLoopInformation* loop_info) {
+ DCHECK(loop_info->Contains(*block));
+ if (block == loop_info->GetHeader()) {
+ // Loop header of loop_info. Exiting loop is normal.
+ return false;
+ }
+ const GrowableArray<HBasicBlock*>& successors = block->GetSuccessors();
+ for (size_t i = 0; i < successors.Size(); i++) {
+ if (!loop_info->Contains(*successors.Get(i))) {
+ // One of the successors exits the loop.
+ return true;
+ }
+ }
+ return false;
+ }
+
+ static bool DominatesAllBackEdges(HBasicBlock* block, HLoopInformation* loop_info) {
+ for (size_t i = 0, e = loop_info->GetBackEdges().Size(); i < e; ++i) {
+ HBasicBlock* back_edge = loop_info->GetBackEdges().Get(i);
+ if (!block->Dominates(back_edge)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ void Run() {
+ HLoopInformation* loop_info = induction_variable_->GetBlock()->GetLoopInformation();
+ for (HBlocksInLoopIterator it_loop(*loop_info); !it_loop.Done(); it_loop.Advance()) {
+ HBasicBlock* block = it_loop.Current();
+ DCHECK(block->IsInLoop());
+ if (!DominatesAllBackEdges(block, loop_info)) {
+ // In order not to trigger deoptimization unnecessarily, make sure
+ // that all array accesses collected are really executed in the loop.
+ // For array accesses in a branch inside the loop, don't collect the
+ // access. The bounds check in that branch might not be eliminated.
+ continue;
+ }
+ if (EarlyExit(block, loop_info)) {
+ // If the loop body can exit the loop (e.g. break, return), it's not guaranteed
+ // that the loop will loop through the full monotonic value range from
+ // initial_ to end_. So adding deoptimization might be too aggressive and can
+ // trigger deoptimization unnecessarily even if the loop won't actually throw
+ // AIOOBE. Otherwise, the loop induction variable is going to cover the full
+ // monotonic value range from initial_ to end_, and deoptimizations are added
+ // iff the loop will throw AIOOBE.
+ found_array_length_ = nullptr;
+ return;
+ }
+ for (HInstruction* instruction = block->GetFirstInstruction();
+ instruction != nullptr;
+ instruction = instruction->GetNext()) {
+ if (!instruction->IsArrayGet() && !instruction->IsArraySet()) {
+ continue;
+ }
+ HInstruction* index = instruction->InputAt(1);
+ if (!index->IsBoundsCheck()) {
+ continue;
+ }
+
+ HArrayLength* array_length = index->InputAt(1)->AsArrayLength();
+ if (array_length == nullptr) {
+ DCHECK(index->InputAt(1)->IsIntConstant());
+ // TODO: may optimize for constant case.
+ continue;
+ }
+
+ HInstruction* array = array_length->InputAt(0);
+ if (array->IsNullCheck()) {
+ array = array->AsNullCheck()->InputAt(0);
+ }
+ if (loop_info->Contains(*array->GetBlock())) {
+ // Array is defined inside the loop. Skip.
+ continue;
+ }
+
+ if (found_array_length_ != nullptr && found_array_length_ != array_length) {
+ // There is already access for another array recorded for the loop.
+ // TODO: handle multiple arrays.
+ continue;
+ }
+
+ index = index->AsBoundsCheck()->InputAt(0);
+ HInstruction* left = index;
+ int32_t right = 0;
+ if (left == induction_variable_ ||
+ (ValueBound::IsAddOrSubAConstant(index, &left, &right) &&
+ left == induction_variable_)) {
+ // For patterns like array[i] or array[i + 2].
+ if (right < offset_low_) {
+ offset_low_ = right;
+ }
+ if (right > offset_high_) {
+ offset_high_ = right;
+ }
+ } else {
+ // The access is not of the form induction_variable_ or
+ // (induction_variable_ + constant). Skip.
+ continue;
+ }
+ // Record this array.
+ found_array_length_ = array_length;
+ }
+ }
+ }
+
+ private:
+ // The instruction that corresponds to a MonotonicValueRange.
+ HInstruction* induction_variable_;
+
+ // The array length of the array that's accessed inside the loop.
+ HArrayLength* found_array_length_;
+
+ // The lowest and highest constant offsets relative to induction variable
+ // instruction_ in all array accesses.
+ // If the array accesses are array[i-1], array[i] and array[i+1],
+ // offset_low_ is -1 and offset_high_ is 1.
+ int32_t offset_low_;
+ int32_t offset_high_;
+
+ DISALLOW_COPY_AND_ASSIGN(ArrayAccessInsideLoopFinder);
+};
+
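
For intuition on what the finder collects: it walks the body of a counted loop and records the constant offsets applied to the induction variable in each bounds-checked access to a single loop-invariant array. A source-level sketch of a loop shape the finder accepts (per the comment above, this one yields offset_low_ = -1 and offset_high_ = 1):

    // A single array indexed by the induction variable plus small constant
    // offsets, with no early exits (break/return) from the loop body.
    int SumNeighbors(const int* a, int length) {
      int sum = 0;
      for (int i = 1; i < length - 1; ++i) {
        sum += a[i - 1] + a[i] + a[i + 1];  // offsets -1, 0, +1 collected
      }
      return sum;
    }
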
/**
* Represent a range of lower bound and upper bound, both being inclusive.
* Currently a ValueRange may be generated as a result of the following:
@@ -332,21 +474,31 @@
class MonotonicValueRange : public ValueRange {
public:
MonotonicValueRange(ArenaAllocator* allocator,
+ HPhi* induction_variable,
HInstruction* initial,
int32_t increment,
ValueBound bound)
// To be conservative, give it full range [INT_MIN, INT_MAX] in case it's
// used as a regular value range, due to possible overflow/underflow.
: ValueRange(allocator, ValueBound::Min(), ValueBound::Max()),
+ induction_variable_(induction_variable),
initial_(initial),
+ end_(nullptr),
+ inclusive_(false),
increment_(increment),
bound_(bound) {}
virtual ~MonotonicValueRange() {}
+ HInstruction* GetInductionVariable() const { return induction_variable_; }
int32_t GetIncrement() const { return increment_; }
-
ValueBound GetBound() const { return bound_; }
+ void SetEnd(HInstruction* end) { end_ = end; }
+ void SetInclusive(bool inclusive) { inclusive_ = inclusive; }
+ HBasicBlock* GetLoopHead() const {
+ DCHECK(induction_variable_->GetBlock()->IsLoopHeader());
+ return induction_variable_->GetBlock();
+ }
MonotonicValueRange* AsMonotonicValueRange() OVERRIDE { return this; }
@@ -371,6 +523,10 @@
if (increment_ > 0) {
// Monotonically increasing.
ValueBound lower = ValueBound::NarrowLowerBound(bound_, range->GetLower());
+ if (!lower.IsConstant() || lower.GetConstant() == INT_MIN) {
+ // Lower bound isn't useful. Leave it to deoptimization.
+ return this;
+ }
// We currently conservatively assume max array length is INT_MAX. If we can
// make assumptions about the max array length, e.g. due to the max heap size,
@@ -417,6 +573,11 @@
DCHECK_NE(increment_, 0);
// Monotonically decreasing.
ValueBound upper = ValueBound::NarrowUpperBound(bound_, range->GetUpper());
+ if ((!upper.IsConstant() || upper.GetConstant() == INT_MAX) &&
+ !upper.IsRelatedToArrayLength()) {
+ // Upper bound isn't useful. Leave it to deoptimization.
+ return this;
+ }
// Need to take care of underflow. Try to prove underflow won't happen
// for common cases.
@@ -432,10 +593,217 @@
}
}
+ // Returns true if adding a (value >= constant) check for deoptimization
+ // is allowed and will benefit compiled code.
+ bool CanAddDeoptimizationConstant(HInstruction* value,
+ int32_t constant,
+ bool* is_proven) {
+ *is_proven = false;
+ // See if we can prove the relationship first.
+ if (value->IsIntConstant()) {
+ if (value->AsIntConstant()->GetValue() >= constant) {
+ // Already true.
+ *is_proven = true;
+ return true;
+ } else {
+ // May throw exception. Don't add deoptimization.
+ // Keep bounds checks in the loops.
+ return false;
+ }
+ }
+ // Can benefit from deoptimization.
+ return true;
+ }
+
+ // Adds a check that (value >= constant), with an HDeoptimize taken otherwise.
+ void AddDeoptimizationConstant(HInstruction* value,
+ int32_t constant) {
+ HBasicBlock* block = induction_variable_->GetBlock();
+ DCHECK(block->IsLoopHeader());
+ HGraph* graph = block->GetGraph();
+ HBasicBlock* pre_header = block->GetLoopInformation()->GetPreHeader();
+ HSuspendCheck* suspend_check = block->GetLoopInformation()->GetSuspendCheck();
+ HIntConstant* const_instr = graph->GetIntConstant(constant);
+ HCondition* cond = new (graph->GetArena()) HLessThan(value, const_instr);
+ HDeoptimize* deoptimize = new (graph->GetArena())
+ HDeoptimize(cond, suspend_check->GetDexPc());
+ pre_header->InsertInstructionBefore(cond, pre_header->GetLastInstruction());
+ pre_header->InsertInstructionBefore(deoptimize, pre_header->GetLastInstruction());
+ deoptimize->CopyEnvironmentFromWithLoopPhiAdjustment(
+ suspend_check->GetEnvironment(), block);
+ }
+
+ // Returns true if adding a (value <= array_length + offset) check for deoptimization
+ // is allowed and will benefit compiled code.
+ bool CanAddDeoptimizationArrayLength(HInstruction* value,
+ HArrayLength* array_length,
+ int32_t offset,
+ bool* is_proven) {
+ *is_proven = false;
+ if (offset > 0) {
+ // There might be overflow issue.
+ // TODO: handle this, possibly with some distance relationship between
+ // offset_low and offset_high, or using another deoptimization to make
+ // sure (array_length + offset) doesn't overflow.
+ return false;
+ }
+
+ // See if we can prove the relationship first.
+ if (value == array_length) {
+ if (offset >= 0) {
+ // Already true.
+ *is_proven = true;
+ return true;
+ } else {
+ // May throw exception. Don't add deoptimization.
+ // Keep bounds checks in the loops.
+ return false;
+ }
+ }
+ // Can benefit from deoptimization.
+ return true;
+ }
+
+ // Adds a check that (value <= array_length + offset), with an HDeoptimize taken otherwise.
+ void AddDeoptimizationArrayLength(HInstruction* value,
+ HArrayLength* array_length,
+ int32_t offset) {
+ HBasicBlock* block = induction_variable_->GetBlock();
+ DCHECK(block->IsLoopHeader());
+ HGraph* graph = block->GetGraph();
+ HBasicBlock* pre_header = block->GetLoopInformation()->GetPreHeader();
+ HSuspendCheck* suspend_check = block->GetLoopInformation()->GetSuspendCheck();
+
+ // We may need to hoist null-check and array_length out of loop first.
+ if (!array_length->GetBlock()->Dominates(pre_header)) {
+ HInstruction* array = array_length->InputAt(0);
+ HNullCheck* null_check = array->AsNullCheck();
+ if (null_check != nullptr) {
+ array = null_check->InputAt(0);
+ }
+ // We've already made sure array is defined before the loop when collecting
+ // array accesses for the loop.
+ DCHECK(array->GetBlock()->Dominates(pre_header));
+ if (null_check != nullptr && !null_check->GetBlock()->Dominates(pre_header)) {
+ // Hoist null check out of loop with a deoptimization.
+ HNullConstant* null_constant = graph->GetNullConstant();
+ HCondition* null_check_cond = new (graph->GetArena()) HEqual(array, null_constant);
+ // TODO: for one dex_pc, share the same deoptimization slow path.
+ HDeoptimize* null_check_deoptimize = new (graph->GetArena())
+ HDeoptimize(null_check_cond, suspend_check->GetDexPc());
+ pre_header->InsertInstructionBefore(null_check_cond, pre_header->GetLastInstruction());
+ pre_header->InsertInstructionBefore(
+ null_check_deoptimize, pre_header->GetLastInstruction());
+ // Eliminate null check in the loop.
+ null_check->ReplaceWith(array);
+ null_check->GetBlock()->RemoveInstruction(null_check);
+ null_check_deoptimize->CopyEnvironmentFromWithLoopPhiAdjustment(
+ suspend_check->GetEnvironment(), block);
+ }
+ // Hoist array_length out of loop.
+ array_length->MoveBefore(pre_header->GetLastInstruction());
+ }
+
+ HIntConstant* offset_instr = graph->GetIntConstant(offset);
+ HAdd* add = new (graph->GetArena()) HAdd(Primitive::kPrimInt, array_length, offset_instr);
+ HCondition* cond = new (graph->GetArena()) HGreaterThan(value, add);
+ HDeoptimize* deoptimize = new (graph->GetArena())
+ HDeoptimize(cond, suspend_check->GetDexPc());
+ pre_header->InsertInstructionBefore(add, pre_header->GetLastInstruction());
+ pre_header->InsertInstructionBefore(cond, pre_header->GetLastInstruction());
+ pre_header->InsertInstructionBefore(deoptimize, pre_header->GetLastInstruction());
+ deoptimize->CopyEnvironmentFromWithLoopPhiAdjustment(
+ suspend_check->GetEnvironment(), block);
+ }
+
+ // Adds deoptimizations in the loop pre-header, based on the collected array
+ // access data, so that value ranges can be established in the loop body.
+ // Returns true if the deoptimizations were successfully added, or if they
+ // were proven unnecessary.
+ bool AddDeoptimization(const ArrayAccessInsideLoopFinder& finder) {
+ int32_t offset_low = finder.GetOffsetLow();
+ int32_t offset_high = finder.GetOffsetHigh();
+ HArrayLength* array_length = finder.GetFoundArrayLength();
+
+ HBasicBlock* pre_header =
+ induction_variable_->GetBlock()->GetLoopInformation()->GetPreHeader();
+ if (!initial_->GetBlock()->Dominates(pre_header) ||
+ !end_->GetBlock()->Dominates(pre_header)) {
+ // Can't move initial_ or end_ into pre_header for comparisons.
+ return false;
+ }
+
+ bool is_constant_proven, is_length_proven;
+ if (increment_ == 1) {
+ // Increasing from initial_ to end_.
+ int32_t offset = inclusive_ ? -offset_high - 1 : -offset_high;
+ if (CanAddDeoptimizationConstant(initial_, -offset_low, &is_constant_proven) &&
+ CanAddDeoptimizationArrayLength(end_, array_length, offset, &is_length_proven)) {
+ if (!is_constant_proven) {
+ AddDeoptimizationConstant(initial_, -offset_low);
+ }
+ if (!is_length_proven) {
+ AddDeoptimizationArrayLength(end_, array_length, offset);
+ }
+ return true;
+ }
+ } else if (increment_ == -1) {
+ // Decreasing from initial_ to end_.
+ int32_t constant = inclusive_ ? -offset_low : -offset_low - 1;
+ if (CanAddDeoptimizationConstant(end_, constant, &is_constant_proven) &&
+ CanAddDeoptimizationArrayLength(
+ initial_, array_length, -offset_high - 1, &is_length_proven)) {
+ if (!is_constant_proven) {
+ AddDeoptimizationConstant(end_, constant);
+ }
+ if (!is_length_proven) {
+ AddDeoptimizationArrayLength(initial_, array_length, -offset_high - 1);
+ }
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // Tries to add HDeoptimizes in the loop pre-header first, to narrow this range.
+ ValueRange* NarrowWithDeoptimization() {
+ if (increment_ != 1 && increment_ != -1) {
+ // TODO: possibly handle overflow/underflow issues with deoptimization.
+ return this;
+ }
+
+ if (end_ == nullptr) {
+ // Not enough information to add deoptimization.
+ return this;
+ }
+
+ ArrayAccessInsideLoopFinder finder(induction_variable_);
+
+ if (!finder.HasFoundArrayLength()) {
+ // No array access was found inside the loop that can benefit
+ // from deoptimization.
+ return this;
+ }
+
+ if (!AddDeoptimization(finder)) {
+ return this;
+ }
+
+ // With the deoptimizations added, the induction variable fits in the range
+ // [-offset_low, array.length - 1 - offset_high] derived from the collected offsets.
+ ValueBound lower = ValueBound(0, -finder.GetOffsetLow());
+ ValueBound upper = ValueBound(finder.GetFoundArrayLength(), -1 - finder.GetOffsetHigh());
+ // We've narrowed the range after adding the deoptimizations.
+ return new (GetAllocator()) ValueRange(GetAllocator(), lower, upper);
+ }
+
private:
- HInstruction* const initial_;
- const int32_t increment_;
- ValueBound bound_; // Additional value bound info for initial_;
+ HPhi* const induction_variable_; // Induction variable for this monotonic value range.
+ HInstruction* const initial_; // Initial value.
+ HInstruction* end_; // End value.
+ bool inclusive_; // Whether end value is inclusive.
+ const int32_t increment_; // Increment for each loop iteration.
+ const ValueBound bound_; // Additional value bound info for initial_.
DISALLOW_COPY_AND_ASSIGN(MonotonicValueRange);
};
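
Putting the pieces together: for a simple counted loop, AddDeoptimization replaces per-iteration bounds checks with two pre-header guards — one checking the initial value against -offset_low, one checking the end value against array length plus the adjusted offset. A source-level before/after sketch of the effect (illustrative only; the real transformation inserts HDeoptimize nodes that fall back to the interpreter, which still throws on exactly the right iteration):

    // Before: each iteration performs an implicit bounds check.
    void FillChecked(int* array, int array_length, int n) {
      for (int i = 0; i < n; ++i) {
        if (i < 0 || i >= array_length) return;  // models the AIOOBE path
        array[i] = 0;
      }
    }

    // After: with increment 1, offset_low = offset_high = 0, and an exclusive
    // end (i < n), the two guards reduce to initial >= 0 and end <= length;
    // failing either deoptimizes instead of running the check-free loop.
    void FillGuarded(int* array, int array_length, int n) {
      const int initial = 0;
      if (initial < 0 || n > array_length) {   // pre-header guards
        FillChecked(array, array_length, n);   // models the deoptimization path
        return;
      }
      for (int i = initial; i < n; ++i) {
        array[i] = 0;  // bounds checks eliminated
      }
    }
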
@@ -598,6 +966,20 @@
// There should be no critical edge at this point.
DCHECK_EQ(false_successor->GetPredecessors().Size(), 1u);
+ ValueRange* left_range = LookupValueRange(left, block);
+ MonotonicValueRange* left_monotonic_range = nullptr;
+ if (left_range != nullptr) {
+ left_monotonic_range = left_range->AsMonotonicValueRange();
+ if (left_monotonic_range != nullptr) {
+ HBasicBlock* loop_head = left_monotonic_range->GetLoopHead();
+ if (instruction->GetBlock() != loop_head) {
+ // For monotonic value range, don't handle `instruction`
+ // if it's not defined in the loop header.
+ return;
+ }
+ }
+ }
+
bool found;
ValueBound bound = ValueBound::DetectValueBoundFromValue(right, &found);
// Each comparison can establish a lower bound and an upper bound
@@ -610,7 +992,6 @@
ValueRange* right_range = LookupValueRange(right, block);
if (right_range != nullptr) {
if (right_range->IsMonotonicValueRange()) {
- ValueRange* left_range = LookupValueRange(left, block);
if (left_range != nullptr && left_range->IsMonotonicValueRange()) {
HandleIfBetweenTwoMonotonicValueRanges(instruction, left, right, cond,
left_range->AsMonotonicValueRange(),
@@ -628,6 +1009,17 @@
bool overflow, underflow;
if (cond == kCondLT || cond == kCondLE) {
+ if (left_monotonic_range != nullptr) {
+ // Update the info for monotonic value range.
+ if (left_monotonic_range->GetInductionVariable() == left &&
+ left_monotonic_range->GetIncrement() < 0 &&
+ block == left_monotonic_range->GetLoopHead() &&
+ instruction->IfFalseSuccessor()->GetLoopInformation() == block->GetLoopInformation()) {
+ left_monotonic_range->SetEnd(right);
+ left_monotonic_range->SetInclusive(cond == kCondLT);
+ }
+ }
+
if (!upper.Equals(ValueBound::Max())) {
int32_t compensation = (cond == kCondLT) ? -1 : 0; // upper bound is inclusive
ValueBound new_upper = upper.Add(compensation, &overflow, &underflow);
@@ -651,6 +1043,17 @@
ApplyRangeFromComparison(left, block, false_successor, new_range);
}
} else if (cond == kCondGT || cond == kCondGE) {
+ if (left_monotonic_range != nullptr) {
+ // Update the info for monotonic value range.
+ if (left_monotonic_range->GetInductionVariable() == left &&
+ left_monotonic_range->GetIncrement() > 0 &&
+ block == left_monotonic_range->GetLoopHead() &&
+ instruction->IfFalseSuccessor()->GetLoopInformation() == block->GetLoopInformation()) {
+ left_monotonic_range->SetEnd(right);
+ left_monotonic_range->SetInclusive(cond == kCondGT);
+ }
+ }
+
// array.length as a lower bound isn't considered useful.
if (!lower.Equals(ValueBound::Min()) && !lower.IsRelatedToArrayLength()) {
int32_t compensation = (cond == kCondGT) ? 1 : 0; // lower bound is inclusive
@@ -755,9 +1158,26 @@
bounds_check->GetBlock()->RemoveInstruction(bounds_check);
}
+ static bool HasSameInputAtBackEdges(HPhi* phi) {
+ DCHECK(phi->IsLoopHeaderPhi());
+ // Start with input 1. Input 0 is from the incoming block.
+ HInstruction* input1 = phi->InputAt(1);
+ DCHECK(phi->GetBlock()->GetLoopInformation()->IsBackEdge(
+ *phi->GetBlock()->GetPredecessors().Get(1)));
+ for (size_t i = 2, e = phi->InputCount(); i < e; ++i) {
+ DCHECK(phi->GetBlock()->GetLoopInformation()->IsBackEdge(
+ *phi->GetBlock()->GetPredecessors().Get(i)));
+ if (input1 != phi->InputAt(i)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
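For illustration, a loop shape that can leave more than one back edge in the
bytecode while still defining a single monotonic induction variable
(hypothetical source; the property HasSameInputAtBackEdges checks is that the
phi receives the same updated value on every back edge):

int SumNonZero(const int* a, int n) {
  int sum = 0;
  int i = -1;
  while (true) {
    i++;                       // the only update of i
    if (i >= n) break;
    if (a[i] == 0) continue;   // back edge #1 to the loop header
    sum += a[i];
  }                            // back edge #2 to the loop header
  return sum;                  // phi(i) sees the same i+1 value on both edges
}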
void VisitPhi(HPhi* phi) {
- if (phi->IsLoopHeaderPhi() && phi->GetType() == Primitive::kPrimInt) {
- DCHECK_EQ(phi->InputCount(), 2U);
+ if (phi->IsLoopHeaderPhi()
+ && (phi->GetType() == Primitive::kPrimInt)
+ && HasSameInputAtBackEdges(phi)) {
HInstruction* instruction = phi->InputAt(1);
HInstruction *left;
int32_t increment;
@@ -790,6 +1210,7 @@
}
range = new (GetGraph()->GetArena()) MonotonicValueRange(
GetGraph()->GetArena(),
+ phi,
initial_value,
increment,
bound);
@@ -809,6 +1230,36 @@
HInstruction* left = cond->GetLeft();
HInstruction* right = cond->GetRight();
HandleIf(instruction, left, right, cmp);
+
+ HBasicBlock* block = instruction->GetBlock();
+ ValueRange* left_range = LookupValueRange(left, block);
+ if (left_range == nullptr) {
+ return;
+ }
+
+ if (left_range->IsMonotonicValueRange() &&
+ block == left_range->AsMonotonicValueRange()->GetLoopHead()) {
+ // The comparison is for an induction variable in the loop header.
+ DCHECK(left == left_range->AsMonotonicValueRange()->GetInductionVariable());
+ HBasicBlock* loop_body_successor;
+ if (LIKELY(block->GetLoopInformation()->
+ Contains(*instruction->IfFalseSuccessor()))) {
+ loop_body_successor = instruction->IfFalseSuccessor();
+ } else {
+ loop_body_successor = instruction->IfTrueSuccessor();
+ }
+ ValueRange* new_left_range = LookupValueRange(left, loop_body_successor);
+ if (new_left_range == left_range) {
+ // We were not successful in narrowing the monotonic value range to
+ // a regular value range. Try using deoptimization.
+ new_left_range = left_range->AsMonotonicValueRange()->
+ NarrowWithDeoptimization();
+ if (new_left_range != left_range) {
+ GetValueRangeMap(instruction->IfFalseSuccessor())->
+ Overwrite(left->GetId(), new_left_range);
+ }
+ }
+ }
}
}
}
@@ -1064,7 +1515,7 @@
};
void BoundsCheckElimination::Run() {
- if (!graph_->HasArrayAccesses()) {
+ if (!graph_->HasBoundsChecks()) {
return;
}
diff --git a/compiler/optimizing/bounds_check_elimination_test.cc b/compiler/optimizing/bounds_check_elimination_test.cc
index 75cf1cf..163458f 100644
--- a/compiler/optimizing/bounds_check_elimination_test.cc
+++ b/compiler/optimizing/bounds_check_elimination_test.cc
@@ -42,8 +42,8 @@
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = new (&allocator) HGraph(&allocator);
- graph->SetHasArrayAccesses(true);
+ HGraph* graph = CreateGraph(&allocator);
+ graph->SetHasBoundsChecks(true);
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
@@ -147,8 +147,8 @@
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = new (&allocator) HGraph(&allocator);
- graph->SetHasArrayAccesses(true);
+ HGraph* graph = CreateGraph(&allocator);
+ graph->SetHasBoundsChecks(true);
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
@@ -219,8 +219,8 @@
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = new (&allocator) HGraph(&allocator);
- graph->SetHasArrayAccesses(true);
+ HGraph* graph = CreateGraph(&allocator);
+ graph->SetHasBoundsChecks(true);
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
@@ -291,8 +291,8 @@
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = new (&allocator) HGraph(&allocator);
- graph->SetHasArrayAccesses(true);
+ HGraph* graph = CreateGraph(&allocator);
+ graph->SetHasBoundsChecks(true);
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
@@ -364,8 +364,8 @@
int initial,
int increment,
IfCondition cond = kCondGE) {
- HGraph* graph = new (allocator) HGraph(allocator);
- graph->SetHasArrayAccesses(true);
+ HGraph* graph = CreateGraph(allocator);
+ graph->SetHasBoundsChecks(true);
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
@@ -501,8 +501,8 @@
int initial,
int increment = -1,
IfCondition cond = kCondLE) {
- HGraph* graph = new (allocator) HGraph(allocator);
- graph->SetHasArrayAccesses(true);
+ HGraph* graph = CreateGraph(allocator);
+ graph->SetHasBoundsChecks(true);
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
@@ -632,8 +632,8 @@
int initial,
int increment,
IfCondition cond) {
- HGraph* graph = new (allocator) HGraph(allocator);
- graph->SetHasArrayAccesses(true);
+ HGraph* graph = CreateGraph(allocator);
+ graph->SetHasBoundsChecks(true);
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
@@ -743,8 +743,8 @@
HInstruction** bounds_check,
int initial,
IfCondition cond = kCondGE) {
- HGraph* graph = new (allocator) HGraph(allocator);
- graph->SetHasArrayAccesses(true);
+ HGraph* graph = CreateGraph(allocator);
+ graph->SetHasBoundsChecks(true);
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
@@ -868,8 +868,8 @@
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = new (&allocator) HGraph(&allocator);
- graph->SetHasArrayAccesses(true);
+ HGraph* graph = CreateGraph(&allocator);
+ graph->SetHasBoundsChecks(true);
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 818d671..c4eaabf 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -19,8 +19,10 @@
#include "art_field-inl.h"
#include "base/logging.h"
#include "class_linker.h"
+#include "dex/verified_method.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
+#include "dex/verified_method.h"
#include "driver/compiler_driver-inl.h"
#include "driver/compiler_options.h"
#include "mirror/class_loader.h"
@@ -280,7 +282,10 @@
// To avoid splitting blocks, we compute ahead of time the instructions that
// start a new block, and create these blocks.
- ComputeBranchTargets(code_ptr, code_end, &number_of_branches);
+ if (!ComputeBranchTargets(code_ptr, code_end, &number_of_branches)) {
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledBranchOutsideMethodCode);
+ return false;
+ }
// Note that the compiler driver is null when unit testing.
if ((compiler_driver_ != nullptr) && SkipCompilation(code_item, number_of_branches)) {
@@ -347,7 +352,7 @@
current_block_ = block;
}
-void HGraphBuilder::ComputeBranchTargets(const uint16_t* code_ptr,
+bool HGraphBuilder::ComputeBranchTargets(const uint16_t* code_ptr,
const uint16_t* code_end,
size_t* number_of_branches) {
branch_targets_.SetSize(code_end - code_ptr);
@@ -372,7 +377,14 @@
}
dex_pc += instruction.SizeInCodeUnits();
code_ptr += instruction.SizeInCodeUnits();
- if ((code_ptr < code_end) && (FindBlockStartingAt(dex_pc) == nullptr)) {
+
+ if (code_ptr >= code_end) {
+ if (instruction.CanFlowThrough()) {
+ // In the normal case we should never hit this, but someone can artificially forge a dex
+ // file to fall through out of the method code. In this case we bail out of compilation.
+ return false;
+ }
+ } else if (FindBlockStartingAt(dex_pc) == nullptr) {
block = new (arena_) HBasicBlock(graph_, dex_pc);
branch_targets_.Put(dex_pc, block);
}
@@ -404,7 +416,12 @@
// Fall-through. Add a block if there is more code afterwards.
dex_pc += instruction.SizeInCodeUnits();
code_ptr += instruction.SizeInCodeUnits();
- if ((code_ptr < code_end) && (FindBlockStartingAt(dex_pc) == nullptr)) {
+ if (code_ptr >= code_end) {
+ // In the normal case we should never hit this, but someone can artificially forge a dex
+ // file to fall through out of the method code. In this case we bail out of compilation.
+ // (A switch can fall through, so we don't need to check CanFlowThrough().)
+ return false;
+ } else if (FindBlockStartingAt(dex_pc) == nullptr) {
block = new (arena_) HBasicBlock(graph_, dex_pc);
branch_targets_.Put(dex_pc, block);
}
@@ -413,6 +430,7 @@
dex_pc += instruction.SizeInCodeUnits();
}
}
+ return true;
}
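A toy model of the new bail-out, with hypothetical types (the real code walks
dex code units via Instruction::SizeInCodeUnits() and CanFlowThrough()):

#include <cstddef>
#include <cstdint>
#include <vector>

struct Insn { uint16_t size_in_code_units; bool can_flow_through; };

// Returns false for a forged method whose last instruction ends at the end of
// the code item but can still fall through past it.
bool BranchTargetsAreSane(const std::vector<Insn>& insns, size_t code_units) {
  size_t pc = 0;
  for (const Insn& insn : insns) {
    pc += insn.size_in_code_units;
    if (pc >= code_units && insn.can_flow_through) {
      return false;  // control would flow out of the method code
    }
  }
  return true;
}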
HBasicBlock* HGraphBuilder::FindBlockStartingAt(int32_t index) const {
@@ -521,11 +539,6 @@
}
static bool RequiresConstructorBarrier(const DexCompilationUnit* cu, const CompilerDriver& driver) {
- // dex compilation unit is null only when unit testing.
- if (cu == nullptr) {
- return false;
- }
-
Thread* self = Thread::Current();
return cu->IsConstructor()
&& driver.RequiresConstructorBarrier(self, cu->GetDexFile(), cu->GetClassDefIndex());
@@ -533,9 +546,12 @@
void HGraphBuilder::BuildReturn(const Instruction& instruction, Primitive::Type type) {
if (type == Primitive::kPrimVoid) {
- // Note that we might insert redundant barriers when inlining `super` calls.
- // TODO: add a data flow analysis to get rid of duplicate barriers.
- if (RequiresConstructorBarrier(dex_compilation_unit_, *compiler_driver_)) {
+ if (graph_->ShouldGenerateConstructorBarrier()) {
+ // The compilation unit is null during testing.
+ if (dex_compilation_unit_ != nullptr) {
+ DCHECK(RequiresConstructorBarrier(dex_compilation_unit_, *compiler_driver_))
+ << "Inconsistent use of ShouldGenerateConstructorBarrier. Should not generate a barrier.";
+ }
current_block_->AddInstruction(new (arena_) HMemoryBarrier(kStoreStore));
}
current_block_->AddInstruction(new (arena_) HReturnVoid());
@@ -587,7 +603,7 @@
const char* descriptor = dex_file_->StringDataByIdx(proto_id.shorty_idx_);
Primitive::Type return_type = Primitive::GetType(descriptor[0]);
bool is_instance_call = invoke_type != kStatic;
- const size_t number_of_arguments = strlen(descriptor) - (is_instance_call ? 0 : 1);
+ size_t number_of_arguments = strlen(descriptor) - (is_instance_call ? 0 : 1);
MethodReference target_method(dex_file_, method_idx);
uintptr_t direct_code;
@@ -605,7 +621,25 @@
}
DCHECK(optimized_invoke_type != kSuper);
+ // By default, consider that the called method implicitly requires
+ // an initialization check of its declaring class.
+ HInvokeStaticOrDirect::ClinitCheckRequirement clinit_check_requirement =
+ HInvokeStaticOrDirect::ClinitCheckRequirement::kImplicit;
+ // Potential class initialization check, in the case of a static method call.
+ HClinitCheck* clinit_check = nullptr;
+ // Replace calls to String.<init> with StringFactory.
+ int32_t string_init_offset = 0;
+ bool is_string_init = compiler_driver_->IsStringInit(method_idx, dex_file_, &string_init_offset);
+ if (is_string_init) {
+ return_type = Primitive::kPrimNot;
+ is_instance_call = false;
+ number_of_arguments--;
+ invoke_type = kStatic;
+ optimized_invoke_type = kStatic;
+ }
+
HInvoke* invoke = nullptr;
+
if (optimized_invoke_type == kVirtual) {
invoke = new (arena_) HInvokeVirtual(
arena_, number_of_arguments, return_type, dex_pc, method_idx, table_index);
@@ -620,9 +654,76 @@
bool is_recursive =
(target_method.dex_method_index == dex_compilation_unit_->GetDexMethodIndex());
DCHECK(!is_recursive || (target_method.dex_file == dex_compilation_unit_->GetDexFile()));
+
+ if (optimized_invoke_type == kStatic) {
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<4> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(
+ dex_compilation_unit_->GetClassLinker()->FindDexCache(
+ *dex_compilation_unit_->GetDexFile())));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
+ soa.Decode<mirror::ClassLoader*>(dex_compilation_unit_->GetClassLoader())));
+ mirror::ArtMethod* resolved_method = compiler_driver_->ResolveMethod(
+ soa, dex_cache, class_loader, dex_compilation_unit_, method_idx,
+ optimized_invoke_type);
+
+ if (resolved_method == nullptr) {
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledUnresolvedMethod);
+ return false;
+ }
+
+ const DexFile& outer_dex_file = *outer_compilation_unit_->GetDexFile();
+ Handle<mirror::DexCache> outer_dex_cache(hs.NewHandle(
+ outer_compilation_unit_->GetClassLinker()->FindDexCache(outer_dex_file)));
+ Handle<mirror::Class> referrer_class(hs.NewHandle(GetOutermostCompilingClass()));
+
+ // The index at which the method's class is stored in the DexCache's type array.
+ uint32_t storage_index = DexFile::kDexNoIndex;
+ bool is_referrer_class = (resolved_method->GetDeclaringClass() == referrer_class.Get());
+ if (is_referrer_class) {
+ storage_index = referrer_class->GetDexTypeIndex();
+ } else if (outer_dex_cache.Get() == dex_cache.Get()) {
+ // Get `storage_index` from IsClassOfStaticMethodAvailableToReferrer.
+ compiler_driver_->IsClassOfStaticMethodAvailableToReferrer(outer_dex_cache.Get(),
+ referrer_class.Get(),
+ resolved_method,
+ method_idx,
+ &storage_index);
+ }
+
+ if (referrer_class.Get()->IsSubClass(resolved_method->GetDeclaringClass())) {
+ // If the referrer class is the declaring class or a subclass
+ // of the declaring class, no class initialization is needed
+ // before the static method call.
+ clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kNone;
+ } else if (storage_index != DexFile::kDexNoIndex) {
+ // If the method's class type index is available, check
+ // whether we should add an explicit class initialization
+ // check for its declaring class before the static method call.
+
+ // TODO: find out why this check is needed.
+ bool is_in_dex_cache = compiler_driver_->CanAssumeTypeIsPresentInDexCache(
+ *outer_compilation_unit_->GetDexFile(), storage_index);
+ bool is_initialized =
+ resolved_method->GetDeclaringClass()->IsInitialized() && is_in_dex_cache;
+
+ if (is_initialized) {
+ clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kNone;
+ } else {
+ clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit;
+ HLoadClass* load_class =
+ new (arena_) HLoadClass(storage_index, is_referrer_class, dex_pc);
+ current_block_->AddInstruction(load_class);
+ clinit_check = new (arena_) HClinitCheck(load_class, dex_pc);
+ current_block_->AddInstruction(clinit_check);
+ }
+ }
+ }
+
invoke = new (arena_) HInvokeStaticOrDirect(
arena_, number_of_arguments, return_type, dex_pc, target_method.dex_method_index,
- is_recursive, invoke_type, optimized_invoke_type);
+ is_recursive, string_init_offset, invoke_type, optimized_invoke_type,
+ clinit_check_requirement);
}
size_t start_index = 0;
@@ -638,6 +739,9 @@
uint32_t descriptor_index = 1;
uint32_t argument_index = start_index;
+ if (is_string_init) {
+ start_index = 1;
+ }
for (size_t i = start_index; i < number_of_vreg_arguments; i++, argument_index++) {
Primitive::Type type = Primitive::GetType(descriptor[descriptor_index++]);
bool is_wide = (type == Primitive::kPrimLong) || (type == Primitive::kPrimDouble);
@@ -654,10 +758,38 @@
i++;
}
}
-
DCHECK_EQ(argument_index, number_of_arguments);
+
+ if (clinit_check_requirement == HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit) {
+ // Add the class initialization check as last input of `invoke`.
+ DCHECK(clinit_check != nullptr);
+ invoke->SetArgumentAt(argument_index, clinit_check);
+ }
+
current_block_->AddInstruction(invoke);
latest_result_ = invoke;
+
+ // Add move-result for StringFactory method.
+ if (is_string_init) {
+ uint32_t orig_this_reg = is_range ? register_index : args[0];
+ const VerifiedMethod* verified_method =
+ compiler_driver_->GetVerifiedMethod(dex_file_, dex_compilation_unit_->GetDexMethodIndex());
+ if (verified_method == nullptr) {
+ LOG(WARNING) << "No verified method for method calling String.<init>: "
+ << PrettyMethod(dex_compilation_unit_->GetDexMethodIndex(), *dex_file_);
+ return false;
+ }
+ const SafeMap<uint32_t, std::set<uint32_t>>& string_init_map =
+ verified_method->GetStringInitPcRegMap();
+ auto map_it = string_init_map.find(dex_pc);
+ if (map_it != string_init_map.end()) {
+ std::set<uint32_t> reg_set = map_it->second;
+ for (auto set_it = reg_set.begin(); set_it != reg_set.end(); ++set_it) {
+ UpdateLocal(*set_it, invoke);
+ }
+ }
+ UpdateLocal(orig_this_reg, invoke);
+ }
return true;
}
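A toy model of the alias fix-up above, with hypothetical types: once the
factory call produces the string, the original receiver register and every
register the verifier recorded as aliasing it at this dex pc must be
redirected to the result.

#include <cstdint>
#include <map>
#include <set>

void FixUpStringInitAliases(std::map<uint32_t, int>* vregs,  // vreg -> SSA value id
                            const std::set<uint32_t>& aliases_at_this_pc,
                            uint32_t receiver_vreg,
                            int factory_result) {
  for (uint32_t vreg : aliases_at_this_pc) {
    (*vregs)[vreg] = factory_result;
  }
  (*vregs)[receiver_vreg] = factory_result;
}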
@@ -732,7 +864,6 @@
return compiling_class.Get() == cls.Get();
}
-
bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
uint32_t dex_pc,
bool is_put) {
@@ -764,7 +895,7 @@
if (is_referrer_class) {
storage_index = referrer_class->GetDexTypeIndex();
} else if (outer_dex_cache.Get() != dex_cache.Get()) {
- // The compiler driver cannot currently understand multple dex caches involved. Just bailout.
+ // The compiler driver cannot currently understand multiple dex caches involved. Just bailout.
return false;
} else {
std::pair<bool, bool> pair = compiler_driver_->IsFastStaticField(
@@ -882,7 +1013,7 @@
current_block_->AddInstruction(new (arena_) HArrayGet(object, index, anticipated_type));
UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction());
}
- graph_->SetHasArrayAccesses(true);
+ graph_->SetHasBoundsChecks(true);
}
void HGraphBuilder::BuildFilledNewArray(uint32_t dex_pc,
@@ -984,6 +1115,7 @@
default:
LOG(FATAL) << "Unknown element width for " << payload->element_width;
}
+ graph_->SetHasBoundsChecks(true);
}
void HGraphBuilder::BuildFillWideArrayData(HInstruction* object,
@@ -1834,12 +1966,19 @@
case Instruction::NEW_INSTANCE: {
uint16_t type_index = instruction.VRegB_21c();
- QuickEntrypointEnum entrypoint = NeedsAccessCheck(type_index)
- ? kQuickAllocObjectWithAccessCheck
- : kQuickAllocObject;
+ if (compiler_driver_->IsStringTypeIndex(type_index, dex_file_)) {
+ // Turn new-instance of string into a const 0.
+ int32_t register_index = instruction.VRegA();
+ HNullConstant* constant = graph_->GetNullConstant();
+ UpdateLocal(register_index, constant);
+ } else {
+ QuickEntrypointEnum entrypoint = NeedsAccessCheck(type_index)
+ ? kQuickAllocObjectWithAccessCheck
+ : kQuickAllocObject;
- current_block_->AddInstruction(new (arena_) HNewInstance(dex_pc, type_index, entrypoint));
- UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
+ current_block_->AddInstruction(new (arena_) HNewInstance(dex_pc, type_index, entrypoint));
+ UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
+ }
break;
}
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index dc6d97e..36503ce 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -88,7 +88,10 @@
// the newly created blocks.
// As a side effect, also compute the number of dex instructions, blocks, and
// branches.
- void ComputeBranchTargets(const uint16_t* start,
+ // Returns true if all the branches fall inside the method code, false otherwise.
+ // (In normal cases this should always return true, but someone can artificially
+ // create a code unit in which branches fall through out of it.)
+ bool ComputeBranchTargets(const uint16_t* start,
const uint16_t* end,
size_t* number_of_branches);
void MaybeUpdateCurrentBlock(size_t index);
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 8ab759d..d71266d 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -100,11 +100,11 @@
for (size_t i = 0; i < instruction->EnvironmentSize(); ++i) {
if (environment->GetInstructionAt(i) != nullptr) {
Primitive::Type type = environment->GetInstructionAt(i)->GetType();
- DCHECK(CheckType(type, locations->GetEnvironmentAt(i)))
- << type << " " << locations->GetEnvironmentAt(i);
+ DCHECK(CheckType(type, environment->GetLocationAt(i)))
+ << type << " " << environment->GetLocationAt(i);
} else {
- DCHECK(locations->GetEnvironmentAt(i).IsInvalid())
- << locations->GetEnvironmentAt(i);
+ DCHECK(environment->GetLocationAt(i).IsInvalid())
+ << environment->GetLocationAt(i);
}
}
return true;
@@ -153,6 +153,7 @@
}
void CodeGenerator::CompileInternal(CodeAllocator* allocator, bool is_baseline) {
+ is_baseline_ = is_baseline;
HGraphVisitor* instruction_visitor = GetInstructionVisitor();
DCHECK_EQ(current_block_index_, 0u);
GenerateFrameEntry();
@@ -612,7 +613,7 @@
}
void CodeGenerator::BuildStackMaps(std::vector<uint8_t>* data) {
- uint32_t size = stack_map_stream_.ComputeNeededSize();
+ uint32_t size = stack_map_stream_.PrepareForFillIn();
data->resize(size);
MemoryRegion region(data->data(), size);
stack_map_stream_.FillIn(region);
@@ -644,22 +645,34 @@
}
}
+ uint32_t outer_dex_pc = dex_pc;
+ uint32_t outer_environment_size = 0;
+ uint32_t inlining_depth = 0;
+ if (instruction != nullptr) {
+ for (HEnvironment* environment = instruction->GetEnvironment();
+ environment != nullptr;
+ environment = environment->GetParent()) {
+ outer_dex_pc = environment->GetDexPc();
+ outer_environment_size = environment->Size();
+ if (environment != instruction->GetEnvironment()) {
+ inlining_depth++;
+ }
+ }
+ }
+
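A minimal model of this walk (hypothetical types): the innermost environment
belongs to the instruction itself, each parent is one inlined caller frame,
and the outermost environment supplies the dex pc recorded in the table:

#include <cstdint>

struct Env { Env* parent; uint32_t dex_pc; };

uint32_t OuterDexPc(const Env* innermost, uint32_t* inlining_depth) {
  uint32_t depth = 0;
  uint32_t outer_dex_pc = innermost->dex_pc;
  for (const Env* e = innermost; e != nullptr; e = e->parent) {
    outer_dex_pc = e->dex_pc;     // ends at the outermost frame
    if (e != innermost) depth++;  // only parents count as inlined frames
  }
  *inlining_depth = depth;
  return outer_dex_pc;
}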
// Collect PC infos for the mapping table.
struct PcInfo pc_info;
- pc_info.dex_pc = dex_pc;
+ pc_info.dex_pc = outer_dex_pc;
pc_info.native_pc = GetAssembler()->CodeSize();
pc_infos_.Add(pc_info);
- uint32_t inlining_depth = 0;
-
if (instruction == nullptr) {
// For stack overflow checks.
- stack_map_stream_.AddStackMapEntry(dex_pc, pc_info.native_pc, 0, 0, 0, inlining_depth);
+ stack_map_stream_.BeginStackMapEntry(pc_info.dex_pc, pc_info.native_pc, 0, 0, 0, 0);
+ stack_map_stream_.EndStackMapEntry();
return;
}
LocationSummary* locations = instruction->GetLocations();
- HEnvironment* environment = instruction->GetEnvironment();
- size_t environment_size = instruction->EnvironmentSize();
uint32_t register_mask = locations->GetRegisterMask();
if (locations->OnlyCallsOnSlowPath()) {
@@ -672,63 +685,82 @@
}
// The register mask must be a subset of callee-save registers.
DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);
- stack_map_stream_.AddStackMapEntry(dex_pc,
- pc_info.native_pc,
- register_mask,
- locations->GetStackMask(),
- environment_size,
- inlining_depth);
+ stack_map_stream_.BeginStackMapEntry(pc_info.dex_pc,
+ pc_info.native_pc,
+ register_mask,
+ locations->GetStackMask(),
+ outer_environment_size,
+ inlining_depth);
+
+ EmitEnvironment(instruction->GetEnvironment(), slow_path);
+ stack_map_stream_.EndStackMapEntry();
+}
+
+void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slow_path) {
+ if (environment == nullptr) return;
+
+ if (environment->GetParent() != nullptr) {
+ // We emit the parent environment first.
+ EmitEnvironment(environment->GetParent(), slow_path);
+ stack_map_stream_.BeginInlineInfoEntry(environment->GetMethodIdx(),
+ environment->GetDexPc(),
+ environment->GetInvokeType(),
+ environment->Size());
+ }
// Walk over the environment, and record the location of dex registers.
- for (size_t i = 0; i < environment_size; ++i) {
+ for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
HInstruction* current = environment->GetInstructionAt(i);
if (current == nullptr) {
- stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kNone, 0);
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
continue;
}
- Location location = locations->GetEnvironmentAt(i);
+ Location location = environment->GetLocationAt(i);
switch (location.GetKind()) {
case Location::kConstant: {
DCHECK_EQ(current, location.GetConstant());
if (current->IsLongConstant()) {
int64_t value = current->AsLongConstant()->GetValue();
stack_map_stream_.AddDexRegisterEntry(
- i, DexRegisterLocation::Kind::kConstant, Low32Bits(value));
+ DexRegisterLocation::Kind::kConstant, Low32Bits(value));
stack_map_stream_.AddDexRegisterEntry(
- ++i, DexRegisterLocation::Kind::kConstant, High32Bits(value));
+ DexRegisterLocation::Kind::kConstant, High32Bits(value));
+ ++i;
DCHECK_LT(i, environment_size);
} else if (current->IsDoubleConstant()) {
int64_t value = bit_cast<int64_t, double>(current->AsDoubleConstant()->GetValue());
stack_map_stream_.AddDexRegisterEntry(
- i, DexRegisterLocation::Kind::kConstant, Low32Bits(value));
+ DexRegisterLocation::Kind::kConstant, Low32Bits(value));
stack_map_stream_.AddDexRegisterEntry(
- ++i, DexRegisterLocation::Kind::kConstant, High32Bits(value));
+ DexRegisterLocation::Kind::kConstant, High32Bits(value));
+ ++i;
DCHECK_LT(i, environment_size);
} else if (current->IsIntConstant()) {
int32_t value = current->AsIntConstant()->GetValue();
- stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kConstant, value);
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
} else if (current->IsNullConstant()) {
- stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kConstant, 0);
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, 0);
} else {
DCHECK(current->IsFloatConstant()) << current->DebugName();
int32_t value = bit_cast<int32_t, float>(current->AsFloatConstant()->GetValue());
- stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kConstant, value);
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
}
break;
}
case Location::kStackSlot: {
stack_map_stream_.AddDexRegisterEntry(
- i, DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
+ DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
break;
}
case Location::kDoubleStackSlot: {
stack_map_stream_.AddDexRegisterEntry(
- i, DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
+ DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
stack_map_stream_.AddDexRegisterEntry(
- ++i, DexRegisterLocation::Kind::kInStack, location.GetHighStackIndex(kVRegSize));
+ DexRegisterLocation::Kind::kInStack, location.GetHighStackIndex(kVRegSize));
+ ++i;
DCHECK_LT(i, environment_size);
break;
}
@@ -737,16 +769,18 @@
int id = location.reg();
if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(id)) {
uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(id);
- stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kInStack, offset);
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
if (current->GetType() == Primitive::kPrimLong) {
stack_map_stream_.AddDexRegisterEntry(
- ++i, DexRegisterLocation::Kind::kInStack, offset + kVRegSize);
+ DexRegisterLocation::Kind::kInStack, offset + kVRegSize);
+ ++i;
DCHECK_LT(i, environment_size);
}
} else {
- stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kInRegister, id);
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, id);
if (current->GetType() == Primitive::kPrimLong) {
- stack_map_stream_.AddDexRegisterEntry(++i, DexRegisterLocation::Kind::kInRegister, id);
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, id);
+ ++i;
DCHECK_LT(i, environment_size);
}
}
@@ -757,17 +791,18 @@
int id = location.reg();
if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(id)) {
uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(id);
- stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kInStack, offset);
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
if (current->GetType() == Primitive::kPrimDouble) {
stack_map_stream_.AddDexRegisterEntry(
- ++i, DexRegisterLocation::Kind::kInStack, offset + kVRegSize);
+ DexRegisterLocation::Kind::kInStack, offset + kVRegSize);
+ ++i;
DCHECK_LT(i, environment_size);
}
} else {
- stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kInFpuRegister, id);
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, id);
if (current->GetType() == Primitive::kPrimDouble) {
- stack_map_stream_.AddDexRegisterEntry(
- ++i, DexRegisterLocation::Kind::kInFpuRegister, id);
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, id);
+ ++i;
DCHECK_LT(i, environment_size);
}
}
@@ -779,16 +814,17 @@
int high = location.high();
if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(low)) {
uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(low);
- stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kInStack, offset);
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
} else {
- stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kInFpuRegister, low);
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, low);
}
if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(high)) {
uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(high);
- stack_map_stream_.AddDexRegisterEntry(++i, DexRegisterLocation::Kind::kInStack, offset);
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
+ ++i;
} else {
- stack_map_stream_.AddDexRegisterEntry(
- ++i, DexRegisterLocation::Kind::kInFpuRegister, high);
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, high);
+ ++i;
}
DCHECK_LT(i, environment_size);
break;
@@ -799,23 +835,23 @@
int high = location.high();
if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(low)) {
uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(low);
- stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kInStack, offset);
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
} else {
- stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kInRegister, low);
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, low);
}
if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(high)) {
uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(high);
- stack_map_stream_.AddDexRegisterEntry(++i, DexRegisterLocation::Kind::kInStack, offset);
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
} else {
- stack_map_stream_.AddDexRegisterEntry(
- ++i, DexRegisterLocation::Kind::kInRegister, high);
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, high);
}
+ ++i;
DCHECK_LT(i, environment_size);
break;
}
case Location::kInvalid: {
- stack_map_stream_.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kNone, 0);
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
break;
}
@@ -823,11 +859,17 @@
LOG(FATAL) << "Unexpected kind " << location.GetKind();
}
}
+
+ if (environment->GetParent() != nullptr) {
+ stack_map_stream_.EndInlineInfoEntry();
+ }
}
bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) {
HInstruction* first_next_not_move = null_check->GetNextDisregardingMoves();
- return (first_next_not_move != nullptr) && first_next_not_move->CanDoImplicitNullCheck();
+
+ return (first_next_not_move != nullptr)
+ && first_next_not_move->CanDoImplicitNullCheckOn(null_check->InputAt(0));
}
void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
@@ -842,7 +884,7 @@
return;
}
- if (!instr->CanDoImplicitNullCheck()) {
+ if (!instr->CanDoImplicitNullCheckOn(instr->InputAt(0))) {
return;
}
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index e536b2d..740beab 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -34,10 +34,15 @@
// Binary encoding of 2^31 for type double.
static int64_t constexpr k2Pow31EncodingForDouble = INT64_C(0x41E0000000000000);
+// Minimum value for a primitive integer.
+static int32_t constexpr kPrimIntMin = 0x80000000;
+// Minimum value for a primitive long.
+static int64_t constexpr kPrimLongMin = INT64_C(0x8000000000000000);
+
// Maximum value for a primitive integer.
static int32_t constexpr kPrimIntMax = 0x7fffffff;
// Maximum value for a primitive long.
-static int64_t constexpr kPrimLongMax = 0x7fffffffffffffff;
+static int64_t constexpr kPrimLongMax = INT64_C(0x7fffffffffffffff);
class Assembler;
class CodeGenerator;
@@ -77,8 +82,8 @@
virtual void EmitNativeCode(CodeGenerator* codegen) = 0;
- void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);
- void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);
+ virtual void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);
+ virtual void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);
void RecordPcInfo(CodeGenerator* codegen, HInstruction* instruction, uint32_t dex_pc);
bool IsCoreRegisterSaved(int reg) const {
@@ -97,14 +102,35 @@
return saved_fpu_stack_offsets_[reg];
}
- private:
+ protected:
static constexpr size_t kMaximumNumberOfExpectedRegisters = 32;
static constexpr uint32_t kRegisterNotSaved = -1;
uint32_t saved_core_stack_offsets_[kMaximumNumberOfExpectedRegisters];
uint32_t saved_fpu_stack_offsets_[kMaximumNumberOfExpectedRegisters];
+
+ private:
DISALLOW_COPY_AND_ASSIGN(SlowPathCode);
};
+class InvokeDexCallingConventionVisitor {
+ public:
+ virtual Location GetNextLocation(Primitive::Type type) = 0;
+
+ protected:
+ InvokeDexCallingConventionVisitor() {}
+ virtual ~InvokeDexCallingConventionVisitor() {}
+
+ // The current index for core registers.
+ uint32_t gp_index_ = 0u;
+ // The current index for floating-point registers.
+ uint32_t float_index_ = 0u;
+ // The current stack index.
+ uint32_t stack_index_ = 0u;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitor);
+};
+
class CodeGenerator {
public:
// Compiles the graph to executable instructions. Returns whether the compilation
@@ -212,6 +238,10 @@
std::vector<uint8_t>* vector, const DexCompilationUnit& dex_compilation_unit) const;
void BuildStackMaps(std::vector<uint8_t>* vector);
+ bool IsBaseline() const {
+ return is_baseline_;
+ }
+
bool IsLeafMethod() const {
return is_leaf_;
}
@@ -304,6 +334,7 @@
return GetFpuSpillSize() + GetCoreSpillSize();
}
+ virtual ParallelMoveResolver* GetMoveResolver() = 0;
protected:
CodeGenerator(HGraph* graph,
@@ -325,6 +356,7 @@
number_of_register_pairs_(number_of_register_pairs),
core_callee_save_mask_(core_callee_save_mask),
fpu_callee_save_mask_(fpu_callee_save_mask),
+ is_baseline_(false),
graph_(graph),
compiler_options_(compiler_options),
pc_infos_(graph->GetArena(), 32),
@@ -346,7 +378,6 @@
virtual Location GetStackLocation(HLoadLocal* load) const = 0;
- virtual ParallelMoveResolver* GetMoveResolver() = 0;
virtual HGraphVisitor* GetLocationBuilder() = 0;
virtual HGraphVisitor* GetInstructionVisitor() = 0;
@@ -404,11 +435,15 @@
const uint32_t core_callee_save_mask_;
const uint32_t fpu_callee_save_mask_;
+ // Whether we are using baseline.
+ bool is_baseline_;
+
private:
void InitLocationsBaseline(HInstruction* instruction);
size_t GetStackOffsetOfSavedRegister(size_t index);
void CompileInternal(CodeAllocator* allocator, bool is_baseline);
void BlockIfInRegister(Location location, bool is_out = false) const;
+ void EmitEnvironment(HEnvironment* environment, SlowPathCode* slow_path);
HGraph* const graph_;
const CompilerOptions& compiler_options_;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 38fa043..1c76630 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -17,6 +17,7 @@
#include "code_generator_arm.h"
#include "arch/arm/instruction_set_features_arm.h"
+#include "code_generator_utils.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "gc/accounting/card_table.h"
#include "intrinsics.h"
@@ -112,6 +113,10 @@
return &return_label_;
}
+ HBasicBlock* GetSuccessor() const {
+ return successor_;
+ }
+
private:
HSuspendCheck* const instruction_;
// If not null, the block to branch to after the suspend check.
@@ -176,7 +181,6 @@
InvokeRuntimeCallingConvention calling_convention;
__ LoadImmediate(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
- arm_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
int32_t entry_point_offset = do_clinit_
? QUICK_ENTRY_POINT(pInitializeStaticStorage)
: QUICK_ENTRY_POINT(pInitializeType);
@@ -222,7 +226,6 @@
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- arm_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
__ LoadImmediate(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
arm_codegen->InvokeRuntime(
QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this);
@@ -345,11 +348,11 @@
}
void CodeGeneratorARM::DumpCoreRegister(std::ostream& stream, int reg) const {
- stream << ArmManagedRegister::FromCoreRegister(Register(reg));
+ stream << Register(reg);
}
void CodeGeneratorARM::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
- stream << ArmManagedRegister::FromSRegister(SRegister(reg));
+ stream << SRegister(reg);
}
size_t CodeGeneratorARM::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
@@ -388,7 +391,7 @@
location_builder_(graph, this),
instruction_visitor_(graph, this),
move_resolver_(graph->GetArena(), this),
- assembler_(true),
+ assembler_(false /* can_relocate_branches */),
isa_features_(isa_features) {
// Save the PC register to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(PC));
@@ -607,7 +610,7 @@
UNREACHABLE();
}
-Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type) {
+Location InvokeDexCallingConventionVisitorARM::GetNextLocation(Primitive::Type type) {
switch (type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte:
@@ -682,7 +685,7 @@
return Location();
}
-Location InvokeDexCallingConventionVisitor::GetReturnLocation(Primitive::Type type) {
+Location InvokeDexCallingConventionVisitorARM::GetReturnLocation(Primitive::Type type) {
switch (type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte:
@@ -1243,6 +1246,10 @@
}
void LocationsBuilderARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
+ // When we do not run baseline, explicit clinit checks triggered by static
+ // invokes must have been pruned by art::PrepareForRegisterAllocation.
+ DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+
IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
codegen_->GetInstructionSetFeatures());
if (intrinsic.TryDispatch(invoke)) {
@@ -1267,6 +1274,10 @@
}
void InstructionCodeGeneratorARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
+ // When we do not run baseline, explicit clinit checks triggered by static
+ // invokes must have been pruned by art::PrepareForRegisterAllocation.
+ DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+
if (TryGenerateIntrinsicCode(invoke, codegen_)) {
return;
}
@@ -1282,8 +1293,8 @@
new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
locations->AddTemp(Location::RegisterLocation(R0));
- InvokeDexCallingConventionVisitor calling_convention_visitor;
- for (size_t i = 0; i < invoke->InputCount(); i++) {
+ InvokeDexCallingConventionVisitorARM calling_convention_visitor;
+ for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
HInstruction* input = invoke->InputAt(i);
locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType()));
}
@@ -2175,11 +2186,134 @@
}
}
+void InstructionCodeGeneratorARM::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
+ DCHECK(instruction->IsDiv() || instruction->IsRem());
+ DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location second = locations->InAt(1);
+ DCHECK(second.IsConstant());
+
+ Register out = locations->Out().AsRegister<Register>();
+ Register dividend = locations->InAt(0).AsRegister<Register>();
+ int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
+ DCHECK(imm == 1 || imm == -1);
+
+ if (instruction->IsRem()) {
+ __ LoadImmediate(out, 0);
+ } else {
+ if (imm == 1) {
+ __ Mov(out, dividend);
+ } else {
+ __ rsb(out, dividend, ShifterOperand(0));
+ }
+ }
+}
+
+void InstructionCodeGeneratorARM::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
+ DCHECK(instruction->IsDiv() || instruction->IsRem());
+ DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location second = locations->InAt(1);
+ DCHECK(second.IsConstant());
+
+ Register out = locations->Out().AsRegister<Register>();
+ Register dividend = locations->InAt(0).AsRegister<Register>();
+ Register temp = locations->GetTemp(0).AsRegister<Register>();
+ int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
+ int32_t abs_imm = std::abs(imm);
+ DCHECK(IsPowerOfTwo(abs_imm));
+ int ctz_imm = CTZ(abs_imm);
+
+ if (ctz_imm == 1) {
+ __ Lsr(temp, dividend, 32 - ctz_imm);
+ } else {
+ __ Asr(temp, dividend, 31);
+ __ Lsr(temp, temp, 32 - ctz_imm);
+ }
+ __ add(out, temp, ShifterOperand(dividend));
+
+ if (instruction->IsDiv()) {
+ __ Asr(out, out, ctz_imm);
+ if (imm < 0) {
+ __ rsb(out, out, ShifterOperand(0));
+ }
+ } else {
+ __ ubfx(out, out, 0, ctz_imm);
+ __ sub(out, out, ShifterOperand(temp));
+ }
+}
+
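The same computation in plain C++, as a sketch (valid for divisors +/-2^k with
1 <= k <= 30): truncating division needs a bias of 2^k - 1 added to negative
dividends before the arithmetic shift, and the remainder is recovered from the
biased low bits. For example, n = -7 and k = 2 give bias = 3, so the quotient
is (-7 + 3) >> 2 = -1 and the remainder is ((-4) & 3) - 3 = -3, matching C's
truncating semantics.

#include <cstdint>

int32_t DivByPowerOfTwo(int32_t n, int k, bool negative_divisor) {
  uint32_t bias = static_cast<uint32_t>(n >> 31) >> (32 - k);  // 2^k - 1 if n < 0, else 0
  int32_t q = (n + static_cast<int32_t>(bias)) >> k;           // arithmetic shift, as Asr
  return negative_divisor ? -q : q;
}

int32_t RemByPowerOfTwo(int32_t n, int k) {  // the result keeps the dividend's sign
  uint32_t bias = static_cast<uint32_t>(n >> 31) >> (32 - k);
  int32_t low = (n + static_cast<int32_t>(bias)) & static_cast<int32_t>((1u << k) - 1);  // ubfx
  return low - static_cast<int32_t>(bias);
}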
+void InstructionCodeGeneratorARM::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
+ DCHECK(instruction->IsDiv() || instruction->IsRem());
+ DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location second = locations->InAt(1);
+ DCHECK(second.IsConstant());
+
+ Register out = locations->Out().AsRegister<Register>();
+ Register dividend = locations->InAt(0).AsRegister<Register>();
+ Register temp1 = locations->GetTemp(0).AsRegister<Register>();
+ Register temp2 = locations->GetTemp(1).AsRegister<Register>();
+ int64_t imm = second.GetConstant()->AsIntConstant()->GetValue();
+
+ int64_t magic;
+ int shift;
+ CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
+
+ __ LoadImmediate(temp1, magic);
+ __ smull(temp2, temp1, dividend, temp1);
+
+ if (imm > 0 && magic < 0) {
+ __ add(temp1, temp1, ShifterOperand(dividend));
+ } else if (imm < 0 && magic > 0) {
+ __ sub(temp1, temp1, ShifterOperand(dividend));
+ }
+
+ if (shift != 0) {
+ __ Asr(temp1, temp1, shift);
+ }
+
+ if (instruction->IsDiv()) {
+ __ sub(out, temp1, ShifterOperand(temp1, ASR, 31));
+ } else {
+ __ sub(temp1, temp1, ShifterOperand(temp1, ASR, 31));
+ // TODO: Strength reduction for mls.
+ __ LoadImmediate(temp2, imm);
+ __ mls(out, temp1, temp2, dividend);
+ }
+}
+
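A worked instance of the multiply-high sequence, as a self-contained sketch;
the magic/shift pair below is the Hacker's Delight pair for divisor 7
(CalculateMagicAndShiftForDivRem computes these in general):

#include <cstdint>
#include <cstdio>

int32_t DivBy7(int32_t n) {
  const int32_t magic = static_cast<int32_t>(0x92492493u);  // negative, like the imm > 0 && magic < 0 case
  const int shift = 2;
  int64_t product = static_cast<int64_t>(magic) * n;  // smull
  int32_t hi = static_cast<int32_t>(product >> 32);   // keep the high 32 bits
  hi += n;                                            // add the dividend back: magic < 0, d > 0
  hi >>= shift;                                       // Asr
  return hi - (hi >> 31);                             // add 1 for negative quotients
}

int main() {
  const int32_t tests[] = {-100, -7, -1, 0, 1, 6, 7, 100};
  for (int32_t n : tests) {
    std::printf("%d / 7 = %d (expected %d)\n", n, DivBy7(n), n / 7);
  }
  return 0;
}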
+void InstructionCodeGeneratorARM::GenerateDivRemConstantIntegral(HBinaryOperation* instruction) {
+ DCHECK(instruction->IsDiv() || instruction->IsRem());
+ DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location second = locations->InAt(1);
+ DCHECK(second.IsConstant());
+
+ int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
+ if (imm == 0) {
+ // Do not generate anything. DivZeroCheck would prevent any code from being executed.
+ } else if (imm == 1 || imm == -1) {
+ DivRemOneOrMinusOne(instruction);
+ } else if (IsPowerOfTwo(std::abs(imm))) {
+ DivRemByPowerOfTwo(instruction);
+ } else {
+ DCHECK(imm <= -2 || imm >= 2);
+ GenerateDivRemWithAnyConstant(instruction);
+ }
+}
+
void LocationsBuilderARM::VisitDiv(HDiv* div) {
LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
if (div->GetResultType() == Primitive::kPrimLong) {
// pLdiv runtime call.
call_kind = LocationSummary::kCall;
+ } else if (div->GetResultType() == Primitive::kPrimInt && div->InputAt(1)->IsConstant()) {
+ // sdiv will be replaced by another instruction sequence.
} else if (div->GetResultType() == Primitive::kPrimInt &&
!codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
// pIdivmod runtime call.
@@ -2190,7 +2324,20 @@
switch (div->GetResultType()) {
case Primitive::kPrimInt: {
- if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
+ if (div->InputAt(1)->IsConstant()) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ int32_t abs_imm = std::abs(div->InputAt(1)->AsIntConstant()->GetValue());
+ if (abs_imm <= 1) {
+ // No temp register required.
+ } else {
+ locations->AddTemp(Location::RequiresRegister());
+ if (!IsPowerOfTwo(abs_imm)) {
+ locations->AddTemp(Location::RequiresRegister());
+ }
+ }
+ } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
@@ -2234,7 +2381,9 @@
switch (div->GetResultType()) {
case Primitive::kPrimInt: {
- if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
+ if (second.IsConstant()) {
+ GenerateDivRemConstantIntegral(div);
+ } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
__ sdiv(out.AsRegister<Register>(),
first.AsRegister<Register>(),
second.AsRegister<Register>());
@@ -2286,8 +2435,11 @@
// Most remainders are implemented in the runtime.
LocationSummary::CallKind call_kind = LocationSummary::kCall;
- if (rem->GetResultType() == Primitive::kPrimInt &&
- codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
+ if (rem->GetResultType() == Primitive::kPrimInt && rem->InputAt(1)->IsConstant()) {
+ // sdiv will be replaced by another instruction sequence.
+ call_kind = LocationSummary::kNoCall;
+ } else if ((rem->GetResultType() == Primitive::kPrimInt)
+ && codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
// Have hardware divide instruction for int, do it with three instructions.
call_kind = LocationSummary::kNoCall;
}
@@ -2296,7 +2448,20 @@
switch (type) {
case Primitive::kPrimInt: {
- if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
+ if (rem->InputAt(1)->IsConstant()) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ int32_t abs_imm = std::abs(rem->InputAt(1)->AsIntConstant()->GetValue());
+ if (abs_imm <= 1) {
+ // No temp register required.
+ } else {
+ locations->AddTemp(Location::RequiresRegister());
+ if (!IsPowerOfTwo(abs_imm)) {
+ locations->AddTemp(Location::RequiresRegister());
+ }
+ }
+ } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
@@ -2353,7 +2518,9 @@
Primitive::Type type = rem->GetResultType();
switch (type) {
case Primitive::kPrimInt: {
- if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
+ if (second.IsConstant()) {
+ GenerateDivRemConstantIntegral(rem);
+ } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
Register reg1 = first.AsRegister<Register>();
Register reg2 = second.AsRegister<Register>();
Register temp = locations->GetTemp(0).AsRegister<Register>();
@@ -2870,7 +3037,8 @@
}
void InstructionCodeGeneratorARM::HandleFieldSet(HInstruction* instruction,
- const FieldInfo& field_info) {
+ const FieldInfo& field_info,
+ bool value_can_be_null) {
DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
LocationSummary* locations = instruction->GetLocations();
@@ -2959,7 +3127,8 @@
if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
Register temp = locations->GetTemp(0).AsRegister<Register>();
Register card = locations->GetTemp(1).AsRegister<Register>();
- codegen_->MarkGCCard(temp, card, base, value.AsRegister<Register>());
+ codegen_->MarkGCCard(
+ temp, card, base, value.AsRegister<Register>(), value_can_be_null);
}
if (is_volatile) {
@@ -3086,7 +3255,7 @@
}
void InstructionCodeGeneratorARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
- HandleFieldSet(instruction, instruction->GetFieldInfo());
+ HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
void LocationsBuilderARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
@@ -3110,7 +3279,7 @@
}
void InstructionCodeGeneratorARM::VisitStaticFieldSet(HStaticFieldSet* instruction) {
- HandleFieldSet(instruction, instruction->GetFieldInfo());
+ HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
void LocationsBuilderARM::VisitNullCheck(HNullCheck* instruction) {
@@ -3380,7 +3549,7 @@
DCHECK_EQ(value_type, Primitive::kPrimNot);
Register temp = locations->GetTemp(0).AsRegister<Register>();
Register card = locations->GetTemp(1).AsRegister<Register>();
- codegen_->MarkGCCard(temp, card, obj, value);
+ codegen_->MarkGCCard(temp, card, obj, value, instruction->GetValueCanBeNull());
}
} else {
DCHECK_EQ(value_type, Primitive::kPrimNot);
@@ -3485,13 +3654,21 @@
__ b(slow_path->GetEntryLabel(), CS);
}
-void CodeGeneratorARM::MarkGCCard(Register temp, Register card, Register object, Register value) {
+void CodeGeneratorARM::MarkGCCard(Register temp,
+ Register card,
+ Register object,
+ Register value,
+ bool can_be_null) {
Label is_null;
- __ CompareAndBranchIfZero(value, &is_null);
+ if (can_be_null) {
+ __ CompareAndBranchIfZero(value, &is_null);
+ }
__ LoadFromOffset(kLoadWord, card, TR, Thread::CardTableOffset<kArmWordSize>().Int32Value());
__ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
__ strb(card, Address(card, temp));
- __ Bind(&is_null);
+ if (can_be_null) {
+ __ Bind(&is_null);
+ }
}
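What the three emitted instructions compute, as a rough C++ model. The
128-byte card size (kCardShift == 7) and the biasing trick, where the low byte
of the biased table base doubles as the "dirty" value so no extra immediate
load is needed, are assumptions about the runtime's card table here:

#include <cstdint>

void MarkGCCardModel(uint8_t* biased_card_table_base, uintptr_t object_address) {
  uintptr_t card_index = object_address >> 7;  // Lsr temp, object, kCardShift
  // strb card, [card, temp]: store the low byte of the base register itself;
  // the table is biased so that this byte equals the card-dirty marker.
  biased_card_table_base[card_index] =
      static_cast<uint8_t>(reinterpret_cast<uintptr_t>(biased_card_table_base));
}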
void LocationsBuilderARM::VisitTemporary(HTemporary* temp) {
@@ -3533,8 +3710,18 @@
void InstructionCodeGeneratorARM::GenerateSuspendCheck(HSuspendCheck* instruction,
HBasicBlock* successor) {
SuspendCheckSlowPathARM* slow_path =
- new (GetGraph()->GetArena()) SuspendCheckSlowPathARM(instruction, successor);
- codegen_->AddSlowPath(slow_path);
+ down_cast<SuspendCheckSlowPathARM*>(instruction->GetSlowPath());
+ if (slow_path == nullptr) {
+ slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathARM(instruction, successor);
+ instruction->SetSlowPath(slow_path);
+ codegen_->AddSlowPath(slow_path);
+ if (successor != nullptr) {
+ DCHECK(successor->IsLoopHeader());
+ codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
+ }
+ } else {
+ DCHECK_EQ(slow_path->GetSuccessor(), successor);
+ }
__ LoadFromOffset(
kLoadUnsignedHalfword, IP, TR, Thread::ThreadFlagsOffset<kArmWordSize>().Int32Value());
@@ -3898,9 +4085,11 @@
SlowPathCodeARM* slow_path = nullptr;
// Return 0 if `obj` is null.
- // TODO: avoid this check if we know obj is not null.
- __ cmp(obj, ShifterOperand(0));
- __ b(&zero, EQ);
+ // Avoid the null check if we know `obj` is not null.
+ if (instruction->MustDoNullCheck()) {
+ __ cmp(obj, ShifterOperand(0));
+ __ b(&zero, EQ);
+ }
// Compare the class of `obj` with `cls`.
__ LoadFromOffset(kLoadWord, out, obj, class_offset);
__ cmp(out, ShifterOperand(cls));
@@ -3919,8 +4108,12 @@
__ LoadImmediate(out, 1);
__ b(&done);
}
- __ Bind(&zero);
- __ LoadImmediate(out, 0);
+
+ if (instruction->MustDoNullCheck() || instruction->IsClassFinal()) {
+ __ Bind(&zero);
+ __ LoadImmediate(out, 0);
+ }
+
if (slow_path != nullptr) {
__ Bind(slow_path->GetExitLabel());
}
@@ -3946,9 +4139,11 @@
instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
codegen_->AddSlowPath(slow_path);
- // TODO: avoid this check if we know obj is not null.
- __ cmp(obj, ShifterOperand(0));
- __ b(slow_path->GetExitLabel(), EQ);
+ // Avoid the null check if we know `obj` is not null.
+ if (instruction->MustDoNullCheck()) {
+ __ cmp(obj, ShifterOperand(0));
+ __ b(slow_path->GetExitLabel(), EQ);
+ }
// Compare the class of `obj` with `cls`.
__ LoadFromOffset(kLoadWord, temp, obj, class_offset);
__ cmp(temp, ShifterOperand(cls));
@@ -4053,15 +4248,9 @@
//
// Currently we implement the app -> app logic, which looks up in the resolve cache.
- // temp = method;
- LoadCurrentMethod(temp);
- if (!invoke->IsRecursive()) {
- // temp = temp->dex_cache_resolved_methods_;
- __ LoadFromOffset(
- kLoadWord, temp, temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
- // temp = temp[index_in_cache]
- __ LoadFromOffset(
- kLoadWord, temp, temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex()));
+ if (invoke->IsStringInit()) {
+ // temp = thread->string_init_entrypoint
+ __ LoadFromOffset(kLoadWord, temp, TR, invoke->GetStringInitOffset());
// LR = temp[offset_of_quick_compiled_code]
__ LoadFromOffset(kLoadWord, LR, temp,
mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
@@ -4069,7 +4258,24 @@
// LR()
__ blx(LR);
} else {
- __ bl(GetFrameEntryLabel());
+ // temp = method;
+ LoadCurrentMethod(temp);
+ if (!invoke->IsRecursive()) {
+ // temp = temp->dex_cache_resolved_methods_;
+ __ LoadFromOffset(
+ kLoadWord, temp, temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
+ // temp = temp[index_in_cache]
+ __ LoadFromOffset(
+ kLoadWord, temp, temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex()));
+ // LR = temp[offset_of_quick_compiled_code]
+ __ LoadFromOffset(kLoadWord, LR, temp,
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArmWordSize).Int32Value());
+ // LR()
+ __ blx(LR);
+ } else {
+ __ bl(GetFrameEntryLabel());
+ }
}
DCHECK(!IsLeafMethod());
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 6009036..071bbee 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -78,22 +78,19 @@
DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
};
-class InvokeDexCallingConventionVisitor {
+class InvokeDexCallingConventionVisitorARM : public InvokeDexCallingConventionVisitor {
public:
- InvokeDexCallingConventionVisitor()
- : gp_index_(0), float_index_(0), double_index_(0), stack_index_(0) {}
+ InvokeDexCallingConventionVisitorARM() {}
+ virtual ~InvokeDexCallingConventionVisitorARM() {}
- Location GetNextLocation(Primitive::Type type);
+ Location GetNextLocation(Primitive::Type type) OVERRIDE;
Location GetReturnLocation(Primitive::Type type);
private:
InvokeDexCallingConvention calling_convention;
- uint32_t gp_index_;
- uint32_t float_index_;
- uint32_t double_index_;
- uint32_t stack_index_;
+ uint32_t double_index_ = 0;
- DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitor);
+ DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorARM);
};
class ParallelMoveResolverARM : public ParallelMoveResolverWithSwap {
@@ -151,7 +148,7 @@
void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
CodeGeneratorARM* const codegen_;
- InvokeDexCallingConventionVisitor parameter_visitor_;
+ InvokeDexCallingConventionVisitorARM parameter_visitor_;
DISALLOW_COPY_AND_ASSIGN(LocationsBuilderARM);
};
@@ -184,7 +181,9 @@
HInstruction* instruction);
void GenerateWideAtomicLoad(Register addr, uint32_t offset,
Register out_lo, Register out_hi);
- void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
+ void HandleFieldSet(HInstruction* instruction,
+ const FieldInfo& field_info,
+ bool value_can_be_null);
void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
void GenerateImplicitNullCheck(HNullCheck* instruction);
void GenerateExplicitNullCheck(HNullCheck* instruction);
@@ -192,6 +191,10 @@
Label* true_target,
Label* false_target,
Label* always_true_target);
+ void DivRemOneOrMinusOne(HBinaryOperation* instruction);
+ void DivRemByPowerOfTwo(HBinaryOperation* instruction);
+ void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
+ void GenerateDivRemConstantIntegral(HBinaryOperation* instruction);
ArmAssembler* const assembler_;
CodeGeneratorARM* const codegen_;
@@ -273,7 +276,7 @@
int32_t offset, HInstruction* instruction, uint32_t dex_pc, SlowPathCode* slow_path);
// Emit a write barrier.
- void MarkGCCard(Register temp, Register card, Register object, Register value);
+ void MarkGCCard(Register temp, Register card, Register object, Register value, bool can_be_null);
Label* GetLabelOf(HBasicBlock* block) const {
return CommonGetLabelOf<Label>(block_labels_.GetRawStorage(), block);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 23ba339..b6d99ab 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -17,6 +17,7 @@
#include "code_generator_arm64.h"
#include "arch/arm64/instruction_set_features_arm64.h"
+#include "code_generator_utils.h"
#include "common_arm64.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
@@ -64,6 +65,7 @@
using helpers::WRegisterFrom;
using helpers::XRegisterFrom;
using helpers::ARM64EncodableConstantOrRegister;
+using helpers::ArtVixlRegCodeCoherentForRegSet;
static constexpr size_t kHeapRefSize = sizeof(mirror::HeapReference<mirror::Object>);
static constexpr int kCurrentMethodStackOffset = 0;
@@ -105,6 +107,88 @@
#define __ down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler()->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, x).Int32Value()
+// Calculate the memory operand used to save/restore live registers.
+static void SaveRestoreLiveRegistersHelper(CodeGenerator* codegen,
+ RegisterSet* register_set,
+ int64_t spill_offset,
+ bool is_save) {
+ DCHECK(ArtVixlRegCodeCoherentForRegSet(register_set->GetCoreRegisters(),
+ codegen->GetNumberOfCoreRegisters(),
+ register_set->GetFloatingPointRegisters(),
+ codegen->GetNumberOfFloatingPointRegisters()));
+
+ CPURegList core_list = CPURegList(CPURegister::kRegister, kXRegSize,
+ register_set->GetCoreRegisters() & (~callee_saved_core_registers.list()));
+ CPURegList fp_list = CPURegList(CPURegister::kFPRegister, kDRegSize,
+ register_set->GetFloatingPointRegisters() & (~callee_saved_fp_registers.list()));
+
+ MacroAssembler* masm = down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler();
+ UseScratchRegisterScope temps(masm);
+
+ Register base = masm->StackPointer();
+ int64_t core_spill_size = core_list.TotalSizeInBytes();
+ int64_t fp_spill_size = fp_list.TotalSizeInBytes();
+ int64_t reg_size = kXRegSizeInBytes;
+ int64_t max_ls_pair_offset = spill_offset + core_spill_size + fp_spill_size - 2 * reg_size;
+ uint32_t ls_access_size = WhichPowerOf2(reg_size);
+ if (((core_list.Count() > 1) || (fp_list.Count() > 1)) &&
+ !masm->IsImmLSPair(max_ls_pair_offset, ls_access_size)) {
+ // If the offset does not fit in the instruction's immediate field, use an alternate register
+ // to compute the base address (the floating point registers' spill base address).
+ Register new_base = temps.AcquireSameSizeAs(base);
+ __ Add(new_base, base, Operand(spill_offset + core_spill_size));
+ base = new_base;
+ spill_offset = -core_spill_size;
+ int64_t new_max_ls_pair_offset = fp_spill_size - 2 * reg_size;
+ DCHECK(masm->IsImmLSPair(spill_offset, ls_access_size));
+ DCHECK(masm->IsImmLSPair(new_max_ls_pair_offset, ls_access_size));
+ }
+
+ if (is_save) {
+ __ StoreCPURegList(core_list, MemOperand(base, spill_offset));
+ __ StoreCPURegList(fp_list, MemOperand(base, spill_offset + core_spill_size));
+ } else {
+ __ LoadCPURegList(core_list, MemOperand(base, spill_offset));
+ __ LoadCPURegList(fp_list, MemOperand(base, spill_offset + core_spill_size));
+ }
+}
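
For context on the re-basing above: AArch64 load/store-pair instructions (LDP/STP) encode only a signed 7-bit immediate scaled by the access size, so a large frame can put the floating point spill area out of reach of a single instruction. A minimal sketch of that reachability check, under the imm7 assumption (illustrative names, not VIXL's actual IsImmLSPair):

#include <cstdint>

// Can `offset` be encoded as an LDP/STP pair offset? Assumes the imm7
// field is scaled by the access size (size_log2 == 3 for X registers).
static bool FitsLoadStorePairImm(int64_t offset, unsigned size_log2) {
  int64_t scaled = offset >> size_log2;             // arithmetic shift
  bool aligned = (scaled << size_log2) == offset;   // must be a multiple
  return aligned && scaled >= -64 && scaled <= 63;  // signed 7-bit range
}
// For 64-bit registers this reaches offsets in [-512, 504]; anything
// beyond forces the Add/new_base path taken above.
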
+
+void SlowPathCodeARM64::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
+ RegisterSet* register_set = locations->GetLiveRegisters();
+ size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
+ for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
+ if (!codegen->IsCoreCalleeSaveRegister(i) && register_set->ContainsCoreRegister(i)) {
+ // If the register holds an object, update the stack mask.
+ if (locations->RegisterContainsObject(i)) {
+ locations->SetStackBit(stack_offset / kVRegSize);
+ }
+ DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
+ DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
+ saved_core_stack_offsets_[i] = stack_offset;
+ stack_offset += kXRegSizeInBytes;
+ }
+ }
+
+ for (size_t i = 0, e = codegen->GetNumberOfFloatingPointRegisters(); i < e; ++i) {
+ if (!codegen->IsFloatingPointCalleeSaveRegister(i) &&
+ register_set->ContainsFloatingPointRegister(i)) {
+ DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
+ DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
+ saved_fpu_stack_offsets_[i] = stack_offset;
+ stack_offset += kDRegSizeInBytes;
+ }
+ }
+
+ SaveRestoreLiveRegistersHelper(codegen, register_set,
+ codegen->GetFirstRegisterSlotInSlowPath(), true /* is_save */);
+}
+
+void SlowPathCodeARM64::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
+ RegisterSet* register_set = locations->GetLiveRegisters();
+ SaveRestoreLiveRegistersHelper(codegen, register_set,
+ codegen->GetFirstRegisterSlotInSlowPath(), false /* is_save */);
+}
+
class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
public:
BoundsCheckSlowPathARM64(HBoundsCheck* instruction,
@@ -173,14 +257,13 @@
InvokeRuntimeCallingConvention calling_convention;
__ Mov(calling_convention.GetRegisterAt(0).W(), cls_->GetTypeIndex());
- arm64_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1).W());
int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
: QUICK_ENTRY_POINT(pInitializeType);
arm64_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);
if (do_clinit_) {
- CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t, mirror::ArtMethod*>();
+ CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
} else {
- CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t, mirror::ArtMethod*>();
+ CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
}
// Move the class to the desired location.
@@ -225,11 +308,10 @@
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- arm64_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1).W());
__ Mov(calling_convention.GetRegisterAt(0).W(), instruction_->GetStringIndex());
arm64_codegen->InvokeRuntime(
QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this);
- CheckEntrypointTypes<kQuickResolveString, void*, uint32_t, mirror::ArtMethod*>();
+ CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
Primitive::Type type = instruction_->GetType();
arm64_codegen->MoveLocation(locations->Out(), calling_convention.GetReturnLocation(type), type);
@@ -287,6 +369,10 @@
return &return_label_;
}
+ HBasicBlock* GetSuccessor() const {
+ return successor_;
+ }
+
private:
HSuspendCheck* const instruction_;
// If not null, the block to branch to after the suspend check.
@@ -374,15 +460,15 @@
#undef __
-Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type) {
+Location InvokeDexCallingConventionVisitorARM64::GetNextLocation(Primitive::Type type) {
Location next_location;
if (type == Primitive::kPrimVoid) {
LOG(FATAL) << "Unreachable type " << type;
}
if (Primitive::IsFloatingPointType(type) &&
- (fp_index_ < calling_convention.GetNumberOfFpuRegisters())) {
- next_location = LocationFrom(calling_convention.GetFpuRegisterAt(fp_index_++));
+ (float_index_ < calling_convention.GetNumberOfFpuRegisters())) {
+ next_location = LocationFrom(calling_convention.GetFpuRegisterAt(float_index_++));
} else if (!Primitive::IsFloatingPointType(type) &&
(gp_index_ < calling_convention.GetNumberOfRegisters())) {
next_location = LocationFrom(calling_convention.GetRegisterAt(gp_index_++));
@@ -527,6 +613,19 @@
GetAssembler()->cfi().DefCFAOffset(GetFrameSize());
}
+vixl::CPURegList CodeGeneratorARM64::GetFramePreservedCoreRegisters() const {
+ DCHECK(ArtVixlRegCodeCoherentForRegSet(core_spill_mask_, GetNumberOfCoreRegisters(), 0, 0));
+ return vixl::CPURegList(vixl::CPURegister::kRegister, vixl::kXRegSize,
+ core_spill_mask_);
+}
+
+vixl::CPURegList CodeGeneratorARM64::GetFramePreservedFPRegisters() const {
+ DCHECK(ArtVixlRegCodeCoherentForRegSet(0, 0, fpu_spill_mask_,
+ GetNumberOfFloatingPointRegisters()));
+ return vixl::CPURegList(vixl::CPURegister::kFPRegister, vixl::kDRegSize,
+ fpu_spill_mask_);
+}
+
void CodeGeneratorARM64::Bind(HBasicBlock* block) {
__ Bind(GetLabelOf(block));
}
@@ -602,16 +701,20 @@
return Location::NoLocation();
}
-void CodeGeneratorARM64::MarkGCCard(Register object, Register value) {
+void CodeGeneratorARM64::MarkGCCard(Register object, Register value, bool value_can_be_null) {
UseScratchRegisterScope temps(GetVIXLAssembler());
Register card = temps.AcquireX();
Register temp = temps.AcquireW(); // Index within the CardTable - 32bit.
vixl::Label done;
- __ Cbz(value, &done);
+ if (value_can_be_null) {
+ __ Cbz(value, &done);
+ }
__ Ldr(card, MemOperand(tr, Thread::CardTableOffset<kArm64WordSize>().Int32Value()));
__ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
__ Strb(card, MemOperand(card, temp.X()));
- __ Bind(&done);
+ if (value_can_be_null) {
+ __ Bind(&done);
+ }
}
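
The Cbz/Ldr/Lsr/Strb sequence is the usual card-table write barrier: it dirties the byte covering `object` so the GC rescans that region. A rough C++ rendering of its effect (the biased-base detail is an assumption about the runtime, not something visible in the code above):

#include <cstdint>

// Mark the card covering `object` as dirty. `card_base` is the value
// loaded from Thread::CardTableOffset; storing its own low byte works
// on the assumption that the runtime biases the base so that its low
// byte equals the "dirty" marker.
void MarkCard(uint8_t* card_base, uintptr_t object, unsigned card_shift) {
  card_base[object >> card_shift] =
      static_cast<uint8_t>(reinterpret_cast<uintptr_t>(card_base));
}
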
void CodeGeneratorARM64::SetupBlockedRegisters(bool is_baseline) const {
@@ -689,11 +792,11 @@
}
void CodeGeneratorARM64::DumpCoreRegister(std::ostream& stream, int reg) const {
- stream << Arm64ManagedRegister::FromXRegister(XRegister(reg));
+ stream << XRegister(reg);
}
void CodeGeneratorARM64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
- stream << Arm64ManagedRegister::FromDRegister(DRegister(reg));
+ stream << DRegister(reg);
}
void CodeGeneratorARM64::MoveConstant(CPURegister destination, HConstant* constant) {
@@ -977,14 +1080,12 @@
BlockPoolsScope block_pools(GetVIXLAssembler());
__ Ldr(lr, MemOperand(tr, entry_point_offset));
__ Blr(lr);
- if (instruction != nullptr) {
- RecordPcInfo(instruction, dex_pc, slow_path);
- DCHECK(instruction->IsSuspendCheck()
- || instruction->IsBoundsCheck()
- || instruction->IsNullCheck()
- || instruction->IsDivZeroCheck()
- || !IsLeafMethod());
- }
+ RecordPcInfo(instruction, dex_pc, slow_path);
+ DCHECK(instruction->IsSuspendCheck()
+ || instruction->IsBoundsCheck()
+ || instruction->IsNullCheck()
+ || instruction->IsDivZeroCheck()
+ || !IsLeafMethod());
}
void InstructionCodeGeneratorARM64::GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path,
@@ -1036,8 +1137,19 @@
void InstructionCodeGeneratorARM64::GenerateSuspendCheck(HSuspendCheck* instruction,
HBasicBlock* successor) {
SuspendCheckSlowPathARM64* slow_path =
- new (GetGraph()->GetArena()) SuspendCheckSlowPathARM64(instruction, successor);
- codegen_->AddSlowPath(slow_path);
+ down_cast<SuspendCheckSlowPathARM64*>(instruction->GetSlowPath());
+ if (slow_path == nullptr) {
+ slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathARM64(instruction, successor);
+ instruction->SetSlowPath(slow_path);
+ codegen_->AddSlowPath(slow_path);
+ if (successor != nullptr) {
+ DCHECK(successor->IsLoopHeader());
+ codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
+ }
+ } else {
+ DCHECK_EQ(slow_path->GetSuccessor(), successor);
+ }
+
UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
Register temp = temps.AcquireW();
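
The change above makes multiple back edges share one slow path per HSuspendCheck instead of allocating a fresh one per call site. The pattern, reduced to its essentials (a sketch; the concrete slow-path type and arena allocation are elided):

// Create the slow path on first use and cache it on the instruction;
// later visits reuse it and only sanity-check the successor.
SuspendCheckSlowPathARM64* GetOrCreate(HSuspendCheck* check,
                                       HBasicBlock* successor) {
  auto* path = down_cast<SuspendCheckSlowPathARM64*>(check->GetSlowPath());
  if (path == nullptr) {
    path = new SuspendCheckSlowPathARM64(check, successor);  // arena in ART
    check->SetSlowPath(path);
  }
  return path;
}
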
@@ -1157,7 +1269,8 @@
}
void InstructionCodeGeneratorARM64::HandleFieldSet(HInstruction* instruction,
- const FieldInfo& field_info) {
+ const FieldInfo& field_info,
+ bool value_can_be_null) {
DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
BlockPoolsScope block_pools(GetVIXLAssembler());
@@ -1183,7 +1296,7 @@
}
if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
- codegen_->MarkGCCard(obj, Register(value));
+ codegen_->MarkGCCard(obj, Register(value), value_can_be_null);
}
}
@@ -1409,7 +1522,7 @@
codegen_->MaybeRecordImplicitNullCheck(instruction);
}
if (CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue())) {
- codegen_->MarkGCCard(obj, value.W());
+ codegen_->MarkGCCard(obj, value.W(), instruction->GetValueCanBeNull());
}
}
}
@@ -1452,8 +1565,10 @@
instruction, locations->InAt(1), LocationFrom(obj_cls), instruction->GetDexPc());
codegen_->AddSlowPath(slow_path);
- // TODO: avoid this check if we know obj is not null.
- __ Cbz(obj, slow_path->GetExitLabel());
+ // Avoid null check if we know obj is not null.
+ if (instruction->MustDoNullCheck()) {
+ __ Cbz(obj, slow_path->GetExitLabel());
+ }
// Compare the class of `obj` with `cls`.
__ Ldr(obj_cls, HeapOperand(obj, mirror::Object::ClassOffset()));
__ Cmp(obj_cls, cls);
@@ -1590,6 +1705,152 @@
#undef DEFINE_CONDITION_VISITORS
#undef FOR_EACH_CONDITION_INSTRUCTION
+void InstructionCodeGeneratorARM64::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
+ DCHECK(instruction->IsDiv() || instruction->IsRem());
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location second = locations->InAt(1);
+ DCHECK(second.IsConstant());
+
+ Register out = OutputRegister(instruction);
+ Register dividend = InputRegisterAt(instruction, 0);
+ int64_t imm = Int64FromConstant(second.GetConstant());
+ DCHECK(imm == 1 || imm == -1);
+
+ if (instruction->IsRem()) {
+ __ Mov(out, 0);
+ } else {
+ if (imm == 1) {
+ __ Mov(out, dividend);
+ } else {
+ __ Neg(out, dividend);
+ }
+ }
+}
+
+void InstructionCodeGeneratorARM64::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
+ DCHECK(instruction->IsDiv() || instruction->IsRem());
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location second = locations->InAt(1);
+ DCHECK(second.IsConstant());
+
+ Register out = OutputRegister(instruction);
+ Register dividend = InputRegisterAt(instruction, 0);
+ int64_t imm = Int64FromConstant(second.GetConstant());
+ int64_t abs_imm = std::abs(imm);
+ DCHECK(IsPowerOfTwo(abs_imm));
+ int ctz_imm = CTZ(abs_imm);
+
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ Register temp = temps.AcquireSameSizeAs(out);
+
+ if (instruction->IsDiv()) {
+ __ Add(temp, dividend, abs_imm - 1);
+ __ Cmp(dividend, 0);
+ __ Csel(out, temp, dividend, lt);
+ if (imm > 0) {
+ __ Asr(out, out, ctz_imm);
+ } else {
+ __ Neg(out, Operand(out, ASR, ctz_imm));
+ }
+ } else {
+ int bits = instruction->GetResultType() == Primitive::kPrimInt ? 32 : 64;
+ __ Asr(temp, dividend, bits - 1);
+ __ Lsr(temp, temp, bits - ctz_imm);
+ __ Add(out, dividend, temp);
+ __ And(out, out, abs_imm - 1);
+ __ Sub(out, out, temp);
+ }
+}
+
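
DivRemByPowerOfTwo implements the classic round-toward-zero trick: bias a negative dividend by abs_imm - 1 before the arithmetic shift. A compact C++ equivalent of the Div path (a sketch; an arithmetic right shift on negative values is assumed, as on ARM):

#include <cstdint>
#include <cstdlib>

int64_t DivByPowerOfTwo(int64_t dividend, int64_t imm) {
  int64_t abs_imm = std::llabs(imm);
  int k = __builtin_ctzll(abs_imm);  // imm == +/- 2^k (GCC/Clang builtin)
  // Csel above picks the biased value only for negative dividends.
  int64_t biased = dividend < 0 ? dividend + (abs_imm - 1) : dividend;
  int64_t quotient = biased >> k;    // Asr
  return imm > 0 ? quotient : -quotient;  // Neg for a negative divisor
}
// The Rem path instead materializes the bias with Asr/Lsr, masks with
// abs_imm - 1, and subtracts the bias back out.
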
+void InstructionCodeGeneratorARM64::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
+ DCHECK(instruction->IsDiv() || instruction->IsRem());
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location second = locations->InAt(1);
+ DCHECK(second.IsConstant());
+
+ Register out = OutputRegister(instruction);
+ Register dividend = InputRegisterAt(instruction, 0);
+ int64_t imm = Int64FromConstant(second.GetConstant());
+
+ Primitive::Type type = instruction->GetResultType();
+ DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
+
+ int64_t magic;
+ int shift;
+ CalculateMagicAndShiftForDivRem(imm, type == Primitive::kPrimLong /* is_long */, &magic, &shift);
+
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ Register temp = temps.AcquireSameSizeAs(out);
+
+ // temp = get_high(dividend * magic)
+ __ Mov(temp, magic);
+ if (type == Primitive::kPrimLong) {
+ __ Smulh(temp, dividend, temp);
+ } else {
+ __ Smull(temp.X(), dividend, temp);
+ __ Lsr(temp.X(), temp.X(), 32);
+ }
+
+ if (imm > 0 && magic < 0) {
+ __ Add(temp, temp, dividend);
+ } else if (imm < 0 && magic > 0) {
+ __ Sub(temp, temp, dividend);
+ }
+
+ if (shift != 0) {
+ __ Asr(temp, temp, shift);
+ }
+
+ if (instruction->IsDiv()) {
+ __ Sub(out, temp, Operand(temp, ASR, type == Primitive::kPrimLong ? 63 : 31));
+ } else {
+ __ Sub(temp, temp, Operand(temp, ASR, type == Primitive::kPrimLong ? 63 : 31));
+ // TODO: Strength reduction for msub.
+ Register temp_imm = temps.AcquireSameSizeAs(out);
+ __ Mov(temp_imm, imm);
+ __ Msub(out, temp, temp_imm, dividend);
+ }
+}
+
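
GenerateDivRemWithAnyConstant is the Hacker's Delight magic-number scheme: the quotient comes from the high half of a widening multiply, a conditional dividend correction, an arithmetic shift, and a sign fix-up. A sketch of the 64-bit Div path (uses the GCC/Clang __int128 extension to model Smulh; magic and shift are whatever CalculateMagicAndShiftForDivRem produced):

#include <cstdint>

int64_t MagicDiv(int64_t dividend, int64_t imm, int64_t magic, int shift) {
  // Smulh: high 64 bits of the full 128-bit product.
  int64_t temp =
      static_cast<int64_t>((static_cast<__int128>(dividend) * magic) >> 64);
  if (imm > 0 && magic < 0) temp += dividend;  // Add correction
  if (imm < 0 && magic > 0) temp -= dividend;  // Sub correction
  if (shift != 0) temp >>= shift;              // Asr
  return temp - (temp >> 63);                  // add 1 when temp < 0
}
// Rem follows the same steps and then computes dividend - quotient * imm
// with Msub. The 32-bit case uses Smull plus a 32-bit right shift instead
// of Smulh.
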
+void InstructionCodeGeneratorARM64::GenerateDivRemIntegral(HBinaryOperation* instruction) {
+ DCHECK(instruction->IsDiv() || instruction->IsRem());
+ Primitive::Type type = instruction->GetResultType();
+ DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Register out = OutputRegister(instruction);
+ Location second = locations->InAt(1);
+
+ if (second.IsConstant()) {
+ int64_t imm = Int64FromConstant(second.GetConstant());
+
+ if (imm == 0) {
+ // Do not generate anything. DivZeroCheck would prevent any code from being executed.
+ } else if (imm == 1 || imm == -1) {
+ DivRemOneOrMinusOne(instruction);
+ } else if (IsPowerOfTwo(std::abs(imm))) {
+ DivRemByPowerOfTwo(instruction);
+ } else {
+ DCHECK(imm <= -2 || imm >= 2);
+ GenerateDivRemWithAnyConstant(instruction);
+ }
+ } else {
+ Register dividend = InputRegisterAt(instruction, 0);
+ Register divisor = InputRegisterAt(instruction, 1);
+ if (instruction->IsDiv()) {
+ __ Sdiv(out, dividend, divisor);
+ } else {
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ Register temp = temps.AcquireSameSizeAs(out);
+ __ Sdiv(temp, dividend, divisor);
+ __ Msub(out, temp, divisor, dividend);
+ }
+ }
+}
+
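
For a non-constant divisor there is no remainder instruction on ARM64, so the remainder is recovered from the quotient, matching the Sdiv/Msub pair above:

#include <cstdint>

int64_t Remainder(int64_t dividend, int64_t divisor) {
  int64_t quotient = dividend / divisor;  // Sdiv
  return dividend - quotient * divisor;   // Msub: dividend - q * divisor
}
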
void LocationsBuilderARM64::VisitDiv(HDiv* div) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
@@ -1597,7 +1858,7 @@
case Primitive::kPrimInt:
case Primitive::kPrimLong:
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
@@ -1618,7 +1879,7 @@
switch (type) {
case Primitive::kPrimInt:
case Primitive::kPrimLong:
- __ Sdiv(OutputRegister(div), InputRegisterAt(div, 0), InputRegisterAt(div, 1));
+ GenerateDivRemIntegral(div);
break;
case Primitive::kPrimFloat:
@@ -1833,7 +2094,7 @@
}
void InstructionCodeGeneratorARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
- HandleFieldSet(instruction, instruction->GetFieldInfo());
+ HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) {
@@ -1855,9 +2116,11 @@
vixl::Label done;
// Return 0 if `obj` is null.
- // TODO: Avoid this check if we know `obj` is not null.
- __ Mov(out, 0);
- __ Cbz(obj, &done);
+ // Avoid null check if we know `obj` is not null.
+ if (instruction->MustDoNullCheck()) {
+ __ Mov(out, 0);
+ __ Cbz(obj, &done);
+ }
// Compare the class of `obj` with `cls`.
__ Ldr(out, HeapOperand(obj, mirror::Object::ClassOffset()));
@@ -1905,8 +2168,8 @@
new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
locations->AddTemp(LocationFrom(x0));
- InvokeDexCallingConventionVisitor calling_convention_visitor;
- for (size_t i = 0; i < invoke->InputCount(); i++) {
+ InvokeDexCallingConventionVisitorARM64 calling_convention_visitor;
+ for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
HInstruction* input = invoke->InputAt(i);
locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType()));
}
@@ -1966,6 +2229,10 @@
}
void LocationsBuilderARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
+ // When we do not run baseline, explicit clinit checks triggered by static
+ // invokes must have been pruned by art::PrepareForRegisterAllocation.
+ DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+
IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena());
if (intrinsic.TryDispatch(invoke)) {
return;
@@ -1996,26 +2263,40 @@
//
// Currently we implement the app -> app logic, which looks up in the resolve cache.
- // temp = method;
- LoadCurrentMethod(temp);
- if (!invoke->IsRecursive()) {
- // temp = temp->dex_cache_resolved_methods_;
- __ Ldr(temp, HeapOperand(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset()));
- // temp = temp[index_in_cache];
- __ Ldr(temp, HeapOperand(temp, index_in_cache));
- // lr = temp->entry_point_from_quick_compiled_code_;
+ if (invoke->IsStringInit()) {
+ // temp = thread->string_init_entrypoint
+ __ Ldr(temp, HeapOperand(tr, invoke->GetStringInitOffset()));
+ // LR = temp->entry_point_from_quick_compiled_code_;
__ Ldr(lr, HeapOperand(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kArm64WordSize)));
- // lr();
+ // lr()
__ Blr(lr);
} else {
- __ Bl(&frame_entry_label_);
+ // temp = method;
+ LoadCurrentMethod(temp);
+ if (!invoke->IsRecursive()) {
+ // temp = temp->dex_cache_resolved_methods_;
+ __ Ldr(temp, HeapOperand(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset()));
+ // temp = temp[index_in_cache];
+ __ Ldr(temp, HeapOperand(temp, index_in_cache));
+ // lr = temp->entry_point_from_quick_compiled_code_;
+ __ Ldr(lr, HeapOperand(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArm64WordSize)));
+ // lr();
+ __ Blr(lr);
+ } else {
+ __ Bl(&frame_entry_label_);
+ }
}
DCHECK(!IsLeafMethod());
}
void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
+ // When we do not run baseline, explicit clinit checks triggered by static
+ // invokes must have been pruned by art::PrepareForRegisterAllocation.
+ DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+
if (TryGenerateIntrinsicCode(invoke, codegen_)) {
return;
}
@@ -2421,7 +2702,7 @@
case Primitive::kPrimInt:
case Primitive::kPrimLong:
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1)));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
@@ -2446,14 +2727,7 @@
switch (type) {
case Primitive::kPrimInt:
case Primitive::kPrimLong: {
- UseScratchRegisterScope temps(GetVIXLAssembler());
- Register dividend = InputRegisterAt(rem, 0);
- Register divisor = InputRegisterAt(rem, 1);
- Register output = OutputRegister(rem);
- Register temp = temps.AcquireSameSizeAs(output);
-
- __ Sdiv(temp, dividend, divisor);
- __ Msub(output, temp, divisor, dividend);
+ GenerateDivRemIntegral(rem);
break;
}
@@ -2563,7 +2837,7 @@
}
void InstructionCodeGeneratorARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
- HandleFieldSet(instruction, instruction->GetFieldInfo());
+ HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 5a35867..b56ca10 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -70,6 +70,9 @@
vixl::Label* GetEntryLabel() { return &entry_label_; }
vixl::Label* GetExitLabel() { return &exit_label_; }
+ void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
+ void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
+
private:
vixl::Label entry_label_;
vixl::Label exit_label_;
@@ -119,25 +122,20 @@
DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
};
-class InvokeDexCallingConventionVisitor {
+class InvokeDexCallingConventionVisitorARM64 : public InvokeDexCallingConventionVisitor {
public:
- InvokeDexCallingConventionVisitor() : gp_index_(0), fp_index_(0), stack_index_(0) {}
+ InvokeDexCallingConventionVisitorARM64() {}
+ virtual ~InvokeDexCallingConventionVisitorARM64() {}
- Location GetNextLocation(Primitive::Type type);
+ Location GetNextLocation(Primitive::Type type) OVERRIDE;
Location GetReturnLocation(Primitive::Type return_type) {
return calling_convention.GetReturnLocation(return_type);
}
private:
InvokeDexCallingConvention calling_convention;
- // The current index for core registers.
- uint32_t gp_index_;
- // The current index for floating-point registers.
- uint32_t fp_index_;
- // The current stack index.
- uint32_t stack_index_;
- DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitor);
+ DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorARM64);
};
class InstructionCodeGeneratorARM64 : public HGraphVisitor {
@@ -159,7 +157,9 @@
void GenerateMemoryBarrier(MemBarrierKind kind);
void GenerateSuspendCheck(HSuspendCheck* instruction, HBasicBlock* successor);
void HandleBinaryOp(HBinaryOperation* instr);
- void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
+ void HandleFieldSet(HInstruction* instruction,
+ const FieldInfo& field_info,
+ bool value_can_be_null);
void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
void HandleShift(HBinaryOperation* instr);
void GenerateImplicitNullCheck(HNullCheck* instruction);
@@ -168,6 +168,11 @@
vixl::Label* true_target,
vixl::Label* false_target,
vixl::Label* always_true_target);
+ void DivRemOneOrMinusOne(HBinaryOperation* instruction);
+ void DivRemByPowerOfTwo(HBinaryOperation* instruction);
+ void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
+ void GenerateDivRemIntegral(HBinaryOperation* instruction);
+
Arm64Assembler* const assembler_;
CodeGeneratorARM64* const codegen_;
@@ -193,7 +198,7 @@
void HandleShift(HBinaryOperation* instr);
CodeGeneratorARM64* const codegen_;
- InvokeDexCallingConventionVisitor parameter_visitor_;
+ InvokeDexCallingConventionVisitorARM64 parameter_visitor_;
DISALLOW_COPY_AND_ASSIGN(LocationsBuilderARM64);
};
@@ -232,15 +237,8 @@
void GenerateFrameEntry() OVERRIDE;
void GenerateFrameExit() OVERRIDE;
- vixl::CPURegList GetFramePreservedCoreRegisters() const {
- return vixl::CPURegList(vixl::CPURegister::kRegister, vixl::kXRegSize,
- core_spill_mask_);
- }
-
- vixl::CPURegList GetFramePreservedFPRegisters() const {
- return vixl::CPURegList(vixl::CPURegister::kFPRegister, vixl::kDRegSize,
- fpu_spill_mask_);
- }
+ vixl::CPURegList GetFramePreservedCoreRegisters() const;
+ vixl::CPURegList GetFramePreservedFPRegisters() const;
void Bind(HBasicBlock* block) OVERRIDE;
@@ -271,7 +269,7 @@
vixl::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->vixl_masm_; }
// Emit a write barrier.
- void MarkGCCard(vixl::Register object, vixl::Register value);
+ void MarkGCCard(vixl::Register object, vixl::Register value, bool value_can_be_null);
// Register allocation.
@@ -282,10 +280,10 @@
Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
- size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id);
- size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id);
- size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id);
- size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id);
+ size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
// The number of registers that can be allocated. The register allocator may
// decide to reserve and not use a few of them.
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 3dcfca6..a6f01da 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -153,6 +153,10 @@
return &return_label_;
}
+ HBasicBlock* GetSuccessor() const {
+ return successor_;
+ }
+
private:
HSuspendCheck* const instruction_;
HBasicBlock* const successor_;
@@ -174,7 +178,6 @@
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- x86_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
__ movl(calling_convention.GetRegisterAt(0), Immediate(instruction_->GetStringIndex()));
__ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pResolveString)));
RecordPcInfo(codegen, instruction_, instruction_->GetDexPc());
@@ -208,7 +211,6 @@
InvokeRuntimeCallingConvention calling_convention;
__ movl(calling_convention.GetRegisterAt(0), Immediate(cls_->GetTypeIndex()));
- x86_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
__ fs()->call(Address::Absolute(do_clinit_
? QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInitializeStaticStorage)
: QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInitializeType)));
@@ -338,11 +340,11 @@
}
void CodeGeneratorX86::DumpCoreRegister(std::ostream& stream, int reg) const {
- stream << X86ManagedRegister::FromCpuRegister(Register(reg));
+ stream << Register(reg);
}
void CodeGeneratorX86::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
- stream << X86ManagedRegister::FromXmmRegister(XmmRegister(reg));
+ stream << XmmRegister(reg);
}
size_t CodeGeneratorX86::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
@@ -553,7 +555,7 @@
UNREACHABLE();
}
-Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type) {
+Location InvokeDexCallingConventionVisitorX86::GetNextLocation(Primitive::Type type) {
switch (type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte:
@@ -584,7 +586,7 @@
}
case Primitive::kPrimFloat: {
- uint32_t index = fp_index_++;
+ uint32_t index = float_index_++;
stack_index_++;
if (index < calling_convention.GetNumberOfFpuRegisters()) {
return Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(index));
@@ -594,7 +596,7 @@
}
case Primitive::kPrimDouble: {
- uint32_t index = fp_index_++;
+ uint32_t index = float_index_++;
stack_index_ += 2;
if (index < calling_convention.GetNumberOfFpuRegisters()) {
return Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(index));
@@ -811,7 +813,6 @@
HLoopInformation* info = block->GetLoopInformation();
if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
- codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
GenerateSuspendCheck(info->GetSuspendCheck(), successor);
return;
}
@@ -1196,6 +1197,10 @@
}
void LocationsBuilderX86::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
+ // When we do not run baseline, explicit clinit checks triggered by static
+ // invokes must have been pruned by art::PrepareForRegisterAllocation.
+ DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+
IntrinsicLocationsBuilderX86 intrinsic(codegen_);
if (intrinsic.TryDispatch(invoke)) {
return;
@@ -1214,6 +1219,10 @@
}
void InstructionCodeGeneratorX86::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
+ // When we do not run baseline, explicit clinit checks triggered by static
+ // invokes must have been pruned by art::PrepareForRegisterAllocation.
+ DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+
if (TryGenerateIntrinsicCode(invoke, codegen_)) {
return;
}
@@ -1232,8 +1241,8 @@
new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
locations->AddTemp(Location::RegisterLocation(EAX));
- InvokeDexCallingConventionVisitor calling_convention_visitor;
- for (size_t i = 0; i < invoke->InputCount(); i++) {
+ InvokeDexCallingConventionVisitorX86 calling_convention_visitor;
+ for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
HInstruction* input = invoke->InputAt(i);
locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType()));
}
@@ -2734,17 +2743,12 @@
new (GetGraph()->GetArena()) LocationSummary(op, LocationSummary::kNoCall);
switch (op->GetResultType()) {
- case Primitive::kPrimInt: {
- locations->SetInAt(0, Location::RequiresRegister());
- // The shift count needs to be in CL.
- locations->SetInAt(1, Location::ByteRegisterOrConstant(ECX, op->InputAt(1)));
- locations->SetOut(Location::SameAsFirstInput());
- break;
- }
+ case Primitive::kPrimInt:
case Primitive::kPrimLong: {
+ // Can't have Location::Any() together with an output of SameAsFirstInput().
locations->SetInAt(0, Location::RequiresRegister());
- // The shift count needs to be in CL.
- locations->SetInAt(1, Location::RegisterLocation(ECX));
+ // The shift count needs to be in CL or a constant.
+ locations->SetInAt(1, Location::ByteRegisterOrConstant(ECX, op->InputAt(1)));
locations->SetOut(Location::SameAsFirstInput());
break;
}
@@ -2763,6 +2767,7 @@
switch (op->GetResultType()) {
case Primitive::kPrimInt: {
+ DCHECK(first.IsRegister());
Register first_reg = first.AsRegister<Register>();
if (second.IsRegister()) {
Register second_reg = second.AsRegister<Register>();
@@ -2775,7 +2780,11 @@
__ shrl(first_reg, second_reg);
}
} else {
- Immediate imm(second.GetConstant()->AsIntConstant()->GetValue() & kMaxIntShiftValue);
+ int32_t shift = second.GetConstant()->AsIntConstant()->GetValue() & kMaxIntShiftValue;
+ if (shift == 0) {
+ return;
+ }
+ Immediate imm(shift);
if (op->IsShl()) {
__ shll(first_reg, imm);
} else if (op->IsShr()) {
@@ -2787,14 +2796,29 @@
break;
}
case Primitive::kPrimLong: {
- Register second_reg = second.AsRegister<Register>();
- DCHECK_EQ(ECX, second_reg);
- if (op->IsShl()) {
- GenerateShlLong(first, second_reg);
- } else if (op->IsShr()) {
- GenerateShrLong(first, second_reg);
+ if (second.IsRegister()) {
+ Register second_reg = second.AsRegister<Register>();
+ DCHECK_EQ(ECX, second_reg);
+ if (op->IsShl()) {
+ GenerateShlLong(first, second_reg);
+ } else if (op->IsShr()) {
+ GenerateShrLong(first, second_reg);
+ } else {
+ GenerateUShrLong(first, second_reg);
+ }
} else {
- GenerateUShrLong(first, second_reg);
+ // Shift by a constant.
+ int shift = second.GetConstant()->AsIntConstant()->GetValue() & kMaxLongShiftValue;
+ // Nothing to do if the shift is 0, as the input is already the output.
+ if (shift != 0) {
+ if (op->IsShl()) {
+ GenerateShlLong(first, shift);
+ } else if (op->IsShr()) {
+ GenerateShrLong(first, shift);
+ } else {
+ GenerateUShrLong(first, shift);
+ }
+ }
}
break;
}
@@ -2803,6 +2827,34 @@
}
}
+void InstructionCodeGeneratorX86::GenerateShlLong(const Location& loc, int shift) {
+ Register low = loc.AsRegisterPairLow<Register>();
+ Register high = loc.AsRegisterPairHigh<Register>();
+ if (shift == 1) {
+ // This is just an addition.
+ __ addl(low, low);
+ __ adcl(high, high);
+ } else if (shift == 32) {
+ // Shift by 32 is easy. High gets low, and low gets 0.
+ codegen_->EmitParallelMoves(
+ loc.ToLow(),
+ loc.ToHigh(),
+ Primitive::kPrimInt,
+ Location::ConstantLocation(GetGraph()->GetIntConstant(0)),
+ loc.ToLow(),
+ Primitive::kPrimInt);
+ } else if (shift > 32) {
+ // Low part becomes 0. High part is low part << (shift-32).
+ __ movl(high, low);
+ __ shll(high, Immediate(shift - 32));
+ __ xorl(low, low);
+ } else {
+ // Between 1 and 31.
+ __ shld(high, low, Immediate(shift));
+ __ shll(low, Immediate(shift));
+ }
+}
+
void InstructionCodeGeneratorX86::GenerateShlLong(const Location& loc, Register shifter) {
Label done;
__ shld(loc.AsRegisterPairHigh<Register>(), loc.AsRegisterPairLow<Register>(), shifter);
@@ -2814,6 +2866,27 @@
__ Bind(&done);
}
+void InstructionCodeGeneratorX86::GenerateShrLong(const Location& loc, int shift) {
+ Register low = loc.AsRegisterPairLow<Register>();
+ Register high = loc.AsRegisterPairHigh<Register>();
+ if (shift == 32) {
+ // Need to copy the sign.
+ DCHECK_NE(low, high);
+ __ movl(low, high);
+ __ sarl(high, Immediate(31));
+ } else if (shift > 32) {
+ DCHECK_NE(low, high);
+ // High part becomes sign. Low part is shifted by shift - 32.
+ __ movl(low, high);
+ __ sarl(high, Immediate(31));
+ __ sarl(low, Immediate(shift - 32));
+ } else {
+ // Between 1 and 31.
+ __ shrd(low, high, Immediate(shift));
+ __ sarl(high, Immediate(shift));
+ }
+}
+
void InstructionCodeGeneratorX86::GenerateShrLong(const Location& loc, Register shifter) {
Label done;
__ shrd(loc.AsRegisterPairLow<Register>(), loc.AsRegisterPairHigh<Register>(), shifter);
@@ -2825,6 +2898,30 @@
__ Bind(&done);
}
+void InstructionCodeGeneratorX86::GenerateUShrLong(const Location& loc, int shift) {
+ Register low = loc.AsRegisterPairLow<Register>();
+ Register high = loc.AsRegisterPairHigh<Register>();
+ if (shift == 32) {
+ // Shift by 32 is easy. Low gets high, and high gets 0.
+ codegen_->EmitParallelMoves(
+ loc.ToHigh(),
+ loc.ToLow(),
+ Primitive::kPrimInt,
+ Location::ConstantLocation(GetGraph()->GetIntConstant(0)),
+ loc.ToHigh(),
+ Primitive::kPrimInt);
+ } else if (shift > 32) {
+ // Low part is high >> (shift - 32). High part becomes 0.
+ __ movl(low, high);
+ __ shrl(low, Immediate(shift - 32));
+ __ xorl(high, high);
+ } else {
+ // Between 1 and 31.
+ __ shrd(low, high, Immediate(shift));
+ __ shrl(high, Immediate(shift));
+ }
+}
+
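
The three constant-shift helpers decompose a 64-bit shift over the 32-bit register pair into the cases shift == 32, shift > 32, and 1..31 (where shld/shrd move bits across the pair). A C++ sketch of the left and arithmetic-right variants (lo/hi model the pair; the caller guarantees shift != 0, as above; an arithmetic shift of negative ints is assumed, as on x86):

#include <cstdint>

void ShlPair(uint32_t& lo, uint32_t& hi, int shift) {
  if (shift == 32) { hi = lo; lo = 0; }                      // move + zero
  else if (shift > 32) { hi = lo << (shift - 32); lo = 0; }
  else {                                                     // 1..31
    hi = (hi << shift) | (lo >> (32 - shift));               // shld
    lo <<= shift;                                            // shll
  }
}

void SarPair(uint32_t& lo, uint32_t& hi, int shift) {
  int32_t shi = static_cast<int32_t>(hi);                    // sign source
  if (shift == 32) { lo = hi; hi = shi >> 31; }
  else if (shift > 32) { lo = shi >> (shift - 32); hi = shi >> 31; }
  else {                                                     // 1..31
    lo = (lo >> shift) | (hi << (32 - shift));               // shrd
    hi = shi >> shift;                                       // sarl
  }
}
// The unsigned variant is SarPair with logical shifts and a zeroed high
// part, as in GenerateUShrLong above.
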
void InstructionCodeGeneratorX86::GenerateUShrLong(const Location& loc, Register shifter) {
Label done;
__ shrd(loc.AsRegisterPairLow<Register>(), loc.AsRegisterPairHigh<Register>(), shifter);
@@ -3104,33 +3201,50 @@
// 3) app -> app
//
// Currently we implement the app -> app logic, which looks up in the resolve cache.
- // temp = method;
- LoadCurrentMethod(temp);
- if (!invoke->IsRecursive()) {
- // temp = temp->dex_cache_resolved_methods_;
- __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
- // temp = temp[index_in_cache]
- __ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex())));
+
+ if (invoke->IsStringInit()) {
+ // temp = thread->string_init_entrypoint
+ __ fs()->movl(temp, Address::Absolute(invoke->GetStringInitOffset()));
// (temp + offset_of_quick_compiled_code)()
__ call(Address(
temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
} else {
- __ call(GetFrameEntryLabel());
+ // temp = method;
+ LoadCurrentMethod(temp);
+ if (!invoke->IsRecursive()) {
+ // temp = temp->dex_cache_resolved_methods_;
+ __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
+ // temp = temp[index_in_cache]
+ __ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex())));
+ // (temp + offset_of_quick_compiled_code)()
+ __ call(Address(temp,
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
+ } else {
+ __ call(GetFrameEntryLabel());
+ }
}
DCHECK(!IsLeafMethod());
}
-void CodeGeneratorX86::MarkGCCard(Register temp, Register card, Register object, Register value) {
+void CodeGeneratorX86::MarkGCCard(Register temp,
+ Register card,
+ Register object,
+ Register value,
+ bool value_can_be_null) {
Label is_null;
- __ testl(value, value);
- __ j(kEqual, &is_null);
+ if (value_can_be_null) {
+ __ testl(value, value);
+ __ j(kEqual, &is_null);
+ }
__ fs()->movl(card, Address::Absolute(Thread::CardTableOffset<kX86WordSize>().Int32Value()));
__ movl(temp, object);
__ shrl(temp, Immediate(gc::accounting::CardTable::kCardShift));
__ movb(Address(temp, card, TIMES_1, 0),
X86ManagedRegister::FromCpuRegister(card).AsByteRegister());
- __ Bind(&is_null);
+ if (value_can_be_null) {
+ __ Bind(&is_null);
+ }
}
void LocationsBuilderX86::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) {
@@ -3275,7 +3389,8 @@
}
void InstructionCodeGeneratorX86::HandleFieldSet(HInstruction* instruction,
- const FieldInfo& field_info) {
+ const FieldInfo& field_info,
+ bool value_can_be_null) {
DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
LocationSummary* locations = instruction->GetLocations();
@@ -3348,7 +3463,7 @@
if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
Register temp = locations->GetTemp(0).AsRegister<Register>();
Register card = locations->GetTemp(1).AsRegister<Register>();
- codegen_->MarkGCCard(temp, card, base, value.AsRegister<Register>());
+ codegen_->MarkGCCard(temp, card, base, value.AsRegister<Register>(), value_can_be_null);
}
if (is_volatile) {
@@ -3369,7 +3484,7 @@
}
void InstructionCodeGeneratorX86::VisitStaticFieldSet(HStaticFieldSet* instruction) {
- HandleFieldSet(instruction, instruction->GetFieldInfo());
+ HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
void LocationsBuilderX86::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
@@ -3377,7 +3492,7 @@
}
void InstructionCodeGeneratorX86::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
- HandleFieldSet(instruction, instruction->GetFieldInfo());
+ HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
void LocationsBuilderX86::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
@@ -3711,7 +3826,8 @@
if (needs_write_barrier) {
Register temp = locations->GetTemp(0).AsRegister<Register>();
Register card = locations->GetTemp(1).AsRegister<Register>();
- codegen_->MarkGCCard(temp, card, obj, value.AsRegister<Register>());
+ codegen_->MarkGCCard(
+ temp, card, obj, value.AsRegister<Register>(), instruction->GetValueCanBeNull());
}
} else {
DCHECK_EQ(value_type, Primitive::kPrimNot);
@@ -3809,7 +3925,7 @@
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
- locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
if (instruction->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
}
@@ -3821,16 +3937,38 @@
Location length_loc = locations->InAt(1);
SlowPathCodeX86* slow_path =
new (GetGraph()->GetArena()) BoundsCheckSlowPathX86(instruction, index_loc, length_loc);
- codegen_->AddSlowPath(slow_path);
- Register length = length_loc.AsRegister<Register>();
- if (index_loc.IsConstant()) {
- int32_t value = CodeGenerator::GetInt32ValueOf(index_loc.GetConstant());
- __ cmpl(length, Immediate(value));
+ if (length_loc.IsConstant()) {
+ int32_t length = CodeGenerator::GetInt32ValueOf(length_loc.GetConstant());
+ if (index_loc.IsConstant()) {
+ // BCE will remove the bounds check if we are guaranteed to pass.
+ int32_t index = CodeGenerator::GetInt32ValueOf(index_loc.GetConstant());
+ if (index < 0 || index >= length) {
+ codegen_->AddSlowPath(slow_path);
+ __ jmp(slow_path->GetEntryLabel());
+ } else {
+ // Some optimization after BCE may have generated this code, so we should
+ // not emit a bounds check when the range is known to be valid.
+ }
+ return;
+ }
+
+ // We have to reverse the jump condition because the length is the constant.
+ Register index_reg = index_loc.AsRegister<Register>();
+ __ cmpl(index_reg, Immediate(length));
+ codegen_->AddSlowPath(slow_path);
+ __ j(kAboveEqual, slow_path->GetEntryLabel());
} else {
- __ cmpl(length, index_loc.AsRegister<Register>());
+ Register length = length_loc.AsRegister<Register>();
+ if (index_loc.IsConstant()) {
+ int32_t value = CodeGenerator::GetInt32ValueOf(index_loc.GetConstant());
+ __ cmpl(length, Immediate(value));
+ } else {
+ __ cmpl(length, index_loc.AsRegister<Register>());
+ }
+ codegen_->AddSlowPath(slow_path);
+ __ j(kBelowEqual, slow_path->GetEntryLabel());
}
- __ j(kBelowEqual, slow_path->GetEntryLabel());
}
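
Both branches above test the same predicate; the comparison is simply written from whichever side holds a register. Because the compare condition is unsigned (kAboveEqual/kBelowEqual), a single check also rejects negative indices:

#include <cstdint>

// index is out of bounds iff (unsigned)index >= (unsigned)length; a
// negative index wraps to a large unsigned value and fails the same test.
bool OutOfBounds(int32_t index, int32_t length) {
  return static_cast<uint32_t>(index) >= static_cast<uint32_t>(length);
}
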
void LocationsBuilderX86::VisitTemporary(HTemporary* temp) {
@@ -3872,8 +4010,19 @@
void InstructionCodeGeneratorX86::GenerateSuspendCheck(HSuspendCheck* instruction,
HBasicBlock* successor) {
SuspendCheckSlowPathX86* slow_path =
- new (GetGraph()->GetArena()) SuspendCheckSlowPathX86(instruction, successor);
- codegen_->AddSlowPath(slow_path);
+ down_cast<SuspendCheckSlowPathX86*>(instruction->GetSlowPath());
+ if (slow_path == nullptr) {
+ slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathX86(instruction, successor);
+ instruction->SetSlowPath(slow_path);
+ codegen_->AddSlowPath(slow_path);
+ if (successor != nullptr) {
+ DCHECK(successor->IsLoopHeader());
+ codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
+ }
+ } else {
+ DCHECK_EQ(slow_path->GetSuccessor(), successor);
+ }
+
__ fs()->cmpw(Address::Absolute(
Thread::ThreadFlagsOffset<kX86WordSize>().Int32Value()), Immediate(0));
if (successor == nullptr) {
@@ -4250,9 +4399,11 @@
SlowPathCodeX86* slow_path = nullptr;
// Return 0 if `obj` is null.
- // TODO: avoid this check if we know obj is not null.
- __ testl(obj, obj);
- __ j(kEqual, &zero);
+ // Avoid null check if we know obj is not null.
+ if (instruction->MustDoNullCheck()) {
+ __ testl(obj, obj);
+ __ j(kEqual, &zero);
+ }
__ movl(out, Address(obj, class_offset));
// Compare the class of `obj` with `cls`.
if (cls.IsRegister()) {
@@ -4277,8 +4428,12 @@
__ movl(out, Immediate(1));
__ jmp(&done);
}
- __ Bind(&zero);
- __ movl(out, Immediate(0));
+
+ if (instruction->MustDoNullCheck() || instruction->IsClassFinal()) {
+ __ Bind(&zero);
+ __ movl(out, Immediate(0));
+ }
+
if (slow_path != nullptr) {
__ Bind(slow_path->GetExitLabel());
}
@@ -4303,11 +4458,13 @@
instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
codegen_->AddSlowPath(slow_path);
- // TODO: avoid this check if we know obj is not null.
- __ testl(obj, obj);
- __ j(kEqual, slow_path->GetExitLabel());
- __ movl(temp, Address(obj, class_offset));
+ // Avoid null check if we know obj is not null.
+ if (instruction->MustDoNullCheck()) {
+ __ testl(obj, obj);
+ __ j(kEqual, slow_path->GetExitLabel());
+ }
+ __ movl(temp, Address(obj, class_offset));
// Compare the class of `obj` with `cls`.
if (cls.IsRegister()) {
__ cmpl(temp, cls.AsRegister<Register>());
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 8bd3cd3..28766d8 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -75,22 +75,17 @@
DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
};
-class InvokeDexCallingConventionVisitor {
+class InvokeDexCallingConventionVisitorX86 : public InvokeDexCallingConventionVisitor {
public:
- InvokeDexCallingConventionVisitor() : gp_index_(0), fp_index_(0), stack_index_(0) {}
+ InvokeDexCallingConventionVisitorX86() {}
+ virtual ~InvokeDexCallingConventionVisitorX86() {}
- Location GetNextLocation(Primitive::Type type);
+ Location GetNextLocation(Primitive::Type type) OVERRIDE;
private:
InvokeDexCallingConvention calling_convention;
- // The current index for cpu registers.
- uint32_t gp_index_;
- // The current index for fpu registers.
- uint32_t fp_index_;
- // The current stack index.
- uint32_t stack_index_;
- DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitor);
+ DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorX86);
};
class ParallelMoveResolverX86 : public ParallelMoveResolverWithSwap {
@@ -137,7 +132,7 @@
void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
CodeGeneratorX86* const codegen_;
- InvokeDexCallingConventionVisitor parameter_visitor_;
+ InvokeDexCallingConventionVisitorX86 parameter_visitor_;
DISALLOW_COPY_AND_ASSIGN(LocationsBuilderX86);
};
@@ -171,8 +166,13 @@
void GenerateShlLong(const Location& loc, Register shifter);
void GenerateShrLong(const Location& loc, Register shifter);
void GenerateUShrLong(const Location& loc, Register shifter);
+ void GenerateShlLong(const Location& loc, int shift);
+ void GenerateShrLong(const Location& loc, int shift);
+ void GenerateUShrLong(const Location& loc, int shift);
void GenerateMemoryBarrier(MemBarrierKind kind);
- void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
+ void HandleFieldSet(HInstruction* instruction,
+ const FieldInfo& field_info,
+ bool value_can_be_null);
void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
// Push value to FPU stack. `is_fp` specifies whether the value is floating point or not.
// `is_wide` specifies whether it is long/double or not.
@@ -262,7 +262,11 @@
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Register temp);
// Emit a write barrier.
- void MarkGCCard(Register temp, Register card, Register object, Register value);
+ void MarkGCCard(Register temp,
+ Register card,
+ Register object,
+ Register value,
+ bool value_can_be_null);
void LoadCurrentMethod(Register reg);
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index d8d2ae3..f49c26d 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -99,7 +99,7 @@
if (is_div_) {
__ negq(cpu_reg_);
} else {
- __ movq(cpu_reg_, Immediate(0));
+ __ xorl(cpu_reg_, cpu_reg_);
}
}
__ jmp(GetExitLabel());
@@ -136,6 +136,10 @@
return &return_label_;
}
+ HBasicBlock* GetSuccessor() const {
+ return successor_;
+ }
+
private:
HSuspendCheck* const instruction_;
HBasicBlock* const successor_;
@@ -197,7 +201,6 @@
InvokeRuntimeCallingConvention calling_convention;
__ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(cls_->GetTypeIndex()));
- x64_codegen->LoadCurrentMethod(CpuRegister(calling_convention.GetRegisterAt(1)));
__ gs()->call(Address::Absolute((do_clinit_
? QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeStaticStorage)
: QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeType)) , true));
@@ -244,7 +247,6 @@
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- x64_codegen->LoadCurrentMethod(CpuRegister(calling_convention.GetRegisterAt(1)));
__ movl(CpuRegister(calling_convention.GetRegisterAt(0)),
Immediate(instruction_->GetStringIndex()));
__ gs()->call(Address::Absolute(
@@ -368,29 +370,37 @@
//
// Currently we implement the app -> app logic, which looks up in the resolve cache.
- // temp = method;
- LoadCurrentMethod(temp);
- if (!invoke->IsRecursive()) {
- // temp = temp->dex_cache_resolved_methods_;
- __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
- // temp = temp[index_in_cache]
- __ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex())));
+ if (invoke->IsStringInit()) {
+ // temp = thread->string_init_entrypoint
+ __ gs()->movl(temp, Address::Absolute(invoke->GetStringInitOffset()));
// (temp + offset_of_quick_compiled_code)()
__ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kX86_64WordSize).SizeValue()));
} else {
- __ call(&frame_entry_label_);
+ // temp = method;
+ LoadCurrentMethod(temp);
+ if (!invoke->IsRecursive()) {
+ // temp = temp->dex_cache_resolved_methods_;
+ __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
+ // temp = temp[index_in_cache]
+ __ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex())));
+ // (temp + offset_of_quick_compiled_code)()
+ __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kX86_64WordSize).SizeValue()));
+ } else {
+ __ call(&frame_entry_label_);
+ }
}
DCHECK(!IsLeafMethod());
}
void CodeGeneratorX86_64::DumpCoreRegister(std::ostream& stream, int reg) const {
- stream << X86_64ManagedRegister::FromCpuRegister(Register(reg));
+ stream << Register(reg);
}
void CodeGeneratorX86_64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
- stream << X86_64ManagedRegister::FromXmmRegister(FloatRegister(reg));
+ stream << FloatRegister(reg);
}
size_t CodeGeneratorX86_64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
@@ -665,7 +675,7 @@
DCHECK(constant->IsLongConstant());
value = constant->AsLongConstant()->GetValue();
}
- __ movq(CpuRegister(TMP), Immediate(value));
+ Load64BitValue(CpuRegister(TMP), value);
__ movq(Address(CpuRegister(RSP), destination.GetStackIndex()), CpuRegister(TMP));
} else {
DCHECK(source.IsDoubleStackSlot());
@@ -698,9 +708,9 @@
} else if (const_to_move->IsLongConstant()) {
int64_t value = const_to_move->AsLongConstant()->GetValue();
if (location.IsRegister()) {
- __ movq(location.AsRegister<CpuRegister>(), Immediate(value));
+ Load64BitValue(location.AsRegister<CpuRegister>(), value);
} else if (location.IsDoubleStackSlot()) {
- __ movq(CpuRegister(TMP), Immediate(value));
+ Load64BitValue(CpuRegister(TMP), value);
__ movq(Address(CpuRegister(RSP), location.GetStackIndex()), CpuRegister(TMP));
} else {
DCHECK(location.IsConstant());
@@ -765,7 +775,6 @@
HLoopInformation* info = block->GetLoopInformation();
if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
- codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
GenerateSuspendCheck(info->GetSuspendCheck(), successor);
return;
}
@@ -950,7 +959,7 @@
LocationSummary* locations = comp->GetLocations();
CpuRegister reg = locations->Out().AsRegister<CpuRegister>();
// Clear register: setcc only sets the low byte.
- __ xorq(reg, reg);
+ __ xorl(reg, reg);
Location lhs = locations->InAt(0);
Location rhs = locations->InAt(1);
if (rhs.IsRegister()) {
@@ -1023,14 +1032,14 @@
switch (compare->InputAt(0)->GetType()) {
case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrInt32LongConstant(compare->InputAt(1)));
+ locations->SetInAt(1, Location::Any());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
}
case Primitive::kPrimFloat:
case Primitive::kPrimDouble: {
locations->SetInAt(0, Location::RequiresFpuRegister());
- locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::Any());
locations->SetOut(Location::RequiresRegister());
break;
}
@@ -1052,24 +1061,46 @@
CpuRegister left_reg = left.AsRegister<CpuRegister>();
if (right.IsConstant()) {
int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
- DCHECK(IsInt<32>(value));
- if (value == 0) {
- __ testq(left_reg, left_reg);
+ if (IsInt<32>(value)) {
+ if (value == 0) {
+ __ testq(left_reg, left_reg);
+ } else {
+ __ cmpq(left_reg, Immediate(static_cast<int32_t>(value)));
+ }
} else {
- __ cmpq(left_reg, Immediate(static_cast<int32_t>(value)));
+ // Value won't fit in a 32-bit immediate.
+ __ cmpq(left_reg, codegen_->LiteralInt64Address(value));
}
+ } else if (right.IsDoubleStackSlot()) {
+ __ cmpq(left_reg, Address(CpuRegister(RSP), right.GetStackIndex()));
} else {
__ cmpq(left_reg, right.AsRegister<CpuRegister>());
}
break;
}
case Primitive::kPrimFloat: {
- __ ucomiss(left.AsFpuRegister<XmmRegister>(), right.AsFpuRegister<XmmRegister>());
+ XmmRegister left_reg = left.AsFpuRegister<XmmRegister>();
+ if (right.IsConstant()) {
+ float value = right.GetConstant()->AsFloatConstant()->GetValue();
+ __ ucomiss(left_reg, codegen_->LiteralFloatAddress(value));
+ } else if (right.IsStackSlot()) {
+ __ ucomiss(left_reg, Address(CpuRegister(RSP), right.GetStackIndex()));
+ } else {
+ __ ucomiss(left_reg, right.AsFpuRegister<XmmRegister>());
+ }
__ j(kUnordered, compare->IsGtBias() ? &greater : &less);
break;
}
case Primitive::kPrimDouble: {
- __ ucomisd(left.AsFpuRegister<XmmRegister>(), right.AsFpuRegister<XmmRegister>());
+ XmmRegister left_reg = left.AsFpuRegister<XmmRegister>();
+ if (right.IsConstant()) {
+ double value = right.GetConstant()->AsDoubleConstant()->GetValue();
+ __ ucomisd(left_reg, codegen_->LiteralDoubleAddress(value));
+ } else if (right.IsDoubleStackSlot()) {
+ __ ucomisd(left_reg, Address(CpuRegister(RSP), right.GetStackIndex()));
+ } else {
+ __ ucomisd(left_reg, right.AsFpuRegister<XmmRegister>());
+ }
__ j(kUnordered, compare->IsGtBias() ? &greater : &less);
break;
}
@@ -1178,8 +1209,7 @@
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
- locations->SetInAt(0,
- Location::FpuRegisterLocation(XMM0));
+ locations->SetInAt(0, Location::FpuRegisterLocation(XMM0));
break;
default:
@@ -1213,7 +1243,7 @@
codegen_->GenerateFrameExit();
}
-Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type) {
+Location InvokeDexCallingConventionVisitorX86_64::GetNextLocation(Primitive::Type type) {
switch (type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte:
@@ -1243,7 +1273,7 @@
}
case Primitive::kPrimFloat: {
- uint32_t index = fp_index_++;
+ uint32_t index = float_index_++;
stack_index_++;
if (index < calling_convention.GetNumberOfFpuRegisters()) {
return Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(index));
@@ -1253,7 +1283,7 @@
}
case Primitive::kPrimDouble: {
- uint32_t index = fp_index_++;
+ uint32_t index = float_index_++;
stack_index_ += 2;
if (index < calling_convention.GetNumberOfFpuRegisters()) {
return Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(index));
@@ -1270,6 +1300,10 @@
}
void LocationsBuilderX86_64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
+ // When we do not run baseline, explicit clinit checks triggered by static
+ // invokes must have been pruned by art::PrepareForRegisterAllocation.
+ DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+
IntrinsicLocationsBuilderX86_64 intrinsic(codegen_);
if (intrinsic.TryDispatch(invoke)) {
return;
@@ -1288,6 +1322,10 @@
}
void InstructionCodeGeneratorX86_64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
+ // When we do not run baseline, explicit clinit checks triggered by static
+ // invokes must have been pruned by art::PrepareForRegisterAllocation.
+ DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+
if (TryGenerateIntrinsicCode(invoke, codegen_)) {
return;
}
@@ -1303,8 +1341,8 @@
new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
locations->AddTemp(Location::RegisterLocation(RDI));
- InvokeDexCallingConventionVisitor calling_convention_visitor;
- for (size_t i = 0; i < invoke->InputCount(); i++) {
+ InvokeDexCallingConventionVisitorX86_64 calling_convention_visitor;
+ for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
HInstruction* input = invoke->InputAt(i);
locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType()));
}
@@ -1384,8 +1422,8 @@
size_t class_offset = mirror::Object::ClassOffset().SizeValue();
// Set the hidden argument.
- __ movq(invoke->GetLocations()->GetTemp(1).AsRegister<CpuRegister>(),
- Immediate(invoke->GetDexMethodIndex()));
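+ // The dex method index is an unsigned 32-bit value, so Load64BitValue can emit a short movl.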
+ CpuRegister hidden_reg = invoke->GetLocations()->GetTemp(1).AsRegister<CpuRegister>();
+ codegen_->Load64BitValue(hidden_reg, invoke->GetDexMethodIndex());
// temp = object->GetClass();
if (receiver.IsStackSlot()) {
@@ -1419,7 +1457,6 @@
case Primitive::kPrimDouble:
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::SameAsFirstInput());
- locations->AddTemp(Location::RequiresRegister());
locations->AddTemp(Location::RequiresFpuRegister());
break;
@@ -1447,26 +1484,22 @@
case Primitive::kPrimFloat: {
DCHECK(in.Equals(out));
- CpuRegister constant = locations->GetTemp(0).AsRegister<CpuRegister>();
- XmmRegister mask = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
+ XmmRegister mask = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
// Implement float negation with an exclusive or with value
// 0x80000000 (mask for bit 31, representing the sign of a
// single-precision floating-point number).
- __ movq(constant, Immediate(INT64_C(0x80000000)));
- __ movd(mask, constant);
+ __ movss(mask, codegen_->LiteralInt32Address(0x80000000));
__ xorps(out.AsFpuRegister<XmmRegister>(), mask);
break;
}
case Primitive::kPrimDouble: {
DCHECK(in.Equals(out));
- CpuRegister constant = locations->GetTemp(0).AsRegister<CpuRegister>();
- XmmRegister mask = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
+ XmmRegister mask = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
// Implement double negation with an exclusive or with value
// 0x8000000000000000 (mask for bit 63, representing the sign of
// a double-precision floating-point number).
- __ movq(constant, Immediate(INT64_C(0x8000000000000000)));
- __ movd(mask, constant);
+ __ movsd(mask, codegen_->LiteralInt64Address(INT64_C(0x8000000000000000)));
__ xorpd(out.AsFpuRegister<XmmRegister>(), mask);
break;
}
@@ -1613,19 +1646,19 @@
case Primitive::kPrimInt:
case Primitive::kPrimChar:
// Processing a Dex `int-to-float' instruction.
- locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresFpuRegister());
break;
case Primitive::kPrimLong:
// Processing a Dex `long-to-float' instruction.
- locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresFpuRegister());
break;
case Primitive::kPrimDouble:
// Processing a Dex `double-to-float' instruction.
- locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
break;
@@ -1644,19 +1677,19 @@
case Primitive::kPrimInt:
case Primitive::kPrimChar:
// Processing a Dex `int-to-double' instruction.
- locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresFpuRegister());
break;
case Primitive::kPrimLong:
// Processing a Dex `long-to-double' instruction.
- locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresFpuRegister());
break;
case Primitive::kPrimFloat:
// Processing a Dex `float-to-double' instruction.
- locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(0, Location::Any());
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
break;
@@ -1826,7 +1859,7 @@
XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
Label done, nan;
- __ movq(output, Immediate(kPrimLongMax));
+ codegen_->Load64BitValue(output, kPrimLongMax);
// temp = long-to-float(output)
__ cvtsi2ss(temp, output, true);
// if input >= temp goto done
@@ -1839,7 +1872,7 @@
__ jmp(&done);
__ Bind(&nan);
// output = 0
- __ xorq(output, output);
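+ // 32-bit writes zero-extend, so xorl clears all 64 bits with a shorter encoding than xorq.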
+ __ xorl(output, output);
__ Bind(&done);
break;
}
@@ -1851,7 +1884,7 @@
XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
Label done, nan;
- __ movq(output, Immediate(kPrimLongMax));
+ codegen_->Load64BitValue(output, kPrimLongMax);
// temp = long-to-double(output)
__ cvtsi2sd(temp, output, true);
// if input >= temp goto done
@@ -1864,7 +1897,7 @@
__ jmp(&done);
__ Bind(&nan);
// output = 0
- __ xorq(output, output);
+ __ xorl(output, output);
__ Bind(&done);
break;
}
@@ -1910,17 +1943,56 @@
case Primitive::kPrimInt:
case Primitive::kPrimChar:
// Processing a Dex `int-to-float' instruction.
- __ cvtsi2ss(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), false);
+ if (in.IsRegister()) {
+ __ cvtsi2ss(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), false);
+ } else if (in.IsConstant()) {
+ int32_t v = in.GetConstant()->AsIntConstant()->GetValue();
+ XmmRegister dest = out.AsFpuRegister<XmmRegister>();
+ if (v == 0) {
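+ // Integer zero converts exactly to +0.0f, which xorps materializes without a memory load.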
+ __ xorps(dest, dest);
+ } else {
+ __ movss(dest, codegen_->LiteralFloatAddress(static_cast<float>(v)));
+ }
+ } else {
+ __ cvtsi2ss(out.AsFpuRegister<XmmRegister>(),
+ Address(CpuRegister(RSP), in.GetStackIndex()), false);
+ }
break;
case Primitive::kPrimLong:
// Processing a Dex `long-to-float' instruction.
- __ cvtsi2ss(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), true);
+ if (in.IsRegister()) {
+ __ cvtsi2ss(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), true);
+ } else if (in.IsConstant()) {
+ int64_t v = in.GetConstant()->AsLongConstant()->GetValue();
+ XmmRegister dest = out.AsFpuRegister<XmmRegister>();
+ if (v == 0) {
+ __ xorps(dest, dest);
+ } else {
+ __ movss(dest, codegen_->LiteralFloatAddress(static_cast<float>(v)));
+ }
+ } else {
+ __ cvtsi2ss(out.AsFpuRegister<XmmRegister>(),
+ Address(CpuRegister(RSP), in.GetStackIndex()), true);
+ }
break;
case Primitive::kPrimDouble:
// Processing a Dex `double-to-float' instruction.
- __ cvtsd2ss(out.AsFpuRegister<XmmRegister>(), in.AsFpuRegister<XmmRegister>());
+ if (in.IsFpuRegister()) {
+ __ cvtsd2ss(out.AsFpuRegister<XmmRegister>(), in.AsFpuRegister<XmmRegister>());
+ } else if (in.IsConstant()) {
+ double v = in.GetConstant()->AsDoubleConstant()->GetValue();
+ XmmRegister dest = out.AsFpuRegister<XmmRegister>();
+ if (bit_cast<int64_t, double>(v) == 0) {
+ __ xorps(dest, dest);
+ } else {
+ __ movss(dest, codegen_->LiteralFloatAddress(static_cast<float>(v)));
+ }
+ } else {
+ __ cvtsd2ss(out.AsFpuRegister<XmmRegister>(),
+ Address(CpuRegister(RSP), in.GetStackIndex()));
+ }
break;
default:
@@ -1938,17 +2010,56 @@
case Primitive::kPrimInt:
case Primitive::kPrimChar:
// Processing a Dex `int-to-double' instruction.
- __ cvtsi2sd(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), false);
+ if (in.IsRegister()) {
+ __ cvtsi2sd(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), false);
+ } else if (in.IsConstant()) {
+ int32_t v = in.GetConstant()->AsIntConstant()->GetValue();
+ XmmRegister dest = out.AsFpuRegister<XmmRegister>();
+ if (v == 0) {
+ __ xorpd(dest, dest);
+ } else {
+ __ movsd(dest, codegen_->LiteralDoubleAddress(static_cast<double>(v)));
+ }
+ } else {
+ __ cvtsi2sd(out.AsFpuRegister<XmmRegister>(),
+ Address(CpuRegister(RSP), in.GetStackIndex()), false);
+ }
break;
case Primitive::kPrimLong:
// Processing a Dex `long-to-double' instruction.
- __ cvtsi2sd(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), true);
+ if (in.IsRegister()) {
+ __ cvtsi2sd(out.AsFpuRegister<XmmRegister>(), in.AsRegister<CpuRegister>(), true);
+ } else if (in.IsConstant()) {
+ int64_t v = in.GetConstant()->AsLongConstant()->GetValue();
+ XmmRegister dest = out.AsFpuRegister<XmmRegister>();
+ if (v == 0) {
+ __ xorpd(dest, dest);
+ } else {
+ __ movsd(dest, codegen_->LiteralDoubleAddress(static_cast<double>(v)));
+ }
+ } else {
+ __ cvtsi2sd(out.AsFpuRegister<XmmRegister>(),
+ Address(CpuRegister(RSP), in.GetStackIndex()), true);
+ }
break;
case Primitive::kPrimFloat:
// Processing a Dex `float-to-double' instruction.
- __ cvtss2sd(out.AsFpuRegister<XmmRegister>(), in.AsFpuRegister<XmmRegister>());
+ if (in.IsFpuRegister()) {
+ __ cvtss2sd(out.AsFpuRegister<XmmRegister>(), in.AsFpuRegister<XmmRegister>());
+ } else if (in.IsConstant()) {
+ float v = in.GetConstant()->AsFloatConstant()->GetValue();
+ XmmRegister dest = out.AsFpuRegister<XmmRegister>();
+ if (bit_cast<int32_t, float>(v) == 0) {
+ __ xorpd(dest, dest);
+ } else {
+ __ movsd(dest, codegen_->LiteralDoubleAddress(static_cast<double>(v)));
+ }
+ } else {
+ __ cvtss2sd(out.AsFpuRegister<XmmRegister>(),
+ Address(CpuRegister(RSP), in.GetStackIndex()));
+ }
break;
default:
@@ -2375,7 +2486,7 @@
case Primitive::kPrimLong: {
if (instruction->IsRem()) {
- __ xorq(output_register, output_register);
+ __ xorl(output_register, output_register);
} else {
__ movq(output_register, input_register);
if (imm == -1) {
@@ -2419,7 +2530,7 @@
DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
CpuRegister rdx = locations->GetTemp(0).AsRegister<CpuRegister>();
- __ movq(rdx, Immediate(std::abs(imm) - 1));
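+ // rdx = numerator < 0 ? numerator + (|imm| - 1) : numerator, so the arithmetic shift rounds toward zero.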
+ codegen_->Load64BitValue(rdx, std::abs(imm) - 1);
__ addq(rdx, numerator);
__ testq(numerator, numerator);
__ cmov(kGreaterEqual, rdx, numerator);
@@ -2516,7 +2627,7 @@
__ movq(numerator, rax);
// RAX = magic
- __ movq(rax, Immediate(magic));
+ codegen_->Load64BitValue(rax, magic);
// RDX:RAX = magic * numerator
__ imulq(numerator);
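+ // Division by invariant multiplication: the high half of the product in RDX approximates the quotient.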
@@ -2545,8 +2656,7 @@
if (IsInt<32>(imm)) {
__ imulq(rdx, Immediate(static_cast<int32_t>(imm)));
} else {
- __ movq(numerator, Immediate(imm));
- __ imulq(rdx, numerator);
+ __ imulq(rdx, codegen_->LiteralInt64Address(imm));
}
__ subq(rax, rdx);
@@ -2912,8 +3022,8 @@
void InstructionCodeGeneratorX86_64::VisitNewInstance(HNewInstance* instruction) {
InvokeRuntimeCallingConvention calling_convention;
codegen_->LoadCurrentMethod(CpuRegister(calling_convention.GetRegisterAt(1)));
- __ movq(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(instruction->GetTypeIndex()));
-
+ codegen_->Load64BitValue(CpuRegister(calling_convention.GetRegisterAt(0)),
+ instruction->GetTypeIndex());
__ gs()->call(
Address::Absolute(GetThreadOffset<kX86_64WordSize>(instruction->GetEntrypoint()), true));
@@ -2934,7 +3044,8 @@
void InstructionCodeGeneratorX86_64::VisitNewArray(HNewArray* instruction) {
InvokeRuntimeCallingConvention calling_convention;
codegen_->LoadCurrentMethod(CpuRegister(calling_convention.GetRegisterAt(2)));
- __ movq(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(instruction->GetTypeIndex()));
+ codegen_->Load64BitValue(CpuRegister(calling_convention.GetRegisterAt(0)),
+ instruction->GetTypeIndex());
__ gs()->call(
Address::Absolute(GetThreadOffset<kX86_64WordSize>(instruction->GetEntrypoint()), true));
@@ -3128,7 +3239,7 @@
if (Primitive::IsFloatingPointType(instruction->InputAt(1)->GetType())) {
locations->SetInAt(1, Location::RequiresFpuRegister());
} else {
- locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrInt32LongConstant(instruction->InputAt(1)));
}
if (needs_write_barrier) {
// Temporary registers for the write barrier.
@@ -3138,7 +3249,8 @@
}
void InstructionCodeGeneratorX86_64::HandleFieldSet(HInstruction* instruction,
- const FieldInfo& field_info) {
+ const FieldInfo& field_info,
+ bool value_can_be_null) {
DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
LocationSummary* locations = instruction->GetLocations();
@@ -3155,24 +3267,46 @@
switch (field_type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte: {
- __ movb(Address(base, offset), value.AsRegister<CpuRegister>());
+ if (value.IsConstant()) {
+ int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant());
+ __ movb(Address(base, offset), Immediate(v));
+ } else {
+ __ movb(Address(base, offset), value.AsRegister<CpuRegister>());
+ }
break;
}
case Primitive::kPrimShort:
case Primitive::kPrimChar: {
- __ movw(Address(base, offset), value.AsRegister<CpuRegister>());
+ if (value.IsConstant()) {
+ int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant());
+ __ movw(Address(base, offset), Immediate(v));
+ } else {
+ __ movw(Address(base, offset), value.AsRegister<CpuRegister>());
+ }
break;
}
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- __ movl(Address(base, offset), value.AsRegister<CpuRegister>());
+ if (value.IsConstant()) {
+ int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant());
+ __ movl(Address(base, offset), Immediate(v));
+ } else {
+ __ movl(Address(base, offset), value.AsRegister<CpuRegister>());
+ }
break;
}
case Primitive::kPrimLong: {
- __ movq(Address(base, offset), value.AsRegister<CpuRegister>());
+ if (value.IsConstant()) {
+ int64_t v = value.GetConstant()->AsLongConstant()->GetValue();
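+ // movq only takes a sign-extended 32-bit immediate; the location builder guarantees the constant fits.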
+ DCHECK(IsInt<32>(v));
+ int32_t v_32 = v;
+ __ movq(Address(base, offset), Immediate(v_32));
+ } else {
+ __ movq(Address(base, offset), value.AsRegister<CpuRegister>());
+ }
break;
}
@@ -3196,7 +3330,7 @@
if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
CpuRegister card = locations->GetTemp(1).AsRegister<CpuRegister>();
- codegen_->MarkGCCard(temp, card, base, value.AsRegister<CpuRegister>());
+ codegen_->MarkGCCard(temp, card, base, value.AsRegister<CpuRegister>(), value_can_be_null);
}
if (is_volatile) {
@@ -3209,7 +3343,7 @@
}
void InstructionCodeGeneratorX86_64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
- HandleFieldSet(instruction, instruction->GetFieldInfo());
+ HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
void LocationsBuilderX86_64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
@@ -3233,7 +3367,7 @@
}
void InstructionCodeGeneratorX86_64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
- HandleFieldSet(instruction, instruction->GetFieldInfo());
+ HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
void LocationsBuilderX86_64::VisitNullCheck(HNullCheck* instruction) {
@@ -3291,8 +3425,7 @@
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(
- 1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
if (Primitive::IsFloatingPointType(instruction->GetType())) {
locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
} else {
@@ -3431,7 +3564,7 @@
1, Location::RegisterOrConstant(instruction->InputAt(1)));
locations->SetInAt(2, Location::RequiresRegister());
if (value_type == Primitive::kPrimLong) {
- locations->SetInAt(2, Location::RequiresRegister());
+ locations->SetInAt(2, Location::RegisterOrInt32LongConstant(instruction->InputAt(2)));
} else if (value_type == Primitive::kPrimFloat || value_type == Primitive::kPrimDouble) {
locations->SetInAt(2, Location::RequiresFpuRegister());
} else {
@@ -3519,8 +3652,8 @@
__ movl(Address(obj, offset), value.AsRegister<CpuRegister>());
} else {
DCHECK(value.IsConstant()) << value;
- __ movl(Address(obj, offset),
- Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
+ int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant());
+ __ movl(Address(obj, offset), Immediate(v));
}
} else {
DCHECK(index.IsRegister()) << index;
@@ -3529,8 +3662,9 @@
value.AsRegister<CpuRegister>());
} else {
DCHECK(value.IsConstant()) << value;
+ int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant());
__ movl(Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset),
- Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
+ Immediate(v));
}
}
codegen_->MaybeRecordImplicitNullCheck(instruction);
@@ -3538,7 +3672,8 @@
DCHECK_EQ(value_type, Primitive::kPrimNot);
CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
CpuRegister card = locations->GetTemp(1).AsRegister<CpuRegister>();
- codegen_->MarkGCCard(temp, card, obj, value.AsRegister<CpuRegister>());
+ codegen_->MarkGCCard(
+ temp, card, obj, value.AsRegister<CpuRegister>(), instruction->GetValueCanBeNull());
}
} else {
DCHECK_EQ(value_type, Primitive::kPrimNot);
@@ -3554,12 +3689,25 @@
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- DCHECK(value.IsRegister());
- __ movq(Address(obj, offset), value.AsRegister<CpuRegister>());
+ if (value.IsRegister()) {
+ __ movq(Address(obj, offset), value.AsRegister<CpuRegister>());
+ } else {
+ int64_t v = value.GetConstant()->AsLongConstant()->GetValue();
+ DCHECK(IsInt<32>(v));
+ int32_t v_32 = v;
+ __ movq(Address(obj, offset), Immediate(v_32));
+ }
} else {
- DCHECK(value.IsRegister());
- __ movq(Address(obj, index.AsRegister<CpuRegister>(), TIMES_8, data_offset),
- value.AsRegister<CpuRegister>());
+ if (value.IsRegister()) {
+ __ movq(Address(obj, index.AsRegister<CpuRegister>(), TIMES_8, data_offset),
+ value.AsRegister<CpuRegister>());
+ } else {
+ int64_t v = value.GetConstant()->AsLongConstant()->GetValue();
+ DCHECK(IsInt<32>(v));
+ int32_t v_32 = v;
+ __ movq(Address(obj, index.AsRegister<CpuRegister>(), TIMES_8, data_offset),
+ Immediate(v_32));
+ }
}
codegen_->MaybeRecordImplicitNullCheck(instruction);
break;
@@ -3621,7 +3769,7 @@
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
- locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
if (instruction->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
}
@@ -3633,31 +3781,58 @@
Location length_loc = locations->InAt(1);
SlowPathCodeX86_64* slow_path =
new (GetGraph()->GetArena()) BoundsCheckSlowPathX86_64(instruction, index_loc, length_loc);
- codegen_->AddSlowPath(slow_path);
- CpuRegister length = length_loc.AsRegister<CpuRegister>();
- if (index_loc.IsConstant()) {
- int32_t value = CodeGenerator::GetInt32ValueOf(index_loc.GetConstant());
- __ cmpl(length, Immediate(value));
+ if (length_loc.IsConstant()) {
+ int32_t length = CodeGenerator::GetInt32ValueOf(length_loc.GetConstant());
+ if (index_loc.IsConstant()) {
+ // BCE will remove the bounds check if we are guaranteed to pass.
+ int32_t index = CodeGenerator::GetInt32ValueOf(index_loc.GetConstant());
+ if (index < 0 || index >= length) {
+ codegen_->AddSlowPath(slow_path);
+ __ jmp(slow_path->GetEntryLabel());
+ } else {
+ // Some optimization after BCE may have generated this check, and we should
+ // not emit a bounds check when the index is known to be in range.
+ }
+ return;
+ }
+
+ // The constant length must be the second operand of cmpl, so reverse the jump condition.
+ CpuRegister index_reg = index_loc.AsRegister<CpuRegister>();
+ __ cmpl(index_reg, Immediate(length));
+ codegen_->AddSlowPath(slow_path);
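+ // kAboveEqual is an unsigned comparison, so a negative index also branches to the slow path.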
+ __ j(kAboveEqual, slow_path->GetEntryLabel());
} else {
- __ cmpl(length, index_loc.AsRegister<CpuRegister>());
+ CpuRegister length = length_loc.AsRegister<CpuRegister>();
+ if (index_loc.IsConstant()) {
+ int32_t value = CodeGenerator::GetInt32ValueOf(index_loc.GetConstant());
+ __ cmpl(length, Immediate(value));
+ } else {
+ __ cmpl(length, index_loc.AsRegister<CpuRegister>());
+ }
+ codegen_->AddSlowPath(slow_path);
+ __ j(kBelowEqual, slow_path->GetEntryLabel());
}
- __ j(kBelowEqual, slow_path->GetEntryLabel());
}
void CodeGeneratorX86_64::MarkGCCard(CpuRegister temp,
CpuRegister card,
CpuRegister object,
- CpuRegister value) {
+ CpuRegister value,
+ bool value_can_be_null) {
Label is_null;
- __ testl(value, value);
- __ j(kEqual, &is_null);
+ if (value_can_be_null) {
+ __ testl(value, value);
+ __ j(kEqual, &is_null);
+ }
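+ // Dirty the card covering `object`. The byte written is the low byte of the biased card
+ // table base, which the runtime guarantees equals the dirty-card value.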
__ gs()->movq(card, Address::Absolute(
Thread::CardTableOffset<kX86_64WordSize>().Int32Value(), true));
__ movq(temp, object);
__ shrq(temp, Immediate(gc::accounting::CardTable::kCardShift));
__ movb(Address(temp, card, TIMES_1, 0), card);
- __ Bind(&is_null);
+ if (value_can_be_null) {
+ __ Bind(&is_null);
+ }
}
void LocationsBuilderX86_64::VisitTemporary(HTemporary* temp) {
@@ -3699,8 +3874,19 @@
void InstructionCodeGeneratorX86_64::GenerateSuspendCheck(HSuspendCheck* instruction,
HBasicBlock* successor) {
SuspendCheckSlowPathX86_64* slow_path =
- new (GetGraph()->GetArena()) SuspendCheckSlowPathX86_64(instruction, successor);
- codegen_->AddSlowPath(slow_path);
+ down_cast<SuspendCheckSlowPathX86_64*>(instruction->GetSlowPath());
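+ // A suspend check can be reached from several back edges; create and register its slow
+ // path only on the first visit.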
+ if (slow_path == nullptr) {
+ slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathX86_64(instruction, successor);
+ instruction->SetSlowPath(slow_path);
+ codegen_->AddSlowPath(slow_path);
+ if (successor != nullptr) {
+ DCHECK(successor->IsLoopHeader());
+ codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
+ }
+ } else {
+ DCHECK_EQ(slow_path->GetSuccessor(), successor);
+ }
+
__ gs()->cmpw(Address::Absolute(
Thread::ThreadFlagsOffset<kX86_64WordSize>().Int32Value(), true), Immediate(0));
if (successor == nullptr) {
@@ -3773,45 +3959,42 @@
} else if (constant->IsLongConstant()) {
int64_t value = constant->AsLongConstant()->GetValue();
if (destination.IsRegister()) {
- __ movq(destination.AsRegister<CpuRegister>(), Immediate(value));
+ codegen_->Load64BitValue(destination.AsRegister<CpuRegister>(), value);
} else {
DCHECK(destination.IsDoubleStackSlot()) << destination;
- __ movq(CpuRegister(TMP), Immediate(value));
+ codegen_->Load64BitValue(CpuRegister(TMP), value);
__ movq(Address(CpuRegister(RSP), destination.GetStackIndex()), CpuRegister(TMP));
}
} else if (constant->IsFloatConstant()) {
float fp_value = constant->AsFloatConstant()->GetValue();
int32_t value = bit_cast<int32_t, float>(fp_value);
- Immediate imm(value);
if (destination.IsFpuRegister()) {
XmmRegister dest = destination.AsFpuRegister<XmmRegister>();
if (value == 0) {
// easy FP 0.0.
__ xorps(dest, dest);
} else {
- __ movl(CpuRegister(TMP), imm);
- __ movd(dest, CpuRegister(TMP));
+ __ movss(dest, codegen_->LiteralFloatAddress(fp_value));
}
} else {
DCHECK(destination.IsStackSlot()) << destination;
+ Immediate imm(value);
__ movl(Address(CpuRegister(RSP), destination.GetStackIndex()), imm);
}
} else {
DCHECK(constant->IsDoubleConstant()) << constant->DebugName();
double fp_value = constant->AsDoubleConstant()->GetValue();
int64_t value = bit_cast<int64_t, double>(fp_value);
- Immediate imm(value);
if (destination.IsFpuRegister()) {
XmmRegister dest = destination.AsFpuRegister<XmmRegister>();
if (value == 0) {
__ xorpd(dest, dest);
} else {
- __ movq(CpuRegister(TMP), imm);
- __ movd(dest, CpuRegister(TMP));
+ __ movsd(dest, codegen_->LiteralDoubleAddress(fp_value));
}
} else {
DCHECK(destination.IsDoubleStackSlot()) << destination;
- __ movq(CpuRegister(TMP), imm);
+ codegen_->Load64BitValue(CpuRegister(TMP), value);
__ movq(Address(CpuRegister(RSP), destination.GetStackIndex()), CpuRegister(TMP));
}
}
@@ -4052,9 +4235,11 @@
SlowPathCodeX86_64* slow_path = nullptr;
// Return 0 if `obj` is null.
- // TODO: avoid this check if we know obj is not null.
- __ testl(obj, obj);
- __ j(kEqual, &zero);
+ // Avoid null check if we know obj is not null.
+ if (instruction->MustDoNullCheck()) {
+ __ testl(obj, obj);
+ __ j(kEqual, &zero);
+ }
// Compare the class of `obj` with `cls`.
__ movl(out, Address(obj, class_offset));
if (cls.IsRegister()) {
@@ -4078,8 +4263,12 @@
__ movl(out, Immediate(1));
__ jmp(&done);
}
- __ Bind(&zero);
- __ movl(out, Immediate(0));
+
+ if (instruction->MustDoNullCheck() || instruction->IsClassFinal()) {
+ __ Bind(&zero);
+ __ movl(out, Immediate(0));
+ }
+
if (slow_path != nullptr) {
__ Bind(slow_path->GetExitLabel());
}
@@ -4104,9 +4293,11 @@
instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
codegen_->AddSlowPath(slow_path);
- // TODO: avoid this check if we know obj is not null.
- __ testl(obj, obj);
- __ j(kEqual, slow_path->GetExitLabel());
+ // Avoid null check if we know obj is not null.
+ if (instruction->MustDoNullCheck()) {
+ __ testl(obj, obj);
+ __ j(kEqual, slow_path->GetExitLabel());
+ }
// Compare the class of `obj` with `cls`.
__ movl(temp, Address(obj, class_offset));
if (cls.IsRegister()) {
@@ -4145,13 +4336,7 @@
DCHECK(instruction->GetResultType() == Primitive::kPrimInt
|| instruction->GetResultType() == Primitive::kPrimLong);
locations->SetInAt(0, Location::RequiresRegister());
- if (instruction->GetType() == Primitive::kPrimInt) {
- locations->SetInAt(1, Location::Any());
- } else {
- // We can handle 32 bit constants.
- locations->SetInAt(1, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrInt32LongConstant(instruction->InputAt(1)));
- }
+ locations->SetInAt(1, Location::Any());
locations->SetOut(Location::SameAsFirstInput());
}
@@ -4212,25 +4397,43 @@
if (second.IsConstant()) {
second_is_constant = true;
value = second.GetConstant()->AsLongConstant()->GetValue();
- DCHECK(IsInt<32>(value));
}
+ bool is_int32_value = IsInt<32>(value);
if (instruction->IsAnd()) {
if (second_is_constant) {
- __ andq(first_reg, Immediate(static_cast<int32_t>(value)));
+ if (is_int32_value) {
+ __ andq(first_reg, Immediate(static_cast<int32_t>(value)));
+ } else {
+ __ andq(first_reg, codegen_->LiteralInt64Address(value));
+ }
+ } else if (second.IsDoubleStackSlot()) {
+ __ andq(first_reg, Address(CpuRegister(RSP), second.GetStackIndex()));
} else {
__ andq(first_reg, second.AsRegister<CpuRegister>());
}
} else if (instruction->IsOr()) {
if (second_is_constant) {
- __ orq(first_reg, Immediate(static_cast<int32_t>(value)));
+ if (is_int32_value) {
+ __ orq(first_reg, Immediate(static_cast<int32_t>(value)));
+ } else {
+ __ orq(first_reg, codegen_->LiteralInt64Address(value));
+ }
+ } else if (second.IsDoubleStackSlot()) {
+ __ orq(first_reg, Address(CpuRegister(RSP), second.GetStackIndex()));
} else {
__ orq(first_reg, second.AsRegister<CpuRegister>());
}
} else {
DCHECK(instruction->IsXor());
if (second_is_constant) {
- __ xorq(first_reg, Immediate(static_cast<int32_t>(value)));
+ if (is_int32_value) {
+ __ xorq(first_reg, Immediate(static_cast<int32_t>(value)));
+ } else {
+ __ xorq(first_reg, codegen_->LiteralInt64Address(value));
+ }
+ } else if (second.IsDoubleStackSlot()) {
+ __ xorq(first_reg, Address(CpuRegister(RSP), second.GetStackIndex()));
} else {
__ xorq(first_reg, second.AsRegister<CpuRegister>());
}
@@ -4250,6 +4453,17 @@
LOG(FATAL) << "Unreachable";
}
+void CodeGeneratorX86_64::Load64BitValue(CpuRegister dest, int64_t value) {
+ if (value == 0) {
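+ // xorl also clears the upper half (32-bit writes zero-extend) and has the shortest encoding.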
+ __ xorl(dest, dest);
+ } else if (value > 0 && IsInt<32>(value)) {
+ // We can use a 32-bit move, as it will zero-extend and is one byte shorter.
+ __ movl(dest, Immediate(static_cast<int32_t>(value)));
+ } else {
+ __ movq(dest, Immediate(value));
+ }
+}
+
void CodeGeneratorX86_64::Finalize(CodeAllocator* allocator) {
// Generate the constant area if needed.
X86_64Assembler* assembler = GetAssembler();
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 6cdc822..d7bd525 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -37,7 +37,7 @@
static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
static constexpr size_t kParameterFloatRegistersLength = arraysize(kParameterFloatRegisters);
-static constexpr Register kRuntimeParameterCoreRegisters[] = { RDI, RSI, RDX };
+static constexpr Register kRuntimeParameterCoreRegisters[] = { RDI, RSI, RDX, RCX };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
arraysize(kRuntimeParameterCoreRegisters);
static constexpr FloatRegister kRuntimeParameterFpuRegisters[] = { XMM0, XMM1 };
@@ -68,22 +68,17 @@
DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
};
-class InvokeDexCallingConventionVisitor {
+class InvokeDexCallingConventionVisitorX86_64 : public InvokeDexCallingConventionVisitor {
public:
- InvokeDexCallingConventionVisitor() : gp_index_(0), fp_index_(0), stack_index_(0) {}
+ InvokeDexCallingConventionVisitorX86_64() {}
+ virtual ~InvokeDexCallingConventionVisitorX86_64() {}
- Location GetNextLocation(Primitive::Type type);
+ Location GetNextLocation(Primitive::Type type) OVERRIDE;
private:
InvokeDexCallingConvention calling_convention;
- // The current index for cpu registers.
- uint32_t gp_index_;
- // The current index for fpu registers.
- uint32_t fp_index_;
- // The current stack index.
- uint32_t stack_index_;
- DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitor);
+ DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorX86_64);
};
class CodeGeneratorX86_64;
@@ -147,7 +142,7 @@
void HandleFieldGet(HInstruction* instruction);
CodeGeneratorX86_64* const codegen_;
- InvokeDexCallingConventionVisitor parameter_visitor_;
+ InvokeDexCallingConventionVisitorX86_64 parameter_visitor_;
DISALLOW_COPY_AND_ASSIGN(LocationsBuilderX86_64);
};
@@ -179,7 +174,9 @@
void GenerateDivRemIntegral(HBinaryOperation* instruction);
void HandleShift(HBinaryOperation* operation);
void GenerateMemoryBarrier(MemBarrierKind kind);
- void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
+ void HandleFieldSet(HInstruction* instruction,
+ const FieldInfo& field_info,
+ bool value_can_be_null);
void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
void GenerateImplicitNullCheck(HNullCheck* instruction);
void GenerateExplicitNullCheck(HNullCheck* instruction);
@@ -253,7 +250,11 @@
}
// Emit a write barrier.
- void MarkGCCard(CpuRegister temp, CpuRegister card, CpuRegister object, CpuRegister value);
+ void MarkGCCard(CpuRegister temp,
+ CpuRegister card,
+ CpuRegister object,
+ CpuRegister value,
+ bool value_can_be_null);
// Helper method to move a value between two locations.
void Move(Location destination, Location source);
@@ -287,6 +288,9 @@
Address LiteralInt32Address(int32_t v);
Address LiteralInt64Address(int64_t v);
+ // Load a 64-bit value into a register in the most efficient manner.
+ void Load64BitValue(CpuRegister dest, int64_t value);
+
private:
// Labels for each block that will be compiled.
GrowableArray<Label> block_labels_;
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 94f56e5..bfed1a8 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -225,7 +225,7 @@
static void TestCode(const uint16_t* data, bool has_result = false, int32_t expected = 0) {
ArenaPool pool;
ArenaAllocator arena(&pool);
- HGraph* graph = new (&arena) HGraph(&arena);
+ HGraph* graph = CreateGraph(&arena);
HGraphBuilder builder(graph);
const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
bool graph_built = builder.BuildGraph(*item);
@@ -238,7 +238,7 @@
static void TestCodeLong(const uint16_t* data, bool has_result, int64_t expected) {
ArenaPool pool;
ArenaAllocator arena(&pool);
- HGraph* graph = new (&arena) HGraph(&arena);
+ HGraph* graph = CreateGraph(&arena);
HGraphBuilder builder(graph, Primitive::kPrimLong);
const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
bool graph_built = builder.BuildGraph(*item);
@@ -504,7 +504,7 @@
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = new (&allocator) HGraph(&allocator);
+ HGraph* graph = CreateGraph(&allocator);
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
@@ -623,7 +623,7 @@
for (size_t i = 0; i < arraysize(lhs); i++) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = new (&allocator) HGraph(&allocator);
+ HGraph* graph = CreateGraph(&allocator);
HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry_block);
@@ -669,7 +669,7 @@
for (size_t i = 0; i < arraysize(lhs); i++) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = new (&allocator) HGraph(&allocator);
+ HGraph* graph = CreateGraph(&allocator);
HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry_block);
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index 53f1f3c..246fff9 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -218,6 +218,28 @@
return Location::RequiresRegister();
}
+// Check whether the registers in an art register set have the same register codes in vixl. If
+// the register codes are the same, we can initialize a vixl register list simply from the
+// register masks. Currently, only the SP/WSP and XZR/WZR codes differ between art and vixl.
+// Note: This function is only used for debug checks.
+static inline bool ArtVixlRegCodeCoherentForRegSet(uint32_t art_core_registers,
+ size_t num_core,
+ uint32_t art_fpu_registers,
+ size_t num_fpu) {
+ // The register masks won't work if the number of registers is larger than 32.
+ DCHECK_GE(sizeof(art_core_registers) * 8, num_core);
+ DCHECK_GE(sizeof(art_fpu_registers) * 8, num_fpu);
+ for (size_t art_reg_code = 0; art_reg_code < num_core; ++art_reg_code) {
+ if (RegisterSet::Contains(art_core_registers, art_reg_code)) {
+ if (art_reg_code != static_cast<size_t>(VIXLRegCodeFromART(art_reg_code))) {
+ return false;
+ }
+ }
+ }
+ // There is no register code translation for float registers.
+ return true;
+}
+
} // namespace helpers
} // namespace arm64
} // namespace art
diff --git a/compiler/optimizing/constant_folding.cc b/compiler/optimizing/constant_folding.cc
index b7a92b5..20ce110 100644
--- a/compiler/optimizing/constant_folding.cc
+++ b/compiler/optimizing/constant_folding.cc
@@ -28,6 +28,7 @@
void VisitShift(HBinaryOperation* shift);
void VisitAnd(HAnd* instruction) OVERRIDE;
+ void VisitCompare(HCompare* instruction) OVERRIDE;
void VisitMul(HMul* instruction) OVERRIDE;
void VisitOr(HOr* instruction) OVERRIDE;
void VisitRem(HRem* instruction) OVERRIDE;
@@ -70,6 +71,14 @@
inst->ReplaceWith(constant);
inst->GetBlock()->RemoveInstruction(inst);
}
+ } else if (inst->IsTypeConversion()) {
+ // Constant folding: replace `TypeConversion(a)' with a constant at
+ // compile time if `a' is a constant.
+ HConstant* constant = inst->AsTypeConversion()->TryStaticEvaluation();
+ if (constant != nullptr) {
+ inst->ReplaceWith(constant);
+ inst->GetBlock()->RemoveInstruction(inst);
+ }
} else if (inst->IsDivZeroCheck()) {
// We can safely remove the check if the input is a non-null constant.
HDivZeroCheck* check = inst->AsDivZeroCheck();
@@ -108,6 +117,26 @@
}
}
+void InstructionWithAbsorbingInputSimplifier::VisitCompare(HCompare* instruction) {
+ HConstant* input_cst = instruction->GetConstantRight();
+ if (input_cst != nullptr) {
+ HInstruction* input_value = instruction->GetLeastConstantLeft();
+ if (Primitive::IsFloatingPointType(input_value->GetType()) &&
+ ((input_cst->IsFloatConstant() && input_cst->AsFloatConstant()->IsNaN()) ||
+ (input_cst->IsDoubleConstant() && input_cst->AsDoubleConstant()->IsNaN()))) {
+ // Replace code looking like
+ // CMP{G,L} dst, src, NaN
+ // with
+ // CONSTANT +1 (gt bias)
+ // or
+ // CONSTANT -1 (lt bias)
+ instruction->ReplaceWith(GetGraph()->GetConstant(Primitive::kPrimInt,
+ (instruction->IsGtBias() ? 1 : -1)));
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ }
+ }
+}
+
void InstructionWithAbsorbingInputSimplifier::VisitMul(HMul* instruction) {
HConstant* input_cst = instruction->GetConstantRight();
Primitive::Type type = instruction->GetType();
diff --git a/compiler/optimizing/constant_folding.h b/compiler/optimizing/constant_folding.h
index ac00824..66ff578 100644
--- a/compiler/optimizing/constant_folding.h
+++ b/compiler/optimizing/constant_folding.h
@@ -32,8 +32,8 @@
*/
class HConstantFolding : public HOptimization {
public:
- explicit HConstantFolding(HGraph* graph)
- : HOptimization(graph, true, kConstantFoldingPassName) {}
+ explicit HConstantFolding(HGraph* graph, const char* name = kConstantFoldingPassName)
+ : HOptimization(graph, true, name) {}
void Run() OVERRIDE;
diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc
index 02ad675..422223f 100644
--- a/compiler/optimizing/constant_folding_test.cc
+++ b/compiler/optimizing/constant_folding_test.cc
@@ -572,14 +572,19 @@
};
// Expected difference after dead code elimination.
- diff_t expected_dce_diff = {
- { " 3: IntConstant\n", removed },
- { " 13: IntConstant\n", removed },
- { " 18: IntConstant\n", removed },
- { " 24: IntConstant\n", removed },
- { " 34: IntConstant\n", removed },
- };
- std::string expected_after_dce = Patch(expected_after_cf, expected_dce_diff);
+ std::string expected_after_dce =
+ "BasicBlock 0, succ: 1\n"
+ " 5: IntConstant []\n"
+ " 30: SuspendCheck\n"
+ " 32: IntConstant []\n"
+ " 33: IntConstant []\n"
+ " 35: IntConstant [28]\n"
+ " 31: Goto 1\n"
+ "BasicBlock 1, pred: 0, succ: 5\n"
+ " 21: SuspendCheck\n"
+ " 28: Return(35)\n"
+ "BasicBlock 5, pred: 1\n"
+ " 29: Exit\n";
TestCode(data,
expected_before,
@@ -647,13 +652,15 @@
ASSERT_EQ(inst->AsIntConstant()->GetValue(), 1);
};
- // Expected difference after dead code elimination.
- diff_t expected_dce_diff = {
- { " 3: IntConstant [9, 15, 22]\n", " 3: IntConstant [9, 22]\n" },
- { " 22: Phi(3, 5) [15]\n", " 22: Phi(3, 5)\n" },
- { " 15: Add(22, 3)\n", removed }
- };
- std::string expected_after_dce = Patch(expected_after_cf, expected_dce_diff);
+ // Expected graph after dead code elimination.
+ std::string expected_after_dce =
+ "BasicBlock 0, succ: 1\n"
+ " 19: SuspendCheck\n"
+ " 20: Goto 1\n"
+ "BasicBlock 1, pred: 0, succ: 4\n"
+ " 17: ReturnVoid\n"
+ "BasicBlock 4, pred: 1\n"
+ " 18: Exit\n";
TestCode(data,
expected_before,
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index 9499040..b31de98 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -17,13 +17,97 @@
#include "dead_code_elimination.h"
#include "base/bit_vector-inl.h"
+#include "ssa_phi_elimination.h"
namespace art {
-void HDeadCodeElimination::Run() {
+static void MarkReachableBlocks(HBasicBlock* block, ArenaBitVector* visited) {
+ int block_id = block->GetBlockId();
+ if (visited->IsBitSet(block_id)) {
+ return;
+ }
+ visited->SetBit(block_id);
+
+ HInstruction* last_instruction = block->GetLastInstruction();
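+ // For an if on a constant condition, only the taken branch is reachable.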
+ if (last_instruction->IsIf()) {
+ HIf* if_instruction = last_instruction->AsIf();
+ HInstruction* condition = if_instruction->InputAt(0);
+ if (!condition->IsIntConstant()) {
+ MarkReachableBlocks(if_instruction->IfTrueSuccessor(), visited);
+ MarkReachableBlocks(if_instruction->IfFalseSuccessor(), visited);
+ } else if (condition->AsIntConstant()->IsOne()) {
+ MarkReachableBlocks(if_instruction->IfTrueSuccessor(), visited);
+ } else {
+ DCHECK(condition->AsIntConstant()->IsZero());
+ MarkReachableBlocks(if_instruction->IfFalseSuccessor(), visited);
+ }
+ } else {
+ for (size_t i = 0, e = block->GetSuccessors().Size(); i < e; ++i) {
+ MarkReachableBlocks(block->GetSuccessors().Get(i), visited);
+ }
+ }
+}
+
+static void MarkLoopHeadersContaining(const HBasicBlock& block, ArenaBitVector* set) {
+ for (HLoopInformationOutwardIterator it(block); !it.Done(); it.Advance()) {
+ set->SetBit(it.Current()->GetHeader()->GetBlockId());
+ }
+}
+
+void HDeadCodeElimination::MaybeRecordDeadBlock(HBasicBlock* block) {
+ if (stats_ != nullptr) {
+ stats_->RecordStat(MethodCompilationStat::kRemovedDeadInstruction,
+ block->GetPhis().CountSize() + block->GetInstructions().CountSize());
+ }
+}
+
+void HDeadCodeElimination::RemoveDeadBlocks() {
+ // Classify blocks as reachable/unreachable.
+ ArenaAllocator* allocator = graph_->GetArena();
+ ArenaBitVector live_blocks(allocator, graph_->GetBlocks().Size(), false);
+ ArenaBitVector affected_loops(allocator, graph_->GetBlocks().Size(), false);
+
+ MarkReachableBlocks(graph_->GetEntryBlock(), &live_blocks);
+
+ // Remove all dead blocks. Iterate in post order because removal needs the
+ // block's chain of dominators, and nested loops need to be updated from the
+ // inside out.
+ for (HPostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
+ HBasicBlock* block = it.Current();
+ int id = block->GetBlockId();
+ if (live_blocks.IsBitSet(id)) {
+ if (affected_loops.IsBitSet(id)) {
+ DCHECK(block->IsLoopHeader());
+ block->GetLoopInformation()->Update();
+ }
+ } else {
+ MaybeRecordDeadBlock(block);
+ MarkLoopHeadersContaining(*block, &affected_loops);
+ block->DisconnectAndDelete();
+ }
+ }
+
+ // Connect successive blocks created by dead branches. Order does not matter.
+ for (HReversePostOrderIterator it(*graph_); !it.Done();) {
+ HBasicBlock* block = it.Current();
+ if (block->IsEntryBlock() || block->GetSuccessors().Size() != 1u) {
+ it.Advance();
+ continue;
+ }
+ HBasicBlock* successor = block->GetSuccessors().Get(0);
+ if (successor->IsExitBlock() || successor->GetPredecessors().Size() != 1u) {
+ it.Advance();
+ continue;
+ }
+ block->MergeWith(successor);
+
+ // Reiterate on this block in case it can be merged with its new successor.
+ }
+}
+
+void HDeadCodeElimination::RemoveDeadInstructions() {
// Process basic blocks in post-order in the dominator tree, so that
- // a dead instruction depending on another dead instruction is
- // removed.
+ // a dead instruction depending on another dead instruction is removed.
for (HPostOrderIterator b(*graph_); !b.Done(); b.Advance()) {
HBasicBlock* block = b.Current();
// Traverse this block's instructions in backward order and remove
@@ -41,9 +125,16 @@
&& !inst->IsMemoryBarrier() // If we added an explicit barrier then we should keep it.
&& !inst->HasUses()) {
block->RemoveInstruction(inst);
+ MaybeRecordStat(MethodCompilationStat::kRemovedDeadInstruction);
}
}
}
}
+void HDeadCodeElimination::Run() {
+ RemoveDeadBlocks();
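+ // Removing blocks can leave phis with identical inputs; eliminate them before the instruction pass.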
+ SsaRedundantPhiElimination(graph_).Run();
+ RemoveDeadInstructions();
+}
+
} // namespace art
diff --git a/compiler/optimizing/dead_code_elimination.h b/compiler/optimizing/dead_code_elimination.h
index 3db2c3f..59a57c4 100644
--- a/compiler/optimizing/dead_code_elimination.h
+++ b/compiler/optimizing/dead_code_elimination.h
@@ -19,6 +19,7 @@
#include "nodes.h"
#include "optimization.h"
+#include "optimizing_compiler_stats.h"
namespace art {
@@ -28,15 +29,21 @@
*/
class HDeadCodeElimination : public HOptimization {
public:
- explicit HDeadCodeElimination(HGraph* graph)
- : HOptimization(graph, true, kDeadCodeEliminationPassName) {}
+ HDeadCodeElimination(HGraph* graph,
+ OptimizingCompilerStats* stats = nullptr,
+ const char* name = kInitialDeadCodeEliminationPassName)
+ : HOptimization(graph, true, name, stats) {}
void Run() OVERRIDE;
- static constexpr const char* kDeadCodeEliminationPassName =
- "dead_code_elimination";
+ static constexpr const char* kInitialDeadCodeEliminationPassName = "dead_code_elimination";
+ static constexpr const char* kFinalDeadCodeEliminationPassName = "dead_code_elimination_final";
private:
+ void MaybeRecordDeadBlock(HBasicBlock* block);
+ void RemoveDeadBlocks();
+ void RemoveDeadInstructions();
+
DISALLOW_COPY_AND_ASSIGN(HDeadCodeElimination);
};
diff --git a/compiler/optimizing/dead_code_elimination_test.cc b/compiler/optimizing/dead_code_elimination_test.cc
index 98ae1ec..3209d3e 100644
--- a/compiler/optimizing/dead_code_elimination_test.cc
+++ b/compiler/optimizing/dead_code_elimination_test.cc
@@ -169,20 +169,25 @@
"BasicBlock 5, pred: 4\n"
" 28: Exit\n";
- // Expected difference after dead code elimination.
- diff_t expected_diff = {
- { " 13: IntConstant [14]\n", removed },
- { " 24: IntConstant [25]\n", removed },
- { " 14: Add(19, 13) [25]\n", removed },
- // The SuspendCheck instruction following this Add instruction
- // inserts the latter in an environment, thus making it "used" and
- // therefore non removable. It ensues that some other Add and
- // IntConstant instructions cannot be removed, as they are direct
- // or indirect inputs of the initial Add instruction.
- { " 19: Add(9, 18) [14]\n", " 19: Add(9, 18) []\n" },
- { " 25: Add(14, 24)\n", removed },
- };
- std::string expected_after = Patch(expected_before, expected_diff);
+ // The SuspendCheck instruction following this Add instruction
+ // inserts the latter in an environment, thus making it "used" and
+ // therefore non removable. It ensures that some other Add and
+ // IntConstant instructions cannot be removed, as they are direct
+ // or indirect inputs of the initial Add instruction.
+ std::string expected_after =
+ "BasicBlock 0, succ: 1\n"
+ " 3: IntConstant [9]\n"
+ " 5: IntConstant [9]\n"
+ " 18: IntConstant [19]\n"
+ " 29: SuspendCheck\n"
+ " 30: Goto 1\n"
+ "BasicBlock 1, pred: 0, succ: 5\n"
+ " 9: Add(3, 5) [19]\n"
+ " 19: Add(9, 18) []\n"
+ " 21: SuspendCheck\n"
+ " 27: ReturnVoid\n"
+ "BasicBlock 5, pred: 1\n"
+ " 28: Exit\n";
TestCode(data, expected_before, expected_after);
}
diff --git a/compiler/optimizing/dominator_test.cc b/compiler/optimizing/dominator_test.cc
index 61a7697..78ae1dd 100644
--- a/compiler/optimizing/dominator_test.cc
+++ b/compiler/optimizing/dominator_test.cc
@@ -27,7 +27,7 @@
static void TestCode(const uint16_t* data, const int* blocks, size_t blocks_length) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = new (&allocator) HGraph(&allocator);
+ HGraph* graph = CreateGraph(&allocator);
HGraphBuilder builder(graph);
const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
bool graph_built = builder.BuildGraph(*item);
diff --git a/compiler/optimizing/find_loops_test.cc b/compiler/optimizing/find_loops_test.cc
index 2bfecc6..29aa97a 100644
--- a/compiler/optimizing/find_loops_test.cc
+++ b/compiler/optimizing/find_loops_test.cc
@@ -28,7 +28,7 @@
namespace art {
static HGraph* TestCode(const uint16_t* data, ArenaAllocator* allocator) {
- HGraph* graph = new (allocator) HGraph(allocator);
+ HGraph* graph = CreateGraph(allocator);
HGraphBuilder builder(graph);
const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
builder.BuildGraph(*item);
@@ -235,14 +235,13 @@
TestBlock(graph, 0, false, -1); // entry block
TestBlock(graph, 1, false, -1); // pre header
- const int blocks2[] = {2, 3, 4, 5, 8};
- TestBlock(graph, 2, true, 2, blocks2, 5); // loop header
+ const int blocks2[] = {2, 3, 4, 5};
+ TestBlock(graph, 2, true, 2, blocks2, arraysize(blocks2)); // loop header
TestBlock(graph, 3, false, 2); // block in loop
- TestBlock(graph, 4, false, 2); // original back edge
- TestBlock(graph, 5, false, 2); // original back edge
+ TestBlock(graph, 4, false, 2); // back edge
+ TestBlock(graph, 5, false, 2); // back edge
TestBlock(graph, 6, false, -1); // return block
TestBlock(graph, 7, false, -1); // exit block
- TestBlock(graph, 8, false, 2); // synthesized back edge
}
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index e743d8e..fd28f0b 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -88,26 +88,51 @@
// Visit this block's list of phis.
for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
+ HInstruction* current = it.Current();
// Ensure this block's list of phis contains only phis.
- if (!it.Current()->IsPhi()) {
+ if (!current->IsPhi()) {
AddError(StringPrintf("Block %d has a non-phi in its phi list.",
current_block_->GetBlockId()));
}
- it.Current()->Accept(this);
+ if (current->GetNext() == nullptr && current != block->GetLastPhi()) {
+ AddError(StringPrintf("The recorded last phi of block %d does not match "
+ "the actual last phi %d.",
+ current_block_->GetBlockId(),
+ current->GetId()));
+ }
+ current->Accept(this);
}
// Visit this block's list of instructions.
- for (HInstructionIterator it(block->GetInstructions()); !it.Done();
- it.Advance()) {
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ HInstruction* current = it.Current();
// Ensure this block's list of instructions does not contain phis.
- if (it.Current()->IsPhi()) {
+ if (current->IsPhi()) {
AddError(StringPrintf("Block %d has a phi in its non-phi list.",
current_block_->GetBlockId()));
}
- it.Current()->Accept(this);
+ if (current->GetNext() == nullptr && current != block->GetLastInstruction()) {
+ AddError(StringPrintf("The recorded last instruction of block %d does not match "
+ "the actual last instruction %d.",
+ current_block_->GetBlockId(),
+ current->GetId()));
+ }
+ current->Accept(this);
}
}
+void GraphChecker::VisitBoundsCheck(HBoundsCheck* check) {
+ if (!GetGraph()->HasBoundsChecks()) {
+ AddError(StringPrintf("Instruction %s:%d is a HBoundsCheck, "
+ "but HasBoundsChecks() returns false",
+ check->DebugName(),
+ check->GetId()));
+ }
+
+ // Perform the instruction base checks too.
+ VisitInstruction(check);
+}
+
void GraphChecker::VisitInstruction(HInstruction* instruction) {
if (seen_ids_.IsBitSet(instruction->GetId())) {
AddError(StringPrintf("Instruction id %d is duplicate in graph.",
@@ -145,7 +170,8 @@
}
}
- // Ensure the uses of `instruction` are defined in a block of the graph.
+ // Ensure the uses of `instruction` are defined in a block of the graph,
+ // and the entry in the use list is consistent.
for (HUseIterator<HInstruction*> use_it(instruction->GetUses());
!use_it.Done(); use_it.Advance()) {
HInstruction* use = use_it.Current()->GetUser();
@@ -159,6 +185,27 @@
use->GetId(),
instruction->GetId()));
}
+ size_t use_index = use_it.Current()->GetIndex();
+ if ((use_index >= use->InputCount()) || (use->InputAt(use_index) != instruction)) {
+ AddError(StringPrintf("User %s:%d of instruction %d has a wrong "
+ "UseListNode index.",
+ use->DebugName(),
+ use->GetId(),
+ instruction->GetId()));
+ }
+ }
+
+ // Ensure the environment uses entries are consistent.
+ for (HUseIterator<HEnvironment*> use_it(instruction->GetEnvUses());
+ !use_it.Done(); use_it.Advance()) {
+ HEnvironment* use = use_it.Current()->GetUser();
+ size_t use_index = use_it.Current()->GetIndex();
+ if ((use_index >= use->Size()) || (use->GetInstructionAt(use_index) != instruction)) {
+ AddError(StringPrintf("Environment user of %s:%d has a wrong "
+ "UseListNode index.",
+ instruction->DebugName(),
+ instruction->GetId()));
+ }
}
// Ensure 'instruction' has pointers to its inputs' use entries.
@@ -166,7 +213,11 @@
HUserRecord<HInstruction*> input_record = instruction->InputRecordAt(i);
HInstruction* input = input_record.GetInstruction();
HUseListNode<HInstruction*>* use_node = input_record.GetUseNode();
- if (use_node == nullptr || !input->GetUses().Contains(use_node)) {
+ if ((use_node == nullptr)
+ || !input->GetUses().Contains(use_node)
+ || (use_node->GetIndex() >= e)
+ || (use_node->GetIndex() != i)) {
AddError(StringPrintf("Instruction %s:%d has an invalid pointer to use entry "
"at input %u (%s:%d).",
instruction->DebugName(),
@@ -178,6 +229,30 @@
}
}
+void GraphChecker::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
+ VisitInstruction(invoke);
+
+ if (invoke->IsStaticWithExplicitClinitCheck()) {
+ size_t last_input_index = invoke->InputCount() - 1;
+ HInstruction* last_input = invoke->InputAt(last_input_index);
+ if (last_input == nullptr) {
+ AddError(StringPrintf("Static invoke %s:%d marked as having an explicit clinit check "
+ "has a null pointer as last input.",
+ invoke->DebugName(),
+ invoke->GetId()));
+ } else if (!last_input->IsClinitCheck() && !last_input->IsLoadClass()) {
+ AddError(StringPrintf("Static invoke %s:%d marked as having an explicit clinit check "
+ "has a last instruction (%s:%d) which is neither a clinit check "
+ "nor a load class instruction.",
+ invoke->DebugName(),
+ invoke->GetId(),
+ last_input->DebugName(),
+ last_input->GetId()));
+ }
+ }
+}
+
void SSAChecker::VisitBasicBlock(HBasicBlock* block) {
super_type::VisitBasicBlock(block);
@@ -213,6 +288,7 @@
void SSAChecker::CheckLoop(HBasicBlock* loop_header) {
int id = loop_header->GetBlockId();
+ HLoopInformation* loop_information = loop_header->GetLoopInformation();
// Ensure the pre-header block is first in the list of
// predecessors of a loop header.
@@ -222,57 +298,61 @@
id));
}
- // Ensure the loop header has only two predecessors and that only the
- // second one is a back edge.
+ // Ensure the loop header has only one incoming branch and the remaining
+ // predecessors are back edges.
size_t num_preds = loop_header->GetPredecessors().Size();
if (num_preds < 2) {
AddError(StringPrintf(
"Loop header %d has less than two predecessors: %zu.",
id,
num_preds));
- } else if (num_preds > 2) {
- AddError(StringPrintf(
- "Loop header %d has more than two predecessors: %zu.",
- id,
- num_preds));
} else {
- HLoopInformation* loop_information = loop_header->GetLoopInformation();
HBasicBlock* first_predecessor = loop_header->GetPredecessors().Get(0);
if (loop_information->IsBackEdge(*first_predecessor)) {
AddError(StringPrintf(
"First predecessor of loop header %d is a back edge.",
id));
}
- HBasicBlock* second_predecessor = loop_header->GetPredecessors().Get(1);
- if (!loop_information->IsBackEdge(*second_predecessor)) {
- AddError(StringPrintf(
- "Second predecessor of loop header %d is not a back edge.",
- id));
+ for (size_t i = 1, e = loop_header->GetPredecessors().Size(); i < e; ++i) {
+ HBasicBlock* predecessor = loop_header->GetPredecessors().Get(i);
+ if (!loop_information->IsBackEdge(*predecessor)) {
+ AddError(StringPrintf(
+ "Loop header %d has multiple incoming (non back edge) blocks.",
+ id));
+ }
}
}
- // Ensure there is only one back edge per loop.
- size_t num_back_edges =
- loop_header->GetLoopInformation()->GetBackEdges().Size();
+ const ArenaBitVector& loop_blocks = loop_information->GetBlocks();
+
+ // Ensure back edges belong to the loop.
+ size_t num_back_edges = loop_information->GetBackEdges().Size();
if (num_back_edges == 0) {
AddError(StringPrintf(
"Loop defined by header %d has no back edge.",
id));
- } else if (num_back_edges > 1) {
- AddError(StringPrintf(
- "Loop defined by header %d has several back edges: %zu.",
- id,
- num_back_edges));
+ } else {
+ for (size_t i = 0; i < num_back_edges; ++i) {
+ int back_edge_id = loop_information->GetBackEdges().Get(i)->GetBlockId();
+ if (!loop_blocks.IsBitSet(back_edge_id)) {
+ AddError(StringPrintf(
+ "Loop defined by header %d has an invalid back edge %d.",
+ id,
+ back_edge_id));
+ }
+ }
}
- // Ensure all blocks in the loop are dominated by the loop header.
- const ArenaBitVector& loop_blocks =
- loop_header->GetLoopInformation()->GetBlocks();
+ // Ensure all blocks in the loop are live and dominated by the loop header.
for (uint32_t i : loop_blocks.Indexes()) {
HBasicBlock* loop_block = GetGraph()->GetBlocks().Get(i);
- if (!loop_header->Dominates(loop_block)) {
+ if (loop_block == nullptr) {
+ AddError(StringPrintf("Loop defined by header %d contains a previously removed block %d.",
+ id,
+ i));
+ } else if (!loop_header->Dominates(loop_block)) {
AddError(StringPrintf("Loop block %d not dominated by loop header %d.",
- loop_block->GetBlockId(),
+ i,
id));
}
}
@@ -283,7 +363,7 @@
if (!loop_blocks.IsSubsetOf(&outer_info->GetBlocks())) {
AddError(StringPrintf("Blocks of loop defined by header %d are not a subset of blocks of "
"an outer loop defined by header %d.",
- loop_header->GetBlockId(),
+ id,
outer_info->GetHeader()->GetBlockId()));
}
}
@@ -306,8 +386,9 @@
// Ensure an instruction having an environment is dominated by the
// instructions contained in the environment.
- HEnvironment* environment = instruction->GetEnvironment();
- if (environment != nullptr) {
+ for (HEnvironment* environment = instruction->GetEnvironment();
+ environment != nullptr;
+ environment = environment->GetParent()) {
for (size_t i = 0, e = environment->Size(); i < e; ++i) {
HInstruction* env_instruction = environment->GetInstructionAt(i);
if (env_instruction != nullptr
@@ -470,7 +551,7 @@
Primitive::PrettyDescriptor(op->InputAt(1)->GetType())));
}
} else {
- if (PrimitiveKind(op->InputAt(1)->GetType()) != PrimitiveKind(op->InputAt(0)->GetType())) {
+ if (PrimitiveKind(op->InputAt(0)->GetType()) != PrimitiveKind(op->InputAt(1)->GetType())) {
AddError(StringPrintf(
"Binary operation %s %d has inputs of different types: "
"%s, and %s.",
@@ -495,7 +576,7 @@
"from its input type: %s vs %s.",
op->DebugName(), op->GetId(),
Primitive::PrettyDescriptor(op->GetType()),
- Primitive::PrettyDescriptor(op->InputAt(1)->GetType())));
+ Primitive::PrettyDescriptor(op->InputAt(0)->GetType())));
}
}
}
diff --git a/compiler/optimizing/graph_checker.h b/compiler/optimizing/graph_checker.h
index 24fee37..b4314da 100644
--- a/compiler/optimizing/graph_checker.h
+++ b/compiler/optimizing/graph_checker.h
@@ -42,6 +42,12 @@
// Check `instruction`.
void VisitInstruction(HInstruction* instruction) OVERRIDE;
+ // Perform control-flow graph checks on instruction.
+ void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE;
+
+ // Check that the HasBoundsChecks() flag is set for bounds checks.
+ void VisitBoundsCheck(HBoundsCheck* check) OVERRIDE;
+
// Was the last visit of the graph valid?
bool IsValid() const {
return errors_.empty();
diff --git a/compiler/optimizing/graph_checker_test.cc b/compiler/optimizing/graph_checker_test.cc
index 923468f..eca0d93 100644
--- a/compiler/optimizing/graph_checker_test.cc
+++ b/compiler/optimizing/graph_checker_test.cc
@@ -30,7 +30,7 @@
* 1: Exit
*/
HGraph* CreateSimpleCFG(ArenaAllocator* allocator) {
- HGraph* graph = new (allocator) HGraph(allocator);
+ HGraph* graph = CreateGraph(allocator);
HBasicBlock* entry_block = new (allocator) HBasicBlock(graph);
entry_block->AddInstruction(new (allocator) HGoto());
graph->AddBlock(entry_block);
diff --git a/compiler/optimizing/graph_test.cc b/compiler/optimizing/graph_test.cc
index 50398b4..59d5092 100644
--- a/compiler/optimizing/graph_test.cc
+++ b/compiler/optimizing/graph_test.cc
@@ -73,7 +73,7 @@
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = new (&allocator) HGraph(&allocator);
+ HGraph* graph = CreateGraph(&allocator);
HBasicBlock* entry_block = createEntryBlock(graph, &allocator);
HBasicBlock* if_block = createIfBlock(graph, &allocator);
HBasicBlock* if_true = createGotoBlock(graph, &allocator);
@@ -108,7 +108,7 @@
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = new (&allocator) HGraph(&allocator);
+ HGraph* graph = CreateGraph(&allocator);
HBasicBlock* entry_block = createEntryBlock(graph, &allocator);
HBasicBlock* if_block = createIfBlock(graph, &allocator);
HBasicBlock* if_false = createGotoBlock(graph, &allocator);
@@ -143,7 +143,7 @@
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = new (&allocator) HGraph(&allocator);
+ HGraph* graph = CreateGraph(&allocator);
HBasicBlock* entry_block = createEntryBlock(graph, &allocator);
HBasicBlock* if_block = createIfBlock(graph, &allocator);
HBasicBlock* return_block = createReturnBlock(graph, &allocator);
@@ -178,7 +178,7 @@
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = new (&allocator) HGraph(&allocator);
+ HGraph* graph = CreateGraph(&allocator);
HBasicBlock* entry_block = createEntryBlock(graph, &allocator);
HBasicBlock* if_block = createIfBlock(graph, &allocator);
HBasicBlock* return_block = createReturnBlock(graph, &allocator);
@@ -213,7 +213,7 @@
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = new (&allocator) HGraph(&allocator);
+ HGraph* graph = CreateGraph(&allocator);
HBasicBlock* entry_block = createEntryBlock(graph, &allocator);
HBasicBlock* first_if_block = createIfBlock(graph, &allocator);
HBasicBlock* if_block = createIfBlock(graph, &allocator);
@@ -252,7 +252,7 @@
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = new (&allocator) HGraph(&allocator);
+ HGraph* graph = CreateGraph(&allocator);
HBasicBlock* entry_block = createEntryBlock(graph, &allocator);
HBasicBlock* first_if_block = createIfBlock(graph, &allocator);
HBasicBlock* if_block = createIfBlock(graph, &allocator);
@@ -288,7 +288,7 @@
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = new (&allocator) HGraph(&allocator);
+ HGraph* graph = CreateGraph(&allocator);
HBasicBlock* block = createGotoBlock(graph, &allocator);
HInstruction* got = block->GetLastInstruction();
ASSERT_TRUE(got->IsControlFlow());
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index ca9cbc3..7da4f2d 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -17,14 +17,75 @@
#include "graph_visualizer.h"
#include "code_generator.h"
+#include "dead_code_elimination.h"
#include "licm.h"
#include "nodes.h"
#include "optimization.h"
#include "register_allocator.h"
#include "ssa_liveness_analysis.h"
+#include <cctype>
+#include <sstream>
+
namespace art {
+static bool HasWhitespace(const char* str) {
+ DCHECK(str != nullptr);
+ while (str[0] != 0) {
+ if (isspace(str[0])) {
+ return true;
+ }
+ str++;
+ }
+ return false;
+}
+
+class StringList {
+ public:
+ enum Format {
+ kArrayBrackets,
+ kSetBrackets,
+ };
+
+ // Create an empty list.
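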
+ explicit StringList(Format format = kArrayBrackets) : format_(format), is_empty_(true) {}
+
+ // Construct StringList from a linked list. List element class T
+ // must provide methods `GetNext` and `Dump`.
+ template<class T>
+ explicit StringList(T* first_entry, Format format = kArrayBrackets) : StringList(format) {
+ for (T* current = first_entry; current != nullptr; current = current->GetNext()) {
+ current->Dump(NewEntryStream());
+ }
+ }
+
+ std::ostream& NewEntryStream() {
+ if (is_empty_) {
+ is_empty_ = false;
+ } else {
+ sstream_ << ",";
+ }
+ return sstream_;
+ }
+
+ private:
+ Format format_;
+ bool is_empty_;
+ std::ostringstream sstream_;
+
+ friend std::ostream& operator<<(std::ostream& os, const StringList& list);
+};
+
+std::ostream& operator<<(std::ostream& os, const StringList& list) {
+ switch (list.format_) {
+ case StringList::kArrayBrackets: return os << "[" << list.sstream_.str() << "]";
+ case StringList::kSetBrackets: return os << "{" << list.sstream_.str() << "}";
+ default:
+ LOG(FATAL) << "Invalid StringList format";
+ UNREACHABLE();
+ }
+}
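
StringList accumulates comma-separated entries and renders them with array or set brackets when streamed; the linked-list constructor relies only on the element type providing GetNext and Dump. A usage sketch, assuming the class exactly as defined above:

std::ostringstream os;
StringList inputs;                           // kArrayBrackets by default
inputs.NewEntryStream() << "i1";
inputs.NewEntryStream() << "i2";
os << inputs;                                // appends "[i1,i2]"

StringList ranges(StringList::kSetBrackets);
os << ranges;                                // an empty set prints as "{}"
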
+
/**
* HGraph visitor to generate a file suitable for the c1visualizer tool and IRHydra.
*/
@@ -124,76 +185,99 @@
output_ << std::endl;
}
- void DumpLocation(Location location) {
+ void DumpLocation(std::ostream& stream, const Location& location) {
if (location.IsRegister()) {
- codegen_.DumpCoreRegister(output_, location.reg());
+ codegen_.DumpCoreRegister(stream, location.reg());
} else if (location.IsFpuRegister()) {
- codegen_.DumpFloatingPointRegister(output_, location.reg());
+ codegen_.DumpFloatingPointRegister(stream, location.reg());
} else if (location.IsConstant()) {
- output_ << "constant";
+ stream << "#";
HConstant* constant = location.GetConstant();
if (constant->IsIntConstant()) {
- output_ << " " << constant->AsIntConstant()->GetValue();
+ stream << constant->AsIntConstant()->GetValue();
} else if (constant->IsLongConstant()) {
- output_ << " " << constant->AsLongConstant()->GetValue();
+ stream << constant->AsLongConstant()->GetValue();
}
} else if (location.IsInvalid()) {
- output_ << "invalid";
+ stream << "invalid";
} else if (location.IsStackSlot()) {
- output_ << location.GetStackIndex() << "(sp)";
+ stream << location.GetStackIndex() << "(sp)";
} else if (location.IsFpuRegisterPair()) {
- codegen_.DumpFloatingPointRegister(output_, location.low());
- output_ << " and ";
- codegen_.DumpFloatingPointRegister(output_, location.high());
+ codegen_.DumpFloatingPointRegister(stream, location.low());
+ stream << "|";
+ codegen_.DumpFloatingPointRegister(stream, location.high());
} else if (location.IsRegisterPair()) {
- codegen_.DumpCoreRegister(output_, location.low());
- output_ << " and ";
- codegen_.DumpCoreRegister(output_, location.high());
+ codegen_.DumpCoreRegister(stream, location.low());
+ stream << "|";
+ codegen_.DumpCoreRegister(stream, location.high());
} else if (location.IsUnallocated()) {
- output_ << "<U>";
+ stream << "unallocated";
} else {
DCHECK(location.IsDoubleStackSlot());
- output_ << "2x" << location.GetStackIndex() << "(sp)";
+ stream << "2x" << location.GetStackIndex() << "(sp)";
}
}
+ std::ostream& StartAttributeStream(const char* name = nullptr) {
+ if (name == nullptr) {
+ output_ << " ";
+ } else {
+ DCHECK(!HasWhitespace(name)) << "Checker does not allow spaces in attributes";
+ output_ << " " << name << ":";
+ }
+ return output_;
+ }
+
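Each attribute is emitted as a space-separated `name:value` pair, or as a bare value when no name is given; the DCHECK enforces whitespace-free names so the Checker test harness can tokenize them. For example (a sketch against the helper above):

// Produces: " kind:exact liveness:12 42"
StartAttributeStream("kind") << "exact";
StartAttributeStream("liveness") << 12;
StartAttributeStream() << 42;  // unnamed attribute
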
void VisitParallelMove(HParallelMove* instruction) OVERRIDE {
- output_ << " (";
+ StartAttributeStream("liveness") << instruction->GetLifetimePosition();
+ StringList moves;
for (size_t i = 0, e = instruction->NumMoves(); i < e; ++i) {
MoveOperands* move = instruction->MoveOperandsAt(i);
- DumpLocation(move->GetSource());
- output_ << " -> ";
- DumpLocation(move->GetDestination());
- if (i + 1 != e) {
- output_ << ", ";
- }
+ std::ostream& str = moves.NewEntryStream();
+ DumpLocation(str, move->GetSource());
+ str << "->";
+ DumpLocation(str, move->GetDestination());
}
- output_ << ")";
- output_ << " (liveness: " << instruction->GetLifetimePosition() << ")";
+ StartAttributeStream("moves") << moves;
}
void VisitIntConstant(HIntConstant* instruction) OVERRIDE {
- output_ << " " << instruction->GetValue();
+ StartAttributeStream() << instruction->GetValue();
}
void VisitLongConstant(HLongConstant* instruction) OVERRIDE {
- output_ << " " << instruction->GetValue();
+ StartAttributeStream() << instruction->GetValue();
}
void VisitFloatConstant(HFloatConstant* instruction) OVERRIDE {
- output_ << " " << instruction->GetValue();
+ StartAttributeStream() << instruction->GetValue();
}
void VisitDoubleConstant(HDoubleConstant* instruction) OVERRIDE {
- output_ << " " << instruction->GetValue();
+ StartAttributeStream() << instruction->GetValue();
}
void VisitPhi(HPhi* phi) OVERRIDE {
- output_ << " " << phi->GetRegNumber();
+ StartAttributeStream("reg") << phi->GetRegNumber();
}
void VisitMemoryBarrier(HMemoryBarrier* barrier) OVERRIDE {
- output_ << " " << barrier->GetBarrierKind();
+ StartAttributeStream("kind") << barrier->GetBarrierKind();
+ }
+
+ void VisitLoadClass(HLoadClass* load_class) OVERRIDE {
+ StartAttributeStream("gen_clinit_check") << std::boolalpha
+ << load_class->MustGenerateClinitCheck() << std::noboolalpha;
+ }
+
+ void VisitCheckCast(HCheckCast* check_cast) OVERRIDE {
+ StartAttributeStream("must_do_null_check") << std::boolalpha
+ << check_cast->MustDoNullCheck() << std::noboolalpha;
+ }
+
+ void VisitInstanceOf(HInstanceOf* instance_of) OVERRIDE {
+ StartAttributeStream("must_do_null_check") << std::boolalpha
+ << instance_of->MustDoNullCheck() << std::noboolalpha;
}
bool IsPass(const char* name) {
@@ -202,59 +286,66 @@
void PrintInstruction(HInstruction* instruction) {
output_ << instruction->DebugName();
- instruction->Accept(this);
if (instruction->InputCount() > 0) {
- output_ << " [ ";
- for (HInputIterator inputs(instruction); !inputs.Done(); inputs.Advance()) {
- output_ << GetTypeId(inputs.Current()->GetType()) << inputs.Current()->GetId() << " ";
+ StringList inputs;
+ for (HInputIterator it(instruction); !it.Done(); it.Advance()) {
+ inputs.NewEntryStream() << GetTypeId(it.Current()->GetType()) << it.Current()->GetId();
}
- output_ << "]";
+ StartAttributeStream() << inputs;
}
+ instruction->Accept(this);
if (instruction->HasEnvironment()) {
- HEnvironment* env = instruction->GetEnvironment();
- output_ << " (env: [ ";
- for (size_t i = 0, e = env->Size(); i < e; ++i) {
- HInstruction* insn = env->GetInstructionAt(i);
- if (insn != nullptr) {
- output_ << GetTypeId(insn->GetType()) << insn->GetId() << " ";
- } else {
- output_ << " _ ";
+ StringList envs;
+ for (HEnvironment* environment = instruction->GetEnvironment();
+ environment != nullptr;
+ environment = environment->GetParent()) {
+ StringList vregs;
+ for (size_t i = 0, e = environment->Size(); i < e; ++i) {
+ HInstruction* insn = environment->GetInstructionAt(i);
+ if (insn != nullptr) {
+ vregs.NewEntryStream() << GetTypeId(insn->GetType()) << insn->GetId();
+ } else {
+ vregs.NewEntryStream() << "_";
+ }
}
+ envs.NewEntryStream() << vregs;
}
- output_ << "])";
+ StartAttributeStream("env") << envs;
}
if (IsPass(SsaLivenessAnalysis::kLivenessPassName)
&& is_after_pass_
&& instruction->GetLifetimePosition() != kNoLifetime) {
- output_ << " (liveness: " << instruction->GetLifetimePosition();
+ StartAttributeStream("liveness") << instruction->GetLifetimePosition();
if (instruction->HasLiveInterval()) {
- output_ << " ";
- const LiveInterval& interval = *instruction->GetLiveInterval();
- interval.Dump(output_);
+ LiveInterval* interval = instruction->GetLiveInterval();
+ StartAttributeStream("ranges")
+ << StringList(interval->GetFirstRange(), StringList::kSetBrackets);
+ StartAttributeStream("uses") << StringList(interval->GetFirstUse());
+ StartAttributeStream("env_uses") << StringList(interval->GetFirstEnvironmentUse());
+ StartAttributeStream("is_fixed") << interval->IsFixed();
+ StartAttributeStream("is_split") << interval->IsSplit();
+ StartAttributeStream("is_low") << interval->IsLowInterval();
+ StartAttributeStream("is_high") << interval->IsHighInterval();
}
- output_ << ")";
} else if (IsPass(RegisterAllocator::kRegisterAllocatorPassName) && is_after_pass_) {
+ StartAttributeStream("liveness") << instruction->GetLifetimePosition();
LocationSummary* locations = instruction->GetLocations();
if (locations != nullptr) {
- output_ << " ( ";
+ StringList inputs;
for (size_t i = 0; i < instruction->InputCount(); ++i) {
- DumpLocation(locations->InAt(i));
- output_ << " ";
+ DumpLocation(inputs.NewEntryStream(), locations->InAt(i));
}
- output_ << ")";
- if (locations->Out().IsValid()) {
- output_ << " -> ";
- DumpLocation(locations->Out());
- }
+ std::ostream& attr = StartAttributeStream("locations");
+ attr << inputs << "->";
+ DumpLocation(attr, locations->Out());
}
- output_ << " (liveness: " << instruction->GetLifetimePosition() << ")";
- } else if (IsPass(LICM::kLoopInvariantCodeMotionPassName)) {
- output_ << " ( loop_header:";
+ } else if (IsPass(LICM::kLoopInvariantCodeMotionPassName)
+ || IsPass(HDeadCodeElimination::kFinalDeadCodeEliminationPassName)) {
HLoopInformation* info = instruction->GetBlock()->GetLoopInformation();
if (info == nullptr) {
- output_ << "null )";
+ StartAttributeStream("loop") << "none";
} else {
- output_ << "B" << info->GetHeader()->GetBlockId() << " )";
+ StartAttributeStream("loop") << "B" << info->GetHeader()->GetBlockId();
}
}
}
@@ -274,7 +365,7 @@
output_ << bci << " " << num_uses << " "
<< GetTypeId(instruction->GetType()) << instruction->GetId() << " ";
PrintInstruction(instruction);
- output_ << kEndInstructionMarker << std::endl;
+ output_ << " " << kEndInstructionMarker << std::endl;
}
}
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index 74848d5..708733e 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -55,7 +55,7 @@
buckets_owned_(allocator, num_buckets_, false),
num_entries_(to_copy.num_entries_) {
// ArenaAllocator returns zeroed memory, so entries of buckets_ and
- // buckets_owned_ are initialized to nullptr and false, respectively.
+ // buckets_owned_ are initialized to null and false, respectively.
DCHECK(IsPowerOfTwo(num_buckets_));
if (num_buckets_ == to_copy.num_buckets_) {
// Hash table remains the same size. We copy the bucket pointers and leave
diff --git a/compiler/optimizing/gvn_test.cc b/compiler/optimizing/gvn_test.cc
index a81d49a..c3ce7e1 100644
--- a/compiler/optimizing/gvn_test.cc
+++ b/compiler/optimizing/gvn_test.cc
@@ -29,7 +29,7 @@
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = new (&allocator) HGraph(&allocator);
+ HGraph* graph = CreateGraph(&allocator);
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
@@ -78,7 +78,7 @@
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = new (&allocator) HGraph(&allocator);
+ HGraph* graph = CreateGraph(&allocator);
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
@@ -133,7 +133,7 @@
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = new (&allocator) HGraph(&allocator);
+ HGraph* graph = CreateGraph(&allocator);
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
@@ -220,7 +220,7 @@
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = new (&allocator) HGraph(&allocator);
+ HGraph* graph = CreateGraph(&allocator);
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 6d2a8d7..47c6318 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -130,6 +130,16 @@
return false;
}
+ if (invoke_instruction->IsInvokeStaticOrDirect() &&
+ invoke_instruction->AsInvokeStaticOrDirect()->IsStaticWithImplicitClinitCheck()) {
+ // Case of a static method that cannot be inlined because it implicitly
+ // requires an initialization check of its declaring class.
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
+ << " is not inlined because it is static and requires a clinit"
+ << " check that cannot be emitted due to Dex cache limitations";
+ return false;
+ }
+
if (!TryBuildAndInline(resolved_method, invoke_instruction, method_index, can_use_dex_cache)) {
resolved_method->SetShouldNotInline();
return false;
@@ -159,8 +169,33 @@
resolved_method->GetAccessFlags(),
nullptr);
+ bool requires_ctor_barrier = false;
+
+ if (dex_compilation_unit.IsConstructor()) {
+ // If it's a super invocation and we already generate a barrier there's no need
+ // to generate another one.
+ // We identify super calls by looking at the "this" pointer. If its value is the
+ // same as the local "this" pointer then we must have a super invocation.
+ bool is_super_invocation = invoke_instruction->InputAt(0)->IsParameterValue()
+ && invoke_instruction->InputAt(0)->AsParameterValue()->IsThis();
+ if (is_super_invocation && graph_->ShouldGenerateConstructorBarrier()) {
+ requires_ctor_barrier = false;
+ } else {
+ Thread* self = Thread::Current();
+ requires_ctor_barrier = compiler_driver_->RequiresConstructorBarrier(self,
+ dex_compilation_unit.GetDexFile(),
+ dex_compilation_unit.GetClassDefIndex());
+ }
+ }
+
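The block above decides whether the graph built for the inlinee must keep a constructor memory barrier: a super-constructor call whose caller already emits a barrier may drop it, otherwise the compiler driver is consulted. The decision, restated as a hypothetical standalone predicate (names are illustrative; the last argument stands in for RequiresConstructorBarrier):

bool NeedsConstructorBarrier(bool is_constructor,
                             bool is_super_invocation,
                             bool caller_emits_barrier,
                             bool driver_requires_barrier) {
  if (!is_constructor) return false;
  if (is_super_invocation && caller_emits_barrier) return false;  // already covered
  return driver_requires_barrier;
}
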
HGraph* callee_graph = new (graph_->GetArena()) HGraph(
- graph_->GetArena(), graph_->IsDebuggable(), graph_->GetCurrentInstructionId());
+ graph_->GetArena(),
+ caller_dex_file,
+ method_index,
+ requires_ctor_barrier,
+ invoke_instruction->GetOriginalInvokeType(),
+ graph_->IsDebuggable(),
+ graph_->GetCurrentInstructionId());
OptimizingCompilerStats inline_stats;
HGraphBuilder builder(callee_graph,
@@ -190,7 +225,7 @@
}
// Run simple optimizations on the graph.
- HDeadCodeElimination dce(callee_graph);
+ HDeadCodeElimination dce(callee_graph, stats_);
HConstantFolding fold(callee_graph);
InstructionSimplifier simplify(callee_graph, stats_);
@@ -258,8 +293,8 @@
callee_graph->InlineInto(graph_, invoke_instruction);
- if (callee_graph->HasArrayAccesses()) {
- graph_->SetHasArrayAccesses(true);
+ if (callee_graph->HasBoundsChecks()) {
+ graph_->SetHasBoundsChecks(true);
}
return true;
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index f30c9a6..fcb3471 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -45,6 +45,8 @@
void VisitEqual(HEqual* equal) OVERRIDE;
void VisitNotEqual(HNotEqual* equal) OVERRIDE;
void VisitBooleanNot(HBooleanNot* bool_not) OVERRIDE;
+ void VisitInstanceFieldSet(HInstanceFieldSet* instruction) OVERRIDE;
+ void VisitStaticFieldSet(HStaticFieldSet* instruction) OVERRIDE;
void VisitArraySet(HArraySet* equal) OVERRIDE;
void VisitTypeConversion(HTypeConversion* instruction) OVERRIDE;
void VisitNullCheck(HNullCheck* instruction) OVERRIDE;
@@ -62,6 +64,8 @@
void VisitSub(HSub* instruction) OVERRIDE;
void VisitUShr(HUShr* instruction) OVERRIDE;
void VisitXor(HXor* instruction) OVERRIDE;
+ void VisitInstanceOf(HInstanceOf* instruction) OVERRIDE;
+ bool IsDominatedByInputNullCheck(HInstruction* instr);
OptimizingCompilerStats* stats_;
bool simplification_occurred_ = false;
@@ -77,6 +81,8 @@
}
void InstructionSimplifierVisitor::Run() {
+ // Iterate in reverse post order to open up more simplifications to users
+ // of instructions that got simplified.
for (HReversePostOrderIterator it(*GetGraph()); !it.Done();) {
// The simplification of an instruction to another instruction may yield
// possibilities for other simplifications. So although we perform a reverse
@@ -89,10 +95,6 @@
// current index, so don't advance the iterator.
continue;
}
- if (simplifications_at_current_position_ >= kMaxSamePositionSimplifications) {
- LOG(WARNING) << "Too many simplifications (" << simplifications_at_current_position_
- << ") occurred at the current position.";
- }
simplifications_at_current_position_ = 0;
it.Advance();
}
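
The iteration scheme keeps the iterator parked on a position whenever a simplification fires there, so follow-on rewrites exposed at the same point are retried before moving on; only the warning about excessive retries is dropped. The pattern, sketched with a hypothetical Simplify step:

// Re-visit the current position until it stops simplifying.
for (auto it = worklist.begin(); it != worklist.end(); ) {
  if (Simplify(*it)) {
    continue;  // stay put: the replacement may simplify further
  }
  ++it;
}
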
@@ -140,13 +142,25 @@
HConstant* input_cst = instruction->GetConstantRight();
HInstruction* input_other = instruction->GetLeastConstantLeft();
- if ((input_cst != nullptr) && input_cst->IsZero()) {
- // Replace code looking like
- // SHL dst, src, 0
- // with
- // src
- instruction->ReplaceWith(input_other);
- instruction->GetBlock()->RemoveInstruction(instruction);
+ if (input_cst != nullptr) {
+ if (input_cst->IsZero()) {
+ // Replace code looking like
+ // SHL dst, src, 0
+ // with
+ // src
+ instruction->ReplaceWith(input_other);
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ } else if (instruction->IsShl() && input_cst->IsOne()) {
+ // Replace Shl looking like
+ // SHL dst, src, 1
+ // with
+ // ADD dst, src, src
+ HAdd* add = new (GetGraph()->GetArena()) HAdd(instruction->GetType(),
+ input_other,
+ input_other);
+ instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, add);
+ RecordSimplification();
+ }
}
}
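
The new branch strength-reduces a shift-left by one into an add of the operand with itself, which many back ends encode more cheaply than a shift. The rewrite is exact for all integral values, including on wraparound, as this self-contained check illustrates (casting through unsigned to avoid signed-overflow UB):

#include <cassert>
#include <cstdint>

int main() {
  for (int32_t x : {0, 1, -1, 0x40000000, INT32_MIN}) {
    uint32_t u = static_cast<uint32_t>(x);
    assert(static_cast<int32_t>(u << 1) == static_cast<int32_t>(u + u));
  }
  return 0;
}
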
@@ -161,8 +175,23 @@
}
}
+bool InstructionSimplifierVisitor::IsDominatedByInputNullCheck(HInstruction* instr) {
+ HInstruction* input = instr->InputAt(0);
+ for (HUseIterator<HInstruction*> it(input->GetUses()); !it.Done(); it.Advance()) {
+ HInstruction* use = it.Current()->GetUser();
+ if (use->IsNullCheck() && use->StrictlyDominates(instr)) {
+ return true;
+ }
+ }
+ return false;
+}
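
IsDominatedByInputNullCheck avoids full nullability analysis: it simply scans the uses of the instruction's first input for an HNullCheck that strictly dominates the instruction, in which case the value is provably non-null at that point. Illustrated on a small fragment (pseudo-IR for exposition, not real compiler output):

// obj = ...
// NullCheck(obj)             // strictly dominates the use below
// ...
// InstanceOf(obj, SomeType)  // safe: ClearMustDoNullCheck() applies
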
+
void InstructionSimplifierVisitor::VisitCheckCast(HCheckCast* check_cast) {
HLoadClass* load_class = check_cast->InputAt(1)->AsLoadClass();
+ if (!check_cast->InputAt(0)->CanBeNull() || IsDominatedByInputNullCheck(check_cast)) {
+ check_cast->ClearMustDoNullCheck();
+ }
+
if (!load_class->IsResolved()) {
// If the class couldn't be resolved it's not safe to compare against it. Its
// default type would be Top which might be wider than the actual class type
@@ -180,6 +209,26 @@
}
}
+void InstructionSimplifierVisitor::VisitInstanceOf(HInstanceOf* instruction) {
+ if (!instruction->InputAt(0)->CanBeNull() || IsDominatedByInputNullCheck(instruction)) {
+ instruction->ClearMustDoNullCheck();
+ }
+}
+
+void InstructionSimplifierVisitor::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
+ if ((instruction->GetValue()->GetType() == Primitive::kPrimNot)
+ && !instruction->GetValue()->CanBeNull()) {
+ instruction->ClearValueCanBeNull();
+ }
+}
+
+void InstructionSimplifierVisitor::VisitStaticFieldSet(HStaticFieldSet* instruction) {
+ if ((instruction->GetValue()->GetType() == Primitive::kPrimNot)
+ && !instruction->GetValue()->CanBeNull()) {
+ instruction->ClearValueCanBeNull();
+ }
+}
+
void InstructionSimplifierVisitor::VisitSuspendCheck(HSuspendCheck* check) {
HBasicBlock* block = check->GetBlock();
// Currently always keep the suspend check at entry.
@@ -275,6 +324,10 @@
instruction->ClearNeedsTypeCheck();
}
}
+
+ if (!value->CanBeNull()) {
+ instruction->ClearValueCanBeNull();
+ }
}
void InstructionSimplifierVisitor::VisitTypeConversion(HTypeConversion* instruction) {
@@ -370,15 +423,42 @@
return;
}
- if ((input_cst != nullptr) && input_cst->IsMinusOne() &&
- (Primitive::IsFloatingPointType(type) || Primitive::IsIntOrLongType(type))) {
+ if ((input_cst != nullptr) && input_cst->IsMinusOne()) {
// Replace code looking like
// DIV dst, src, -1
// with
// NEG dst, src
instruction->GetBlock()->ReplaceAndRemoveInstructionWith(
- instruction, (new (GetGraph()->GetArena()) HNeg(type, input_other)));
+ instruction, new (GetGraph()->GetArena()) HNeg(type, input_other));
RecordSimplification();
+ return;
+ }
+
+ if ((input_cst != nullptr) && Primitive::IsFloatingPointType(type)) {
+ // Try replacing code looking like
+ // DIV dst, src, constant
+ // with
+ // MUL dst, src, 1 / constant
+ HConstant* reciprocal = nullptr;
+ if (type == Primitive::kPrimDouble) {
+ double value = input_cst->AsDoubleConstant()->GetValue();
+ if (CanDivideByReciprocalMultiplyDouble(bit_cast<int64_t, double>(value))) {
+ reciprocal = GetGraph()->GetDoubleConstant(1.0 / value);
+ }
+ } else {
+ DCHECK_EQ(type, Primitive::kPrimFloat);
+ float value = input_cst->AsFloatConstant()->GetValue();
+ if (CanDivideByReciprocalMultiplyFloat(bit_cast<int32_t, float>(value))) {
+ reciprocal = GetGraph()->GetFloatConstant(1.0f / value);
+ }
+ }
+
+ if (reciprocal != nullptr) {
+ instruction->GetBlock()->ReplaceAndRemoveInstructionWith(
+ instruction, new (GetGraph()->GetArena()) HMul(type, input_other, reciprocal));
+ RecordSimplification();
+ return;
+ }
}
}
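
Floating-point division by a constant is rewritten to multiplication by its reciprocal only when that reciprocal is exact, i.e. the constant is a power of two whose reciprocal neither overflows nor underflows. A rough sketch of the condition the CanDivideByReciprocalMultiply* helpers are assumed to check (the real helpers inspect the IEEE-754 bit pattern directly):

#include <cmath>

bool ReciprocalIsExact(double c) {
  int exp = 0;
  double m = std::frexp(c, &exp);  // c = m * 2^exp, |m| in [0.5, 1)
  double r = 1.0 / c;
  return std::fabs(m) == 0.5 &&    // c is +/- a power of two
         std::isfinite(r) &&       // reciprocal does not overflow
         std::isnormal(r);         // ...or underflow to a denormal
}
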
@@ -431,9 +511,16 @@
if (Primitive::IsIntOrLongType(type)) {
int64_t factor = Int64FromConstant(input_cst);
- // We expect the `0` case to have been handled in the constant folding pass.
- DCHECK_NE(factor, 0);
- if (IsPowerOfTwo(factor)) {
+ // Even though constant propagation also takes care of the zero case, other
+ // optimizations can lead to having a zero multiplication.
+ if (factor == 0) {
+ // Replace code looking like
+ // MUL dst, src, 0
+ // with
+ // 0
+ instruction->ReplaceWith(input_cst);
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ } else if (IsPowerOfTwo(factor)) {
// Replace code looking like
// MUL dst, src, pow_of_2
// with
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 20aa45f..43fe374 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -186,6 +186,8 @@
return Intrinsics::kStringCharAt;
case kIntrinsicCompareTo:
return Intrinsics::kStringCompareTo;
+ case kIntrinsicGetCharsNoCheck:
+ return Intrinsics::kStringGetCharsNoCheck;
case kIntrinsicIsEmptyOrLength:
// The inliner can handle these two cases - and this is the preferred approach
// since after inlining the call is no longer visible (as opposed to waiting
@@ -194,6 +196,12 @@
case kIntrinsicIndexOf:
return ((method.d.data & kIntrinsicFlagBase0) == 0) ?
Intrinsics::kStringIndexOfAfter : Intrinsics::kStringIndexOf;
+ case kIntrinsicNewStringFromBytes:
+ return Intrinsics::kStringNewStringFromBytes;
+ case kIntrinsicNewStringFromChars:
+ return Intrinsics::kStringNewStringFromChars;
+ case kIntrinsicNewStringFromString:
+ return Intrinsics::kStringNewStringFromString;
case kIntrinsicCas:
switch (GetType(method.d.data, false)) {
@@ -280,6 +288,11 @@
case kInlineOpIPut:
return Intrinsics::kNone;
+ // String init cases, not intrinsics.
+
+ case kInlineStringInit:
+ return Intrinsics::kNone;
+
// No default case to make the compiler warn on missing cases.
}
return Intrinsics::kNone;
@@ -361,4 +374,3 @@
}
} // namespace art
-
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index dbb7cba..c243ef3 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -17,8 +17,10 @@
#ifndef ART_COMPILER_OPTIMIZING_INTRINSICS_H_
#define ART_COMPILER_OPTIMIZING_INTRINSICS_H_
+#include "code_generator.h"
#include "nodes.h"
#include "optimization.h"
+#include "parallel_move_resolver.h"
namespace art {
@@ -76,6 +78,38 @@
#undef INTRINSICS_LIST
#undef OPTIMIZING_INTRINSICS
+ static void MoveArguments(HInvoke* invoke,
+ CodeGenerator* codegen,
+ InvokeDexCallingConventionVisitor* calling_convention_visitor) {
+ if (kIsDebugBuild && invoke->IsInvokeStaticOrDirect()) {
+ HInvokeStaticOrDirect* invoke_static_or_direct = invoke->AsInvokeStaticOrDirect();
+ // When we do not run baseline, explicit clinit checks triggered by static
+ // invokes must have been pruned by art::PrepareForRegisterAllocation.
+ DCHECK(codegen->IsBaseline() || !invoke_static_or_direct->IsStaticWithExplicitClinitCheck());
+ }
+
+ if (invoke->GetNumberOfArguments() == 0) {
+ // No argument to move.
+ return;
+ }
+
+ LocationSummary* locations = invoke->GetLocations();
+
+ // We're moving potentially two or more locations to locations that could overlap, so we need
+ // a parallel move resolver.
+ HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+
+ for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
+ HInstruction* input = invoke->InputAt(i);
+ Location cc_loc = calling_convention_visitor->GetNextLocation(input->GetType());
+ Location actual_loc = locations->InAt(i);
+
+ parallel_move.AddMove(actual_loc, cc_loc, input->GetType(), nullptr);
+ }
+
+ codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+ }
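
The per-backend MoveArguments copies are folded into this single helper, parameterized by the backend's calling-convention visitor. The parallel-move resolver matters because argument locations may overlap; resolved sequentially, a swap would clobber one value, as this tiny illustration shows:

#include <utility>

int main() {
  int r0 = 10, r1 = 20;  // arg A sits in r1, arg B sits in r0
  // Naive sequential moves "r0 = r1; r1 = r0;" would leave both at 20.
  std::swap(r0, r1);     // a parallel move breaks the cycle via a temp
  return (r0 == 20 && r1 == 10) ? 0 : 1;
}
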
+
protected:
IntrinsicVisitor() {}
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 9a6062f..e785bf9 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -48,7 +48,7 @@
DCHECK_NE(type, Primitive::kPrimVoid);
- if (Primitive::IsIntegralType(type)) {
+ if (Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) {
if (type == Primitive::kPrimLong) {
Register trg_reg_lo = trg.AsRegisterPairLow<Register>();
Register trg_reg_hi = trg.AsRegisterPairHigh<Register>();
@@ -77,27 +77,9 @@
}
}
-static void MoveArguments(HInvoke* invoke, ArenaAllocator* arena, CodeGeneratorARM* codegen) {
- if (invoke->InputCount() == 0) {
- return;
- }
-
- LocationSummary* locations = invoke->GetLocations();
- InvokeDexCallingConventionVisitor calling_convention_visitor;
-
- // We're moving potentially two or more locations to locations that could overlap, so we need
- // a parallel move resolver.
- HParallelMove parallel_move(arena);
-
- for (size_t i = 0; i < invoke->InputCount(); i++) {
- HInstruction* input = invoke->InputAt(i);
- Location cc_loc = calling_convention_visitor.GetNextLocation(input->GetType());
- Location actual_loc = locations->InAt(i);
-
- parallel_move.AddMove(actual_loc, cc_loc, input->GetType(), nullptr);
- }
-
- codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+static void MoveArguments(HInvoke* invoke, CodeGeneratorARM* codegen) {
+ InvokeDexCallingConventionVisitorARM calling_convention_visitor;
+ IntrinsicVisitor::MoveArguments(invoke, codegen, &calling_convention_visitor);
}
// Slow-path for fallback (calling the managed code to handle the intrinsic) in an intrinsified
@@ -116,7 +98,7 @@
SaveLiveRegisters(codegen, invoke_->GetLocations());
- MoveArguments(invoke_, codegen->GetGraph()->GetArena(), codegen);
+ MoveArguments(invoke_, codegen);
if (invoke_->IsInvokeStaticOrDirect()) {
codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), kArtMethodRegister);
@@ -675,7 +657,8 @@
if (type == Primitive::kPrimNot) {
Register temp = locations->GetTemp(0).AsRegister<Register>();
Register card = locations->GetTemp(1).AsRegister<Register>();
- codegen->MarkGCCard(temp, card, base, value);
+ bool value_can_be_null = true; // TODO: Worth finding out this information?
+ codegen->MarkGCCard(temp, card, base, value, value_can_be_null);
}
}
@@ -743,7 +726,8 @@
if (type == Primitive::kPrimNot) {
// Mark card for object assuming new value is stored. Worst case we will mark an unchanged
// object and scan the receiver at the next GC for nothing.
- codegen->MarkGCCard(tmp_ptr, tmp_lo, base, value_lo);
+ bool value_can_be_null = true; // TODO: Worth finding out this information?
+ codegen->MarkGCCard(tmp_ptr, tmp_lo, base, value_lo, value_can_be_null);
}
// Prevent reordering with prior memory operations.
@@ -809,10 +793,6 @@
const MemberOffset value_offset = mirror::String::ValueOffset();
// Location of count
const MemberOffset count_offset = mirror::String::CountOffset();
- // Starting offset within data array
- const MemberOffset offset_offset = mirror::String::OffsetOffset();
- // Start of char data with array_
- const MemberOffset data_offset = mirror::Array::DataOffset(sizeof(uint16_t));
Register obj = locations->InAt(0).AsRegister<Register>(); // String object pointer.
Register idx = locations->InAt(1).AsRegister<Register>(); // Index of character.
@@ -834,15 +814,10 @@
__ cmp(idx, ShifterOperand(temp));
__ b(slow_path->GetEntryLabel(), CS);
- // Index computation.
- __ ldr(temp, Address(obj, offset_offset.Int32Value())); // temp := str.offset.
- __ ldr(array_temp, Address(obj, value_offset.Int32Value())); // array_temp := str.offset.
- __ add(temp, temp, ShifterOperand(idx));
- DCHECK_EQ(data_offset.Int32Value() % 2, 0); // We'll compensate by shifting.
- __ add(temp, temp, ShifterOperand(data_offset.Int32Value() / 2));
+ __ add(array_temp, obj, ShifterOperand(value_offset.Int32Value())); // array_temp := str.value.
// Load the value.
- __ ldrh(out, Address(array_temp, temp, LSL, 1)); // out := array_temp[temp].
+ __ ldrh(out, Address(array_temp, idx, LSL, 1)); // out := array_temp[idx].
__ Bind(slow_path->GetExitLabel());
}
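
With String.value now an inline char array (the offset_/array indirection is gone), charAt collapses to one unsigned bounds compare and one halfword load. A C++ analogue of the fast path, under that assumed layout:

#include <cstdint>

// Hypothetical layout: count at count_offset, chars inline at value_offset.
uint16_t CharAtFastPath(const uint16_t* value, int32_t count, int32_t idx,
                        void (*slow_path)()) {
  // The unsigned compare folds idx < 0 and idx >= count into one check,
  // mirroring "cmp idx, temp; b slow_path, CS".
  if (static_cast<uint32_t>(idx) >= static_cast<uint32_t>(count)) slow_path();
  return value[idx];  // ldrh out, [array_temp, idx, LSL #1]
}
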
@@ -863,7 +838,7 @@
LocationSummary* locations = invoke->GetLocations();
// Note that the null check must have been done earlier.
- DCHECK(!invoke->CanDoImplicitNullCheck());
+ DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
Register argument = locations->InAt(1).AsRegister<Register>();
__ cmp(argument, ShifterOperand(0));
@@ -877,6 +852,169 @@
__ Bind(slow_path->GetExitLabel());
}
+static void GenerateVisitStringIndexOf(HInvoke* invoke,
+ ArmAssembler* assembler,
+ CodeGeneratorARM* codegen,
+ ArenaAllocator* allocator,
+ bool start_at_zero) {
+ LocationSummary* locations = invoke->GetLocations();
+ Register tmp_reg = locations->GetTemp(0).AsRegister<Register>();
+
+ // Note that the null check must have been done earlier.
+ DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
+
+ // Check for code points > 0xFFFF. Either a slow-path check when we don't know statically,
+ // or directly dispatch if we have a constant.
+ SlowPathCodeARM* slow_path = nullptr;
+ if (invoke->InputAt(1)->IsIntConstant()) {
+ if (static_cast<uint32_t>(invoke->InputAt(1)->AsIntConstant()->GetValue()) >
+ std::numeric_limits<uint16_t>::max()) {
+ // Always needs the slow-path. We could directly dispatch to it, but this case should be
+ // rare, so for simplicity just put the full slow-path down and branch unconditionally.
+ slow_path = new (allocator) IntrinsicSlowPathARM(invoke);
+ codegen->AddSlowPath(slow_path);
+ __ b(slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+ return;
+ }
+ } else {
+ Register char_reg = locations->InAt(1).AsRegister<Register>();
+ __ LoadImmediate(tmp_reg, std::numeric_limits<uint16_t>::max());
+ __ cmp(char_reg, ShifterOperand(tmp_reg));
+ slow_path = new (allocator) IntrinsicSlowPathARM(invoke);
+ codegen->AddSlowPath(slow_path);
+ __ b(slow_path->GetEntryLabel(), HI);
+ }
+
+ if (start_at_zero) {
+ DCHECK_EQ(tmp_reg, R2);
+ // Start-index = 0.
+ __ LoadImmediate(tmp_reg, 0);
+ }
+
+ __ LoadFromOffset(kLoadWord, LR, TR,
+ QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pIndexOf).Int32Value());
+ __ blx(LR);
+
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetExitLabel());
+ }
+}
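
The guard above exists because the runtime pIndexOf stub handles only BMP characters: a constant code point above 0xFFFF dispatches unconditionally to the slow path, while a non-constant argument gets a runtime compare-and-branch. The dispatch shape, restated as a hedged sketch with hypothetical emit callbacks:

#include <cstdint>
#include <functional>

void EmitCodePointGuard(bool is_constant, uint32_t constant_value,
                        const std::function<void()>& emit_runtime_compare,
                        const std::function<void()>& emit_slow_path_branch) {
  if (is_constant) {
    if (constant_value > 0xFFFFu) {
      emit_slow_path_branch();  // rare case: always take the slow path
    }                           // in-range constant: no guard needed
  } else {
    emit_runtime_compare();     // cmp char, #0xFFFF; b.hi slow_path
  }
}
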
+
+void IntrinsicLocationsBuilderARM::VisitStringIndexOf(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ // We have a hand-crafted assembly stub that follows the runtime calling convention. So it's
+ // best to align the inputs accordingly.
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetOut(Location::RegisterLocation(R0));
+
+ // Need a temp for slow-path codepoint compare, and need to send start-index=0.
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+}
+
+void IntrinsicCodeGeneratorARM::VisitStringIndexOf(HInvoke* invoke) {
+ GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, GetAllocator(), true);
+}
+
+void IntrinsicLocationsBuilderARM::VisitStringIndexOfAfter(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ // We have a hand-crafted assembly stub that follows the runtime calling convention. So it's
+ // best to align the inputs accordingly.
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ locations->SetOut(Location::RegisterLocation(R0));
+
+ // Need a temp for slow-path codepoint compare.
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorARM::VisitStringIndexOfAfter(HInvoke* invoke) {
+ GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, GetAllocator(), false);
+}
+
+void IntrinsicLocationsBuilderARM::VisitStringNewStringFromBytes(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ locations->SetInAt(3, Location::RegisterLocation(calling_convention.GetRegisterAt(3)));
+ locations->SetOut(Location::RegisterLocation(R0));
+}
+
+void IntrinsicCodeGeneratorARM::VisitStringNewStringFromBytes(HInvoke* invoke) {
+ ArmAssembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ Register byte_array = locations->InAt(0).AsRegister<Register>();
+ __ cmp(byte_array, ShifterOperand(0));
+ SlowPathCodeARM* slow_path = new (GetAllocator()) IntrinsicSlowPathARM(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ b(slow_path->GetEntryLabel(), EQ);
+
+ __ LoadFromOffset(
+ kLoadWord, LR, TR, QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pAllocStringFromBytes).Int32Value());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ blx(LR);
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void IntrinsicLocationsBuilderARM::VisitStringNewStringFromChars(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ locations->SetOut(Location::RegisterLocation(R0));
+}
+
+void IntrinsicCodeGeneratorARM::VisitStringNewStringFromChars(HInvoke* invoke) {
+ ArmAssembler* assembler = GetAssembler();
+
+ __ LoadFromOffset(
+ kLoadWord, LR, TR, QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pAllocStringFromChars).Int32Value());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ blx(LR);
+}
+
+void IntrinsicLocationsBuilderARM::VisitStringNewStringFromString(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetOut(Location::RegisterLocation(R0));
+}
+
+void IntrinsicCodeGeneratorARM::VisitStringNewStringFromString(HInvoke* invoke) {
+ ArmAssembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ Register string_to_copy = locations->InAt(0).AsRegister<Register>();
+ __ cmp(string_to_copy, ShifterOperand(0));
+ SlowPathCodeARM* slow_path = new (GetAllocator()) IntrinsicSlowPathARM(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ b(slow_path->GetEntryLabel(), EQ);
+
+ __ LoadFromOffset(kLoadWord,
+ LR, TR, QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pAllocStringFromString).Int32Value());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ blx(LR);
+ __ Bind(slow_path->GetExitLabel());
+}
+
// Unimplemented intrinsics.
#define UNIMPLEMENTED_INTRINSIC(Name) \
@@ -903,9 +1041,8 @@
UNIMPLEMENTED_INTRINSIC(MathRoundFloat) // Could be done by changing rounding mode, maybe?
UNIMPLEMENTED_INTRINSIC(UnsafeCASLong) // High register pressure.
UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
-UNIMPLEMENTED_INTRINSIC(StringIndexOf)
-UNIMPLEMENTED_INTRINSIC(StringIndexOfAfter)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
+UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
} // namespace arm
} // namespace art
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index d3a4e6c..53497b6 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -75,7 +75,7 @@
DCHECK_NE(type, Primitive::kPrimVoid);
- if (Primitive::IsIntegralType(type)) {
+ if (Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) {
Register trg_reg = RegisterFrom(trg, type);
Register res_reg = RegisterFrom(ARM64ReturnLocation(type), type);
__ Mov(trg_reg, res_reg, kDiscardForSameWReg);
@@ -86,27 +86,9 @@
}
}
-static void MoveArguments(HInvoke* invoke, ArenaAllocator* arena, CodeGeneratorARM64* codegen) {
- if (invoke->InputCount() == 0) {
- return;
- }
-
- LocationSummary* locations = invoke->GetLocations();
- InvokeDexCallingConventionVisitor calling_convention_visitor;
-
- // We're moving potentially two or more locations to locations that could overlap, so we need
- // a parallel move resolver.
- HParallelMove parallel_move(arena);
-
- for (size_t i = 0; i < invoke->InputCount(); i++) {
- HInstruction* input = invoke->InputAt(i);
- Location cc_loc = calling_convention_visitor.GetNextLocation(input->GetType());
- Location actual_loc = locations->InAt(i);
-
- parallel_move.AddMove(actual_loc, cc_loc, input->GetType(), nullptr);
- }
-
- codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+static void MoveArguments(HInvoke* invoke, CodeGeneratorARM64* codegen) {
+ InvokeDexCallingConventionVisitorARM64 calling_convention_visitor;
+ IntrinsicVisitor::MoveArguments(invoke, codegen, &calling_convention_visitor);
}
// Slow-path for fallback (calling the managed code to handle the intrinsic) in an intrinsified
@@ -125,7 +107,7 @@
SaveLiveRegisters(codegen, invoke_->GetLocations());
- MoveArguments(invoke_, codegen->GetGraph()->GetArena(), codegen);
+ MoveArguments(invoke_, codegen);
if (invoke_->IsInvokeStaticOrDirect()) {
codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), kArtMethodRegister);
@@ -815,7 +797,8 @@
}
if (type == Primitive::kPrimNot) {
- codegen->MarkGCCard(base, value);
+ bool value_can_be_null = true; // TODO: Worth finding out this information?
+ codegen->MarkGCCard(base, value, value_can_be_null);
}
}
@@ -874,7 +857,8 @@
// This needs to be before the temp registers, as MarkGCCard also uses VIXL temps.
if (type == Primitive::kPrimNot) {
// Mark card for object assuming new value is stored.
- codegen->MarkGCCard(base, value);
+ bool value_can_be_null = true; // TODO: Worth finding out this information?
+ codegen->MarkGCCard(base, value, value_can_be_null);
}
UseScratchRegisterScope temps(masm);
@@ -952,10 +936,6 @@
const MemberOffset value_offset = mirror::String::ValueOffset();
// Location of count
const MemberOffset count_offset = mirror::String::CountOffset();
- // Starting offset within data array
- const MemberOffset offset_offset = mirror::String::OffsetOffset();
- // Start of char data with array_
- const MemberOffset data_offset = mirror::Array::DataOffset(sizeof(uint16_t));
Register obj = WRegisterFrom(locations->InAt(0)); // String object pointer.
Register idx = WRegisterFrom(locations->InAt(1)); // Index of character.
@@ -978,21 +958,15 @@
__ Cmp(idx, temp);
__ B(hs, slow_path->GetEntryLabel());
- // Index computation.
- __ Ldr(temp, HeapOperand(obj, offset_offset)); // temp := str.offset.
- __ Ldr(array_temp, HeapOperand(obj, value_offset)); // array_temp := str.offset.
- __ Add(temp, temp, idx);
- DCHECK_EQ(data_offset.Int32Value() % 2, 0); // We'll compensate by shifting.
- __ Add(temp, temp, Operand(data_offset.Int32Value() / 2));
+ __ Add(array_temp, obj, Operand(value_offset.Int32Value())); // array_temp := str.value.
// Load the value.
- __ Ldrh(out, MemOperand(array_temp.X(), temp, UXTW, 1)); // out := array_temp[temp].
+ __ Ldrh(out, MemOperand(array_temp.X(), idx, UXTW, 1)); // out := array_temp[idx].
__ Bind(slow_path->GetExitLabel());
}
void IntrinsicLocationsBuilderARM64::VisitStringCompareTo(HInvoke* invoke) {
- // The inputs plus one temp.
LocationSummary* locations = new (arena_) LocationSummary(invoke,
LocationSummary::kCall,
kIntrinsified);
@@ -1007,7 +981,7 @@
LocationSummary* locations = invoke->GetLocations();
// Note that the null check must have been done earlier.
- DCHECK(!invoke->CanDoImplicitNullCheck());
+ DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
Register argument = WRegisterFrom(locations->InAt(1));
__ Cmp(argument, 0);
@@ -1021,6 +995,169 @@
__ Bind(slow_path->GetExitLabel());
}
+static void GenerateVisitStringIndexOf(HInvoke* invoke,
+ vixl::MacroAssembler* masm,
+ CodeGeneratorARM64* codegen,
+ ArenaAllocator* allocator,
+ bool start_at_zero) {
+ LocationSummary* locations = invoke->GetLocations();
+ Register tmp_reg = WRegisterFrom(locations->GetTemp(0));
+
+ // Note that the null check must have been done earlier.
+ DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
+
+ // Check for code points > 0xFFFF. Either a slow-path check when we don't know statically,
+ // or directly dispatch if we have a constant.
+ SlowPathCodeARM64* slow_path = nullptr;
+ if (invoke->InputAt(1)->IsIntConstant()) {
+ if (static_cast<uint32_t>(invoke->InputAt(1)->AsIntConstant()->GetValue()) > 0xFFFFU) {
+ // Always needs the slow-path. We could directly dispatch to it, but this case should be
+ // rare, so for simplicity just put the full slow-path down and branch unconditionally.
+ slow_path = new (allocator) IntrinsicSlowPathARM64(invoke);
+ codegen->AddSlowPath(slow_path);
+ __ B(slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+ return;
+ }
+ } else {
+ Register char_reg = WRegisterFrom(locations->InAt(1));
+ __ Mov(tmp_reg, 0xFFFF);
+ __ Cmp(char_reg, Operand(tmp_reg));
+ slow_path = new (allocator) IntrinsicSlowPathARM64(invoke);
+ codegen->AddSlowPath(slow_path);
+ __ B(hi, slow_path->GetEntryLabel());
+ }
+
+ if (start_at_zero) {
+ // Start-index = 0.
+ __ Mov(tmp_reg, 0);
+ }
+
+ __ Ldr(lr, MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pIndexOf).Int32Value()));
+ __ Blr(lr);
+
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetExitLabel());
+ }
+}
+
+void IntrinsicLocationsBuilderARM64::VisitStringIndexOf(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ // We have a hand-crafted assembly stub that follows the runtime calling convention. So it's
+ // best to align the inputs accordingly.
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
+ locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimInt));
+
+ // Need a temp for slow-path codepoint compare, and need to send start_index=0.
+ locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(2)));
+}
+
+void IntrinsicCodeGeneratorARM64::VisitStringIndexOf(HInvoke* invoke) {
+ GenerateVisitStringIndexOf(invoke, GetVIXLAssembler(), codegen_, GetAllocator(), true);
+}
+
+void IntrinsicLocationsBuilderARM64::VisitStringIndexOfAfter(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ // We have a hand-crafted assembly stub that follows the runtime calling convention. So it's
+ // best to align the inputs accordingly.
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
+ locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimInt));
+
+ // Need a temp for slow-path codepoint compare.
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorARM64::VisitStringIndexOfAfter(HInvoke* invoke) {
+ GenerateVisitStringIndexOf(invoke, GetVIXLAssembler(), codegen_, GetAllocator(), false);
+}
+
+void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromBytes(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
+ locations->SetInAt(3, LocationFrom(calling_convention.GetRegisterAt(3)));
+ locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
+}
+
+void IntrinsicCodeGeneratorARM64::VisitStringNewStringFromBytes(HInvoke* invoke) {
+ vixl::MacroAssembler* masm = GetVIXLAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ Register byte_array = WRegisterFrom(locations->InAt(0));
+ __ Cmp(byte_array, 0);
+ SlowPathCodeARM64* slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ B(eq, slow_path->GetEntryLabel());
+
+ __ Ldr(lr,
+ MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocStringFromBytes).Int32Value()));
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ Blr(lr);
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromChars(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
+ locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
+}
+
+void IntrinsicCodeGeneratorARM64::VisitStringNewStringFromChars(HInvoke* invoke) {
+ vixl::MacroAssembler* masm = GetVIXLAssembler();
+
+ __ Ldr(lr,
+ MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocStringFromChars).Int32Value()));
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ Blr(lr);
+}
+
+void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromString(HInvoke* invoke) {
+ // The inputs plus one temp.
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
+ locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
+}
+
+void IntrinsicCodeGeneratorARM64::VisitStringNewStringFromString(HInvoke* invoke) {
+ vixl::MacroAssembler* masm = GetVIXLAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ Register string_to_copy = WRegisterFrom(locations->InAt(0));
+ __ Cmp(string_to_copy, 0);
+ SlowPathCodeARM64* slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ B(eq, slow_path->GetEntryLabel());
+
+ __ Ldr(lr,
+ MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocStringFromString).Int32Value()));
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ Blr(lr);
+ __ Bind(slow_path->GetExitLabel());
+}
+
// Unimplemented intrinsics.
#define UNIMPLEMENTED_INTRINSIC(Name) \
@@ -1030,9 +1167,8 @@
}
UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
-UNIMPLEMENTED_INTRINSIC(StringIndexOf)
-UNIMPLEMENTED_INTRINSIC(StringIndexOfAfter)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
+UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
} // namespace arm64
} // namespace art
diff --git a/compiler/optimizing/intrinsics_list.h b/compiler/optimizing/intrinsics_list.h
index 10f6e1d..2c9248f 100644
--- a/compiler/optimizing/intrinsics_list.h
+++ b/compiler/optimizing/intrinsics_list.h
@@ -60,8 +60,12 @@
V(MemoryPokeShortNative, kStatic) \
V(StringCharAt, kDirect) \
V(StringCompareTo, kDirect) \
+ V(StringGetCharsNoCheck, kDirect) \
V(StringIndexOf, kDirect) \
V(StringIndexOfAfter, kDirect) \
+ V(StringNewStringFromBytes, kStatic) \
+ V(StringNewStringFromChars, kStatic) \
+ V(StringNewStringFromString, kStatic) \
V(UnsafeCASInt, kDirect) \
V(UnsafeCASLong, kDirect) \
V(UnsafeCASObject, kDirect) \
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 95ab90d..d2ca42d 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -16,6 +16,8 @@
#include "intrinsics_x86.h"
+#include <limits>
+
#include "arch/x86/instruction_set_features_x86.h"
#include "code_generator_x86.h"
#include "entrypoints/quick/quick_entrypoints.h"
@@ -111,27 +113,9 @@
}
}
-static void MoveArguments(HInvoke* invoke, ArenaAllocator* arena, CodeGeneratorX86* codegen) {
- if (invoke->InputCount() == 0) {
- return;
- }
-
- LocationSummary* locations = invoke->GetLocations();
- InvokeDexCallingConventionVisitor calling_convention_visitor;
-
- // We're moving potentially two or more locations to locations that could overlap, so we need
- // a parallel move resolver.
- HParallelMove parallel_move(arena);
-
- for (size_t i = 0; i < invoke->InputCount(); i++) {
- HInstruction* input = invoke->InputAt(i);
- Location cc_loc = calling_convention_visitor.GetNextLocation(input->GetType());
- Location actual_loc = locations->InAt(i);
-
- parallel_move.AddMove(actual_loc, cc_loc, input->GetType(), nullptr);
- }
-
- codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+static void MoveArguments(HInvoke* invoke, CodeGeneratorX86* codegen) {
+ InvokeDexCallingConventionVisitorX86 calling_convention_visitor;
+ IntrinsicVisitor::MoveArguments(invoke, codegen, &calling_convention_visitor);
}
// Slow-path for fallback (calling the managed code to handle the intrinsic) in an intrinsified
@@ -142,11 +126,8 @@
// restored!
class IntrinsicSlowPathX86 : public SlowPathCodeX86 {
public:
- explicit IntrinsicSlowPathX86(HInvoke* invoke, Register temp)
- : invoke_(invoke) {
- // The temporary register has to be EAX for x86 invokes.
- DCHECK_EQ(temp, EAX);
- }
+ explicit IntrinsicSlowPathX86(HInvoke* invoke)
+ : invoke_(invoke) { }
void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
CodeGeneratorX86* codegen = down_cast<CodeGeneratorX86*>(codegen_in);
@@ -154,7 +135,7 @@
SaveLiveRegisters(codegen, invoke_->GetLocations());
- MoveArguments(invoke_, codegen->GetGraph()->GetArena(), codegen);
+ MoveArguments(invoke_, codegen);
if (invoke_->IsInvokeStaticOrDirect()) {
codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), EAX);
@@ -748,7 +729,7 @@
}
static void InvokeOutOfLineIntrinsic(CodeGeneratorX86* codegen, HInvoke* invoke) {
- MoveArguments(invoke, codegen->GetGraph()->GetArena(), codegen);
+ MoveArguments(invoke, codegen);
DCHECK(invoke->IsInvokeStaticOrDirect());
codegen->GenerateStaticOrDirectCall(invoke->AsInvokeStaticOrDirect(), EAX);
@@ -898,8 +879,6 @@
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
- // Needs to be EAX for the invoke.
- locations->AddTemp(Location::RegisterLocation(EAX));
}
void IntrinsicCodeGeneratorX86::VisitStringCharAt(HInvoke* invoke) {
@@ -909,23 +888,17 @@
const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
// Location of count
const int32_t count_offset = mirror::String::CountOffset().Int32Value();
- // Starting offset within data array
- const int32_t offset_offset = mirror::String::OffsetOffset().Int32Value();
- // Start of char data within array_
- const int32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
Register obj = locations->InAt(0).AsRegister<Register>();
Register idx = locations->InAt(1).AsRegister<Register>();
Register out = locations->Out().AsRegister<Register>();
- Location temp_loc = locations->GetTemp(0);
- Register temp = temp_loc.AsRegister<Register>();
// TODO: Maybe we can support range check elimination. Overall, though, I think it's not worth
// the cost.
// TODO: For simplicity, the index parameter is requested in a register, so different from Quick
// we will not optimize the code for constants (which would save a register).
- SlowPathCodeX86* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke, temp);
+ SlowPathCodeX86* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
codegen_->AddSlowPath(slow_path);
X86Assembler* assembler = GetAssembler();
@@ -934,12 +907,8 @@
codegen_->MaybeRecordImplicitNullCheck(invoke);
__ j(kAboveEqual, slow_path->GetEntryLabel());
- // Get the actual element.
- __ movl(temp, idx); // temp := idx.
- __ addl(temp, Address(obj, offset_offset)); // temp := offset + idx.
- __ movl(out, Address(obj, value_offset)); // obj := obj.array.
- // out = out[2*temp].
- __ movzxw(out, Address(out, temp, ScaleFactor::TIMES_2, data_offset));
+ // out = out[2*idx].
+ __ movzxw(out, Address(out, idx, ScaleFactor::TIMES_2, value_offset));
__ Bind(slow_path->GetExitLabel());
}
@@ -953,8 +922,6 @@
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
locations->SetOut(Location::RegisterLocation(EAX));
- // Needs to be EAX for the invoke.
- locations->AddTemp(Location::RegisterLocation(EAX));
}
void IntrinsicCodeGeneratorX86::VisitStringCompareTo(HInvoke* invoke) {
@@ -962,12 +929,11 @@
LocationSummary* locations = invoke->GetLocations();
// Note that the null check must have been done earlier.
- DCHECK(!invoke->CanDoImplicitNullCheck());
+ DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
Register argument = locations->InAt(1).AsRegister<Register>();
__ testl(argument, argument);
- SlowPathCodeX86* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(
- invoke, locations->GetTemp(0).AsRegister<Register>());
+ SlowPathCodeX86* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
codegen_->AddSlowPath(slow_path);
__ j(kEqual, slow_path->GetEntryLabel());
@@ -975,6 +941,227 @@
__ Bind(slow_path->GetExitLabel());
}
+static void CreateStringIndexOfLocations(HInvoke* invoke,
+ ArenaAllocator* allocator,
+ bool start_at_zero) {
+ LocationSummary* locations = new (allocator) LocationSummary(invoke,
+ LocationSummary::kCallOnSlowPath,
+ kIntrinsified);
+ // The data needs to be in EDI for scasw. So request that the string is there, anyways.
+ locations->SetInAt(0, Location::RegisterLocation(EDI));
+ // If we look for a constant char, we'll still have to copy it into EAX. So just request the
+ // allocator to do that, anyways. We can still do the constant check by checking the parameter
+ // of the instruction explicitly.
+ // Note: This works as we don't clobber EAX anywhere.
+ locations->SetInAt(1, Location::RegisterLocation(EAX));
+ if (!start_at_zero) {
+ locations->SetInAt(2, Location::RequiresRegister()); // The starting index.
+ }
+ // As we clobber EDI during execution anyways, also use it as the output.
+ locations->SetOut(Location::SameAsFirstInput());
+
+ // repne scasw uses ECX as the counter.
+ locations->AddTemp(Location::RegisterLocation(ECX));
+ // Need another temporary to be able to compute the result.
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+static void GenerateStringIndexOf(HInvoke* invoke,
+ X86Assembler* assembler,
+ CodeGeneratorX86* codegen,
+ ArenaAllocator* allocator,
+ bool start_at_zero) {
+ LocationSummary* locations = invoke->GetLocations();
+
+ // Note that the null check must have been done earlier.
+ DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
+
+ Register string_obj = locations->InAt(0).AsRegister<Register>();
+ Register search_value = locations->InAt(1).AsRegister<Register>();
+ Register counter = locations->GetTemp(0).AsRegister<Register>();
+ Register string_length = locations->GetTemp(1).AsRegister<Register>();
+ Register out = locations->Out().AsRegister<Register>();
+
+ // Check our assumptions for registers.
+ DCHECK_EQ(string_obj, EDI);
+ DCHECK_EQ(search_value, EAX);
+ DCHECK_EQ(counter, ECX);
+ DCHECK_EQ(out, EDI);
+
+ // Check for code points > 0xFFFF. Either a slow-path check when we don't know statically,
+ // or directly dispatch if we have a constant.
+ SlowPathCodeX86* slow_path = nullptr;
+ if (invoke->InputAt(1)->IsIntConstant()) {
+ if (static_cast<uint32_t>(invoke->InputAt(1)->AsIntConstant()->GetValue()) >
+ std::numeric_limits<uint16_t>::max()) {
+ // Always needs the slow-path. We could directly dispatch to it, but this case should be
+ // rare, so for simplicity just put the full slow-path down and branch unconditionally.
+ slow_path = new (allocator) IntrinsicSlowPathX86(invoke);
+ codegen->AddSlowPath(slow_path);
+ __ jmp(slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+ return;
+ }
+ } else {
+ __ cmpl(search_value, Immediate(std::numeric_limits<uint16_t>::max()));
+ slow_path = new (allocator) IntrinsicSlowPathX86(invoke);
+ codegen->AddSlowPath(slow_path);
+ __ j(kAbove, slow_path->GetEntryLabel());
+ }
+
+ // From here down, we know that we are looking for a char that fits in 16 bits.
+ // Location of reference to data array within the String object.
+ int32_t value_offset = mirror::String::ValueOffset().Int32Value();
+ // Location of count within the String object.
+ int32_t count_offset = mirror::String::CountOffset().Int32Value();
+
+ // Load string length, i.e., the count field of the string.
+ __ movl(string_length, Address(string_obj, count_offset));
+
+ // Do a zero-length check.
+ // TODO: Support jecxz.
+ Label not_found_label;
+ __ testl(string_length, string_length);
+ __ j(kEqual, &not_found_label);
+
+ if (start_at_zero) {
+ // Number of chars to scan is the same as the string length.
+ __ movl(counter, string_length);
+
+ // Move to the start of the string.
+ __ addl(string_obj, Immediate(value_offset));
+ } else {
+ Register start_index = locations->InAt(2).AsRegister<Register>();
+
+ // Do a start_index check.
+ __ cmpl(start_index, string_length);
+ __ j(kGreaterEqual, &not_found_label);
+
+ // Ensure we have a start index >= 0.
+ __ xorl(counter, counter);
+ __ cmpl(start_index, Immediate(0));
+ __ cmovl(kGreater, counter, start_index);
+
+ // Move to the start of the string: string_obj + value_offset + 2 * start_index.
+ __ leal(string_obj, Address(string_obj, counter, ScaleFactor::TIMES_2, value_offset));
+
+ // Now update ecx (the repne scasw work counter). We have string.length - start_index left to
+ // compare.
+ __ negl(counter);
+ __ leal(counter, Address(string_length, counter, ScaleFactor::TIMES_1, 0));
+ }
+
+ // Everything is set up for repne scasw:
+ // * Comparison address in EDI.
+ // * Counter in ECX.
+ __ repne_scasw();
+
+ // Did we find a match?
+ __ j(kNotEqual, &not_found_label);
+
+ // Yes, we matched. Compute the index of the result.
+ __ subl(string_length, counter);
+ __ leal(out, Address(string_length, -1));
+
+ Label done;
+ __ jmp(&done);
+
+ // Failed to match; return -1.
+ __ Bind(&not_found_label);
+ __ movl(out, Immediate(-1));
+
+ // And join up at the end.
+ __ Bind(&done);
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetExitLabel());
+ }
+}
+
+void IntrinsicLocationsBuilderX86::VisitStringIndexOf(HInvoke* invoke) {
+ CreateStringIndexOfLocations(invoke, arena_, true);
+}
+
+void IntrinsicCodeGeneratorX86::VisitStringIndexOf(HInvoke* invoke) {
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, GetAllocator(), true);
+}
+
+void IntrinsicLocationsBuilderX86::VisitStringIndexOfAfter(HInvoke* invoke) {
+ CreateStringIndexOfLocations(invoke, arena_, false);
+}
+
+void IntrinsicCodeGeneratorX86::VisitStringIndexOfAfter(HInvoke* invoke) {
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, GetAllocator(), false);
+}
+
+void IntrinsicLocationsBuilderX86::VisitStringNewStringFromBytes(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ locations->SetInAt(3, Location::RegisterLocation(calling_convention.GetRegisterAt(3)));
+ locations->SetOut(Location::RegisterLocation(EAX));
+}
+
+void IntrinsicCodeGeneratorX86::VisitStringNewStringFromBytes(HInvoke* invoke) {
+ X86Assembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ Register byte_array = locations->InAt(0).AsRegister<Register>();
+ __ testl(byte_array, byte_array);
+ SlowPathCodeX86* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ j(kEqual, slow_path->GetEntryLabel());
+
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocStringFromBytes)));
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void IntrinsicLocationsBuilderX86::VisitStringNewStringFromChars(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ locations->SetOut(Location::RegisterLocation(EAX));
+}
+
+void IntrinsicCodeGeneratorX86::VisitStringNewStringFromChars(HInvoke* invoke) {
+ X86Assembler* assembler = GetAssembler();
+
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocStringFromChars)));
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void IntrinsicLocationsBuilderX86::VisitStringNewStringFromString(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetOut(Location::RegisterLocation(EAX));
+}
+
+void IntrinsicCodeGeneratorX86::VisitStringNewStringFromString(HInvoke* invoke) {
+ X86Assembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ Register string_to_copy = locations->InAt(0).AsRegister<Register>();
+ __ testl(string_to_copy, string_to_copy);
+ SlowPathCodeX86* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ j(kEqual, slow_path->GetEntryLabel());
+
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocStringFromString)));
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ Bind(slow_path->GetExitLabel());
+}
+
static void GenPeek(LocationSummary* locations, Primitive::Type size, X86Assembler* assembler) {
Register address = locations->InAt(0).AsRegisterPairLow<Register>();
Location out_loc = locations->Out();
@@ -1038,7 +1225,7 @@
LocationSummary::kNoCall,
kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
- HInstruction *value = invoke->InputAt(1);
+ HInstruction* value = invoke->InputAt(1);
if (size == Primitive::kPrimByte) {
locations->SetInAt(1, Location::ByteRegisterOrConstant(EDX, value));
} else {
@@ -1316,10 +1503,12 @@
}
if (type == Primitive::kPrimNot) {
+ bool value_can_be_null = true; // TODO: Worth finding out this information?
codegen->MarkGCCard(locations->GetTemp(0).AsRegister<Register>(),
locations->GetTemp(1).AsRegister<Register>(),
base,
- value_loc.AsRegister<Register>());
+ value_loc.AsRegister<Register>(),
+ value_can_be_null);
}
}
@@ -1415,10 +1604,12 @@
Register value = locations->InAt(4).AsRegister<Register>();
if (type == Primitive::kPrimNot) {
// Mark card for object assuming new value is stored.
+ bool value_can_be_null = true; // TODO: Worth finding out this information?
codegen->MarkGCCard(locations->GetTemp(0).AsRegister<Register>(),
locations->GetTemp(1).AsRegister<Register>(),
base,
- value);
+ value,
+ value_can_be_null);
}
__ LockCmpxchgl(Address(base, offset, TIMES_1, 0), value);
@@ -1535,8 +1726,7 @@
}
UNIMPLEMENTED_INTRINSIC(MathRoundDouble)
-UNIMPLEMENTED_INTRINSIC(StringIndexOf)
-UNIMPLEMENTED_INTRINSIC(StringIndexOfAfter)
+UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
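
Note: the fix-up after repne scasw is easy to misread. On a hit, ECX holds the number of chars left after the matching one, while the length temp still holds the full string length (for IndexOfAfter the counter was rebased to length - start_index, but the length temp was not). In both variants the subl/leal pair therefore computes length - ecx - 1. A small sanity model in plain C++ (not ART code):

    #include <cassert>
    #include <cstdint>

    // `remaining` is ECX after the match.
    int32_t IndexFromScaswState(int32_t length, int32_t remaining) {
      return length - remaining - 1;  // __ subl(string_length, counter);
                                      // __ leal(out, Address(string_length, -1));
    }

    int main() {
      // "abcab", searching 'b' from index 2: the counter starts at 5 - 2 = 3
      // and stops with 0 chars remaining, so the index is 5 - 0 - 1 = 4.
      assert(IndexFromScaswState(5, 0) == 4);
      return 0;
    }
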
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index d9a1c31..2ccecfe 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -16,6 +16,8 @@
#include "intrinsics_x86_64.h"
+#include <limits>
+
#include "arch/x86_64/instruction_set_features_x86_64.h"
#include "code_generator_x86_64.h"
#include "entrypoints/quick/quick_entrypoints.h"
@@ -103,27 +105,9 @@
}
}
-static void MoveArguments(HInvoke* invoke, ArenaAllocator* arena, CodeGeneratorX86_64* codegen) {
- if (invoke->InputCount() == 0) {
- return;
- }
-
- LocationSummary* locations = invoke->GetLocations();
- InvokeDexCallingConventionVisitor calling_convention_visitor;
-
- // We're moving potentially two or more locations to locations that could overlap, so we need
- // a parallel move resolver.
- HParallelMove parallel_move(arena);
-
- for (size_t i = 0; i < invoke->InputCount(); i++) {
- HInstruction* input = invoke->InputAt(i);
- Location cc_loc = calling_convention_visitor.GetNextLocation(input->GetType());
- Location actual_loc = locations->InAt(i);
-
- parallel_move.AddMove(actual_loc, cc_loc, input->GetType(), nullptr);
- }
-
- codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+static void MoveArguments(HInvoke* invoke, CodeGeneratorX86_64* codegen) {
+ InvokeDexCallingConventionVisitorX86_64 calling_convention_visitor;
+ IntrinsicVisitor::MoveArguments(invoke, codegen, &calling_convention_visitor);
}
// Slow-path for fallback (calling the managed code to handle the intrinsic) in an intrinsified
@@ -142,7 +126,7 @@
SaveLiveRegisters(codegen, invoke_->GetLocations());
- MoveArguments(invoke_, codegen->GetGraph()->GetArena(), codegen);
+ MoveArguments(invoke_, codegen);
if (invoke_->IsInvokeStaticOrDirect()) {
codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), CpuRegister(RDI));
@@ -622,7 +606,7 @@
}
static void InvokeOutOfLineIntrinsic(CodeGeneratorX86_64* codegen, HInvoke* invoke) {
- MoveArguments(invoke, codegen->GetGraph()->GetArena(), codegen);
+ MoveArguments(invoke, codegen);
DCHECK(invoke->IsInvokeStaticOrDirect());
codegen->GenerateStaticOrDirectCall(invoke->AsInvokeStaticOrDirect(), CpuRegister(RDI));
@@ -704,7 +688,6 @@
locations->SetInAt(0, Location::RequiresFpuRegister());
locations->SetOut(Location::RequiresFpuRegister());
locations->AddTemp(Location::RequiresFpuRegister());
- locations->AddTemp(Location::RequiresFpuRegister());
return;
}
@@ -732,14 +715,12 @@
// Implement RoundFloat as t1 = floor(input + 0.5f); convert to int.
XmmRegister in = locations->InAt(0).AsFpuRegister<XmmRegister>();
CpuRegister out = locations->Out().AsRegister<CpuRegister>();
- XmmRegister maxInt = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- XmmRegister inPlusPointFive = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
+ XmmRegister inPlusPointFive = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
Label done, nan;
X86_64Assembler* assembler = GetAssembler();
- // Generate 0.5 into inPlusPointFive.
- __ movl(out, Immediate(bit_cast<int32_t, float>(0.5f)));
- __ movd(inPlusPointFive, out, false);
+ // Load 0.5 into inPlusPointFive.
+ __ movss(inPlusPointFive, codegen_->LiteralFloatAddress(0.5f));
// Add in the input.
__ addss(inPlusPointFive, in);
@@ -747,12 +728,8 @@
// And truncate to an integer.
__ roundss(inPlusPointFive, inPlusPointFive, Immediate(1));
- __ movl(out, Immediate(kPrimIntMax));
- // maxInt = int-to-float(out)
- __ cvtsi2ss(maxInt, out);
-
// if inPlusPointFive >= maxInt goto done
- __ comiss(inPlusPointFive, maxInt);
+ __ comiss(inPlusPointFive, codegen_->LiteralFloatAddress(static_cast<float>(kPrimIntMax)));
__ j(kAboveEqual, &done);
// if input == NaN goto nan
@@ -782,14 +759,12 @@
// Implement RoundDouble as t1 = floor(input + 0.5); convert to long.
XmmRegister in = locations->InAt(0).AsFpuRegister<XmmRegister>();
CpuRegister out = locations->Out().AsRegister<CpuRegister>();
- XmmRegister maxLong = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- XmmRegister inPlusPointFive = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
+ XmmRegister inPlusPointFive = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
Label done, nan;
X86_64Assembler* assembler = GetAssembler();
- // Generate 0.5 into inPlusPointFive.
- __ movq(out, Immediate(bit_cast<int64_t, double>(0.5)));
- __ movd(inPlusPointFive, out, true);
+ // Load 0.5 into inPlusPointFive.
+ __ movsd(inPlusPointFive, codegen_->LiteralDoubleAddress(0.5));
// Add in the input.
__ addsd(inPlusPointFive, in);
@@ -797,12 +772,8 @@
// And truncate to an integer.
__ roundsd(inPlusPointFive, inPlusPointFive, Immediate(1));
- __ movq(out, Immediate(kPrimLongMax));
- // maxLong = long-to-double(out)
- __ cvtsi2sd(maxLong, out, true);
-
// if inPlusPointFive >= maxLong goto done
- __ comisd(inPlusPointFive, maxLong);
+ __ comisd(inPlusPointFive, codegen_->LiteralDoubleAddress(static_cast<double>(kPrimLongMax)));
__ j(kAboveEqual, &done);
// if input == NaN goto nan
@@ -814,7 +785,7 @@
__ Bind(&nan);
// output = 0
- __ xorq(out, out);
+ __ xorl(out, out);
__ Bind(&done);
}
@@ -836,16 +807,10 @@
const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
// Location of count
const int32_t count_offset = mirror::String::CountOffset().Int32Value();
- // Starting offset within data array
- const int32_t offset_offset = mirror::String::OffsetOffset().Int32Value();
- // Start of char data within array_
- const int32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
CpuRegister idx = locations->InAt(1).AsRegister<CpuRegister>();
CpuRegister out = locations->Out().AsRegister<CpuRegister>();
- Location temp_loc = locations->GetTemp(0);
- CpuRegister temp = temp_loc.AsRegister<CpuRegister>();
// TODO: Maybe we can support range check elimination. Overall, though, I think it's not worth
// the cost.
@@ -861,12 +826,8 @@
codegen_->MaybeRecordImplicitNullCheck(invoke);
__ j(kAboveEqual, slow_path->GetEntryLabel());
- // Get the actual element.
- __ movl(temp, idx); // temp := idx.
- __ addl(temp, Address(obj, offset_offset)); // temp := offset + idx.
- __ movl(out, Address(obj, value_offset)); // obj := obj.array.
- // out = out[2*temp].
- __ movzxw(out, Address(out, temp, ScaleFactor::TIMES_2, data_offset));
+ // out = out[2*idx].
+ __ movzxw(out, Address(out, idx, ScaleFactor::TIMES_2, value_offset));
__ Bind(slow_path->GetExitLabel());
}
@@ -886,7 +847,7 @@
LocationSummary* locations = invoke->GetLocations();
// Note that the null check must have been done earlier.
- DCHECK(!invoke->CanDoImplicitNullCheck());
+ DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
CpuRegister argument = locations->InAt(1).AsRegister<CpuRegister>();
__ testl(argument, argument);
@@ -899,6 +860,229 @@
__ Bind(slow_path->GetExitLabel());
}
+static void CreateStringIndexOfLocations(HInvoke* invoke,
+ ArenaAllocator* allocator,
+ bool start_at_zero) {
+ LocationSummary* locations = new (allocator) LocationSummary(invoke,
+ LocationSummary::kCallOnSlowPath,
+ kIntrinsified);
+ // The data needs to be in RDI for scasw. So request that the string is there, anyways.
+ locations->SetInAt(0, Location::RegisterLocation(RDI));
+ // If we look for a constant char, we'll still have to copy it into RAX. So just request the
+ // allocator to do that, anyways. We can still do the constant check by checking the parameter
+ // of the instruction explicitly.
+ // Note: This works as we don't clobber RAX anywhere.
+ locations->SetInAt(1, Location::RegisterLocation(RAX));
+ if (!start_at_zero) {
+ locations->SetInAt(2, Location::RequiresRegister()); // The starting index.
+ }
+ // As we clobber RDI during execution anyways, also use it as the output.
+ locations->SetOut(Location::SameAsFirstInput());
+
+ // repne scasw uses RCX as the counter.
+ locations->AddTemp(Location::RegisterLocation(RCX));
+ // Need another temporary to be able to compute the result.
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+static void GenerateStringIndexOf(HInvoke* invoke,
+ X86_64Assembler* assembler,
+ CodeGeneratorX86_64* codegen,
+ ArenaAllocator* allocator,
+ bool start_at_zero) {
+ LocationSummary* locations = invoke->GetLocations();
+
+ // Note that the null check must have been done earlier.
+ DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
+
+ CpuRegister string_obj = locations->InAt(0).AsRegister<CpuRegister>();
+ CpuRegister search_value = locations->InAt(1).AsRegister<CpuRegister>();
+ CpuRegister counter = locations->GetTemp(0).AsRegister<CpuRegister>();
+ CpuRegister string_length = locations->GetTemp(1).AsRegister<CpuRegister>();
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
+
+ // Check our assumptions for registers.
+ DCHECK_EQ(string_obj.AsRegister(), RDI);
+ DCHECK_EQ(search_value.AsRegister(), RAX);
+ DCHECK_EQ(counter.AsRegister(), RCX);
+ DCHECK_EQ(out.AsRegister(), RDI);
+
+ // Check for code points > 0xFFFF. Either a slow-path check when we don't know statically,
+ // or directly dispatch if we have a constant.
+ SlowPathCodeX86_64* slow_path = nullptr;
+ if (invoke->InputAt(1)->IsIntConstant()) {
+ if (static_cast<uint32_t>(invoke->InputAt(1)->AsIntConstant()->GetValue()) >
+ std::numeric_limits<uint16_t>::max()) {
+ // Always needs the slow-path. We could directly dispatch to it, but this case should be
+ // rare, so for simplicity just put the full slow-path down and branch unconditionally.
+ slow_path = new (allocator) IntrinsicSlowPathX86_64(invoke);
+ codegen->AddSlowPath(slow_path);
+ __ jmp(slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+ return;
+ }
+ } else {
+ __ cmpl(search_value, Immediate(std::numeric_limits<uint16_t>::max()));
+ slow_path = new (allocator) IntrinsicSlowPathX86_64(invoke);
+ codegen->AddSlowPath(slow_path);
+ __ j(kAbove, slow_path->GetEntryLabel());
+ }
+
+ // From here down, we know that we are looking for a char that fits in 16 bits.
+ // Location of reference to data array within the String object.
+ int32_t value_offset = mirror::String::ValueOffset().Int32Value();
+ // Location of count within the String object.
+ int32_t count_offset = mirror::String::CountOffset().Int32Value();
+
+ // Load string length, i.e., the count field of the string.
+ __ movl(string_length, Address(string_obj, count_offset));
+
+ // Do a zero-length check.
+ // TODO: Support jecxz.
+ Label not_found_label;
+ __ testl(string_length, string_length);
+ __ j(kEqual, &not_found_label);
+
+ if (start_at_zero) {
+ // Number of chars to scan is the same as the string length.
+ __ movl(counter, string_length);
+
+ // Move to the start of the string.
+ __ addq(string_obj, Immediate(value_offset));
+ } else {
+ CpuRegister start_index = locations->InAt(2).AsRegister<CpuRegister>();
+
+ // Do a start_index check.
+ __ cmpl(start_index, string_length);
+ __ j(kGreaterEqual, &not_found_label);
+
+ // Ensure we have a start index >= 0.
+ __ xorl(counter, counter);
+ __ cmpl(start_index, Immediate(0));
+ __ cmov(kGreater, counter, start_index, false); // 32-bit copy is enough.
+
+ // Move to the start of the string: string_obj + value_offset + 2 * start_index.
+ __ leaq(string_obj, Address(string_obj, counter, ScaleFactor::TIMES_2, value_offset));
+
+ // Now update ecx, the work counter: it will be string.length - start_index.
+ __ negq(counter); // Needs to be 64-bit negation, as the address computation is 64-bit.
+ __ leaq(counter, Address(string_length, counter, ScaleFactor::TIMES_1, 0));
+ }
+
+ // Everything is set up for repne scasw:
+ // * Comparison address in RDI.
+ // * Counter in ECX.
+ __ repne_scasw();
+
+ // Did we find a match?
+ __ j(kNotEqual, &not_found_label);
+
+ // Yes, we matched. Compute the index of the result.
+ __ subl(string_length, counter);
+ __ leal(out, Address(string_length, -1));
+
+ Label done;
+ __ jmp(&done);
+
+ // Failed to match; return -1.
+ __ Bind(&not_found_label);
+ __ movl(out, Immediate(-1));
+
+ // And join up at the end.
+ __ Bind(&done);
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetExitLabel());
+ }
+}
+
+void IntrinsicLocationsBuilderX86_64::VisitStringIndexOf(HInvoke* invoke) {
+ CreateStringIndexOfLocations(invoke, arena_, true);
+}
+
+void IntrinsicCodeGeneratorX86_64::VisitStringIndexOf(HInvoke* invoke) {
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, GetAllocator(), true);
+}
+
+void IntrinsicLocationsBuilderX86_64::VisitStringIndexOfAfter(HInvoke* invoke) {
+ CreateStringIndexOfLocations(invoke, arena_, false);
+}
+
+void IntrinsicCodeGeneratorX86_64::VisitStringIndexOfAfter(HInvoke* invoke) {
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, GetAllocator(), false);
+}
+
+void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromBytes(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ locations->SetInAt(3, Location::RegisterLocation(calling_convention.GetRegisterAt(3)));
+ locations->SetOut(Location::RegisterLocation(RAX));
+}
+
+void IntrinsicCodeGeneratorX86_64::VisitStringNewStringFromBytes(HInvoke* invoke) {
+ X86_64Assembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ CpuRegister byte_array = locations->InAt(0).AsRegister<CpuRegister>();
+ __ testl(byte_array, byte_array);
+ SlowPathCodeX86_64* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ j(kEqual, slow_path->GetEntryLabel());
+
+ __ gs()->call(Address::Absolute(
+ QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAllocStringFromBytes), true));
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromChars(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ locations->SetOut(Location::RegisterLocation(RAX));
+}
+
+void IntrinsicCodeGeneratorX86_64::VisitStringNewStringFromChars(HInvoke* invoke) {
+ X86_64Assembler* assembler = GetAssembler();
+
+ __ gs()->call(Address::Absolute(
+ QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAllocStringFromChars), true));
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromString(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetOut(Location::RegisterLocation(RAX));
+}
+
+void IntrinsicCodeGeneratorX86_64::VisitStringNewStringFromString(HInvoke* invoke) {
+ X86_64Assembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ CpuRegister string_to_copy = locations->InAt(0).AsRegister<CpuRegister>();
+ __ testl(string_to_copy, string_to_copy);
+ SlowPathCodeX86_64* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ j(kEqual, slow_path->GetEntryLabel());
+
+ __ gs()->call(Address::Absolute(
+ QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAllocStringFromString), true));
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ Bind(slow_path->GetExitLabel());
+}
+
static void GenPeek(LocationSummary* locations, Primitive::Type size, X86_64Assembler* assembler) {
CpuRegister address = locations->InAt(0).AsRegister<CpuRegister>();
CpuRegister out = locations->Out().AsRegister<CpuRegister>(); // == address, here for clarity.
@@ -960,26 +1144,48 @@
LocationSummary::kNoCall,
kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrInt32LongConstant(invoke->InputAt(1)));
}
static void GenPoke(LocationSummary* locations, Primitive::Type size, X86_64Assembler* assembler) {
CpuRegister address = locations->InAt(0).AsRegister<CpuRegister>();
- CpuRegister value = locations->InAt(1).AsRegister<CpuRegister>();
+ Location value = locations->InAt(1);
// x86 allows unaligned access. We do not have to check the input or use specific instructions
// to avoid a SIGBUS.
switch (size) {
case Primitive::kPrimByte:
- __ movb(Address(address, 0), value);
+ if (value.IsConstant()) {
+ __ movb(Address(address, 0),
+ Immediate(CodeGenerator::GetInt32ValueOf(value.GetConstant())));
+ } else {
+ __ movb(Address(address, 0), value.AsRegister<CpuRegister>());
+ }
break;
case Primitive::kPrimShort:
- __ movw(Address(address, 0), value);
+ if (value.IsConstant()) {
+ __ movw(Address(address, 0),
+ Immediate(CodeGenerator::GetInt32ValueOf(value.GetConstant())));
+ } else {
+ __ movw(Address(address, 0), value.AsRegister<CpuRegister>());
+ }
break;
case Primitive::kPrimInt:
- __ movl(Address(address, 0), value);
+ if (value.IsConstant()) {
+ __ movl(Address(address, 0),
+ Immediate(CodeGenerator::GetInt32ValueOf(value.GetConstant())));
+ } else {
+ __ movl(Address(address, 0), value.AsRegister<CpuRegister>());
+ }
break;
case Primitive::kPrimLong:
- __ movq(Address(address, 0), value);
+ if (value.IsConstant()) {
+ int64_t v = value.GetConstant()->AsLongConstant()->GetValue();
+ DCHECK(IsInt<32>(v));
+ int32_t v_32 = v;
+ __ movq(Address(address, 0), Immediate(v_32));
+ } else {
+ __ movq(Address(address, 0), value.AsRegister<CpuRegister>());
+ }
break;
default:
LOG(FATAL) << "Type not recognized for poke: " << size;
@@ -1168,10 +1374,12 @@
}
if (type == Primitive::kPrimNot) {
+ bool value_can_be_null = true; // TODO: Worth finding out this information?
codegen->MarkGCCard(locations->GetTemp(0).AsRegister<CpuRegister>(),
locations->GetTemp(1).AsRegister<CpuRegister>(),
base,
- value);
+ value,
+ value_can_be_null);
}
}
@@ -1253,10 +1461,12 @@
// Integer or object.
if (type == Primitive::kPrimNot) {
// Mark card for object assuming new value is stored.
+ bool value_can_be_null = true; // TODO: Worth finding out this information?
codegen->MarkGCCard(locations->GetTemp(0).AsRegister<CpuRegister>(),
locations->GetTemp(1).AsRegister<CpuRegister>(),
base,
- value);
+ value,
+ value_can_be_null);
}
__ LockCmpxchgl(Address(base, offset, TIMES_1, 0), value);
@@ -1380,8 +1590,7 @@
void IntrinsicCodeGeneratorX86_64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \
}
-UNIMPLEMENTED_INTRINSIC(StringIndexOf)
-UNIMPLEMENTED_INTRINSIC(StringIndexOfAfter)
+UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
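
Note: the long branch of GenPoke narrows the constant because x86-64 has no mov m64, imm64 encoding; movq to memory takes a 32-bit immediate that the CPU sign-extends, which is why the new RegisterOrInt32LongConstant location and the DCHECK(IsInt<32>(v)) above only admit such values. A sketch of the admissibility test (a standalone helper assumed for illustration, not ART's IsInt):

    #include <cstdint>
    #include <limits>

    // True when v survives the sign-extending imm32 form of movq to memory.
    constexpr bool FitsInMovqImmediate(int64_t v) {
      return v >= std::numeric_limits<int32_t>::min() &&
             v <= std::numeric_limits<int32_t>::max();
    }

    static_assert(FitsInMovqImmediate(-1), "sign-extended imm32 covers -1");
    static_assert(!FitsInMovqImmediate(int64_t{1} << 32), "too wide to encode");
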
diff --git a/compiler/optimizing/licm.cc b/compiler/optimizing/licm.cc
index bf9b8e5..2535ea2 100644
--- a/compiler/optimizing/licm.cc
+++ b/compiler/optimizing/licm.cc
@@ -39,8 +39,9 @@
}
}
- if (instruction->HasEnvironment()) {
- HEnvironment* environment = instruction->GetEnvironment();
+ for (HEnvironment* environment = instruction->GetEnvironment();
+ environment != nullptr;
+ environment = environment->GetParent()) {
for (size_t i = 0, e = environment->Size(); i < e; ++i) {
HInstruction* input = environment->GetInstructionAt(i);
if (input != nullptr) {
@@ -63,13 +64,15 @@
* If `environment` has a loop header phi, we replace it with its first input.
*/
static void UpdateLoopPhisIn(HEnvironment* environment, HLoopInformation* info) {
- for (size_t i = 0, e = environment->Size(); i < e; ++i) {
- HInstruction* input = environment->GetInstructionAt(i);
- if (input != nullptr && IsPhiOf(input, info->GetHeader())) {
- environment->RemoveAsUserOfInput(i);
- HInstruction* incoming = input->InputAt(0);
- environment->SetRawEnvAt(i, incoming);
- incoming->AddEnvUseAt(environment, i);
+ for (; environment != nullptr; environment = environment->GetParent()) {
+ for (size_t i = 0, e = environment->Size(); i < e; ++i) {
+ HInstruction* input = environment->GetInstructionAt(i);
+ if (input != nullptr && IsPhiOf(input, info->GetHeader())) {
+ environment->RemoveAsUserOfInput(i);
+ HInstruction* incoming = input->InputAt(0);
+ environment->SetRawEnvAt(i, incoming);
+ incoming->AddEnvUseAt(environment, i);
+ }
}
}
}
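
Note: both hunks above switch from handling a single HEnvironment to walking a parent-linked chain reached through GetParent() (e.g. one environment per inlined frame), so any per-environment fix-up must now run at every level. The generic traversal shape, with a stub type standing in for art::HEnvironment:

    // Stub for illustration; art::HEnvironment carries vregs and a parent link.
    struct Env {
      Env* parent = nullptr;
      Env* GetParent() const { return parent; }
    };

    template <typename Fn>
    void ForEachEnvironment(Env* env, Fn fn) {
      for (; env != nullptr; env = env->GetParent()) {
        fn(env);  // innermost frame first, then each enclosing frame
      }
    }

    int main() {
      Env caller, inner;
      inner.parent = &caller;
      int depth = 0;
      ForEachEnvironment(&inner, [&](Env*) { ++depth; });
      return depth == 2 ? 0 : 1;
    }
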
diff --git a/compiler/optimizing/linearize_test.cc b/compiler/optimizing/linearize_test.cc
index 7818c60..4f259b5 100644
--- a/compiler/optimizing/linearize_test.cc
+++ b/compiler/optimizing/linearize_test.cc
@@ -39,7 +39,7 @@
static void TestCode(const uint16_t* data, const int* expected_order, size_t number_of_blocks) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = new (&allocator) HGraph(&allocator);
+ HGraph* graph = CreateGraph(&allocator);
HGraphBuilder builder(graph);
const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
bool graph_built = builder.BuildGraph(*item);
diff --git a/compiler/optimizing/live_ranges_test.cc b/compiler/optimizing/live_ranges_test.cc
index 5236773..7cb00a1 100644
--- a/compiler/optimizing/live_ranges_test.cc
+++ b/compiler/optimizing/live_ranges_test.cc
@@ -32,7 +32,7 @@
namespace art {
static HGraph* BuildGraph(const uint16_t* data, ArenaAllocator* allocator) {
- HGraph* graph = new (allocator) HGraph(allocator);
+ HGraph* graph = CreateGraph(allocator);
HGraphBuilder builder(graph);
const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
builder.BuildGraph(*item);
diff --git a/compiler/optimizing/liveness_test.cc b/compiler/optimizing/liveness_test.cc
index 8a96ee9..9d7d0b6 100644
--- a/compiler/optimizing/liveness_test.cc
+++ b/compiler/optimizing/liveness_test.cc
@@ -46,7 +46,7 @@
static void TestCode(const uint16_t* data, const char* expected) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = new (&allocator) HGraph(&allocator);
+ HGraph* graph = CreateGraph(&allocator);
HGraphBuilder builder(graph);
const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
bool graph_built = builder.BuildGraph(*item);
@@ -445,44 +445,40 @@
TEST(LivenessTest, Loop6) {
// Bitsets are made of:
- // (constant0, constant4, constant5, phi in block 2, phi in block 8)
+ // (constant0, constant4, constant5, phi in block 2)
const char* expected =
"Block 0\n"
- " live in: (00000)\n"
- " live out: (11100)\n"
- " kill: (11100)\n"
+ " live in: (0000)\n"
+ " live out: (1110)\n"
+ " kill: (1110)\n"
"Block 1\n"
- " live in: (11100)\n"
- " live out: (01100)\n"
- " kill: (00000)\n"
+ " live in: (1110)\n"
+ " live out: (0110)\n"
+ " kill: (0000)\n"
"Block 2\n" // loop header
- " live in: (01100)\n"
- " live out: (01110)\n"
- " kill: (00010)\n"
+ " live in: (0110)\n"
+ " live out: (0111)\n"
+ " kill: (0001)\n"
"Block 3\n"
- " live in: (01100)\n"
- " live out: (01100)\n"
- " kill: (00000)\n"
- "Block 4\n" // original back edge
- " live in: (01100)\n"
- " live out: (01100)\n"
- " kill: (00000)\n"
- "Block 5\n" // original back edge
- " live in: (01100)\n"
- " live out: (01100)\n"
- " kill: (00000)\n"
+ " live in: (0110)\n"
+ " live out: (0110)\n"
+ " kill: (0000)\n"
+ "Block 4\n" // back edge
+ " live in: (0110)\n"
+ " live out: (0110)\n"
+ " kill: (0000)\n"
+ "Block 5\n" // back edge
+ " live in: (0110)\n"
+ " live out: (0110)\n"
+ " kill: (0000)\n"
"Block 6\n" // return block
- " live in: (00010)\n"
- " live out: (00000)\n"
- " kill: (00000)\n"
+ " live in: (0001)\n"
+ " live out: (0000)\n"
+ " kill: (0000)\n"
"Block 7\n" // exit block
- " live in: (00000)\n"
- " live out: (00000)\n"
- " kill: (00000)\n"
- "Block 8\n" // synthesized back edge
- " live in: (01100)\n"
- " live out: (01100)\n"
- " kill: (00001)\n";
+ " live in: (0000)\n"
+ " live out: (0000)\n"
+ " kill: (0000)\n";
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
diff --git a/compiler/optimizing/locations.cc b/compiler/optimizing/locations.cc
index a1ae670..42aba04 100644
--- a/compiler/optimizing/locations.cc
+++ b/compiler/optimizing/locations.cc
@@ -25,8 +25,6 @@
bool intrinsified)
: inputs_(instruction->GetBlock()->GetGraph()->GetArena(), instruction->InputCount()),
temps_(instruction->GetBlock()->GetGraph()->GetArena(), 0),
- environment_(instruction->GetBlock()->GetGraph()->GetArena(),
- instruction->EnvironmentSize()),
output_overlaps_(Location::kOutputOverlap),
call_kind_(call_kind),
stack_mask_(nullptr),
@@ -37,10 +35,6 @@
for (size_t i = 0; i < instruction->InputCount(); ++i) {
inputs_.Put(i, Location());
}
- environment_.SetSize(instruction->EnvironmentSize());
- for (size_t i = 0; i < instruction->EnvironmentSize(); ++i) {
- environment_.Put(i, Location());
- }
instruction->SetLocations(this);
if (NeedsSafepoint()) {
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index c3a9915..09bbb33 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -525,14 +525,6 @@
return temps_.Size();
}
- void SetEnvironmentAt(uint32_t at, Location location) {
- environment_.Put(at, location);
- }
-
- Location GetEnvironmentAt(uint32_t at) const {
- return environment_.Get(at);
- }
-
Location Out() const { return output_; }
bool CanCall() const { return call_kind_ != kNoCall; }
@@ -602,7 +594,6 @@
private:
GrowableArray<Location> inputs_;
GrowableArray<Location> temps_;
- GrowableArray<Location> environment_;
// Whether the output overlaps with any of the inputs. If it overlaps, then it cannot
// share the same register as the inputs.
Location::OutputOverlap output_overlaps_;
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 4b9d4fc..47da9cc 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -16,7 +16,9 @@
#include "nodes.h"
+#include "code_generator.h"
#include "ssa_builder.h"
+#include "base/bit_vector-inl.h"
#include "utils/growable_array.h"
#include "scoped_thread_state_change.h"
@@ -37,8 +39,9 @@
instruction->RemoveAsUserOfInput(i);
}
- HEnvironment* environment = instruction->GetEnvironment();
- if (environment != nullptr) {
+ for (HEnvironment* environment = instruction->GetEnvironment();
+ environment != nullptr;
+ environment = environment->GetParent()) {
for (size_t i = 0, e = environment->Size(); i < e; ++i) {
if (environment->GetInstructionAt(i) != nullptr) {
environment->RemoveAsUserOfInput(i);
@@ -191,24 +194,6 @@
void HGraph::SimplifyLoop(HBasicBlock* header) {
HLoopInformation* info = header->GetLoopInformation();
- // If there is more than one back edge, make them all branch to the same block that
- // will become the only back edge. This simplifies finding natural loops in the
- // graph.
- // Also, if the loop is a do/while (that is the back edge is an if), change the
- // back edge to be a goto. This simplifies code generation of suspend checks.
- if (info->NumberOfBackEdges() > 1 || info->GetBackEdges().Get(0)->GetLastInstruction()->IsIf()) {
- HBasicBlock* new_back_edge = new (arena_) HBasicBlock(this, header->GetDexPc());
- AddBlock(new_back_edge);
- new_back_edge->AddInstruction(new (arena_) HGoto());
- for (size_t pred = 0, e = info->GetBackEdges().Size(); pred < e; ++pred) {
- HBasicBlock* back_edge = info->GetBackEdges().Get(pred);
- back_edge->ReplaceSuccessor(header, new_back_edge);
- }
- info->ClearBackEdges();
- info->AddBackEdge(new_back_edge);
- new_back_edge->AddSuccessor(header);
- }
-
// Make sure the loop has only one pre header. This simplifies SSA building by having
// to just look at the pre header to know which locals are initialized at entry of the
// loop.
@@ -218,11 +203,9 @@
AddBlock(pre_header);
pre_header->AddInstruction(new (arena_) HGoto());
- ArenaBitVector back_edges(arena_, GetBlocks().Size(), false);
- HBasicBlock* back_edge = info->GetBackEdges().Get(0);
for (size_t pred = 0; pred < header->GetPredecessors().Size(); ++pred) {
HBasicBlock* predecessor = header->GetPredecessors().Get(pred);
- if (predecessor != back_edge) {
+ if (!info->IsBackEdge(*predecessor)) {
predecessor->ReplaceSuccessor(header, pre_header);
pred--;
}
@@ -230,9 +213,17 @@
pre_header->AddSuccessor(header);
}
- // Make sure the second predecessor of a loop header is the back edge.
- if (header->GetPredecessors().Get(1) != info->GetBackEdges().Get(0)) {
- header->SwapPredecessors();
+ // Make sure the first predecessor of a loop header is the incoming block.
+ if (info->IsBackEdge(*header->GetPredecessors().Get(0))) {
+ HBasicBlock* to_swap = header->GetPredecessors().Get(0);
+ for (size_t pred = 1, e = header->GetPredecessors().Size(); pred < e; ++pred) {
+ HBasicBlock* predecessor = header->GetPredecessors().Get(pred);
+ if (!info->IsBackEdge(*predecessor)) {
+ header->predecessors_.Put(pred, to_swap);
+ header->predecessors_.Put(0, predecessor);
+ break;
+ }
+ }
}
// Place the suspend check at the beginning of the header, so that live registers
@@ -303,25 +294,6 @@
return cached_null_constant_;
}
-template <class InstructionType, typename ValueType>
-InstructionType* HGraph::CreateConstant(ValueType value,
- ArenaSafeMap<ValueType, InstructionType*>* cache) {
- // Try to find an existing constant of the given value.
- InstructionType* constant = nullptr;
- auto cached_constant = cache->find(value);
- if (cached_constant != cache->end()) {
- constant = cached_constant->second;
- }
-
- // If not found or previously deleted, create and cache a new instruction.
- if (constant == nullptr || constant->GetBlock() == nullptr) {
- constant = new (arena_) InstructionType(value);
- cache->Overwrite(value, constant);
- InsertConstant(constant);
- }
- return constant;
-}
-
HConstant* HGraph::GetConstant(Primitive::Type type, int64_t value) {
switch (type) {
case Primitive::Type::kPrimBoolean:
@@ -343,6 +315,18 @@
}
}
+void HGraph::CacheFloatConstant(HFloatConstant* constant) {
+ int32_t value = bit_cast<int32_t, float>(constant->GetValue());
+ DCHECK(cached_float_constants_.find(value) == cached_float_constants_.end());
+ cached_float_constants_.Overwrite(value, constant);
+}
+
+void HGraph::CacheDoubleConstant(HDoubleConstant* constant) {
+ int64_t value = bit_cast<int64_t, double>(constant->GetValue());
+ DCHECK(cached_double_constants_.find(value) == cached_double_constants_.end());
+ cached_double_constants_.Overwrite(value, constant);
+}
+
void HLoopInformation::Add(HBasicBlock* block) {
blocks_.SetBit(block->GetBlockId());
}
@@ -364,26 +348,60 @@
}
bool HLoopInformation::Populate() {
- DCHECK_EQ(GetBackEdges().Size(), 1u);
- HBasicBlock* back_edge = GetBackEdges().Get(0);
- DCHECK(back_edge->GetDominator() != nullptr);
- if (!header_->Dominates(back_edge)) {
- // This loop is not natural. Do not bother going further.
- return false;
- }
+ DCHECK_EQ(blocks_.NumSetBits(), 0u) << "Loop information has already been populated";
+ for (size_t i = 0, e = GetBackEdges().Size(); i < e; ++i) {
+ HBasicBlock* back_edge = GetBackEdges().Get(i);
+ DCHECK(back_edge->GetDominator() != nullptr);
+ if (!header_->Dominates(back_edge)) {
+ // This loop is not natural. Do not bother going further.
+ return false;
+ }
- // Populate this loop: starting with the back edge, recursively add predecessors
- // that are not already part of that loop. Set the header as part of the loop
- // to end the recursion.
- // This is a recursive implementation of the algorithm described in
- // "Advanced Compiler Design & Implementation" (Muchnick) p192.
- blocks_.SetBit(header_->GetBlockId());
- PopulateRecursive(back_edge);
+ // Populate this loop: starting with the back edge, recursively add predecessors
+ // that are not already part of that loop. Set the header as part of the loop
+ // to end the recursion.
+ // This is a recursive implementation of the algorithm described in
+ // "Advanced Compiler Design & Implementation" (Muchnick) p192.
+ blocks_.SetBit(header_->GetBlockId());
+ PopulateRecursive(back_edge);
+ }
return true;
}
+void HLoopInformation::Update() {
+ HGraph* graph = header_->GetGraph();
+ for (uint32_t id : blocks_.Indexes()) {
+ HBasicBlock* block = graph->GetBlocks().Get(id);
+ // Reset loop information of non-header blocks inside the loop, except
+ // members of inner nested loops because those should already have been
+ // updated by their own LoopInformation.
+ if (block->GetLoopInformation() == this && block != header_) {
+ block->SetLoopInformation(nullptr);
+ }
+ }
+ blocks_.ClearAllBits();
+
+ if (back_edges_.IsEmpty()) {
+ // The loop has been dismantled, delete its suspend check and remove info
+ // from the header.
+ DCHECK(HasSuspendCheck());
+ header_->RemoveInstruction(suspend_check_);
+ header_->SetLoopInformation(nullptr);
+ header_ = nullptr;
+ suspend_check_ = nullptr;
+ } else {
+ if (kIsDebugBuild) {
+ for (size_t i = 0, e = back_edges_.Size(); i < e; ++i) {
+ DCHECK(header_->Dominates(back_edges_.Get(i)));
+ }
+ }
+ // This loop still has reachable back edges. Repopulate the list of blocks.
+ bool populate_successful = Populate();
+ DCHECK(populate_successful);
+ }
+}
+
HBasicBlock* HLoopInformation::GetPreHeader() const {
- DCHECK_EQ(header_->GetPredecessors().Size(), 2u);
return header_->GetDominator();
}
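
Note: with the synthesized-back-edge transformation gone from SimplifyLoop, Populate now folds every back edge into the loop's block set, and GetPreHeader can no longer assume exactly two predecessors. The classic source shape with more than one back edge is a while loop containing continue; whether a given front end keeps the edges distinct is builder-dependent, but this is the pattern the change accommodates:

    #include <cassert>

    // Two branches back to the loop header: the `continue` and the loop end.
    int CountEven(const int* a, int n) {
      int i = 0;
      int count = 0;
      while (i < n) {
        int v = a[i++];
        if (v % 2 != 0) continue;  // back edge #1, straight to the header
        ++count;                   // flows into back edge #2 at the loop end
      }
      return count;
    }

    int main() {
      const int xs[] = {1, 2, 3, 4};
      assert(CountEven(xs, 4) == 2);
      return 0;
    }
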
@@ -395,6 +413,14 @@
return other.blocks_.IsBitSet(header_->GetBlockId());
}
+size_t HLoopInformation::GetLifetimeEnd() const {
+ size_t last_position = 0;
+ for (size_t i = 0, e = back_edges_.Size(); i < e; ++i) {
+ last_position = std::max(back_edges_.Get(i)->GetLifetimeEnd(), last_position);
+ }
+ return last_position;
+}
+
bool HBasicBlock::Dominates(HBasicBlock* other) const {
// Walk up the dominator tree from `other`, to find out if `this`
// is an ancestor.
@@ -416,26 +442,6 @@
DCHECK(!instruction->HasEnvironment());
}
-void HBasicBlock::InsertInstructionBefore(HInstruction* instruction, HInstruction* cursor) {
- DCHECK(!cursor->IsPhi());
- DCHECK(!instruction->IsPhi());
- DCHECK_EQ(instruction->GetId(), -1);
- DCHECK_NE(cursor->GetId(), -1);
- DCHECK_EQ(cursor->GetBlock(), this);
- DCHECK(!instruction->IsControlFlow());
- instruction->next_ = cursor;
- instruction->previous_ = cursor->previous_;
- cursor->previous_ = instruction;
- if (GetFirstInstruction() == cursor) {
- instructions_.first_instruction_ = instruction;
- } else {
- instruction->previous_->next_ = instruction;
- }
- instruction->SetBlock(this);
- instruction->SetId(GetGraph()->GetNextInstructionId());
- UpdateInputsUsers(instruction);
-}
-
void HBasicBlock::ReplaceAndRemoveInstructionWith(HInstruction* initial,
HInstruction* replacement) {
DCHECK(initial->GetBlock() == this);
@@ -463,23 +469,41 @@
Add(&phis_, this, phi);
}
+void HBasicBlock::InsertInstructionBefore(HInstruction* instruction, HInstruction* cursor) {
+ DCHECK(!cursor->IsPhi());
+ DCHECK(!instruction->IsPhi());
+ DCHECK_EQ(instruction->GetId(), -1);
+ DCHECK_NE(cursor->GetId(), -1);
+ DCHECK_EQ(cursor->GetBlock(), this);
+ DCHECK(!instruction->IsControlFlow());
+ instruction->SetBlock(this);
+ instruction->SetId(GetGraph()->GetNextInstructionId());
+ UpdateInputsUsers(instruction);
+ instructions_.InsertInstructionBefore(instruction, cursor);
+}
+
+void HBasicBlock::InsertInstructionAfter(HInstruction* instruction, HInstruction* cursor) {
+ DCHECK(!cursor->IsPhi());
+ DCHECK(!instruction->IsPhi());
+ DCHECK_EQ(instruction->GetId(), -1);
+ DCHECK_NE(cursor->GetId(), -1);
+ DCHECK_EQ(cursor->GetBlock(), this);
+ DCHECK(!instruction->IsControlFlow());
+ DCHECK(!cursor->IsControlFlow());
+ instruction->SetBlock(this);
+ instruction->SetId(GetGraph()->GetNextInstructionId());
+ UpdateInputsUsers(instruction);
+ instructions_.InsertInstructionAfter(instruction, cursor);
+}
+
void HBasicBlock::InsertPhiAfter(HPhi* phi, HPhi* cursor) {
DCHECK_EQ(phi->GetId(), -1);
DCHECK_NE(cursor->GetId(), -1);
DCHECK_EQ(cursor->GetBlock(), this);
- if (cursor->next_ == nullptr) {
- cursor->next_ = phi;
- phi->previous_ = cursor;
- DCHECK(phi->next_ == nullptr);
- } else {
- phi->next_ = cursor->next_;
- phi->previous_ = cursor;
- cursor->next_ = phi;
- phi->next_->previous_ = phi;
- }
phi->SetBlock(this);
phi->SetId(GetGraph()->GetNextInstructionId());
UpdateInputsUsers(phi);
+ phis_.InsertInstructionAfter(phi, cursor);
}
static void Remove(HInstructionList* instruction_list,
@@ -497,6 +521,7 @@
}
void HBasicBlock::RemoveInstruction(HInstruction* instruction, bool ensure_safety) {
+ DCHECK(!instruction->IsPhi());
Remove(&instructions_, this, instruction, ensure_safety);
}
@@ -504,6 +529,24 @@
Remove(&phis_, this, phi, ensure_safety);
}
+void HBasicBlock::RemoveInstructionOrPhi(HInstruction* instruction, bool ensure_safety) {
+ if (instruction->IsPhi()) {
+ RemovePhi(instruction->AsPhi(), ensure_safety);
+ } else {
+ RemoveInstruction(instruction, ensure_safety);
+ }
+}
+
+void HEnvironment::CopyFrom(const GrowableArray<HInstruction*>& locals) {
+ for (size_t i = 0; i < locals.Size(); i++) {
+ HInstruction* instruction = locals.Get(i);
+ SetRawEnvAt(i, instruction);
+ if (instruction != nullptr) {
+ instruction->AddEnvUseAt(this, i);
+ }
+ }
+}
+
void HEnvironment::CopyFrom(HEnvironment* env) {
for (size_t i = 0; i < env->Size(); i++) {
HInstruction* instruction = env->GetInstructionAt(i);
@@ -514,6 +557,28 @@
}
}
+void HEnvironment::CopyFromWithLoopPhiAdjustment(HEnvironment* env,
+ HBasicBlock* loop_header) {
+ DCHECK(loop_header->IsLoopHeader());
+ for (size_t i = 0; i < env->Size(); i++) {
+ HInstruction* instruction = env->GetInstructionAt(i);
+ SetRawEnvAt(i, instruction);
+ if (instruction == nullptr) {
+ continue;
+ }
+ if (instruction->IsLoopHeaderPhi() && (instruction->GetBlock() == loop_header)) {
+ // At the end of the loop pre-header, the corresponding value for instruction
+ // is the first input of the phi.
+ HInstruction* initial = instruction->AsPhi()->InputAt(0);
+ DCHECK(initial->GetBlock()->Dominates(loop_header));
+ SetRawEnvAt(i, initial);
+ initial->AddEnvUseAt(this, i);
+ } else {
+ instruction->AddEnvUseAt(this, i);
+ }
+ }
+}
+
void HEnvironment::RemoveAsUserOfInput(size_t index) const {
const HUserRecord<HEnvironment*> user_record = vregs_.Get(index);
user_record.GetInstruction()->RemoveEnvironmentUser(user_record.GetUseNode());
@@ -546,6 +611,34 @@
}
}
+void HInstructionList::InsertInstructionBefore(HInstruction* instruction, HInstruction* cursor) {
+ DCHECK(Contains(cursor));
+ if (cursor == first_instruction_) {
+ cursor->previous_ = instruction;
+ instruction->next_ = cursor;
+ first_instruction_ = instruction;
+ } else {
+ instruction->previous_ = cursor->previous_;
+ instruction->next_ = cursor;
+ cursor->previous_ = instruction;
+ instruction->previous_->next_ = instruction;
+ }
+}
+
+void HInstructionList::InsertInstructionAfter(HInstruction* instruction, HInstruction* cursor) {
+ DCHECK(Contains(cursor));
+ if (cursor == last_instruction_) {
+ cursor->next_ = instruction;
+ instruction->previous_ = cursor;
+ last_instruction_ = instruction;
+ } else {
+ instruction->next_ = cursor->next_;
+ instruction->previous_ = cursor;
+ cursor->next_ = instruction;
+ instruction->next_->previous_ = instruction;
+ }
+}
+
void HInstructionList::RemoveInstruction(HInstruction* instruction) {
if (instruction->previous_ != nullptr) {
instruction->previous_->next_ = instruction->next_;
@@ -660,6 +753,14 @@
input->AddUseAt(this, inputs_.Size() - 1);
}
+void HPhi::RemoveInputAt(size_t index) {
+ RemoveAsUserOfInput(index);
+ inputs_.DeleteAt(index);
+ for (size_t i = index, e = InputCount(); i < e; ++i) {
+ InputRecordAt(i).GetUseNode()->SetIndex(i);
+ }
+}
+
#define DEFINE_ACCEPT(name, super) \
void H##name::Accept(HGraphVisitor* visitor) { \
visitor->Visit##name(this); \
@@ -694,6 +795,84 @@
}
}
+HConstant* HTypeConversion::TryStaticEvaluation() const {
+ HGraph* graph = GetBlock()->GetGraph();
+ if (GetInput()->IsIntConstant()) {
+ int32_t value = GetInput()->AsIntConstant()->GetValue();
+ switch (GetResultType()) {
+ case Primitive::kPrimLong:
+ return graph->GetLongConstant(static_cast<int64_t>(value));
+ case Primitive::kPrimFloat:
+ return graph->GetFloatConstant(static_cast<float>(value));
+ case Primitive::kPrimDouble:
+ return graph->GetDoubleConstant(static_cast<double>(value));
+ default:
+ return nullptr;
+ }
+ } else if (GetInput()->IsLongConstant()) {
+ int64_t value = GetInput()->AsLongConstant()->GetValue();
+ switch (GetResultType()) {
+ case Primitive::kPrimInt:
+ return graph->GetIntConstant(static_cast<int32_t>(value));
+ case Primitive::kPrimFloat:
+ return graph->GetFloatConstant(static_cast<float>(value));
+ case Primitive::kPrimDouble:
+ return graph->GetDoubleConstant(static_cast<double>(value));
+ default:
+ return nullptr;
+ }
+ } else if (GetInput()->IsFloatConstant()) {
+ float value = GetInput()->AsFloatConstant()->GetValue();
+ switch (GetResultType()) {
+ case Primitive::kPrimInt:
+ if (std::isnan(value))
+ return graph->GetIntConstant(0);
+ if (value >= kPrimIntMax)
+ return graph->GetIntConstant(kPrimIntMax);
+ if (value <= kPrimIntMin)
+ return graph->GetIntConstant(kPrimIntMin);
+ return graph->GetIntConstant(static_cast<int32_t>(value));
+ case Primitive::kPrimLong:
+ if (std::isnan(value))
+ return graph->GetLongConstant(0);
+ if (value >= kPrimLongMax)
+ return graph->GetLongConstant(kPrimLongMax);
+ if (value <= kPrimLongMin)
+ return graph->GetLongConstant(kPrimLongMin);
+ return graph->GetLongConstant(static_cast<int64_t>(value));
+ case Primitive::kPrimDouble:
+ return graph->GetDoubleConstant(static_cast<double>(value));
+ default:
+ return nullptr;
+ }
+ } else if (GetInput()->IsDoubleConstant()) {
+ double value = GetInput()->AsDoubleConstant()->GetValue();
+ switch (GetResultType()) {
+ case Primitive::kPrimInt:
+ if (std::isnan(value))
+ return graph->GetIntConstant(0);
+ if (value >= kPrimIntMax)
+ return graph->GetIntConstant(kPrimIntMax);
+        if (value <= kPrimIntMin)
+ return graph->GetIntConstant(kPrimIntMin);
+ return graph->GetIntConstant(static_cast<int32_t>(value));
+ case Primitive::kPrimLong:
+ if (std::isnan(value))
+ return graph->GetLongConstant(0);
+ if (value >= kPrimLongMax)
+ return graph->GetLongConstant(kPrimLongMax);
+ if (value <= kPrimLongMin)
+ return graph->GetLongConstant(kPrimLongMin);
+ return graph->GetLongConstant(static_cast<int64_t>(value));
+ case Primitive::kPrimFloat:
+ return graph->GetFloatConstant(static_cast<float>(value));
+ default:
+ return nullptr;
+ }
+ }
+ return nullptr;
+}
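+
+// A few of the foldings performed above, for illustration (values are
+// hypothetical; `graph` is the enclosing HGraph):
+//
+//   float  NaN    -> int  : graph->GetIntConstant(0)
+//   float  1e20f  -> int  : graph->GetIntConstant(kPrimIntMax)   (saturated)
+//   double -1e300 -> long : graph->GetLongConstant(kPrimLongMin) (saturated)
+//   int    42     -> long : graph->GetLongConstant(42)
+//
+// This mirrors Java's conversion semantics: NaN narrows to 0 and
+// out-of-range floating-point values clamp to the integral type bounds.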
+
HConstant* HUnaryOperation::TryStaticEvaluation() const {
if (GetInput()->IsIntConstant()) {
int32_t value = Evaluate(GetInput()->AsIntConstant()->GetValue());
@@ -702,7 +881,7 @@
// TODO: Implement static evaluation of long unary operations.
//
// Do not exit with a fatal condition here. Instead, simply
- // return `nullptr' to notify the caller that this instruction
+ // return `null' to notify the caller that this instruction
// cannot (yet) be statically evaluated.
return nullptr;
}
@@ -738,7 +917,7 @@
}
// If `GetConstantRight()` returns one of the input, this returns the other
-// one. Otherwise it returns nullptr.
+// one. Otherwise it returns null.
HInstruction* HBinaryOperation::GetLeastConstantLeft() const {
HInstruction* most_constant_right = GetConstantRight();
if (most_constant_right == nullptr) {
@@ -855,6 +1034,15 @@
return !GetPhis().IsEmpty() && GetFirstPhi()->GetNext() == nullptr;
}
+size_t HInstructionList::CountSize() const {
+ size_t size = 0;
+ HInstruction* current = first_instruction_;
+ for (; current != nullptr; current = current->GetNext()) {
+ size++;
+ }
+ return size;
+}
+
void HInstructionList::SetBlockOfInstructions(HBasicBlock* block) const {
for (HInstruction* current = first_instruction_;
current != nullptr;
@@ -886,40 +1074,167 @@
}
}
-void HBasicBlock::DisconnectFromAll() {
- DCHECK(dominated_blocks_.IsEmpty()) << "Unimplemented scenario";
+void HBasicBlock::DisconnectAndDelete() {
+ // Dominators must be removed after all the blocks they dominate. This way
+ // a loop header is removed last, a requirement for correct loop information
+ // iteration.
+ DCHECK(dominated_blocks_.IsEmpty());
+ // Remove the block from all loops it is included in.
+ for (HLoopInformationOutwardIterator it(*this); !it.Done(); it.Advance()) {
+ HLoopInformation* loop_info = it.Current();
+ loop_info->Remove(this);
+ if (loop_info->IsBackEdge(*this)) {
+ // If this was the last back edge of the loop, we deliberately leave the
+ // loop in an inconsistent state; the SSAChecker will fail unless the
+ // entire loop is removed during the pass.
+ loop_info->RemoveBackEdge(this);
+ }
+ }
+
+ // Disconnect the block from its predecessors and update their control-flow
+ // instructions.
for (size_t i = 0, e = predecessors_.Size(); i < e; ++i) {
- predecessors_.Get(i)->successors_.Delete(this);
+ HBasicBlock* predecessor = predecessors_.Get(i);
+ HInstruction* last_instruction = predecessor->GetLastInstruction();
+ predecessor->RemoveInstruction(last_instruction);
+ predecessor->RemoveSuccessor(this);
+ if (predecessor->GetSuccessors().Size() == 1u) {
+ DCHECK(last_instruction->IsIf());
+ predecessor->AddInstruction(new (graph_->GetArena()) HGoto());
+ } else {
+ // The predecessor has no remaining successors and therefore must be dead.
+ // We deliberately leave it without a control-flow instruction so that the
+ // SSAChecker fails unless it too is removed during the pass.
+ DCHECK_EQ(predecessor->GetSuccessors().Size(), 0u);
+ }
}
- for (size_t i = 0, e = successors_.Size(); i < e; ++i) {
- successors_.Get(i)->predecessors_.Delete(this);
- }
- dominator_->dominated_blocks_.Delete(this);
-
predecessors_.Reset();
+
+ // Disconnect the block from its successors and update their dominators
+ // and phis.
+ for (size_t i = 0, e = successors_.Size(); i < e; ++i) {
+ HBasicBlock* successor = successors_.Get(i);
+ // Delete this block from the list of predecessors.
+ size_t this_index = successor->GetPredecessorIndexOf(this);
+ successor->predecessors_.DeleteAt(this_index);
+
+ // Check that `successor` has other predecessors, otherwise `this` is the
+ // dominator of `successor`, which violates the order DCHECKed at the top.
+ DCHECK(!successor->predecessors_.IsEmpty());
+
+ // Recompute the successor's dominator.
+ HBasicBlock* old_dominator = successor->GetDominator();
+ HBasicBlock* new_dominator = successor->predecessors_.Get(0);
+ for (size_t j = 1, f = successor->predecessors_.Size(); j < f; ++j) {
+ new_dominator = graph_->FindCommonDominator(
+ new_dominator, successor->predecessors_.Get(j));
+ }
+ if (old_dominator != new_dominator) {
+ successor->SetDominator(new_dominator);
+ old_dominator->RemoveDominatedBlock(successor);
+ new_dominator->AddDominatedBlock(successor);
+ }
+
+ // Remove this block's entries in the successor's phis.
+ if (successor->predecessors_.Size() == 1u) {
+ // The successor has just one predecessor left. Replace phis with the only
+ // remaining input.
+ for (HInstructionIterator phi_it(successor->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
+ HPhi* phi = phi_it.Current()->AsPhi();
+ phi->ReplaceWith(phi->InputAt(1 - this_index));
+ successor->RemovePhi(phi);
+ }
+ } else {
+ for (HInstructionIterator phi_it(successor->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
+ phi_it.Current()->AsPhi()->RemoveInputAt(this_index);
+ }
+ }
+ }
successors_.Reset();
- dominator_ = nullptr;
- graph_ = nullptr;
+
+ // Disconnect from the dominator.
+ dominator_->RemoveDominatedBlock(this);
+ SetDominator(nullptr);
+
+ // Delete from the graph. The function safely deletes remaining instructions
+ // and updates the reverse post order.
+ graph_->DeleteDeadBlock(this);
+ SetGraph(nullptr);
}
void HBasicBlock::MergeWith(HBasicBlock* other) {
- DCHECK(successors_.IsEmpty()) << "Unimplemented block merge scenario";
- DCHECK(dominated_blocks_.IsEmpty()
- || (dominated_blocks_.Size() == 1 && dominated_blocks_.Get(0) == other))
- << "Unimplemented block merge scenario";
+ DCHECK_EQ(GetGraph(), other->GetGraph());
+ DCHECK(GetDominatedBlocks().Contains(other));
+ DCHECK_EQ(GetSuccessors().Size(), 1u);
+ DCHECK_EQ(GetSuccessors().Get(0), other);
+ DCHECK_EQ(other->GetPredecessors().Size(), 1u);
+ DCHECK_EQ(other->GetPredecessors().Get(0), this);
DCHECK(other->GetPhis().IsEmpty());
- successors_.Reset();
- dominated_blocks_.Reset();
+ // Move instructions from `other` to `this`.
+ DCHECK(EndsWithControlFlowInstruction());
+ RemoveInstruction(GetLastInstruction());
instructions_.Add(other->GetInstructions());
- other->GetInstructions().SetBlockOfInstructions(this);
+ other->instructions_.SetBlockOfInstructions(this);
+ other->instructions_.Clear();
- while (!other->GetSuccessors().IsEmpty()) {
- HBasicBlock* successor = other->GetSuccessors().Get(0);
+ // Remove `other` from the loops it is included in.
+ for (HLoopInformationOutwardIterator it(*other); !it.Done(); it.Advance()) {
+ HLoopInformation* loop_info = it.Current();
+ loop_info->Remove(other);
+ if (loop_info->IsBackEdge(*other)) {
+ loop_info->ReplaceBackEdge(other, this);
+ }
+ }
+
+ // Update links to the successors of `other`.
+ successors_.Reset();
+ while (!other->successors_.IsEmpty()) {
+ HBasicBlock* successor = other->successors_.Get(0);
successor->ReplacePredecessor(other, this);
}
+ // Update the dominator tree.
+ dominated_blocks_.Delete(other);
+ for (size_t i = 0, e = other->GetDominatedBlocks().Size(); i < e; ++i) {
+ HBasicBlock* dominated = other->GetDominatedBlocks().Get(i);
+ dominated_blocks_.Add(dominated);
+ dominated->SetDominator(this);
+ }
+ other->dominated_blocks_.Reset();
+ other->dominator_ = nullptr;
+
+ // Clear the list of predecessors of `other` in preparation for deleting it.
+ other->predecessors_.Reset();
+
+ // Delete `other` from the graph. The function updates reverse post order.
+ graph_->DeleteDeadBlock(other);
+ other->SetGraph(nullptr);
+}
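+
+// Illustrative use (assumed blocks; `pred` ends with a Goto to `succ`,
+// `succ` has `pred` as its single predecessor and contains no phis):
+//
+//   pred->MergeWith(succ);  // moves succ's instructions into pred and
+//                           // deletes succ from the graph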
+
+void HBasicBlock::MergeWithInlined(HBasicBlock* other) {
+ DCHECK_NE(GetGraph(), other->GetGraph());
+ DCHECK(GetDominatedBlocks().IsEmpty());
+ DCHECK(GetSuccessors().IsEmpty());
+ DCHECK(!EndsWithControlFlowInstruction());
+ DCHECK_EQ(other->GetPredecessors().Size(), 1u);
+ DCHECK(other->GetPredecessors().Get(0)->IsEntryBlock());
+ DCHECK(other->GetPhis().IsEmpty());
+ DCHECK(!other->IsInLoop());
+
+ // Move instructions from `other` to `this`.
+ instructions_.Add(other->GetInstructions());
+ other->instructions_.SetBlockOfInstructions(this);
+
+ // Update links to the successors of `other`.
+ successors_.Reset();
+ while (!other->successors_.IsEmpty()) {
+ HBasicBlock* successor = other->successors_.Get(0);
+ successor->ReplacePredecessor(other, this);
+ }
+
+ // Update the dominator tree.
for (size_t i = 0, e = other->GetDominatedBlocks().Size(); i < e; ++i) {
HBasicBlock* dominated = other->GetDominatedBlocks().Get(i);
dominated_blocks_.Add(dominated);
@@ -961,6 +1276,24 @@
}
}
+void HGraph::DeleteDeadBlock(HBasicBlock* block) {
+ DCHECK_EQ(block->GetGraph(), this);
+ DCHECK(block->GetSuccessors().IsEmpty());
+ DCHECK(block->GetPredecessors().IsEmpty());
+ DCHECK(block->GetDominatedBlocks().IsEmpty());
+ DCHECK(block->GetDominator() == nullptr);
+
+ for (HBackwardInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ block->RemoveInstruction(it.Current());
+ }
+ for (HBackwardInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
+ block->RemovePhi(it.Current()->AsPhi());
+ }
+
+ reverse_post_order_.Delete(block);
+ blocks_.Put(block->GetBlockId(), nullptr);
+}
+
void HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
if (GetBlocks().Size() == 3) {
// Simple case of an entry block, a body block, and an exit block.
@@ -993,7 +1326,7 @@
HBasicBlock* first = entry_block_->GetSuccessors().Get(0);
DCHECK(!first->IsInLoop());
- at->MergeWith(first);
+ at->MergeWithInlined(first);
exit_block_->ReplaceWith(to);
// Update all predecessors of the exit block (now the `to` block)
@@ -1082,11 +1415,9 @@
loop_it.Current()->Add(to);
}
if (info->IsBackEdge(*at)) {
- // Only `at` can become a back edge, as the inlined blocks
- // are predecessors of `at`.
- DCHECK_EQ(1u, info->NumberOfBackEdges());
- info->ClearBackEdges();
- info->AddBackEdge(to);
+ // Only `to` can become a back edge, as the inlined blocks
+ // are predecessors of `to`.
+ info->ReplaceBackEdge(at, to);
}
}
}
@@ -1101,7 +1432,7 @@
// - Remove suspend checks, that hold an environment.
// We must do this after the other blocks have been inlined, otherwise ids of
// constants could overlap with the inner graph.
- int parameter_index = 0;
+ size_t parameter_index = 0;
for (HInstructionIterator it(entry_block_->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* current = it.Current();
if (current->IsNullConstant()) {
@@ -1110,10 +1441,19 @@
current->ReplaceWith(outer_graph->GetIntConstant(current->AsIntConstant()->GetValue()));
} else if (current->IsLongConstant()) {
current->ReplaceWith(outer_graph->GetLongConstant(current->AsLongConstant()->GetValue()));
- } else if (current->IsFloatConstant() || current->IsDoubleConstant()) {
- // TODO: Don't duplicate floating-point constants.
- current->MoveBefore(outer_graph->GetEntryBlock()->GetLastInstruction());
+ } else if (current->IsFloatConstant()) {
+ current->ReplaceWith(outer_graph->GetFloatConstant(current->AsFloatConstant()->GetValue()));
+ } else if (current->IsDoubleConstant()) {
+ current->ReplaceWith(outer_graph->GetDoubleConstant(current->AsDoubleConstant()->GetValue()));
} else if (current->IsParameterValue()) {
+ if (kIsDebugBuild
+ && invoke->IsInvokeStaticOrDirect()
+ && invoke->AsInvokeStaticOrDirect()->IsStaticWithExplicitClinitCheck()) {
+ // Ensure we do not use the last input of `invoke`, as it
+ // contains a clinit check which is not an actual argument.
+ size_t last_input_index = invoke->InputCount() - 1;
+ DCHECK(parameter_index != last_input_index);
+ }
current->ReplaceWith(invoke->InputAt(parameter_index++));
} else {
DCHECK(current->IsGoto() || current->IsSuspendCheck());
@@ -1125,53 +1465,6 @@
invoke->GetBlock()->RemoveInstruction(invoke);
}
-void HGraph::MergeEmptyBranches(HBasicBlock* start_block, HBasicBlock* end_block) {
- // Find the two branches of an If.
- DCHECK_EQ(start_block->GetSuccessors().Size(), 2u);
- HBasicBlock* left_branch = start_block->GetSuccessors().Get(0);
- HBasicBlock* right_branch = start_block->GetSuccessors().Get(1);
-
- // Make sure this is a diamond control-flow path.
- DCHECK_EQ(left_branch->GetSuccessors().Get(0), end_block);
- DCHECK_EQ(right_branch->GetSuccessors().Get(0), end_block);
- DCHECK_EQ(end_block->GetPredecessors().Size(), 2u);
- DCHECK_EQ(start_block, end_block->GetDominator());
-
- // Disconnect the branches and merge the two blocks. This will move
- // all instructions from 'end_block' to 'start_block'.
- DCHECK(left_branch->IsSingleGoto());
- DCHECK(right_branch->IsSingleGoto());
- left_branch->DisconnectFromAll();
- right_branch->DisconnectFromAll();
- start_block->RemoveInstruction(start_block->GetLastInstruction());
- start_block->MergeWith(end_block);
-
- // Delete the now redundant blocks from the graph.
- blocks_.Put(left_branch->GetBlockId(), nullptr);
- blocks_.Put(right_branch->GetBlockId(), nullptr);
- blocks_.Put(end_block->GetBlockId(), nullptr);
-
- // Update reverse post order.
- reverse_post_order_.Delete(left_branch);
- reverse_post_order_.Delete(right_branch);
- reverse_post_order_.Delete(end_block);
-
- // Update loops which contain the code.
- for (HLoopInformationOutwardIterator it(*start_block); !it.Done(); it.Advance()) {
- HLoopInformation* loop_info = it.Current();
- DCHECK(loop_info->Contains(*left_branch));
- DCHECK(loop_info->Contains(*right_branch));
- DCHECK(loop_info->Contains(*end_block));
- loop_info->Remove(left_branch);
- loop_info->Remove(right_branch);
- loop_info->Remove(end_block);
- if (loop_info->IsBackEdge(*end_block)) {
- loop_info->RemoveBackEdge(end_block);
- loop_info->AddBackEdge(start_block);
- }
- }
-}
-
std::ostream& operator<<(std::ostream& os, const ReferenceTypeInfo& rhs) {
ScopedObjectAccess soa(Thread::Current());
os << "["
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 1565f58..12ace41 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -48,6 +48,7 @@
class HSuspendCheck;
class LiveInterval;
class LocationSummary;
+class SlowPathCode;
class SsaBuilder;
static const int kDefaultNumberOfBlocks = 8;
@@ -59,6 +60,8 @@
static constexpr uint32_t kMaxIntShiftValue = 0x1f;
static constexpr uint64_t kMaxLongShiftValue = 0x3f;
+static constexpr InvokeType kInvalidInvokeType = static_cast<InvokeType>(-1);
+
enum IfCondition {
kCondEQ,
kCondNE,
@@ -75,6 +78,10 @@
void AddInstruction(HInstruction* instruction);
void RemoveInstruction(HInstruction* instruction);
+ // Insert `instruction` before/after an existing instruction `cursor`.
+ void InsertInstructionBefore(HInstruction* instruction, HInstruction* cursor);
+ void InsertInstructionAfter(HInstruction* instruction, HInstruction* cursor);
+
// Return true if this list contains `instruction`.
bool Contains(HInstruction* instruction) const;
@@ -93,6 +100,9 @@
void AddAfter(HInstruction* cursor, const HInstructionList& instruction_list);
void Add(const HInstructionList& instruction_list);
+ // Return the number of instructions in the list. This is an expensive operation.
+ size_t CountSize() const;
+
private:
HInstruction* first_instruction_;
HInstruction* last_instruction_;
@@ -109,7 +119,13 @@
// Control-flow graph of a method. Contains a list of basic blocks.
class HGraph : public ArenaObject<kArenaAllocMisc> {
public:
- HGraph(ArenaAllocator* arena, bool debuggable = false, int start_instruction_id = 0)
+ HGraph(ArenaAllocator* arena,
+ const DexFile& dex_file,
+ uint32_t method_idx,
+ bool should_generate_constructor_barrier,
+ InvokeType invoke_type = kInvalidInvokeType,
+ bool debuggable = false,
+ int start_instruction_id = 0)
: arena_(arena),
blocks_(arena, kDefaultNumberOfBlocks),
reverse_post_order_(arena, kDefaultNumberOfBlocks),
@@ -120,12 +136,18 @@
number_of_vregs_(0),
number_of_in_vregs_(0),
temporaries_vreg_slots_(0),
- has_array_accesses_(false),
+ has_bounds_checks_(false),
debuggable_(debuggable),
current_instruction_id_(start_instruction_id),
+ dex_file_(dex_file),
+ method_idx_(method_idx),
+ invoke_type_(invoke_type),
+ should_generate_constructor_barrier_(should_generate_constructor_barrier),
cached_null_constant_(nullptr),
cached_int_constants_(std::less<int32_t>(), arena->Adapter()),
- cached_long_constants_(std::less<int64_t>(), arena->Adapter()) {}
+ cached_float_constants_(std::less<int32_t>(), arena->Adapter()),
+ cached_long_constants_(std::less<int64_t>(), arena->Adapter()),
+ cached_double_constants_(std::less<int64_t>(), arena->Adapter()) {}
ArenaAllocator* GetArena() const { return arena_; }
const GrowableArray<HBasicBlock*>& GetBlocks() const { return blocks_; }
@@ -164,7 +186,8 @@
// Inline this graph in `outer_graph`, replacing the given `invoke` instruction.
void InlineInto(HGraph* outer_graph, HInvoke* invoke);
- void MergeEmptyBranches(HBasicBlock* start_block, HBasicBlock* end_block);
+ // Removes `block` from the graph.
+ void DeleteDeadBlock(HBasicBlock* block);
void SplitCriticalEdge(HBasicBlock* block, HBasicBlock* successor);
void SimplifyLoop(HBasicBlock* header);
@@ -222,19 +245,23 @@
return linear_order_;
}
- bool HasArrayAccesses() const {
- return has_array_accesses_;
+ bool HasBoundsChecks() const {
+ return has_bounds_checks_;
}
- void SetHasArrayAccesses(bool value) {
- has_array_accesses_ = value;
+ void SetHasBoundsChecks(bool value) {
+ has_bounds_checks_ = value;
+ }
+
+ bool ShouldGenerateConstructorBarrier() const {
+ return should_generate_constructor_barrier_;
}
bool IsDebuggable() const { return debuggable_; }
// Returns a constant of the given type and value. If it does not exist
- // already, it is created and inserted into the graph. Only integral types
- // are currently supported.
+ // already, it is created and inserted into the graph. This method is only for
+ // integral types.
HConstant* GetConstant(Primitive::Type type, int64_t value);
HNullConstant* GetNullConstant();
HIntConstant* GetIntConstant(int32_t value) {
@@ -243,9 +270,28 @@
HLongConstant* GetLongConstant(int64_t value) {
return CreateConstant(value, &cached_long_constants_);
}
+ HFloatConstant* GetFloatConstant(float value) {
+ return CreateConstant(bit_cast<int32_t, float>(value), &cached_float_constants_);
+ }
+ HDoubleConstant* GetDoubleConstant(double value) {
+ return CreateConstant(bit_cast<int64_t, double>(value), &cached_double_constants_);
+ }
+
+ HBasicBlock* FindCommonDominator(HBasicBlock* first, HBasicBlock* second) const;
+
+ const DexFile& GetDexFile() const {
+ return dex_file_;
+ }
+
+ uint32_t GetMethodIdx() const {
+ return method_idx_;
+ }
+
+ InvokeType GetInvokeType() const {
+ return invoke_type_;
+ }
private:
- HBasicBlock* FindCommonDominator(HBasicBlock* first, HBasicBlock* second) const;
void VisitBlockForDominatorTree(HBasicBlock* block,
HBasicBlock* predecessor,
GrowableArray<size_t>* visits);
@@ -256,10 +302,34 @@
void RemoveInstructionsAsUsersFromDeadBlocks(const ArenaBitVector& visited) const;
void RemoveDeadBlocks(const ArenaBitVector& visited);
- template <class InstType, typename ValueType>
- InstType* CreateConstant(ValueType value, ArenaSafeMap<ValueType, InstType*>* cache);
+ template <class InstructionType, typename ValueType>
+ InstructionType* CreateConstant(ValueType value,
+ ArenaSafeMap<ValueType, InstructionType*>* cache) {
+ // Try to find an existing constant of the given value.
+ InstructionType* constant = nullptr;
+ auto cached_constant = cache->find(value);
+ if (cached_constant != cache->end()) {
+ constant = cached_constant->second;
+ }
+
+ // If not found or previously deleted, create and cache a new instruction.
+ if (constant == nullptr || constant->GetBlock() == nullptr) {
+ constant = new (arena_) InstructionType(value);
+ cache->Overwrite(value, constant);
+ InsertConstant(constant);
+ }
+ return constant;
+ }
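+
+ // Consequence of the caching above, for illustration (`graph` assumed):
+ //
+ //   HIntConstant* a = graph->GetIntConstant(42);
+ //   HIntConstant* b = graph->GetIntConstant(42);
+ //   // a == b: the same instruction is returned, unless the cached one
+ //   // was removed from its block, in which case a fresh instruction is
+ //   // created and overwrites the stale cache entry.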
+
void InsertConstant(HConstant* instruction);
+ // Cache a float constant in the graph. This method should only be
+ // called by the SsaBuilder when creating "equivalent" instructions.
+ void CacheFloatConstant(HFloatConstant* constant);
+
+ // See CacheFloatConstant comment.
+ void CacheDoubleConstant(HDoubleConstant* constant);
+
ArenaAllocator* const arena_;
// List of blocks in insertion order.
@@ -286,8 +356,8 @@
// Number of vreg size slots that the temporaries use (used in baseline compiler).
size_t temporaries_vreg_slots_;
- // Has array accesses. We can totally skip BCE if it's false.
- bool has_array_accesses_;
+ // Has bounds checks. We can totally skip BCE if it's false.
+ bool has_bounds_checks_;
// Indicates whether the graph should be compiled in a way that
// ensures full debuggability. If false, we can apply more
@@ -297,11 +367,25 @@
// The current id to assign to a newly added instruction. See HInstruction.id_.
int32_t current_instruction_id_;
- // Cached common constants often needed by optimization passes.
+ // The dex file from which the method is from.
+ const DexFile& dex_file_;
+
+ // The method index in the dex file.
+ const uint32_t method_idx_;
+
+ // If inlined, this encodes how the callee is being invoked.
+ const InvokeType invoke_type_;
+
+ const bool should_generate_constructor_barrier_;
+
+ // Cached constants.
HNullConstant* cached_null_constant_;
ArenaSafeMap<int32_t, HIntConstant*> cached_int_constants_;
+ ArenaSafeMap<int32_t, HFloatConstant*> cached_float_constants_;
ArenaSafeMap<int64_t, HLongConstant*> cached_long_constants_;
+ ArenaSafeMap<int64_t, HDoubleConstant*> cached_double_constants_;
+ friend class SsaBuilder; // For caching constants.
friend class SsaLivenessAnalysis; // For the linear order.
ART_FRIEND_TEST(GraphTest, IfSuccessorSimpleJoinBlock1);
DISALLOW_COPY_AND_ASSIGN(HGraph);
@@ -353,14 +437,30 @@
return back_edges_;
}
- void ClearBackEdges() {
- back_edges_.Reset();
+ // Returns the greatest lifetime position among this loop's back edges.
+ size_t GetLifetimeEnd() const;
+
+ void ReplaceBackEdge(HBasicBlock* existing, HBasicBlock* new_back_edge) {
+ for (size_t i = 0, e = back_edges_.Size(); i < e; ++i) {
+ if (back_edges_.Get(i) == existing) {
+ back_edges_.Put(i, new_back_edge);
+ return;
+ }
+ }
+ UNREACHABLE();
}
- // Find blocks that are part of this loop. Returns whether the loop is a natural loop,
+ // Finds blocks that are part of this loop. Returns whether the loop is a natural loop,
// that is, the header dominates the back edge.
bool Populate();
+ // Reanalyzes the loop by removing loop info from its blocks and re-running
+ // Populate(). If there are no back edges left, the loop info is completely
+ // removed as well as its SuspendCheck instruction. It must be run on nested
+ // inner loops first.
+ void Update();
+
// Returns whether this loop information contains `block`.
// Note that this loop information *must* be populated before entering this function.
bool Contains(const HBasicBlock& block) const;
@@ -447,6 +547,7 @@
HBasicBlock* GetDominator() const { return dominator_; }
void SetDominator(HBasicBlock* dominator) { dominator_ = dominator; }
void AddDominatedBlock(HBasicBlock* block) { dominated_blocks_.Add(block); }
+ void RemoveDominatedBlock(HBasicBlock* block) { dominated_blocks_.Delete(block); }
void ReplaceDominatedBlock(HBasicBlock* existing, HBasicBlock* new_block) {
for (size_t i = 0, e = dominated_blocks_.Size(); i < e; ++i) {
if (dominated_blocks_.Get(i) == existing) {
@@ -467,8 +568,9 @@
HInstruction* GetFirstInstruction() const { return instructions_.first_instruction_; }
HInstruction* GetLastInstruction() const { return instructions_.last_instruction_; }
const HInstructionList& GetInstructions() const { return instructions_; }
- const HInstructionList& GetPhis() const { return phis_; }
HInstruction* GetFirstPhi() const { return phis_.first_instruction_; }
+ HInstruction* GetLastPhi() const { return phis_.last_instruction_; }
+ const HInstructionList& GetPhis() const { return phis_; }
void AddSuccessor(HBasicBlock* block) {
successors_.Add(block);
@@ -515,6 +617,13 @@
predecessors_.Put(1, temp);
}
+ void SwapSuccessors() {
+ DCHECK_EQ(successors_.Size(), 2u);
+ HBasicBlock* temp = successors_.Get(0);
+ successors_.Put(0, successors_.Get(1));
+ successors_.Put(1, temp);
+ }
+
size_t GetPredecessorIndexOf(HBasicBlock* predecessor) {
for (size_t i = 0, e = predecessors_.Size(); i < e; ++i) {
if (predecessors_.Get(i) == predecessor) {
@@ -545,7 +654,7 @@
// that this method does not update the graph, reverse post order, loop
// information, nor make sure the blocks are consistent (for example ending
// with a control flow instruction).
- void MergeWith(HBasicBlock* other);
+ void MergeWithInlined(HBasicBlock* other);
// Replace `this` with `other`. Predecessors, successors, and dominated blocks
// of `this` are moved to `other`.
@@ -554,15 +663,22 @@
// with a control flow instruction).
void ReplaceWith(HBasicBlock* other);
- // Disconnects `this` from all its predecessors, successors and the dominator.
- // It assumes that `this` does not dominate any blocks.
- // Note that this method does not update the graph, reverse post order, loop
- // information, nor make sure the blocks are consistent (for example ending
- // with a control flow instruction).
- void DisconnectFromAll();
+ // Merge `other` at the end of `this`. This method updates loops, reverse post
+ // order, links to predecessors, successors, dominators and deletes the block
+ // from the graph. The two blocks must be successive, i.e. `this` must be the
+ // only predecessor of `other` and `other` the only successor of `this`.
+ void MergeWith(HBasicBlock* other);
+
+ // Disconnects `this` from all its predecessors, successors and dominator,
+ // removes it from all loops it is included in and eventually from the graph.
+ // The block must not dominate any other block. Predecessors and successors
+ // are safely updated.
+ void DisconnectAndDelete();
void AddInstruction(HInstruction* instruction);
+ // Insert `instruction` before/after an existing instruction `cursor`.
void InsertInstructionBefore(HInstruction* instruction, HInstruction* cursor);
+ void InsertInstructionAfter(HInstruction* instruction, HInstruction* cursor);
// Replace instruction `initial` with `replacement` within this block.
void ReplaceAndRemoveInstructionWith(HInstruction* initial,
HInstruction* replacement);
@@ -573,9 +689,10 @@
// instruction is not in use and removes it from the use lists of its inputs.
void RemoveInstruction(HInstruction* instruction, bool ensure_safety = true);
void RemovePhi(HPhi* phi, bool ensure_safety = true);
+ void RemoveInstructionOrPhi(HInstruction* instruction, bool ensure_safety = true);
bool IsLoopHeader() const {
- return (loop_information_ != nullptr) && (loop_information_->GetHeader() == this);
+ return IsInLoop() && (loop_information_->GetHeader() == this);
}
bool IsLoopPreHeaderFirstPredecessor() const {
@@ -594,7 +711,7 @@
void SetInLoop(HLoopInformation* info) {
if (IsLoopHeader()) {
// Nothing to do. This just means `info` is an outer loop.
- } else if (loop_information_ == nullptr) {
+ } else if (!IsInLoop()) {
loop_information_ = info;
} else if (loop_information_->Contains(*info->GetHeader())) {
// Block is currently part of an outer loop. Make it part of this inner loop.
@@ -615,7 +732,7 @@
bool IsInLoop() const { return loop_information_ != nullptr; }
- // Returns wheter this block dominates the blocked passed as parameter.
+ // Returns whether this block dominates the block passed as parameter.
bool Dominates(HBasicBlock* block) const;
size_t GetLifetimeStart() const { return lifetime_start_; }
@@ -666,7 +783,7 @@
void Advance() {
DCHECK(!Done());
- current_ = current_->GetHeader()->GetDominator()->GetLoopInformation();
+ current_ = current_->GetPreHeader()->GetLoopInformation();
}
HLoopInformation* Current() const {
@@ -779,13 +896,14 @@
HUseListNode* GetNext() const { return next_; }
T GetUser() const { return user_; }
size_t GetIndex() const { return index_; }
+ void SetIndex(size_t index) { index_ = index; }
private:
HUseListNode(T user, size_t index)
: user_(user), index_(index), prev_(nullptr), next_(nullptr) {}
T const user_;
- const size_t index_;
+ size_t index_;
HUseListNode<T>* prev_;
HUseListNode<T>* next_;
@@ -856,6 +974,14 @@
return first_ != nullptr && first_->next_ == nullptr;
}
+ size_t SizeSlow() const {
+ size_t count = 0;
+ for (HUseListNode<T>* current = first_; current != nullptr; current = current->GetNext()) {
+ ++count;
+ }
+ return count;
+ }
+
private:
HUseListNode<T>* first_;
};
@@ -982,15 +1108,53 @@
// A HEnvironment object contains the values of virtual registers at a given location.
class HEnvironment : public ArenaObject<kArenaAllocMisc> {
public:
- HEnvironment(ArenaAllocator* arena, size_t number_of_vregs)
- : vregs_(arena, number_of_vregs) {
+ HEnvironment(ArenaAllocator* arena,
+ size_t number_of_vregs,
+ const DexFile& dex_file,
+ uint32_t method_idx,
+ uint32_t dex_pc,
+ InvokeType invoke_type)
+ : vregs_(arena, number_of_vregs),
+ locations_(arena, number_of_vregs),
+ parent_(nullptr),
+ dex_file_(dex_file),
+ method_idx_(method_idx),
+ dex_pc_(dex_pc),
+ invoke_type_(invoke_type) {
vregs_.SetSize(number_of_vregs);
for (size_t i = 0; i < number_of_vregs; i++) {
vregs_.Put(i, HUserRecord<HEnvironment*>());
}
+
+ locations_.SetSize(number_of_vregs);
+ for (size_t i = 0; i < number_of_vregs; ++i) {
+ locations_.Put(i, Location());
+ }
}
- void CopyFrom(HEnvironment* env);
+ HEnvironment(ArenaAllocator* arena, const HEnvironment& to_copy)
+ : HEnvironment(arena,
+ to_copy.Size(),
+ to_copy.GetDexFile(),
+ to_copy.GetMethodIdx(),
+ to_copy.GetDexPc(),
+ to_copy.GetInvokeType()) {}
+
+ void SetAndCopyParentChain(ArenaAllocator* allocator, HEnvironment* parent) {
+ parent_ = new (allocator) HEnvironment(allocator, *parent);
+ parent_->CopyFrom(parent);
+ if (parent->GetParent() != nullptr) {
+ parent_->SetAndCopyParentChain(allocator, parent->GetParent());
+ }
+ }
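+
+ // Note that the parent chain is deep-copied, for illustration:
+ //
+ //   env->SetAndCopyParentChain(allocator, parent);
+ //   // env->GetParent() is now a fresh copy of `parent`, and so on
+ //   // recursively up the chain; uses are re-registered for each copy.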
+
+ void CopyFrom(const GrowableArray<HInstruction*>& locals);
+ void CopyFrom(HEnvironment* environment);
+
+ // Copy from `env`. If it's a loop phi for `loop_header`, copy the first
+ // input to the loop phi instead. This is for inserting instructions that
+ // require an environment (like HDeoptimization) in the loop pre-header.
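+ //
+ // Sketch of the adjustment (illustrative): for a loop phi
+ //   p = Phi(v_preheader, v_backedge)
+ // an environment copied into the pre-header records `v_preheader` rather
+ // than `p`, since `p` carries no value yet on entry to the loop.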
+ void CopyFromWithLoopPhiAdjustment(HEnvironment* env, HBasicBlock* loop_header);
void SetRawEnvAt(size_t index, HInstruction* instruction) {
vregs_.Put(index, HUserRecord<HEnvironment*>(instruction));
@@ -1004,6 +1168,32 @@
size_t Size() const { return vregs_.Size(); }
+ HEnvironment* GetParent() const { return parent_; }
+
+ void SetLocationAt(size_t index, Location location) {
+ locations_.Put(index, location);
+ }
+
+ Location GetLocationAt(size_t index) const {
+ return locations_.Get(index);
+ }
+
+ uint32_t GetDexPc() const {
+ return dex_pc_;
+ }
+
+ uint32_t GetMethodIdx() const {
+ return method_idx_;
+ }
+
+ InvokeType GetInvokeType() const {
+ return invoke_type_;
+ }
+
+ const DexFile& GetDexFile() const {
+ return dex_file_;
+ }
+
private:
// Record instructions' use entries of this environment for constant-time removal.
// It should only be called by HInstruction when a new environment use is added.
@@ -1014,8 +1204,14 @@
}
GrowableArray<HUserRecord<HEnvironment*> > vregs_;
+ GrowableArray<Location> locations_;
+ HEnvironment* parent_;
+ const DexFile& dex_file_;
+ const uint32_t method_idx_;
+ const uint32_t dex_pc_;
+ const InvokeType invoke_type_;
- friend HInstruction;
+ friend class HInstruction;
DISALLOW_COPY_AND_ASSIGN(HEnvironment);
};
@@ -1145,12 +1341,15 @@
}
virtual bool NeedsEnvironment() const { return false; }
+ virtual uint32_t GetDexPc() const {
+ LOG(FATAL) << "GetDexPc() cannot be called on an instruction that"
+ " does not need an environment";
+ UNREACHABLE();
+ }
virtual bool IsControlFlow() const { return false; }
virtual bool CanThrow() const { return false; }
bool HasSideEffects() const { return side_effects_.HasSideEffects(); }
- virtual bool ActAsNullConstant() const { return false; }
-
// Does not apply for all instructions, but having this at top level greatly
// simplifies the null check elimination.
virtual bool CanBeNull() const {
@@ -1158,7 +1357,10 @@
return true;
}
- virtual bool CanDoImplicitNullCheck() const { return false; }
+ virtual bool CanDoImplicitNullCheckOn(HInstruction* obj) const {
+ UNUSED(obj);
+ return false;
+ }
void SetReferenceTypeInfo(ReferenceTypeInfo reference_type_info) {
DCHECK_EQ(GetType(), Primitive::kPrimNot);
@@ -1221,8 +1423,21 @@
// copying, the uses lists are being updated.
void CopyEnvironmentFrom(HEnvironment* environment) {
ArenaAllocator* allocator = GetBlock()->GetGraph()->GetArena();
- environment_ = new (allocator) HEnvironment(allocator, environment->Size());
+ environment_ = new (allocator) HEnvironment(allocator, *environment);
environment_->CopyFrom(environment);
+ if (environment->GetParent() != nullptr) {
+ environment_->SetAndCopyParentChain(allocator, environment->GetParent());
+ }
+ }
+
+ void CopyEnvironmentFromWithLoopPhiAdjustment(HEnvironment* environment,
+ HBasicBlock* block) {
+ ArenaAllocator* allocator = GetBlock()->GetGraph()->GetArena();
+ environment_ = new (allocator) HEnvironment(allocator, *environment);
+ environment_->CopyFromWithLoopPhiAdjustment(environment, block);
+ if (environment->GetParent() != nullptr) {
+ environment_->SetAndCopyParentChain(allocator, environment->GetParent());
+ }
}
// Returns the number of entries in the environment. Typically, that is the
@@ -1598,7 +1813,7 @@
bool NeedsEnvironment() const OVERRIDE { return true; }
bool CanThrow() const OVERRIDE { return true; }
- uint32_t GetDexPc() const { return dex_pc_; }
+ uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
DECLARE_INSTRUCTION(Deoptimize);
@@ -1626,7 +1841,7 @@
// Try to statically evaluate `operation` and return a HConstant
// containing the result of this evaluation. If `operation` cannot
- // be evaluated as a constant, return nullptr.
+ // be evaluated as a constant, return null.
HConstant* TryStaticEvaluation() const;
// Apply this operation to `x`.
@@ -1694,7 +1909,7 @@
// Try to statically evaluate `operation` and return a HConstant
// containing the result of this evaluation. If `operation` cannot
- // be evaluated as a constant, return nullptr.
+ // be evaluated as a constant, return null.
HConstant* TryStaticEvaluation() const;
// Apply this operation to `x` and `y`.
@@ -1702,11 +1917,11 @@
virtual int64_t Evaluate(int64_t x, int64_t y) const = 0;
// Returns an input that can legally be used as the right input and is
- // constant, or nullptr.
+ // constant, or null.
HConstant* GetConstantRight() const;
// If `GetConstantRight()` returns one of the input, this returns the other
- // one. Otherwise it returns nullptr.
+ // one. Otherwise it returns null.
HInstruction* GetLeastConstantLeft() const;
DECLARE_INSTRUCTION(BinaryOperation);
@@ -2002,28 +2217,30 @@
size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
bool IsMinusOne() const OVERRIDE {
- return bit_cast<uint32_t, float>(AsFloatConstant()->GetValue()) ==
- bit_cast<uint32_t, float>((-1.0f));
+ return bit_cast<uint32_t, float>(value_) == bit_cast<uint32_t, float>((-1.0f));
}
bool IsZero() const OVERRIDE {
- return AsFloatConstant()->GetValue() == 0.0f;
+ return value_ == 0.0f;
}
bool IsOne() const OVERRIDE {
- return bit_cast<uint32_t, float>(AsFloatConstant()->GetValue()) ==
- bit_cast<uint32_t, float>(1.0f);
+ return bit_cast<uint32_t, float>(value_) == bit_cast<uint32_t, float>(1.0f);
+ }
+ bool IsNaN() const {
+ return std::isnan(value_);
}
DECLARE_INSTRUCTION(FloatConstant);
private:
explicit HFloatConstant(float value) : HConstant(Primitive::kPrimFloat), value_(value) {}
+ explicit HFloatConstant(int32_t value)
+ : HConstant(Primitive::kPrimFloat), value_(bit_cast<float, int32_t>(value)) {}
const float value_;
- // Only the SsaBuilder can currently create floating-point constants. If we
- // ever need to create them later in the pipeline, we will have to handle them
- // the same way as integral constants.
+ // Only the SsaBuilder and HGraph can create floating-point constants.
friend class SsaBuilder;
+ friend class HGraph;
DISALLOW_COPY_AND_ASSIGN(HFloatConstant);
};
@@ -2039,28 +2256,30 @@
size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
bool IsMinusOne() const OVERRIDE {
- return bit_cast<uint64_t, double>(AsDoubleConstant()->GetValue()) ==
- bit_cast<uint64_t, double>((-1.0));
+ return bit_cast<uint64_t, double>(value_) == bit_cast<uint64_t, double>((-1.0));
}
bool IsZero() const OVERRIDE {
- return AsDoubleConstant()->GetValue() == 0.0;
+ return value_ == 0.0;
}
bool IsOne() const OVERRIDE {
- return bit_cast<uint64_t, double>(AsDoubleConstant()->GetValue()) ==
- bit_cast<uint64_t, double>(1.0);
+ return bit_cast<uint64_t, double>(value_) == bit_cast<uint64_t, double>(1.0);
+ }
+ bool IsNaN() const {
+ return std::isnan(value_);
}
DECLARE_INSTRUCTION(DoubleConstant);
private:
explicit HDoubleConstant(double value) : HConstant(Primitive::kPrimDouble), value_(value) {}
+ explicit HDoubleConstant(int64_t value)
+ : HConstant(Primitive::kPrimDouble), value_(bit_cast<double, int64_t>(value)) {}
const double value_;
- // Only the SsaBuilder can currently create floating-point constants. If we
- // ever need to create them later in the pipeline, we will have to handle them
- // the same way as integral constants.
+ // Only the SsaBuilder and HGraph can create floating-point constants.
friend class SsaBuilder;
+ friend class HGraph;
DISALLOW_COPY_AND_ASSIGN(HDoubleConstant);
};
@@ -2072,8 +2291,6 @@
size_t ComputeHashCode() const OVERRIDE { return 0; }
- bool ActAsNullConstant() const OVERRIDE { return true; }
-
DECLARE_INSTRUCTION(NullConstant);
private:
@@ -2095,11 +2312,6 @@
size_t ComputeHashCode() const OVERRIDE { return GetValue(); }
- // TODO: Null is represented by the `0` constant. In most cases we replace it
- // with a HNullConstant but we don't do it when comparing (a != null). This
- // method is an workaround until we fix the above.
- bool ActAsNullConstant() const OVERRIDE { return value_ == 0; }
-
bool IsMinusOne() const OVERRIDE { return GetValue() == -1; }
bool IsZero() const OVERRIDE { return GetValue() == 0; }
bool IsOne() const OVERRIDE { return GetValue() == 1; }
@@ -2164,12 +2376,20 @@
SetRawInputAt(index, argument);
}
+ // Return the number of arguments. This number can be lower than
+ // the number of inputs returned by InputCount(), as some invoke
+ // instructions (e.g. HInvokeStaticOrDirect) can have non-argument
+ // inputs at the end of their list of inputs.
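+ //
+ // For example, a static invoke carrying an explicit clinit check
+ // (see HInvokeStaticOrDirect below) has
+ //   InputCount() == GetNumberOfArguments() + 1,
+ // the extra, last input being the HClinitCheck/HLoadClass instruction.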
+ uint32_t GetNumberOfArguments() const { return number_of_arguments_; }
+
Primitive::Type GetType() const OVERRIDE { return return_type_; }
- uint32_t GetDexPc() const { return dex_pc_; }
+ uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
uint32_t GetDexMethodIndex() const { return dex_method_index_; }
+ InvokeType GetOriginalInvokeType() const { return original_invoke_type_; }
+
Intrinsics GetIntrinsic() const {
return intrinsic_;
}
@@ -2183,16 +2403,21 @@
protected:
HInvoke(ArenaAllocator* arena,
uint32_t number_of_arguments,
+ uint32_t number_of_other_inputs,
Primitive::Type return_type,
uint32_t dex_pc,
- uint32_t dex_method_index)
+ uint32_t dex_method_index,
+ InvokeType original_invoke_type)
: HInstruction(SideEffects::All()),
+ number_of_arguments_(number_of_arguments),
inputs_(arena, number_of_arguments),
return_type_(return_type),
dex_pc_(dex_pc),
dex_method_index_(dex_method_index),
+ original_invoke_type_(original_invoke_type),
intrinsic_(Intrinsics::kNone) {
- inputs_.SetSize(number_of_arguments);
+ uint32_t number_of_inputs = number_of_arguments + number_of_other_inputs;
+ inputs_.SetSize(number_of_inputs);
}
const HUserRecord<HInstruction*> InputRecordAt(size_t i) const OVERRIDE { return inputs_.Get(i); }
@@ -2200,10 +2425,12 @@
inputs_.Put(index, input);
}
+ uint32_t number_of_arguments_;
GrowableArray<HUserRecord<HInstruction*> > inputs_;
const Primitive::Type return_type_;
const uint32_t dex_pc_;
const uint32_t dex_method_index_;
+ const InvokeType original_invoke_type_;
Intrinsics intrinsic_;
private:
@@ -2212,36 +2439,106 @@
class HInvokeStaticOrDirect : public HInvoke {
public:
+ // Requirements of this method call regarding the class
+ // initialization (clinit) check of its declaring class.
+ enum class ClinitCheckRequirement {
+ kNone, // Class already initialized.
+ kExplicit, // Static call having explicit clinit check as last input.
+ kImplicit, // Static call implicitly requiring a clinit check.
+ };
+
HInvokeStaticOrDirect(ArenaAllocator* arena,
uint32_t number_of_arguments,
Primitive::Type return_type,
uint32_t dex_pc,
uint32_t dex_method_index,
bool is_recursive,
+ int32_t string_init_offset,
InvokeType original_invoke_type,
- InvokeType invoke_type)
- : HInvoke(arena, number_of_arguments, return_type, dex_pc, dex_method_index),
- original_invoke_type_(original_invoke_type),
+ InvokeType invoke_type,
+ ClinitCheckRequirement clinit_check_requirement)
+ : HInvoke(arena,
+ number_of_arguments,
+ clinit_check_requirement == ClinitCheckRequirement::kExplicit ? 1u : 0u,
+ return_type,
+ dex_pc,
+ dex_method_index,
+ original_invoke_type),
invoke_type_(invoke_type),
- is_recursive_(is_recursive) {}
+ is_recursive_(is_recursive),
+ clinit_check_requirement_(clinit_check_requirement),
+ string_init_offset_(string_init_offset) {}
- bool CanDoImplicitNullCheck() const OVERRIDE {
+ bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
+ UNUSED(obj);
// We access the method via the dex cache so we can't do an implicit null check.
// TODO: for intrinsics we can generate implicit null checks.
return false;
}
- InvokeType GetOriginalInvokeType() const { return original_invoke_type_; }
InvokeType GetInvokeType() const { return invoke_type_; }
bool IsRecursive() const { return is_recursive_; }
bool NeedsDexCache() const OVERRIDE { return !IsRecursive(); }
+ bool IsStringInit() const { return string_init_offset_ != 0; }
+ int32_t GetStringInitOffset() const { return string_init_offset_; }
+
+ // Is this instruction a call to a static method?
+ bool IsStatic() const {
+ return GetInvokeType() == kStatic;
+ }
+
+ // Remove the art::HLoadClass instruction set as last input by
+ // art::PrepareForRegisterAllocation::VisitClinitCheck in lieu of
+ // the initial art::HClinitCheck instruction (only relevant for
+ // static calls with explicit clinit check).
+ void RemoveLoadClassAsLastInput() {
+ DCHECK(IsStaticWithExplicitClinitCheck());
+ size_t last_input_index = InputCount() - 1;
+ HInstruction* last_input = InputAt(last_input_index);
+ DCHECK(last_input != nullptr);
+ DCHECK(last_input->IsLoadClass()) << last_input->DebugName();
+ RemoveAsUserOfInput(last_input_index);
+ inputs_.DeleteAt(last_input_index);
+ clinit_check_requirement_ = ClinitCheckRequirement::kImplicit;
+ DCHECK(IsStaticWithImplicitClinitCheck());
+ }
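+
+ // Sketch of the clinit-check lifecycle for a static call (illustrative):
+ //   1. HGraphBuilder appends an HClinitCheck as the last input
+ //      (ClinitCheckRequirement::kExplicit).
+ //   2. PrepareForRegisterAllocation may replace that input with its
+ //      HLoadClass.
+ //   3. RemoveLoadClassAsLastInput() above drops the input and downgrades
+ //      the requirement to kImplicit.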
+
+ // Is this a call to a static method whose declaring class has an
+ // explicit initialization check in the graph?
+ bool IsStaticWithExplicitClinitCheck() const {
+ return IsStatic() && (clinit_check_requirement_ == ClinitCheckRequirement::kExplicit);
+ }
+
+ // Is this a call to a static method whose declaring class has an
+ // implicit initialization check requirement?
+ bool IsStaticWithImplicitClinitCheck() const {
+ return IsStatic() && (clinit_check_requirement_ == ClinitCheckRequirement::kImplicit);
+ }
DECLARE_INSTRUCTION(InvokeStaticOrDirect);
+ protected:
+ const HUserRecord<HInstruction*> InputRecordAt(size_t i) const OVERRIDE {
+ const HUserRecord<HInstruction*> input_record = HInvoke::InputRecordAt(i);
+ if (kIsDebugBuild && IsStaticWithExplicitClinitCheck() && (i == InputCount() - 1)) {
+ HInstruction* input = input_record.GetInstruction();
+ // `input` is the last input of a static invoke marked as having
+ // an explicit clinit check. It must either be:
+ // - an art::HClinitCheck instruction, set by art::HGraphBuilder; or
+ // - an art::HLoadClass instruction, set by art::PrepareForRegisterAllocation.
+ DCHECK(input != nullptr);
+ DCHECK(input->IsClinitCheck() || input->IsLoadClass()) << input->DebugName();
+ }
+ return input_record;
+ }
+
private:
- const InvokeType original_invoke_type_;
const InvokeType invoke_type_;
const bool is_recursive_;
+ ClinitCheckRequirement clinit_check_requirement_;
+ // Thread entrypoint offset for string init method if this is a string init invoke.
+ // Note that there are multiple string init methods, each having its own offset.
+ int32_t string_init_offset_;
DISALLOW_COPY_AND_ASSIGN(HInvokeStaticOrDirect);
};
@@ -2254,12 +2551,12 @@
uint32_t dex_pc,
uint32_t dex_method_index,
uint32_t vtable_index)
- : HInvoke(arena, number_of_arguments, return_type, dex_pc, dex_method_index),
+ : HInvoke(arena, number_of_arguments, 0u, return_type, dex_pc, dex_method_index, kVirtual),
vtable_index_(vtable_index) {}
- bool CanDoImplicitNullCheck() const OVERRIDE {
+ bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
// TODO: Add implicit null checks in intrinsics.
- return !GetLocations()->Intrinsified();
+ return (obj == InputAt(0)) && !GetLocations()->Intrinsified();
}
uint32_t GetVTableIndex() const { return vtable_index_; }
@@ -2280,12 +2577,12 @@
uint32_t dex_pc,
uint32_t dex_method_index,
uint32_t imt_index)
- : HInvoke(arena, number_of_arguments, return_type, dex_pc, dex_method_index),
+ : HInvoke(arena, number_of_arguments, 0u, return_type, dex_pc, dex_method_index, kInterface),
imt_index_(imt_index) {}
- bool CanDoImplicitNullCheck() const OVERRIDE {
+ bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
// TODO: Add implicit null checks in intrinsics.
- return !GetLocations()->Intrinsified();
+ return (obj == InputAt(0)) && !GetLocations()->Intrinsified();
}
uint32_t GetImtIndex() const { return imt_index_; }
@@ -2307,7 +2604,7 @@
type_index_(type_index),
entrypoint_(entrypoint) {}
- uint32_t GetDexPc() const { return dex_pc_; }
+ uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
uint16_t GetTypeIndex() const { return type_index_; }
// Calls runtime so needs an environment.
@@ -2359,7 +2656,7 @@
SetRawInputAt(0, length);
}
- uint32_t GetDexPc() const { return dex_pc_; }
+ uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
uint16_t GetTypeIndex() const { return type_index_; }
// Calls runtime so needs an environment.
@@ -2454,7 +2751,7 @@
return (y == -1) ? -x : x / y;
}
- uint32_t GetDexPc() const { return dex_pc_; }
+ uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
DECLARE_INSTRUCTION(Div);
@@ -2481,7 +2778,7 @@
return (y == -1) ? 0 : x % y;
}
- uint32_t GetDexPc() const { return dex_pc_; }
+ uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
DECLARE_INSTRUCTION(Rem);
@@ -2508,7 +2805,7 @@
bool NeedsEnvironment() const OVERRIDE { return true; }
bool CanThrow() const OVERRIDE { return true; }
- uint32_t GetDexPc() const { return dex_pc_; }
+ uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
DECLARE_INSTRUCTION(DivZeroCheck);
@@ -2628,6 +2925,8 @@
bool CanBeNull() const OVERRIDE { return !is_this_; }
+ bool IsThis() const { return is_this_; }
+
DECLARE_INSTRUCTION(ParameterValue);
private:
@@ -2703,11 +3002,15 @@
// Required by the x86 and ARM code generators when producing calls
// to the runtime.
- uint32_t GetDexPc() const { return dex_pc_; }
+ uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { return true; }
+ // Try to statically evaluate the conversion and return a HConstant
+ // containing the result. If the input cannot be converted, return null.
+ HConstant* TryStaticEvaluation() const;
+
DECLARE_INSTRUCTION(TypeConversion);
private:
@@ -2746,6 +3049,7 @@
size_t InputCount() const OVERRIDE { return inputs_.Size(); }
void AddInput(HInstruction* input);
+ void RemoveInputAt(size_t index);
Primitive::Type GetType() const OVERRIDE { return type_; }
void SetType(Primitive::Type type) { type_ = type; }
@@ -2812,7 +3116,7 @@
bool CanBeNull() const OVERRIDE { return false; }
- uint32_t GetDexPc() const { return dex_pc_; }
+ uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
DECLARE_INSTRUCTION(NullCheck);
@@ -2855,8 +3159,8 @@
return GetFieldOffset().SizeValue() == other_get->GetFieldOffset().SizeValue();
}
- bool CanDoImplicitNullCheck() const OVERRIDE {
- return GetFieldOffset().Uint32Value() < kPageSize;
+ bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
+ return (obj == InputAt(0)) && GetFieldOffset().Uint32Value() < kPageSize;
}
size_t ComputeHashCode() const OVERRIDE {
@@ -2884,13 +3188,14 @@
MemberOffset field_offset,
bool is_volatile)
: HTemplateInstruction(SideEffects::ChangesSomething()),
- field_info_(field_offset, field_type, is_volatile) {
+ field_info_(field_offset, field_type, is_volatile),
+ value_can_be_null_(true) {
SetRawInputAt(0, object);
SetRawInputAt(1, value);
}
- bool CanDoImplicitNullCheck() const OVERRIDE {
- return GetFieldOffset().Uint32Value() < kPageSize;
+ bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
+ return (obj == InputAt(0)) && GetFieldOffset().Uint32Value() < kPageSize;
}
const FieldInfo& GetFieldInfo() const { return field_info_; }
@@ -2898,11 +3203,14 @@
Primitive::Type GetFieldType() const { return field_info_.GetFieldType(); }
bool IsVolatile() const { return field_info_.IsVolatile(); }
HInstruction* GetValue() const { return InputAt(1); }
+ bool GetValueCanBeNull() const { return value_can_be_null_; }
+ void ClearValueCanBeNull() { value_can_be_null_ = false; }
DECLARE_INSTRUCTION(InstanceFieldSet);
private:
const FieldInfo field_info_;
+ bool value_can_be_null_;
DISALLOW_COPY_AND_ASSIGN(HInstanceFieldSet);
};
@@ -2920,7 +3228,8 @@
UNUSED(other);
return true;
}
- bool CanDoImplicitNullCheck() const OVERRIDE {
+ bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
+ UNUSED(obj);
// TODO: We can be smarter here.
// Currently, the array access is always preceded by an ArrayLength or a NullCheck
// which generates the implicit null check. There are cases when these can be removed
@@ -2950,7 +3259,8 @@
: HTemplateInstruction(SideEffects::ChangesSomething()),
dex_pc_(dex_pc),
expected_component_type_(expected_component_type),
- needs_type_check_(value->GetType() == Primitive::kPrimNot) {
+ needs_type_check_(value->GetType() == Primitive::kPrimNot),
+ value_can_be_null_(true) {
SetRawInputAt(0, array);
SetRawInputAt(1, index);
SetRawInputAt(2, value);
@@ -2962,7 +3272,8 @@
return needs_type_check_;
}
- bool CanDoImplicitNullCheck() const OVERRIDE {
+ bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
+ UNUSED(obj);
// TODO: Same as for ArrayGet.
return false;
}
@@ -2971,9 +3282,14 @@
needs_type_check_ = false;
}
+ void ClearValueCanBeNull() {
+ value_can_be_null_ = false;
+ }
+
+ bool GetValueCanBeNull() const { return value_can_be_null_; }
bool NeedsTypeCheck() const { return needs_type_check_; }
- uint32_t GetDexPc() const { return dex_pc_; }
+ uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
HInstruction* GetArray() const { return InputAt(0); }
HInstruction* GetIndex() const { return InputAt(1); }
@@ -2996,6 +3312,7 @@
const uint32_t dex_pc_;
const Primitive::Type expected_component_type_;
bool needs_type_check_;
+ bool value_can_be_null_;
DISALLOW_COPY_AND_ASSIGN(HArraySet);
};
@@ -3014,7 +3331,9 @@
UNUSED(other);
return true;
}
- bool CanDoImplicitNullCheck() const OVERRIDE { return true; }
+ bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
+ return obj == InputAt(0);
+ }
DECLARE_INSTRUCTION(ArrayLength);
@@ -3041,7 +3360,7 @@
bool CanThrow() const OVERRIDE { return true; }
- uint32_t GetDexPc() const { return dex_pc_; }
+ uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
DECLARE_INSTRUCTION(BoundsCheck);
@@ -3081,19 +3400,25 @@
class HSuspendCheck : public HTemplateInstruction<0> {
public:
explicit HSuspendCheck(uint32_t dex_pc)
- : HTemplateInstruction(SideEffects::None()), dex_pc_(dex_pc) {}
+ : HTemplateInstruction(SideEffects::None()), dex_pc_(dex_pc), slow_path_(nullptr) {}
bool NeedsEnvironment() const OVERRIDE {
return true;
}
- uint32_t GetDexPc() const { return dex_pc_; }
+ uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
+ void SetSlowPath(SlowPathCode* slow_path) { slow_path_ = slow_path; }
+ SlowPathCode* GetSlowPath() const { return slow_path_; }
DECLARE_INSTRUCTION(SuspendCheck);
private:
const uint32_t dex_pc_;
+ // Only used for code generation, in order to share the same slow path between back edges
+ // of the same loop.
+ SlowPathCode* slow_path_;
+
DISALLOW_COPY_AND_ASSIGN(HSuspendCheck);
};
@@ -3120,7 +3445,7 @@
size_t ComputeHashCode() const OVERRIDE { return type_index_; }
- uint32_t GetDexPc() const { return dex_pc_; }
+ uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
uint16_t GetTypeIndex() const { return type_index_; }
bool IsReferrersClass() const { return is_referrers_class_; }
@@ -3134,8 +3459,8 @@
return generate_clinit_check_;
}
- void SetMustGenerateClinitCheck() {
- generate_clinit_check_ = true;
+ void SetMustGenerateClinitCheck(bool generate_clinit_check) {
+ generate_clinit_check_ = generate_clinit_check;
}
bool CanCallRuntime() const {
@@ -3194,7 +3519,7 @@
size_t ComputeHashCode() const OVERRIDE { return string_index_; }
- uint32_t GetDexPc() const { return dex_pc_; }
+ uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
uint32_t GetStringIndex() const { return string_index_; }
// TODO: Can we deopt or debug when we resolve a string?
@@ -3210,7 +3535,6 @@
DISALLOW_COPY_AND_ASSIGN(HLoadString);
};
-// TODO: Pass this check to HInvokeStaticOrDirect nodes.
/**
* Performs an initialization check on its Class object input.
*/
@@ -3233,7 +3557,7 @@
return true;
}
- uint32_t GetDexPc() const { return dex_pc_; }
+ uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
HLoadClass* GetLoadClass() const { return InputAt(0)->AsLoadClass(); }
@@ -3289,7 +3613,8 @@
MemberOffset field_offset,
bool is_volatile)
: HTemplateInstruction(SideEffects::ChangesSomething()),
- field_info_(field_offset, field_type, is_volatile) {
+ field_info_(field_offset, field_type, is_volatile),
+ value_can_be_null_(true) {
SetRawInputAt(0, cls);
SetRawInputAt(1, value);
}
@@ -3300,11 +3625,14 @@
bool IsVolatile() const { return field_info_.IsVolatile(); }
HInstruction* GetValue() const { return InputAt(1); }
+ bool GetValueCanBeNull() const { return value_can_be_null_; }
+ void ClearValueCanBeNull() { value_can_be_null_ = false; }
DECLARE_INSTRUCTION(StaticFieldSet);
private:
const FieldInfo field_info_;
+ bool value_can_be_null_;
DISALLOW_COPY_AND_ASSIGN(HStaticFieldSet);
};
@@ -3333,7 +3661,7 @@
bool CanThrow() const OVERRIDE { return true; }
- uint32_t GetDexPc() const { return dex_pc_; }
+ uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
DECLARE_INSTRUCTION(Throw);
@@ -3351,6 +3679,7 @@
uint32_t dex_pc)
: HExpression(Primitive::kPrimBoolean, SideEffects::None()),
class_is_final_(class_is_final),
+ must_do_null_check_(true),
dex_pc_(dex_pc) {
SetRawInputAt(0, object);
SetRawInputAt(1, constant);
@@ -3366,14 +3695,19 @@
return false;
}
- uint32_t GetDexPc() const { return dex_pc_; }
+ uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
bool IsClassFinal() const { return class_is_final_; }
+ // Used only in code generation.
+ bool MustDoNullCheck() const { return must_do_null_check_; }
+ void ClearMustDoNullCheck() { must_do_null_check_ = false; }
+
DECLARE_INSTRUCTION(InstanceOf);
private:
const bool class_is_final_;
+ bool must_do_null_check_;
const uint32_t dex_pc_;
DISALLOW_COPY_AND_ASSIGN(HInstanceOf);
@@ -3414,6 +3748,7 @@
uint32_t dex_pc)
: HTemplateInstruction(SideEffects::None()),
class_is_final_(class_is_final),
+ must_do_null_check_(true),
dex_pc_(dex_pc) {
SetRawInputAt(0, object);
SetRawInputAt(1, constant);
@@ -3432,7 +3767,10 @@
bool CanThrow() const OVERRIDE { return true; }
- uint32_t GetDexPc() const { return dex_pc_; }
+ bool MustDoNullCheck() const { return must_do_null_check_; }
+ void ClearMustDoNullCheck() { must_do_null_check_ = false; }
+
+ uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
bool IsClassFinal() const { return class_is_final_; }
@@ -3440,6 +3778,7 @@
private:
const bool class_is_final_;
+ bool must_do_null_check_;
const uint32_t dex_pc_;
DISALLOW_COPY_AND_ASSIGN(HCheckCast);
@@ -3477,7 +3816,7 @@
bool NeedsEnvironment() const OVERRIDE { return true; }
bool CanThrow() const OVERRIDE { return true; }
- uint32_t GetDexPc() const { return dex_pc_; }
+ uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
bool IsEnter() const { return kind_ == kEnter; }
@@ -3596,7 +3935,9 @@
}
for (size_t i = 0, e = moves_.Size(); i < e; ++i) {
DCHECK(!destination.OverlapsWith(moves_.Get(i).GetDestination()))
- << "Overlapped destination for two moves in a parallel move.";
+ << "Overlapped destination for two moves in a parallel move: "
+ << moves_.Get(i).GetSource() << " ==> " << moves_.Get(i).GetDestination() << " and "
+ << source << " ==> " << destination;
}
}
moves_.Add(MoveOperands(source, destination, type, instruction));
diff --git a/compiler/optimizing/nodes_test.cc b/compiler/optimizing/nodes_test.cc
index 4e83ce5..782cde4 100644
--- a/compiler/optimizing/nodes_test.cc
+++ b/compiler/optimizing/nodes_test.cc
@@ -16,6 +16,7 @@
#include "base/arena_allocator.h"
#include "nodes.h"
+#include "optimizing_unit_test.h"
#include "gtest/gtest.h"
@@ -29,7 +30,7 @@
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = new (&allocator) HGraph(&allocator);
+ HGraph* graph = CreateGraph(&allocator);
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
@@ -49,7 +50,8 @@
first_block->AddSuccessor(exit_block);
exit_block->AddInstruction(new (&allocator) HExit());
- HEnvironment* environment = new (&allocator) HEnvironment(&allocator, 1);
+ HEnvironment* environment = new (&allocator) HEnvironment(
+ &allocator, 1, graph->GetDexFile(), graph->GetMethodIdx(), 0, kStatic);
null_check->SetRawEnvironment(environment);
environment->SetRawEnvAt(0, parameter);
parameter->AddEnvUseAt(null_check->GetEnvironment(), 0);
@@ -70,7 +72,7 @@
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = new (&allocator) HGraph(&allocator);
+ HGraph* graph = CreateGraph(&allocator);
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
@@ -96,7 +98,7 @@
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = new (&allocator) HGraph(&allocator);
+ HGraph* graph = CreateGraph(&allocator);
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
@@ -112,4 +114,51 @@
ASSERT_TRUE(parameter->GetUses().HasOnlyOneUse());
}
+TEST(Node, ParentEnvironment) {
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+
+ HGraph* graph = CreateGraph(&allocator);
+ HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(entry);
+ graph->SetEntryBlock(entry);
+ HInstruction* parameter1 = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* with_environment = new (&allocator) HNullCheck(parameter1, 0);
+ entry->AddInstruction(parameter1);
+ entry->AddInstruction(with_environment);
+ entry->AddInstruction(new (&allocator) HExit());
+
+ ASSERT_TRUE(parameter1->HasUses());
+ ASSERT_TRUE(parameter1->GetUses().HasOnlyOneUse());
+
+ HEnvironment* environment = new (&allocator) HEnvironment(
+ &allocator, 1, graph->GetDexFile(), graph->GetMethodIdx(), 0, kStatic);
+ GrowableArray<HInstruction*> array(&allocator, 1);
+ array.Add(parameter1);
+
+ environment->CopyFrom(array);
+ with_environment->SetRawEnvironment(environment);
+
+ ASSERT_TRUE(parameter1->HasEnvironmentUses());
+ ASSERT_TRUE(parameter1->GetEnvUses().HasOnlyOneUse());
+
+ HEnvironment* parent1 = new (&allocator) HEnvironment(
+ &allocator, 1, graph->GetDexFile(), graph->GetMethodIdx(), 0, kStatic);
+ parent1->CopyFrom(array);
+
+ ASSERT_EQ(parameter1->GetEnvUses().SizeSlow(), 2u);
+
+ HEnvironment* parent2 = new (&allocator) HEnvironment(
+ &allocator, 1, graph->GetDexFile(), graph->GetMethodIdx(), 0, kStatic);
+ parent2->CopyFrom(array);
+ parent1->SetAndCopyParentChain(&allocator, parent2);
+
+ // One use for parent2, and one other use for the new parent of parent1.
+ ASSERT_EQ(parameter1->GetEnvUses().SizeSlow(), 4u);
+
+ // We have copied the parent chain. So we now have two more uses.
+ environment->SetAndCopyParentChain(&allocator, parent1);
+ ASSERT_EQ(parameter1->GetEnvUses().SizeSlow(), 6u);
+}
+
} // namespace art
diff --git a/compiler/optimizing/optimization.cc b/compiler/optimizing/optimization.cc
index b13e07e..c46a219 100644
--- a/compiler/optimizing/optimization.cc
+++ b/compiler/optimizing/optimization.cc
@@ -21,9 +21,9 @@
namespace art {
-void HOptimization::MaybeRecordStat(MethodCompilationStat compilation_stat) const {
+void HOptimization::MaybeRecordStat(MethodCompilationStat compilation_stat, size_t count) const {
if (stats_ != nullptr) {
- stats_->RecordStat(compilation_stat);
+ stats_->RecordStat(compilation_stat, count);
}
}
diff --git a/compiler/optimizing/optimization.h b/compiler/optimizing/optimization.h
index 8b20281..ccf8de9 100644
--- a/compiler/optimizing/optimization.h
+++ b/compiler/optimizing/optimization.h
@@ -48,7 +48,7 @@
void Check();
protected:
- void MaybeRecordStat(MethodCompilationStat compilation_stat) const;
+ void MaybeRecordStat(MethodCompilationStat compilation_stat, size_t count = 1) const;
HGraph* const graph_;
// Used to record stats about the optimization.
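The `count` parameter lets a pass report a batch of events in one call rather than looping over `RecordStat`. A minimal sketch of the intended use, inside a hypothetical HOptimization subclass (the stat name is from this change):

    void Run() OVERRIDE {
      size_t removed = 0;
      // ... remove dead instructions, incrementing `removed` ...
      MaybeRecordStat(kRemovedDeadInstruction, removed);  // one call, count = removed
    }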
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index b2c13ad..7aea249 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -21,6 +21,7 @@
#include "cfi_test.h"
#include "gtest/gtest.h"
#include "optimizing/code_generator.h"
+#include "optimizing/optimizing_unit_test.h"
#include "utils/assembler.h"
#include "optimizing/optimizing_cfi_test_expected.inc"
@@ -45,10 +46,10 @@
std::unique_ptr<const InstructionSetFeatures> isa_features;
std::string error;
isa_features.reset(InstructionSetFeatures::FromVariant(isa, "default", &error));
- HGraph graph(&allocator);
+ HGraph* graph = CreateGraph(&allocator);
// Generate simple frame with some spills.
std::unique_ptr<CodeGenerator> code_gen(
- CodeGenerator::Create(&graph, isa, *isa_features.get(), opts));
+ CodeGenerator::Create(graph, isa, *isa_features.get(), opts));
const int frame_size = 64;
int core_reg = 0;
int fp_reg = 0;
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 2ec8536..b2e8ecd 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -320,14 +320,17 @@
const DexCompilationUnit& dex_compilation_unit,
PassInfoPrinter* pass_info_printer,
StackHandleScopeCollection* handles) {
- HDeadCodeElimination dce(graph);
+ HDeadCodeElimination dce1(graph, stats,
+ HDeadCodeElimination::kInitialDeadCodeEliminationPassName);
+ HDeadCodeElimination dce2(graph, stats,
+ HDeadCodeElimination::kFinalDeadCodeEliminationPassName);
HConstantFolding fold1(graph);
InstructionSimplifier simplify1(graph, stats);
- HBooleanSimplifier boolean_not(graph);
+ HBooleanSimplifier boolean_simplify(graph);
HInliner inliner(graph, dex_compilation_unit, dex_compilation_unit, driver, stats);
- HConstantFolding fold2(graph);
+ HConstantFolding fold2(graph, "constant_folding_after_inlining");
SideEffectsAnalysis side_effects(graph);
GVNOptimization gvn(graph, side_effects);
LICM licm(graph, side_effects);
@@ -339,20 +342,21 @@
HOptimization* optimizations[] = {
&intrinsics,
- &dce,
+ &dce1,
&fold1,
&simplify1,
+ &inliner,
// BooleanSimplifier depends on the InstructionSimplifier removing redundant
// suspend checks to recognize empty blocks.
- &boolean_not,
- &inliner,
+ &boolean_simplify,
&fold2,
&side_effects,
&gvn,
&licm,
&bce,
&type_propagation,
- &simplify2
+ &simplify2,
+ &dce2,
};
RunOptimizations(optimizations, arraysize(optimizations), pass_info_printer);
@@ -508,9 +512,14 @@
class_def_idx, method_idx, access_flags,
compiler_driver->GetVerifiedMethod(&dex_file, method_idx));
+ bool requires_barrier = dex_compilation_unit.IsConstructor()
+ && compiler_driver->RequiresConstructorBarrier(Thread::Current(),
+ dex_compilation_unit.GetDexFile(),
+ dex_compilation_unit.GetClassDefIndex());
ArenaAllocator arena(Runtime::Current()->GetArenaPool());
HGraph* graph = new (&arena) HGraph(
- &arena, compiler_driver->GetCompilerOptions().GetDebuggable());
+ &arena, dex_file, method_idx, requires_barrier, kInvalidInvokeType,
+ compiler_driver->GetCompilerOptions().GetDebuggable());
// For testing purposes, we put a special marker on method names that should be compiled
// with this compiler. This makes sure we're not regressing.
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index 9bfa543..b6b1bb1 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -30,26 +30,28 @@
kCompiledOptimized,
kCompiledQuick,
kInlinedInvoke,
- kNotCompiledUnsupportedIsa,
- kNotCompiledPathological,
+ kInstructionSimplifications,
+ kNotCompiledBranchOutsideMethodCode,
+ kNotCompiledCannotBuildSSA,
+ kNotCompiledCantAccesType,
+ kNotCompiledClassNotVerified,
kNotCompiledHugeMethod,
kNotCompiledLargeMethodNoBranches,
- kNotCompiledCannotBuildSSA,
kNotCompiledNoCodegen,
- kNotCompiledUnresolvedMethod,
- kNotCompiledUnresolvedField,
kNotCompiledNonSequentialRegPair,
+ kNotCompiledPathological,
kNotCompiledSpaceFilter,
- kNotOptimizedTryCatch,
- kNotOptimizedDisabled,
- kNotCompiledCantAccesType,
- kNotOptimizedRegisterAllocator,
kNotCompiledUnhandledInstruction,
+ kNotCompiledUnresolvedField,
+ kNotCompiledUnresolvedMethod,
+ kNotCompiledUnsupportedIsa,
kNotCompiledVerifyAtRuntime,
- kNotCompiledClassNotVerified,
+ kNotOptimizedDisabled,
+ kNotOptimizedRegisterAllocator,
+ kNotOptimizedTryCatch,
kRemovedCheckedCast,
+ kRemovedDeadInstruction,
kRemovedNullCheck,
- kInstructionSimplifications,
kLastStat
};
@@ -57,8 +59,8 @@
public:
OptimizingCompilerStats() {}
- void RecordStat(MethodCompilationStat stat) {
- compile_stats_[stat]++;
+ void RecordStat(MethodCompilationStat stat, size_t count = 1) {
+ compile_stats_[stat] += count;
}
void Log() const {
@@ -96,26 +98,28 @@
case kCompiledOptimized : return "kCompiledOptimized";
case kCompiledQuick : return "kCompiledQuick";
case kInlinedInvoke : return "kInlinedInvoke";
- case kNotCompiledUnsupportedIsa : return "kNotCompiledUnsupportedIsa";
- case kNotCompiledPathological : return "kNotCompiledPathological";
+ case kInstructionSimplifications: return "kInstructionSimplifications";
+ case kNotCompiledBranchOutsideMethodCode: return "kNotCompiledBranchOutsideMethodCode";
+ case kNotCompiledCannotBuildSSA : return "kNotCompiledCannotBuildSSA";
+ case kNotCompiledCantAccesType : return "kNotCompiledCantAccesType";
+ case kNotCompiledClassNotVerified : return "kNotCompiledClassNotVerified";
case kNotCompiledHugeMethod : return "kNotCompiledHugeMethod";
case kNotCompiledLargeMethodNoBranches : return "kNotCompiledLargeMethodNoBranches";
- case kNotCompiledCannotBuildSSA : return "kNotCompiledCannotBuildSSA";
case kNotCompiledNoCodegen : return "kNotCompiledNoCodegen";
- case kNotCompiledUnresolvedMethod : return "kNotCompiledUnresolvedMethod";
- case kNotCompiledUnresolvedField : return "kNotCompiledUnresolvedField";
case kNotCompiledNonSequentialRegPair : return "kNotCompiledNonSequentialRegPair";
- case kNotOptimizedDisabled : return "kNotOptimizedDisabled";
- case kNotOptimizedTryCatch : return "kNotOptimizedTryCatch";
- case kNotCompiledCantAccesType : return "kNotCompiledCantAccesType";
+ case kNotCompiledPathological : return "kNotCompiledPathological";
case kNotCompiledSpaceFilter : return "kNotCompiledSpaceFilter";
- case kNotOptimizedRegisterAllocator : return "kNotOptimizedRegisterAllocator";
case kNotCompiledUnhandledInstruction : return "kNotCompiledUnhandledInstruction";
+ case kNotCompiledUnresolvedField : return "kNotCompiledUnresolvedField";
+ case kNotCompiledUnresolvedMethod : return "kNotCompiledUnresolvedMethod";
+ case kNotCompiledUnsupportedIsa : return "kNotCompiledUnsupportedIsa";
case kNotCompiledVerifyAtRuntime : return "kNotCompiledVerifyAtRuntime";
- case kNotCompiledClassNotVerified : return "kNotCompiledClassNotVerified";
+ case kNotOptimizedDisabled : return "kNotOptimizedDisabled";
+ case kNotOptimizedRegisterAllocator : return "kNotOptimizedRegisterAllocator";
+ case kNotOptimizedTryCatch : return "kNotOptimizedTryCatch";
case kRemovedCheckedCast: return "kRemovedCheckedCast";
+ case kRemovedDeadInstruction: return "kRemovedDeadInstruction";
case kRemovedNullCheck: return "kRemovedNullCheck";
- case kInstructionSimplifications: return "kInstructionSimplifications";
default: LOG(FATAL) << "invalid stat";
}
return "";
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 6b23692..1fe9346 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -72,11 +72,16 @@
}
}
+inline HGraph* CreateGraph(ArenaAllocator* allocator) {
+ return new (allocator) HGraph(
+ allocator, *reinterpret_cast<DexFile*>(allocator->Alloc(sizeof(DexFile))), -1, false);
+}
+
// Create a control-flow graph from Dex instructions.
inline HGraph* CreateCFG(ArenaAllocator* allocator,
const uint16_t* data,
Primitive::Type return_type = Primitive::kPrimInt) {
- HGraph* graph = new (allocator) HGraph(allocator);
+ HGraph* graph = CreateGraph(allocator);
HGraphBuilder builder(graph, return_type);
const DexFile::CodeItem* item =
reinterpret_cast<const DexFile::CodeItem*>(data);
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index f5d8d82..538736b 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -53,7 +53,7 @@
if (check->GetPrevious() == cls) {
// Pass the initialization duty to the `HLoadClass` instruction,
// and remove the instruction from the graph.
- cls->SetMustGenerateClinitCheck();
+ cls->SetMustGenerateClinitCheck(true);
check->GetBlock()->RemoveInstruction(check);
}
}
@@ -79,4 +79,33 @@
}
}
+void PrepareForRegisterAllocation::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
+ if (invoke->IsStaticWithExplicitClinitCheck()) {
+ size_t last_input_index = invoke->InputCount() - 1;
+ HLoadClass* last_input = invoke->InputAt(last_input_index)->AsLoadClass();
+    DCHECK(last_input != nullptr)
+        << "Last input is not HLoadClass. It is "
+        << invoke->InputAt(last_input_index)->DebugName();
+
+    // The static call will initialize the class, so there's no need for a clinit check if
+    // it's the first user.
+ if (last_input == invoke->GetPrevious()) {
+ last_input->SetMustGenerateClinitCheck(false);
+ }
+
+    // Remove the load class instruction added as the last input of the
+    // static invoke by the graph builder (along with a clinit check,
+    // previously removed by PrepareForRegisterAllocation::VisitClinitCheck),
+    // since it is no longer required at this stage (i.e., after inlining
+    // has been performed).
+ invoke->RemoveLoadClassAsLastInput();
+
+ // If the load class instruction is no longer used, remove it from
+ // the graph.
+ if (!last_input->HasUses()) {
+ last_input->GetBlock()->RemoveInstruction(last_input);
+ }
+ }
+}
+
} // namespace art
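For orientation, the instruction pattern this visitor unwinds looks as follows (a sketch of what the graph builder produces for a static call that may need class initialization; names are from this diff):

    //   cls    = HLoadClass(...)                  // added by the graph builder
    //   check  = HClinitCheck(cls)                // removed in VisitClinitCheck above
    //   invoke = HInvokeStaticOrDirect(..., cls)  // keeps `cls` as its last input
    //
    // After VisitInvokeStaticOrDirect, `invoke` drops `cls` from its inputs,
    // and `cls` itself is removed once no users remain.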
diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h
index c28507c..d7f277f 100644
--- a/compiler/optimizing/prepare_for_register_allocation.h
+++ b/compiler/optimizing/prepare_for_register_allocation.h
@@ -39,6 +39,7 @@
void VisitBoundType(HBoundType* bound_type) OVERRIDE;
void VisitClinitCheck(HClinitCheck* check) OVERRIDE;
void VisitCondition(HCondition* condition) OVERRIDE;
+ void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE;
DISALLOW_COPY_AND_ASSIGN(PrepareForRegisterAllocation);
};
diff --git a/compiler/optimizing/pretty_printer_test.cc b/compiler/optimizing/pretty_printer_test.cc
index 293fde9..c56100d 100644
--- a/compiler/optimizing/pretty_printer_test.cc
+++ b/compiler/optimizing/pretty_printer_test.cc
@@ -30,7 +30,7 @@
static void TestCode(const uint16_t* data, const char* expected) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = new (&allocator) HGraph(&allocator);
+ HGraph* graph = CreateGraph(&allocator);
HGraphBuilder builder(graph);
const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
bool graph_built = builder.BuildGraph(*item);
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index de6941c..12b1c2b 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -68,11 +68,11 @@
}
HInstruction* input0 = ifInput->InputAt(0);
HInstruction* input1 = ifInput->InputAt(1);
- HInstruction* obj;
+ HInstruction* obj = nullptr;
- if ((input0->GetType() == Primitive::kPrimNot) && input1->ActAsNullConstant()) {
+ if (input1->IsNullConstant()) {
obj = input0;
- } else if ((input1->GetType() == Primitive::kPrimNot) && input0->ActAsNullConstant()) {
+ } else if (input0->IsNullConstant()) {
obj = input1;
} else {
return;
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index f8e00f6..925099a 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -378,7 +378,7 @@
// Split just before first register use.
size_t first_register_use = current->FirstRegisterUse();
if (first_register_use != kNoLifetime) {
- LiveInterval* split = Split(current, first_register_use - 1);
+ LiveInterval* split = SplitBetween(current, current->GetStart(), first_register_use - 1);
// Don't add directly to `unhandled`, it needs to be sorted and the start
// of this new interval might be after intervals already in the list.
AddSorted(&unhandled, split);
@@ -768,14 +768,14 @@
}
} else {
DCHECK(!current->IsHighInterval());
- int hint = current->FindFirstRegisterHint(free_until);
+ int hint = current->FindFirstRegisterHint(free_until, liveness_);
if (hint != kNoRegister) {
DCHECK(!IsBlocked(hint));
reg = hint;
} else if (current->IsLowInterval()) {
reg = FindAvailableRegisterPair(free_until, current->GetStart());
} else {
- reg = FindAvailableRegister(free_until);
+ reg = FindAvailableRegister(free_until, current);
}
}
@@ -839,14 +839,52 @@
return reg;
}
-int RegisterAllocator::FindAvailableRegister(size_t* next_use) const {
+bool RegisterAllocator::IsCallerSaveRegister(int reg) const {
+ return processing_core_registers_
+ ? !codegen_->IsCoreCalleeSaveRegister(reg)
+ : !codegen_->IsFloatingPointCalleeSaveRegister(reg);
+}
+
+int RegisterAllocator::FindAvailableRegister(size_t* next_use, LiveInterval* current) const {
+  // We special case intervals that do not span a safepoint to try to find a caller-save
+  // register if one is available. We iterate from 0 to the number of registers,
+  // so if only callee-save registers are free early in the iteration, we keep going in
+  // the hope of finding a caller-save register later.
+ bool prefers_caller_save = !current->HasWillCallSafepoint();
int reg = kNoRegister;
- // Pick the register that is used the last.
for (size_t i = 0; i < number_of_registers_; ++i) {
- if (IsBlocked(i)) continue;
- if (reg == kNoRegister || next_use[i] > next_use[reg]) {
+ if (IsBlocked(i)) {
+ // Register cannot be used. Continue.
+ continue;
+ }
+
+ // Best case: we found a register fully available.
+ if (next_use[i] == kMaxLifetimePosition) {
+ if (prefers_caller_save && !IsCallerSaveRegister(i)) {
+ // We can get shorter encodings on some platforms by using
+ // small register numbers. So only update the candidate if the previous
+ // one was not available for the whole method.
+ if (reg == kNoRegister || next_use[reg] != kMaxLifetimePosition) {
+ reg = i;
+ }
+ // Continue the iteration in the hope of finding a caller save register.
+ continue;
+ } else {
+ reg = i;
+ // We know the register is good enough. Return it.
+ break;
+ }
+ }
+
+ // If we had no register before, take this one as a reference.
+ if (reg == kNoRegister) {
reg = i;
- if (next_use[i] == kMaxLifetimePosition) break;
+ continue;
+ }
+
+ // Pick the register that is used the last.
+ if (next_use[i] > next_use[reg]) {
+ reg = i;
+ continue;
}
}
return reg;
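The selection loop above encodes three preferences at once: skip blocked registers, prefer a register that is free for the whole method (favoring caller-save ones, and keeping low register numbers for shorter encodings), and otherwise pick the register whose next use is furthest away. A standalone sketch of the same policy, with plain arrays standing in for the allocator state:

    #include <cstddef>
    #include <limits>

    // Sketch only: mirrors FindAvailableRegister's policy outside the allocator.
    int PickRegister(const size_t* next_use, size_t num_regs,
                     const bool* blocked, const bool* caller_save,
                     bool prefers_caller_save) {
      const size_t kMax = std::numeric_limits<size_t>::max();  // "free until end"
      int reg = -1;
      for (size_t i = 0; i < num_regs; ++i) {
        if (blocked[i]) continue;
        if (next_use[i] == kMax) {
          if (prefers_caller_save && !caller_save[i]) {
            // Fully-free callee-save: remember it, but keep looking for a
            // caller-save register. Only displace a candidate that was not
            // free for the whole method (small numbers encode shorter).
            if (reg == -1 || next_use[reg] != kMax) reg = static_cast<int>(i);
            continue;
          }
          return static_cast<int>(i);  // Fully free and acceptable: done.
        }
        // Otherwise prefer the register whose next use is furthest away.
        if (reg == -1 || next_use[i] > next_use[reg]) reg = static_cast<int>(i);
      }
      return reg;
    }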
@@ -971,7 +1009,7 @@
|| (first_use >= next_use[GetHighForLowRegister(reg)]);
} else {
DCHECK(!current->IsHighInterval());
- reg = FindAvailableRegister(next_use);
+ reg = FindAvailableRegister(next_use, current);
should_spill = (first_use >= next_use[reg]);
}
@@ -997,7 +1035,7 @@
// If the first use of that instruction is after the last use of the found
// register, we split this interval just before its first register use.
AllocateSpillSlotFor(current);
- LiveInterval* split = Split(current, first_register_use - 1);
+ LiveInterval* split = SplitBetween(current, current->GetStart(), first_register_use - 1);
if (current == split) {
DumpInterval(std::cerr, current);
DumpAllIntervals(std::cerr);
@@ -1100,6 +1138,66 @@
}
}
+LiveInterval* RegisterAllocator::SplitBetween(LiveInterval* interval, size_t from, size_t to) {
+ HBasicBlock* block_from = liveness_.GetBlockFromPosition(from / 2);
+ HBasicBlock* block_to = liveness_.GetBlockFromPosition(to / 2);
+ DCHECK(block_from != nullptr);
+ DCHECK(block_to != nullptr);
+
+ // Both locations are in the same block. We split at the given location.
+ if (block_from == block_to) {
+ return Split(interval, to);
+ }
+
+ /*
+ * Non-linear control flow will force moves at every branch instruction to the new location.
+ * To avoid having all branches doing the moves, we find the next non-linear position and
+ * split the interval at this position. Take the following example (block number is the linear
+ * order position):
+ *
+ * B1
+ * / \
+ * B2 B3
+ * \ /
+ * B4
+ *
+ * B2 needs to split an interval, whose next use is in B4. If we were to split at the
+ * beginning of B4, B3 would need to do a move between B3 and B4 to ensure the interval
+   * is now in the correct location. It makes performance worse if the interval is spilled
+ * and both B2 and B3 need to reload it before entering B4.
+ *
+ * By splitting at B3, we give a chance to the register allocator to allocate the
+ * interval to the same register as in B1, and therefore avoid doing any
+ * moves in B3.
+ */
+ if (block_from->GetDominator() != nullptr) {
+ const GrowableArray<HBasicBlock*>& dominated = block_from->GetDominator()->GetDominatedBlocks();
+ for (size_t i = 0; i < dominated.Size(); ++i) {
+ size_t position = dominated.Get(i)->GetLifetimeStart();
+ if ((position > from) && (block_to->GetLifetimeStart() > position)) {
+ // Even if we found a better block, we continue iterating in case
+ // a dominated block is closer.
+ // Note that dominated blocks are not sorted in liveness order.
+ block_to = dominated.Get(i);
+ DCHECK_NE(block_to, block_from);
+ }
+ }
+ }
+
+ // If `to` is in a loop, find the outermost loop header which does not contain `from`.
+ for (HLoopInformationOutwardIterator it(*block_to); !it.Done(); it.Advance()) {
+ HBasicBlock* header = it.Current()->GetHeader();
+ if (block_from->GetLifetimeStart() >= header->GetLifetimeStart()) {
+ break;
+ }
+ block_to = header;
+ }
+
+  // Split at the start of the found block, to piggyback on existing moves
+  // due to resolution of non-linear control flow (see `ConnectSplitSiblings`).
+ return Split(interval, block_to->GetLifetimeStart());
+}
+
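A worked trace of the dominator scan above, using the diamond from the comment with illustrative lifetime starts B1=2, B2=10, B3=20, B4=30:

    // SplitBetween(interval, from=12, to=31): block_from = B2, block_to = B4.
    // B2's dominator is B1; among B1's dominated blocks, B3 starts at 20 and
    // 12 < 20 < 30 holds, so block_to becomes B3 and we split at position 20.
    // Only the B2->B4 edge then needs a resolution move, and B3 keeps a
    // chance of reusing the register assigned in B1.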
LiveInterval* RegisterAllocator::Split(LiveInterval* interval, size_t position) {
DCHECK_GE(position, interval->GetStart());
DCHECK(!interval->IsDeadAt(position));
@@ -1430,6 +1528,7 @@
: Location::StackSlot(interval->GetParent()->GetSpillSlot()));
}
UsePosition* use = current->GetFirstUse();
+ UsePosition* env_use = current->GetFirstEnvironmentUse();
// Walk over all siblings, updating locations of use positions, and
// connecting them when they are adjacent.
@@ -1442,15 +1541,14 @@
LiveRange* range = current->GetFirstRange();
while (range != nullptr) {
while (use != nullptr && use->GetPosition() < range->GetStart()) {
- DCHECK(use->GetIsEnvironment());
+ DCHECK(use->IsSynthesized());
use = use->GetNext();
}
while (use != nullptr && use->GetPosition() <= range->GetEnd()) {
+ DCHECK(!use->GetIsEnvironment());
DCHECK(current->CoversSlow(use->GetPosition()) || (use->GetPosition() == range->GetEnd()));
- LocationSummary* locations = use->GetUser()->GetLocations();
- if (use->GetIsEnvironment()) {
- locations->SetEnvironmentAt(use->GetInputIndex(), source);
- } else {
+ if (!use->IsSynthesized()) {
+ LocationSummary* locations = use->GetUser()->GetLocations();
Location expected_location = locations->InAt(use->GetInputIndex());
// The expected (actual) location may be invalid in case the input is unused. Currently
// this only happens for intrinsics.
@@ -1467,6 +1565,20 @@
}
use = use->GetNext();
}
+
+ // Walk over the environment uses, and update their locations.
+ while (env_use != nullptr && env_use->GetPosition() < range->GetStart()) {
+ env_use = env_use->GetNext();
+ }
+
+ while (env_use != nullptr && env_use->GetPosition() <= range->GetEnd()) {
+ DCHECK(current->CoversSlow(env_use->GetPosition())
+ || (env_use->GetPosition() == range->GetEnd()));
+ HEnvironment* environment = env_use->GetUser()->GetEnvironment();
+ environment->SetLocationAt(env_use->GetInputIndex(), source);
+ env_use = env_use->GetNext();
+ }
+
range = range->GetNext();
}
@@ -1529,10 +1641,9 @@
} while (current != nullptr);
if (kIsDebugBuild) {
- // Following uses can only be environment uses. The location for
- // these environments will be none.
+ // Following uses can only be synthesized uses.
while (use != nullptr) {
- DCHECK(use->GetIsEnvironment());
+ DCHECK(use->IsSynthesized());
use = use->GetNext();
}
}
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index 717be75..6d5bfc3 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -86,8 +86,12 @@
// Add `interval` in the given sorted list.
static void AddSorted(GrowableArray<LiveInterval*>* array, LiveInterval* interval);
- // Split `interval` at the position `at`. The new interval starts at `at`.
- LiveInterval* Split(LiveInterval* interval, size_t at);
+ // Split `interval` at the position `position`. The new interval starts at `position`.
+ LiveInterval* Split(LiveInterval* interval, size_t position);
+
+ // Split `interval` at a position between `from` and `to`. The method will try
+ // to find an optimal split position.
+ LiveInterval* SplitBetween(LiveInterval* interval, size_t from, size_t to);
// Returns whether `reg` is blocked by the code generator.
bool IsBlocked(int reg) const;
@@ -136,7 +140,8 @@
void DumpInterval(std::ostream& stream, LiveInterval* interval) const;
void DumpAllIntervals(std::ostream& stream) const;
int FindAvailableRegisterPair(size_t* next_use, size_t starting_at) const;
- int FindAvailableRegister(size_t* next_use) const;
+ int FindAvailableRegister(size_t* next_use, LiveInterval* current) const;
+ bool IsCallerSaveRegister(int reg) const;
// Try splitting an active non-pair or unaligned pair interval at the given `position`.
// Returns whether it was successful at finding such an interval.
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 182cd0e..b72ffb8 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -38,7 +38,7 @@
static bool Check(const uint16_t* data) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = new (&allocator) HGraph(&allocator);
+ HGraph* graph = CreateGraph(&allocator);
HGraphBuilder builder(graph);
const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
builder.BuildGraph(*item);
@@ -60,7 +60,7 @@
TEST(RegisterAllocatorTest, ValidateIntervals) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = new (&allocator) HGraph(&allocator);
+ HGraph* graph = CreateGraph(&allocator);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
@@ -255,7 +255,7 @@
}
static HGraph* BuildSSAGraph(const uint16_t* data, ArenaAllocator* allocator) {
- HGraph* graph = new (allocator) HGraph(allocator);
+ HGraph* graph = CreateGraph(allocator);
HGraphBuilder builder(graph);
const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
builder.BuildGraph(*item);
@@ -463,7 +463,7 @@
HPhi** phi,
HInstruction** input1,
HInstruction** input2) {
- HGraph* graph = new (allocator) HGraph(allocator);
+ HGraph* graph = CreateGraph(allocator);
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
@@ -593,7 +593,7 @@
static HGraph* BuildFieldReturn(ArenaAllocator* allocator,
HInstruction** field,
HInstruction** ret) {
- HGraph* graph = new (allocator) HGraph(allocator);
+ HGraph* graph = CreateGraph(allocator);
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
@@ -661,7 +661,7 @@
static HGraph* BuildTwoSubs(ArenaAllocator* allocator,
HInstruction** first_sub,
HInstruction** second_sub) {
- HGraph* graph = new (allocator) HGraph(allocator);
+ HGraph* graph = CreateGraph(allocator);
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
@@ -731,7 +731,7 @@
static HGraph* BuildDiv(ArenaAllocator* allocator,
HInstruction** div) {
- HGraph* graph = new (allocator) HGraph(allocator);
+ HGraph* graph = CreateGraph(allocator);
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
@@ -783,7 +783,7 @@
// Create a synthesized graph to please the register_allocator and
// ssa_liveness_analysis code.
ArenaAllocator allocator(&pool);
- HGraph* graph = new (&allocator) HGraph(&allocator);
+ HGraph* graph = CreateGraph(&allocator);
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
@@ -854,6 +854,10 @@
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
SsaLivenessAnalysis liveness(graph, &codegen);
+ // Populate the instructions in the liveness object, to please the register allocator.
+ for (size_t i = 0; i < 32; ++i) {
+ liveness.instructions_from_lifetime_position_.Add(user);
+ }
RegisterAllocator register_allocator(&allocator, &codegen, liveness);
register_allocator.unhandled_core_intervals_.Add(fourth);
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 7a252af..c51d248 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -332,7 +332,7 @@
}
HInstruction* SsaBuilder::ValueOfLocal(HBasicBlock* block, size_t local) {
- return GetLocalsFor(block)->GetInstructionAt(local);
+ return GetLocalsFor(block)->Get(local);
}
void SsaBuilder::VisitBasicBlock(HBasicBlock* block) {
@@ -349,7 +349,7 @@
HPhi* phi = new (GetGraph()->GetArena()) HPhi(
GetGraph()->GetArena(), local, 0, Primitive::kPrimVoid);
block->AddPhi(phi);
- current_locals_->SetRawEnvAt(local, phi);
+ current_locals_->Put(local, phi);
}
}
// Save the loop header so that the last phase of the analysis knows which
@@ -389,7 +389,7 @@
block->AddPhi(phi);
value = phi;
}
- current_locals_->SetRawEnvAt(local, value);
+ current_locals_->Put(local, value);
}
}
@@ -417,6 +417,7 @@
ArenaAllocator* allocator = graph->GetArena();
result = new (allocator) HFloatConstant(bit_cast<float, int32_t>(constant->GetValue()));
constant->GetBlock()->InsertInstructionBefore(result, constant->GetNext());
+ graph->CacheFloatConstant(result);
} else {
// If there is already a constant with the expected type, we know it is
// the floating point equivalent of this constant.
@@ -439,6 +440,7 @@
ArenaAllocator* allocator = graph->GetArena();
result = new (allocator) HDoubleConstant(bit_cast<double, int64_t>(constant->GetValue()));
constant->GetBlock()->InsertInstructionBefore(result, constant->GetNext());
+ graph->CacheDoubleConstant(result);
} else {
// If there is already a constant with the expected type, we know it is
// the floating point equivalent of this constant.
@@ -502,7 +504,7 @@
// typed and the value in a dex register will not be used for both floating point and
// non-floating point operations. So the only reason an instruction would want a floating
// point equivalent is for an unused phi that will be removed by the dead phi elimination phase.
- DCHECK(user->IsPhi());
+ DCHECK(user->IsPhi()) << "is actually " << user->DebugName() << " (" << user->GetId() << ")";
return value;
}
}
@@ -518,7 +520,7 @@
}
void SsaBuilder::VisitLoadLocal(HLoadLocal* load) {
- HInstruction* value = current_locals_->GetInstructionAt(load->GetLocal()->GetRegNumber());
+ HInstruction* value = current_locals_->Get(load->GetLocal()->GetRegNumber());
// If the operation requests a specific type, we make sure its input is of that type.
if (load->GetType() != value->GetType()) {
if (load->GetType() == Primitive::kPrimFloat || load->GetType() == Primitive::kPrimDouble) {
@@ -532,7 +534,7 @@
}
void SsaBuilder::VisitStoreLocal(HStoreLocal* store) {
- current_locals_->SetRawEnvAt(store->GetLocal()->GetRegNumber(), store->InputAt(1));
+ current_locals_->Put(store->GetLocal()->GetRegNumber(), store->InputAt(1));
store->GetBlock()->RemoveInstruction(store);
}
@@ -541,8 +543,13 @@
return;
}
HEnvironment* environment = new (GetGraph()->GetArena()) HEnvironment(
- GetGraph()->GetArena(), current_locals_->Size());
- environment->CopyFrom(current_locals_);
+ GetGraph()->GetArena(),
+ current_locals_->Size(),
+ GetGraph()->GetDexFile(),
+ GetGraph()->GetMethodIdx(),
+ instruction->GetDexPc(),
+ GetGraph()->GetInvokeType());
+ environment->CopyFrom(*current_locals_);
instruction->SetRawEnvironment(environment);
}
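An HEnvironment now carries enough calling context to describe inlined frames. A commented construction sketch mirroring the call above (variable names such as `arena`, `number_of_vregs`, `dex_pc`, and `locals` are stand-ins):

    HEnvironment* env = new (arena) HEnvironment(
        arena,
        number_of_vregs,          // number of slots in the environment
        graph->GetDexFile(),      // dex file of the enclosing method
        graph->GetMethodIdx(),    // method index within that dex file
        dex_pc,                   // dex pc of the instruction owning it
        graph->GetInvokeType());  // how the enclosing method is invoked
    env->CopyFrom(locals);        // snapshot the current vreg values
    instruction->SetRawEnvironment(env);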
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index 265e95b..1c83c4b 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -58,14 +58,15 @@
void BuildSsa();
- HEnvironment* GetLocalsFor(HBasicBlock* block) {
- HEnvironment* env = locals_for_.Get(block->GetBlockId());
- if (env == nullptr) {
- env = new (GetGraph()->GetArena()) HEnvironment(
+ GrowableArray<HInstruction*>* GetLocalsFor(HBasicBlock* block) {
+ GrowableArray<HInstruction*>* locals = locals_for_.Get(block->GetBlockId());
+ if (locals == nullptr) {
+ locals = new (GetGraph()->GetArena()) GrowableArray<HInstruction*>(
GetGraph()->GetArena(), GetGraph()->GetNumberOfVRegs());
- locals_for_.Put(block->GetBlockId(), env);
+ locals->SetSize(GetGraph()->GetNumberOfVRegs());
+ locals_for_.Put(block->GetBlockId(), locals);
}
- return env;
+ return locals;
}
HInstruction* ValueOfLocal(HBasicBlock* block, size_t local);
@@ -93,14 +94,14 @@
static HPhi* GetFloatDoubleOrReferenceEquivalentOfPhi(HPhi* phi, Primitive::Type type);
// Locals for the current block being visited.
- HEnvironment* current_locals_;
+ GrowableArray<HInstruction*>* current_locals_;
// Keep track of loop headers found. The last phase of the analysis iterates
// over these blocks to set the inputs of their phis.
GrowableArray<HBasicBlock*> loop_headers_;
// HEnvironment for each block.
- GrowableArray<HEnvironment*> locals_for_;
+ GrowableArray<GrowableArray<HInstruction*>*> locals_for_;
DISALLOW_COPY_AND_ASSIGN(SsaBuilder);
};
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index ea0e7c3..250eb04 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -75,9 +75,7 @@
HBasicBlock* block = it.Current();
size_t number_of_forward_predecessors = block->GetPredecessors().Size();
if (block->IsLoopHeader()) {
- // We rely on having simplified the CFG.
- DCHECK_EQ(1u, block->GetLoopInformation()->NumberOfBackEdges());
- number_of_forward_predecessors--;
+ number_of_forward_predecessors -= block->GetLoopInformation()->NumberOfBackEdges();
}
forward_predecessors.Put(block->GetBlockId(), number_of_forward_predecessors);
}
@@ -220,10 +218,11 @@
// Process the environment first, because we know their uses come after
// or at the same liveness position of inputs.
- if (current->HasEnvironment()) {
+ for (HEnvironment* environment = current->GetEnvironment();
+ environment != nullptr;
+ environment = environment->GetParent()) {
// Handle environment uses. See statements (b) and (c) of the
// SsaLivenessAnalysis.
- HEnvironment* environment = current->GetEnvironment();
for (size_t i = 0, e = environment->Size(); i < e; ++i) {
HInstruction* instruction = environment->GetInstructionAt(i);
bool should_be_live = ShouldBeLiveForEnvironment(instruction);
@@ -233,7 +232,7 @@
}
if (instruction != nullptr) {
instruction->GetLiveInterval()->AddUse(
- current, i, /* is_environment */ true, should_be_live);
+ current, environment, i, should_be_live);
}
}
}
@@ -245,7 +244,7 @@
// to be materialized.
if (input->HasSsaIndex()) {
live_in->SetBit(input->GetSsaIndex());
- input->GetLiveInterval()->AddUse(current, i, /* is_environment */ false);
+ input->GetLiveInterval()->AddUse(current, /* environment */ nullptr, i);
}
}
}
@@ -264,13 +263,12 @@
}
if (block->IsLoopHeader()) {
- HBasicBlock* back_edge = block->GetLoopInformation()->GetBackEdges().Get(0);
+ size_t last_position = block->GetLoopInformation()->GetLifetimeEnd();
// For all live_in instructions at the loop header, we need to create a range
// that covers the full loop.
for (uint32_t idx : live_in->Indexes()) {
HInstruction* current = instructions_from_ssa_index_.Get(idx);
- current->GetLiveInterval()->AddLoopRange(block->GetLifetimeStart(),
- back_edge->GetLifetimeEnd());
+ current->GetLiveInterval()->AddLoopRange(block->GetLifetimeStart(), last_position);
}
}
}
@@ -322,7 +320,8 @@
return location.IsPair() ? location.low() : location.reg();
}
-int LiveInterval::FindFirstRegisterHint(size_t* free_until) const {
+int LiveInterval::FindFirstRegisterHint(size_t* free_until,
+ const SsaLivenessAnalysis& liveness) const {
DCHECK(!IsHighInterval());
if (IsTemp()) return kNoRegister;
@@ -336,12 +335,32 @@
}
}
+ if (IsSplit() && liveness.IsAtBlockBoundary(GetStart() / 2)) {
+ // If the start of this interval is at a block boundary, we look at the
+ // location of the interval in blocks preceding the block this interval
+ // starts at. If one location is a register we return it as a hint. This
+ // will avoid a move between the two blocks.
+ HBasicBlock* block = liveness.GetBlockFromPosition(GetStart() / 2);
+ for (size_t i = 0; i < block->GetPredecessors().Size(); ++i) {
+ size_t position = block->GetPredecessors().Get(i)->GetLifetimeEnd() - 1;
+ // We know positions above GetStart() do not have a location yet.
+ if (position < GetStart()) {
+ LiveInterval* existing = GetParent()->GetSiblingAt(position);
+ if (existing != nullptr
+ && existing->HasRegister()
+ && (free_until[existing->GetRegister()] > GetStart())) {
+ return existing->GetRegister();
+ }
+ }
+ }
+ }
+
UsePosition* use = first_use_;
size_t start = GetStart();
size_t end = GetEnd();
while (use != nullptr && use->GetPosition() <= end) {
size_t use_position = use->GetPosition();
- if (use_position >= start && !use->GetIsEnvironment()) {
+ if (use_position >= start && !use->IsSynthesized()) {
HInstruction* user = use->GetUser();
size_t input_index = use->GetInputIndex();
if (user->IsPhi()) {
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 03f5545..4b19c5b 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -23,6 +23,7 @@
namespace art {
class CodeGenerator;
+class SsaLivenessAnalysis;
static constexpr int kNoRegister = -1;
@@ -75,7 +76,7 @@
}
void Dump(std::ostream& stream) const {
- stream << "[" << start_ << ", " << end_ << ")";
+ stream << "[" << start_ << "," << end_ << ")";
}
LiveRange* Dup(ArenaAllocator* allocator) const {
@@ -103,21 +104,24 @@
class UsePosition : public ArenaObject<kArenaAllocMisc> {
public:
UsePosition(HInstruction* user,
+ HEnvironment* environment,
size_t input_index,
- bool is_environment,
size_t position,
UsePosition* next)
: user_(user),
+ environment_(environment),
input_index_(input_index),
- is_environment_(is_environment),
position_(position),
next_(next) {
- DCHECK(user->IsPhi()
+ DCHECK((user == nullptr)
+ || user->IsPhi()
|| (GetPosition() == user->GetLifetimePosition() + 1)
|| (GetPosition() == user->GetLifetimePosition()));
DCHECK(next_ == nullptr || next->GetPosition() >= GetPosition());
}
+ static constexpr size_t kNoInput = -1;
+
size_t GetPosition() const { return position_; }
UsePosition* GetNext() const { return next_; }
@@ -125,27 +129,38 @@
HInstruction* GetUser() const { return user_; }
- bool GetIsEnvironment() const { return is_environment_; }
+ bool GetIsEnvironment() const { return environment_ != nullptr; }
+ bool IsSynthesized() const { return user_ == nullptr; }
size_t GetInputIndex() const { return input_index_; }
void Dump(std::ostream& stream) const {
stream << position_;
- if (is_environment_) {
- stream << " (env)";
- }
+ }
+
+ HLoopInformation* GetLoopInformation() const {
+ return user_->GetBlock()->GetLoopInformation();
}
UsePosition* Dup(ArenaAllocator* allocator) const {
return new (allocator) UsePosition(
- user_, input_index_, is_environment_, position_,
+ user_, environment_, input_index_, position_,
next_ == nullptr ? nullptr : next_->Dup(allocator));
}
+ bool RequiresRegister() const {
+ if (GetIsEnvironment()) return false;
+ if (IsSynthesized()) return false;
+ Location location = GetUser()->GetLocations()->InAt(GetInputIndex());
+ return location.IsUnallocated()
+ && (location.GetPolicy() == Location::kRequiresRegister
+ || location.GetPolicy() == Location::kRequiresFpuRegister);
+ }
+
private:
HInstruction* const user_;
+ HEnvironment* const environment_;
const size_t input_index_;
- const bool is_environment_;
const size_t position_;
UsePosition* next_;
@@ -219,17 +234,19 @@
void AddTempUse(HInstruction* instruction, size_t temp_index) {
DCHECK(IsTemp());
DCHECK(first_use_ == nullptr) << "A temporary can only have one user";
+    DCHECK(first_env_use_ == nullptr) << "A temporary cannot have an environment user";
size_t position = instruction->GetLifetimePosition();
first_use_ = new (allocator_) UsePosition(
- instruction, temp_index, /* is_environment */ false, position, first_use_);
+ instruction, /* environment */ nullptr, temp_index, position, first_use_);
AddRange(position, position + 1);
}
void AddUse(HInstruction* instruction,
+ HEnvironment* environment,
size_t input_index,
- bool is_environment,
bool keep_alive = false) {
// Set the use within the instruction.
+ bool is_environment = (environment != nullptr);
size_t position = instruction->GetLifetimePosition() + 1;
LocationSummary* locations = instruction->GetLocations();
if (!is_environment) {
@@ -239,9 +256,15 @@
// location of the input just before that instruction (and not potential moves due
// to splitting).
position = instruction->GetLifetimePosition();
+ } else if (!locations->InAt(input_index).IsValid()) {
+ return;
}
}
+ if (!is_environment && instruction->IsInLoop()) {
+ AddBackEdgeUses(*instruction->GetBlock());
+ }
+
DCHECK(position == instruction->GetLifetimePosition()
|| position == instruction->GetLifetimePosition() + 1);
@@ -257,7 +280,7 @@
}
DCHECK(first_use_->GetPosition() + 1 == position);
UsePosition* new_use = new (allocator_) UsePosition(
- instruction, input_index, is_environment, position, cursor->GetNext());
+ instruction, environment, input_index, position, cursor->GetNext());
cursor->SetNext(new_use);
if (first_range_->GetEnd() == first_use_->GetPosition()) {
first_range_->end_ = position;
@@ -265,8 +288,13 @@
return;
}
- first_use_ = new (allocator_) UsePosition(
- instruction, input_index, is_environment, position, first_use_);
+ if (is_environment) {
+ first_env_use_ = new (allocator_) UsePosition(
+ instruction, environment, input_index, position, first_env_use_);
+ } else {
+ first_use_ = new (allocator_) UsePosition(
+ instruction, environment, input_index, position, first_use_);
+ }
if (is_environment && !keep_alive) {
// If this environment use does not keep the instruction live, it does not
@@ -300,8 +328,11 @@
void AddPhiUse(HInstruction* instruction, size_t input_index, HBasicBlock* block) {
DCHECK(instruction->IsPhi());
+ if (block->IsInLoop()) {
+ AddBackEdgeUses(*block);
+ }
first_use_ = new (allocator_) UsePosition(
- instruction, input_index, false, block->GetLifetimeEnd(), first_use_);
+ instruction, /* environment */ nullptr, input_index, block->GetLifetimeEnd(), first_use_);
}
void AddRange(size_t start, size_t end) {
@@ -333,7 +364,8 @@
}
if (after_loop == nullptr) {
// Uses are only in the loop.
- first_range_ = last_range_ = range_search_start_ = new (allocator_) LiveRange(start, end, nullptr);
+ first_range_ = last_range_ = range_search_start_ =
+ new (allocator_) LiveRange(start, end, nullptr);
} else if (after_loop->GetStart() <= end) {
first_range_ = range_search_start_ = after_loop;
// There are uses after the loop.
@@ -449,38 +481,17 @@
if (is_temp_) {
return position == GetStart() ? position : kNoLifetime;
}
- if (position == GetStart() && IsParent()) {
- LocationSummary* locations = defined_by_->GetLocations();
- Location location = locations->Out();
- // This interval is the first interval of the instruction. If the output
- // of the instruction requires a register, we return the position of that instruction
- // as the first register use.
- if (location.IsUnallocated()) {
- if ((location.GetPolicy() == Location::kRequiresRegister)
- || (location.GetPolicy() == Location::kSameAsFirstInput
- && (locations->InAt(0).IsRegister()
- || locations->InAt(0).IsRegisterPair()
- || locations->InAt(0).GetPolicy() == Location::kRequiresRegister))) {
- return position;
- } else if ((location.GetPolicy() == Location::kRequiresFpuRegister)
- || (location.GetPolicy() == Location::kSameAsFirstInput
- && locations->InAt(0).GetPolicy() == Location::kRequiresFpuRegister)) {
- return position;
- }
- } else if (location.IsRegister() || location.IsRegisterPair()) {
- return position;
- }
+
+ if (IsDefiningPosition(position) && DefinitionRequiresRegister()) {
+ return position;
}
UsePosition* use = first_use_;
size_t end = GetEnd();
while (use != nullptr && use->GetPosition() <= end) {
size_t use_position = use->GetPosition();
- if (use_position > position && !use->GetIsEnvironment()) {
- Location location = use->GetUser()->GetLocations()->InAt(use->GetInputIndex());
- if (location.IsUnallocated()
- && (location.GetPolicy() == Location::kRequiresRegister
- || location.GetPolicy() == Location::kRequiresFpuRegister)) {
+ if (use_position > position) {
+ if (use->RequiresRegister()) {
return use_position;
}
}
@@ -498,21 +509,17 @@
return position == GetStart() ? position : kNoLifetime;
}
- if (position == GetStart() && IsParent()) {
- if (defined_by_->GetLocations()->Out().IsValid()) {
- return position;
- }
+ if (IsDefiningPosition(position)) {
+ DCHECK(defined_by_->GetLocations()->Out().IsValid());
+ return position;
}
UsePosition* use = first_use_;
size_t end = GetEnd();
while (use != nullptr && use->GetPosition() <= end) {
- if (!use->GetIsEnvironment()) {
- Location location = use->GetUser()->GetLocations()->InAt(use->GetInputIndex());
- size_t use_position = use->GetPosition();
- if (use_position > position && location.IsValid()) {
- return use_position;
- }
+ size_t use_position = use->GetPosition();
+ if (use_position > position) {
+ return use_position;
}
use = use->GetNext();
}
@@ -523,6 +530,10 @@
return first_use_;
}
+ UsePosition* GetFirstEnvironmentUse() const {
+ return first_env_use_;
+ }
+
Primitive::Type GetType() const {
return type_;
}
@@ -531,6 +542,15 @@
return defined_by_;
}
+ bool HasWillCallSafepoint() const {
+ for (SafepointPosition* safepoint = first_safepoint_;
+ safepoint != nullptr;
+ safepoint = safepoint->GetNext()) {
+ if (safepoint->GetLocations()->WillCall()) return true;
+ }
+ return false;
+ }
+
SafepointPosition* FindSafepointJustBefore(size_t position) const {
for (SafepointPosition* safepoint = first_safepoint_, *previous = nullptr;
safepoint != nullptr;
@@ -576,6 +596,7 @@
new_interval->parent_ = parent_;
new_interval->first_use_ = first_use_;
+ new_interval->first_env_use_ = first_env_use_;
LiveRange* current = first_range_;
LiveRange* previous = nullptr;
// Iterate over the ranges, and either find a range that covers this position, or
@@ -596,7 +617,7 @@
previous->next_ = nullptr;
new_interval->first_range_ = current;
if (range_search_start_ != nullptr && range_search_start_->GetEnd() >= current->GetEnd()) {
- // Search start point is inside `new_interval`. Change it to nullptr
+ // Search start point is inside `new_interval`. Change it to null
// (i.e. the end of the interval) in the original interval.
range_search_start_ = nullptr;
}
@@ -654,10 +675,18 @@
stream << " ";
} while ((use = use->GetNext()) != nullptr);
}
+ stream << "}, { ";
+ use = first_env_use_;
+ if (use != nullptr) {
+ do {
+ use->Dump(stream);
+ stream << " ";
+ } while ((use = use->GetNext()) != nullptr);
+ }
stream << "}";
stream << " is_fixed: " << is_fixed_ << ", is_split: " << IsSplit();
- stream << " is_high: " << IsHighInterval();
stream << " is_low: " << IsLowInterval();
+ stream << " is_high: " << IsHighInterval();
}
LiveInterval* GetNextSibling() const { return next_sibling_; }
@@ -672,7 +701,7 @@
// Returns the first register hint that is at least free before
// the value contained in `free_until`. If none is found, returns
// `kNoRegister`.
- int FindFirstRegisterHint(size_t* free_until) const;
+ int FindFirstRegisterHint(size_t* free_until, const SsaLivenessAnalysis& liveness) const;
// If there is enough at the definition site to find a register (for example
// it uses the same input as the first input), returns the register as a hint.
@@ -753,6 +782,10 @@
if (first_use_ != nullptr) {
high_or_low_interval_->first_use_ = first_use_->Dup(allocator_);
}
+
+ if (first_env_use_ != nullptr) {
+ high_or_low_interval_->first_env_use_ = first_env_use_->Dup(allocator_);
+ }
}
// Returns whether an interval, when it is non-split, is using
@@ -850,6 +883,7 @@
first_safepoint_(nullptr),
last_safepoint_(nullptr),
first_use_(nullptr),
+ first_env_use_(nullptr),
type_(type),
next_sibling_(nullptr),
parent_(this),
@@ -863,7 +897,7 @@
defined_by_(defined_by) {}
// Searches for a LiveRange that either covers the given position or is the
- // first next LiveRange. Returns nullptr if no such LiveRange exists. Ranges
+ // first next LiveRange. Returns null if no such LiveRange exists. Ranges
// known to end before `position` can be skipped with `search_start`.
LiveRange* FindRangeAtOrAfter(size_t position, LiveRange* search_start) const {
if (kIsDebugBuild) {
@@ -887,6 +921,107 @@
return range;
}
+ bool DefinitionRequiresRegister() const {
+ DCHECK(IsParent());
+ LocationSummary* locations = defined_by_->GetLocations();
+ Location location = locations->Out();
+ // This interval is the first interval of the instruction. If the output
+ // of the instruction requires a register, we return the position of that instruction
+ // as the first register use.
+ if (location.IsUnallocated()) {
+ if ((location.GetPolicy() == Location::kRequiresRegister)
+ || (location.GetPolicy() == Location::kSameAsFirstInput
+ && (locations->InAt(0).IsRegister()
+ || locations->InAt(0).IsRegisterPair()
+ || locations->InAt(0).GetPolicy() == Location::kRequiresRegister))) {
+ return true;
+ } else if ((location.GetPolicy() == Location::kRequiresFpuRegister)
+ || (location.GetPolicy() == Location::kSameAsFirstInput
+ && (locations->InAt(0).IsFpuRegister()
+ || locations->InAt(0).IsFpuRegisterPair()
+ || locations->InAt(0).GetPolicy() == Location::kRequiresFpuRegister))) {
+ return true;
+ }
+ } else if (location.IsRegister() || location.IsRegisterPair()) {
+ return true;
+ }
+ return false;
+ }
+
+ bool IsDefiningPosition(size_t position) const {
+ return IsParent() && (position == GetStart());
+ }
+
+ bool HasSynthesizeUseAt(size_t position) const {
+ UsePosition* use = first_use_;
+ while (use != nullptr) {
+ size_t use_position = use->GetPosition();
+ if ((use_position == position) && use->IsSynthesized()) {
+ return true;
+ }
+ if (use_position > position) break;
+ use = use->GetNext();
+ }
+ return false;
+ }
+
+ void AddBackEdgeUses(const HBasicBlock& block_at_use) {
+ DCHECK(block_at_use.IsInLoop());
+    // Add synthesized uses at the back edges of loops to help the register allocator.
+    // Note that this method is called in decreasing liveness order, to facilitate adding
+    // uses at the head of the `first_use_` linked list. Because below
+    // we iterate from inner-most to outer-most, which is in increasing liveness order,
+    // we need to take extra care with how the `first_use_` linked list is updated.
+ UsePosition* first_in_new_list = nullptr;
+ UsePosition* last_in_new_list = nullptr;
+ for (HLoopInformationOutwardIterator it(block_at_use);
+ !it.Done();
+ it.Advance()) {
+ HLoopInformation* current = it.Current();
+ if (GetDefinedBy()->GetLifetimePosition() >= current->GetHeader()->GetLifetimeStart()) {
+ // This interval is defined in the loop. We can stop going outward.
+ break;
+ }
+
+      // We're only adding a synthesized use at the last back edge. Adding synthesized uses on
+      // all back edges is not necessary: anything used in the loop will have its use at the
+      // last back edge. If we want one branch in a loop to get better register allocation
+      // than another, it is the linear order we should change.
+ size_t back_edge_use_position = current->GetLifetimeEnd();
+ if ((first_use_ != nullptr) && (first_use_->GetPosition() <= back_edge_use_position)) {
+        // A use was already seen in this loop, so the previous call to `AddUse`
+        // already inserted the back edge use. We can stop going outward.
+ DCHECK(HasSynthesizeUseAt(back_edge_use_position));
+ break;
+ }
+
+ DCHECK(last_in_new_list == nullptr
+ || back_edge_use_position > last_in_new_list->GetPosition());
+
+ UsePosition* new_use = new (allocator_) UsePosition(
+ /* user */ nullptr,
+ /* environment */ nullptr,
+ UsePosition::kNoInput,
+ back_edge_use_position,
+ /* next */ nullptr);
+
+ if (last_in_new_list != nullptr) {
+ // Going outward. The latest created use needs to point to the new use.
+ last_in_new_list->SetNext(new_use);
+ } else {
+ // This is the inner-most loop.
+ DCHECK_EQ(current, block_at_use.GetLoopInformation());
+ first_in_new_list = new_use;
+ }
+ last_in_new_list = new_use;
+ }
+ // Link the newly created linked list with `first_use_`.
+ if (last_in_new_list != nullptr) {
+ last_in_new_list->SetNext(first_use_);
+ first_use_ = first_in_new_list;
+ }
+ }
+
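A concrete splice, assuming an instruction at lifetime position 30 inside two nested loops whose back edges end at positions 40 (inner) and 60 (outer), with an existing later use at position 70:

    //   before:  first_use_ -> use@70 -> ...
    //   built:   synth@40 -> synth@60              (inner-to-outer)
    //   after:   first_use_ -> synth@40 -> synth@60 -> use@70 -> ...
    //
    // The enclosing AddUse then prepends the real use (at ~position 30),
    // keeping the list sorted by increasing position.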
ArenaAllocator* const allocator_;
// Ranges of this interval. We need a quick access to the last range to test
@@ -904,6 +1039,7 @@
// Uses of this interval. Note that this linked list is shared amongst siblings.
UsePosition* first_use_;
+ UsePosition* first_env_use_;
// The instruction type this interval corresponds to.
const Primitive::Type type_;
@@ -997,6 +1133,19 @@
return instructions_from_lifetime_position_.Get(index);
}
+ HBasicBlock* GetBlockFromPosition(size_t index) const {
+ HInstruction* instruction = GetInstructionFromPosition(index);
+ if (instruction == nullptr) {
+ // If we are at a block boundary, get the block following.
+      // If we are at a block boundary, get the following block.
+ }
+ return instruction->GetBlock();
+ }
+
+ bool IsAtBlockBoundary(size_t index) const {
+ return GetInstructionFromPosition(index) == nullptr;
+ }
+
HInstruction* GetTempUser(LiveInterval* temp) const {
// A temporary shares the same lifetime start as the instruction that requires it.
DCHECK(temp->IsTemp());
@@ -1067,6 +1216,8 @@
GrowableArray<HInstruction*> instructions_from_lifetime_position_;
size_t number_of_ssa_values_;
+ ART_FRIEND_TEST(RegisterAllocatorTest, SpillInactive);
+
DISALLOW_COPY_AND_ASSIGN(SsaLivenessAnalysis);
};
diff --git a/compiler/optimizing/ssa_test.cc b/compiler/optimizing/ssa_test.cc
index 00c241b..fb3e7d7 100644
--- a/compiler/optimizing/ssa_test.cc
+++ b/compiler/optimizing/ssa_test.cc
@@ -78,7 +78,7 @@
static void TestCode(const uint16_t* data, const char* expected) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = new (&allocator) HGraph(&allocator);
+ HGraph* graph = CreateGraph(&allocator);
HGraphBuilder builder(graph);
const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
bool graph_built = builder.BuildGraph(*item);
@@ -373,30 +373,26 @@
const char* expected =
"BasicBlock 0, succ: 1\n"
" 0: IntConstant 0 [5]\n"
- " 1: IntConstant 4 [14, 8, 8]\n"
- " 2: IntConstant 5 [14]\n"
+ " 1: IntConstant 4 [5, 8, 8]\n"
+ " 2: IntConstant 5 [5]\n"
" 3: Goto\n"
"BasicBlock 1, pred: 0, succ: 2\n"
" 4: Goto\n"
- "BasicBlock 2, pred: 1, 8, succ: 6, 3\n"
- " 5: Phi(0, 14) [12, 6, 6]\n"
+ "BasicBlock 2, pred: 1, 4, 5, succ: 6, 3\n"
+ " 5: Phi(0, 2, 1) [12, 6, 6]\n"
" 6: Equal(5, 5) [7]\n"
" 7: If(6)\n"
"BasicBlock 3, pred: 2, succ: 5, 4\n"
" 8: Equal(1, 1) [9]\n"
" 9: If(8)\n"
- "BasicBlock 4, pred: 3, succ: 8\n"
+ "BasicBlock 4, pred: 3, succ: 2\n"
" 10: Goto\n"
- "BasicBlock 5, pred: 3, succ: 8\n"
+ "BasicBlock 5, pred: 3, succ: 2\n"
" 11: Goto\n"
"BasicBlock 6, pred: 2, succ: 7\n"
" 12: Return(5)\n"
"BasicBlock 7, pred: 6\n"
- " 13: Exit\n"
- // Synthesized single back edge of loop.
- "BasicBlock 8, pred: 5, 4, succ: 2\n"
- " 14: Phi(1, 2) [5]\n"
- " 15: Goto\n";
+ " 13: Exit\n";
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
new file mode 100644
index 0000000..b446815
--- /dev/null
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -0,0 +1,422 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "stack_map_stream.h"
+
+namespace art {
+
+void StackMapStream::BeginStackMapEntry(uint32_t dex_pc,
+ uint32_t native_pc_offset,
+ uint32_t register_mask,
+ BitVector* sp_mask,
+ uint32_t num_dex_registers,
+ uint8_t inlining_depth) {
+ DCHECK_EQ(0u, current_entry_.dex_pc) << "EndStackMapEntry not called after BeginStackMapEntry";
+ current_entry_.dex_pc = dex_pc;
+ current_entry_.native_pc_offset = native_pc_offset;
+ current_entry_.register_mask = register_mask;
+ current_entry_.sp_mask = sp_mask;
+ current_entry_.num_dex_registers = num_dex_registers;
+ current_entry_.inlining_depth = inlining_depth;
+ current_entry_.dex_register_locations_start_index = dex_register_locations_.Size();
+ current_entry_.inline_infos_start_index = inline_infos_.Size();
+ current_entry_.dex_register_map_hash = 0;
+ current_entry_.same_dex_register_map_as_ = kNoSameDexMapFound;
+ if (num_dex_registers != 0) {
+ current_entry_.live_dex_registers_mask =
+ new (allocator_) ArenaBitVector(allocator_, num_dex_registers, true);
+ } else {
+ current_entry_.live_dex_registers_mask = nullptr;
+ }
+
+ if (sp_mask != nullptr) {
+ stack_mask_max_ = std::max(stack_mask_max_, sp_mask->GetHighestBitSet());
+ }
+ if (inlining_depth > 0) {
+ number_of_stack_maps_with_inline_info_++;
+ }
+
+ dex_pc_max_ = std::max(dex_pc_max_, dex_pc);
+ native_pc_offset_max_ = std::max(native_pc_offset_max_, native_pc_offset);
+ register_mask_max_ = std::max(register_mask_max_, register_mask);
+ current_dex_register_ = 0;
+}
+
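+// Finalizes the entry started by BeginStackMapEntry and records whether an
+// earlier entry already uses an identical dex register map, so the encoded
+// map can be shared when the stream is filled in.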
+void StackMapStream::EndStackMapEntry() {
+ current_entry_.same_dex_register_map_as_ = FindEntryWithTheSameDexMap();
+ stack_maps_.Add(current_entry_);
+ current_entry_ = StackMapEntry();
+}
+
+void StackMapStream::AddDexRegisterEntry(DexRegisterLocation::Kind kind, int32_t value) {
+ if (kind != DexRegisterLocation::Kind::kNone) {
+ // Ensure we only use non-compressed location kinds at this stage.
+ DCHECK(DexRegisterLocation::IsShortLocationKind(kind))
+ << DexRegisterLocation::PrettyDescriptor(kind);
+ DexRegisterLocation location(kind, value);
+
+ // Look for Dex register `location` in the location catalog (using the
+ // companion hash map of locations to indices). Use its index if it
+ // is already in the location catalog. If not, insert it (in the
+ // location catalog and the hash map) and use the newly created index.
+ auto it = location_catalog_entries_indices_.Find(location);
+ if (it != location_catalog_entries_indices_.end()) {
+ // Retrieve the index from the hash map.
+ dex_register_locations_.Add(it->second);
+ } else {
+ // Create a new entry in the location catalog and the hash map.
+ size_t index = location_catalog_entries_.Size();
+ location_catalog_entries_.Add(location);
+ dex_register_locations_.Add(index);
+ location_catalog_entries_indices_.Insert(std::make_pair(location, index));
+ }
+
+ if (in_inline_frame_) {
+ // TODO: Support sharing DexRegisterMap across InlineInfo.
+ DCHECK_LT(current_dex_register_, current_inline_info_.num_dex_registers);
+ current_inline_info_.live_dex_registers_mask->SetBit(current_dex_register_);
+ } else {
+ DCHECK_LT(current_dex_register_, current_entry_.num_dex_registers);
+ current_entry_.live_dex_registers_mask->SetBit(current_dex_register_);
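+ // Cheap, order-sensitive hash over (register number, kind, value);
+ // collisions are resolved later by HaveTheSameDexMaps.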
+ current_entry_.dex_register_map_hash += (1 <<
+ (current_dex_register_ % (sizeof(current_entry_.dex_register_map_hash) * kBitsPerByte)));
+ current_entry_.dex_register_map_hash += static_cast<uint32_t>(value);
+ current_entry_.dex_register_map_hash += static_cast<uint32_t>(kind);
+ }
+ }
+ current_dex_register_++;
+}
+
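+// Starts an inline frame for the current stack map entry. Dex register
+// entries added until EndInlineInfoEntry describe this inline frame.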
+void StackMapStream::BeginInlineInfoEntry(uint32_t method_index,
+ uint32_t dex_pc,
+ InvokeType invoke_type,
+ uint32_t num_dex_registers) {
+ DCHECK(!in_inline_frame_);
+ in_inline_frame_ = true;
+ current_inline_info_.method_index = method_index;
+ current_inline_info_.dex_pc = dex_pc;
+ current_inline_info_.invoke_type = invoke_type;
+ current_inline_info_.num_dex_registers = num_dex_registers;
+ current_inline_info_.dex_register_locations_start_index = dex_register_locations_.Size();
+ if (num_dex_registers != 0) {
+ current_inline_info_.live_dex_registers_mask =
+ new (allocator_) ArenaBitVector(allocator_, num_dex_registers, true);
+ } else {
+ current_inline_info_.live_dex_registers_mask = nullptr;
+ }
+ current_dex_register_ = 0;
+}
+
+void StackMapStream::EndInlineInfoEntry() {
+ DCHECK(in_inline_frame_);
+ DCHECK_EQ(current_dex_register_, current_inline_info_.num_dex_registers)
+ << "Inline information contains less registers than expected";
+ in_inline_frame_ = false;
+ inline_infos_.Add(current_inline_info_);
+ current_inline_info_ = InlineInfoEntry();
+}
+
+size_t StackMapStream::PrepareForFillIn() {
+ int stack_mask_number_of_bits = stack_mask_max_ + 1; // Need room for max element too.
+ stack_mask_size_ = RoundUp(stack_mask_number_of_bits, kBitsPerByte) / kBitsPerByte;
+ inline_info_size_ = ComputeInlineInfoSize();
+ dex_register_maps_size_ = ComputeDexRegisterMapsSize();
+ stack_maps_size_ = stack_maps_.Size()
+ * StackMap::ComputeStackMapSize(stack_mask_size_,
+ inline_info_size_,
+ dex_register_maps_size_,
+ dex_pc_max_,
+ native_pc_offset_max_,
+ register_mask_max_);
+ dex_register_location_catalog_size_ = ComputeDexRegisterLocationCatalogSize();
+
+ // Note: use RoundUp to word-size here if you want CodeInfo objects to be word aligned.
+ needed_size_ = CodeInfo::kFixedSize
+ + dex_register_location_catalog_size_
+ + stack_maps_size_
+ + dex_register_maps_size_
+ + inline_info_size_;
+
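+ // The regions are laid out in this order: fixed CodeInfo header, dex
+ // register location catalog, stack maps, dex register maps, inline infos.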
+ dex_register_location_catalog_start_ = CodeInfo::kFixedSize;
+ stack_maps_start_ = dex_register_location_catalog_start_ + dex_register_location_catalog_size_;
+ dex_register_maps_start_ = stack_maps_start_ + stack_maps_size_;
+ inline_infos_start_ = dex_register_maps_start_ + dex_register_maps_size_;
+
+ return needed_size_;
+}
+
+size_t StackMapStream::ComputeDexRegisterLocationCatalogSize() const {
+ size_t size = DexRegisterLocationCatalog::kFixedSize;
+ for (size_t location_catalog_entry_index = 0;
+ location_catalog_entry_index < location_catalog_entries_.Size();
+ ++location_catalog_entry_index) {
+ DexRegisterLocation dex_register_location =
+ location_catalog_entries_.Get(location_catalog_entry_index);
+ size += DexRegisterLocationCatalog::EntrySize(dex_register_location);
+ }
+ return size;
+}
+
+size_t StackMapStream::ComputeDexRegisterMapSize(uint32_t num_dex_registers,
+ const BitVector& live_dex_registers_mask) const {
+ // Size of the map in bytes.
+ size_t size = DexRegisterMap::kFixedSize;
+ // Add the live bit mask for the Dex register liveness.
+ size += DexRegisterMap::GetLiveBitMaskSize(num_dex_registers);
+ // Compute the size of the set of live Dex register entries.
+ size_t number_of_live_dex_registers = 0;
+ for (size_t dex_register_number = 0;
+ dex_register_number < num_dex_registers;
+ ++dex_register_number) {
+ if (live_dex_registers_mask.IsBitSet(dex_register_number)) {
+ ++number_of_live_dex_registers;
+ }
+ }
+ size_t map_entries_size_in_bits =
+ DexRegisterMap::SingleEntrySizeInBits(location_catalog_entries_.Size())
+ * number_of_live_dex_registers;
+ size_t map_entries_size_in_bytes =
+ RoundUp(map_entries_size_in_bits, kBitsPerByte) / kBitsPerByte;
+ size += map_entries_size_in_bytes;
+ return size;
+}
+
+size_t StackMapStream::ComputeDexRegisterMapsSize() const {
+ size_t size = 0;
+ size_t inline_info_index = 0;
+ for (size_t i = 0; i < stack_maps_.Size(); ++i) {
+ StackMapEntry entry = stack_maps_.Get(i);
+ if (entry.same_dex_register_map_as_ == kNoSameDexMapFound) {
+ size += ComputeDexRegisterMapSize(entry.num_dex_registers, *entry.live_dex_registers_mask);
+ } else {
+ // Entries with the same dex map will have the same offset.
+ }
+ for (size_t j = 0; j < entry.inlining_depth; ++j) {
+ InlineInfoEntry inline_entry = inline_infos_.Get(inline_info_index++);
+ size += ComputeDexRegisterMapSize(inline_entry.num_dex_registers,
+ *inline_entry.live_dex_registers_mask);
+ }
+ }
+ return size;
+}
+
+size_t StackMapStream::ComputeInlineInfoSize() const {
+ return inline_infos_.Size() * InlineInfo::SingleEntrySize()
+ // For encoding the depth.
+ + (number_of_stack_maps_with_inline_info_ * InlineInfo::kFixedSize);
+}
+
+void StackMapStream::FillIn(MemoryRegion region) {
+ DCHECK_EQ(0u, current_entry_.dex_pc) << "EndStackMapEntry not called after BeginStackMapEntry";
+ DCHECK_NE(0u, needed_size_) << "PrepareForFillIn not called before FillIn";
+
+ CodeInfo code_info(region);
+ DCHECK_EQ(region.size(), needed_size_);
+ code_info.SetOverallSize(region.size());
+
+ MemoryRegion dex_register_locations_region = region.Subregion(
+ dex_register_maps_start_, dex_register_maps_size_);
+
+ MemoryRegion inline_infos_region = region.Subregion(
+ inline_infos_start_, inline_info_size_);
+
+ code_info.SetEncoding(inline_info_size_,
+ dex_register_maps_size_,
+ dex_pc_max_,
+ native_pc_offset_max_,
+ register_mask_max_);
+ code_info.SetNumberOfStackMaps(stack_maps_.Size());
+ code_info.SetStackMaskSize(stack_mask_size_);
+ DCHECK_EQ(code_info.GetStackMapsSize(), stack_maps_size_);
+
+ // Set the Dex register location catalog.
+ code_info.SetNumberOfDexRegisterLocationCatalogEntries(location_catalog_entries_.Size());
+ MemoryRegion dex_register_location_catalog_region = region.Subregion(
+ dex_register_location_catalog_start_, dex_register_location_catalog_size_);
+ DexRegisterLocationCatalog dex_register_location_catalog(dex_register_location_catalog_region);
+ // Offset in `dex_register_location_catalog` where to store the next
+ // register location.
+ size_t location_catalog_offset = DexRegisterLocationCatalog::kFixedSize;
+ for (size_t i = 0, e = location_catalog_entries_.Size(); i < e; ++i) {
+ DexRegisterLocation dex_register_location = location_catalog_entries_.Get(i);
+ dex_register_location_catalog.SetRegisterInfo(location_catalog_offset, dex_register_location);
+ location_catalog_offset += DexRegisterLocationCatalog::EntrySize(dex_register_location);
+ }
+ // Ensure we reached the end of the Dex register location catalog.
+ DCHECK_EQ(location_catalog_offset, dex_register_location_catalog_region.size());
+
+ uintptr_t next_dex_register_map_offset = 0;
+ uintptr_t next_inline_info_offset = 0;
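+ // Running offsets into the dex register map and inline info regions. A dex
+ // register map shared with an earlier entry is not re-emitted; the earlier
+ // offset is reused instead.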
+ for (size_t i = 0, e = stack_maps_.Size(); i < e; ++i) {
+ StackMap stack_map = code_info.GetStackMapAt(i);
+ StackMapEntry entry = stack_maps_.Get(i);
+
+ stack_map.SetDexPc(code_info, entry.dex_pc);
+ stack_map.SetNativePcOffset(code_info, entry.native_pc_offset);
+ stack_map.SetRegisterMask(code_info, entry.register_mask);
+ if (entry.sp_mask != nullptr) {
+ stack_map.SetStackMask(code_info, *entry.sp_mask);
+ }
+
+ if (entry.num_dex_registers == 0) {
+ // No dex map available.
+ stack_map.SetDexRegisterMapOffset(code_info, StackMap::kNoDexRegisterMap);
+ } else {
+ // Search for an entry with the same dex map.
+ if (entry.same_dex_register_map_as_ != kNoSameDexMapFound) {
+ // If we have a hit, reuse the offset.
+ stack_map.SetDexRegisterMapOffset(code_info,
+ code_info.GetStackMapAt(entry.same_dex_register_map_as_)
+ .GetDexRegisterMapOffset(code_info));
+ } else {
+ // New dex register maps should be added to the stack map.
+ MemoryRegion register_region = dex_register_locations_region.Subregion(
+ next_dex_register_map_offset,
+ ComputeDexRegisterMapSize(entry.num_dex_registers, *entry.live_dex_registers_mask));
+ next_dex_register_map_offset += register_region.size();
+ DexRegisterMap dex_register_map(register_region);
+ stack_map.SetDexRegisterMapOffset(
+ code_info, register_region.start() - dex_register_locations_region.start());
+
+ // Set the dex register location.
+ FillInDexRegisterMap(dex_register_map,
+ entry.num_dex_registers,
+ *entry.live_dex_registers_mask,
+ entry.dex_register_locations_start_index);
+ }
+ }
+
+ // Set the inlining info.
+ if (entry.inlining_depth != 0) {
+ MemoryRegion inline_region = inline_infos_region.Subregion(
+ next_inline_info_offset,
+ InlineInfo::kFixedSize + entry.inlining_depth * InlineInfo::SingleEntrySize());
+ next_inline_info_offset += inline_region.size();
+ InlineInfo inline_info(inline_region);
+
+ // Currently relative to the dex register map.
+ stack_map.SetInlineDescriptorOffset(
+ code_info, inline_region.start() - dex_register_locations_region.start());
+
+ inline_info.SetDepth(entry.inlining_depth);
+ for (size_t depth = 0; depth < entry.inlining_depth; ++depth) {
+ InlineInfoEntry inline_entry = inline_infos_.Get(depth + entry.inline_infos_start_index);
+ inline_info.SetMethodIndexAtDepth(depth, inline_entry.method_index);
+ inline_info.SetDexPcAtDepth(depth, inline_entry.dex_pc);
+ inline_info.SetInvokeTypeAtDepth(depth, inline_entry.invoke_type);
+ if (inline_entry.num_dex_registers == 0) {
+ // No dex map available.
+ inline_info.SetDexRegisterMapOffsetAtDepth(depth, StackMap::kNoDexRegisterMap);
+ DCHECK(inline_entry.live_dex_registers_mask == nullptr);
+ } else {
+ MemoryRegion register_region = dex_register_locations_region.Subregion(
+ next_dex_register_map_offset,
+ ComputeDexRegisterMapSize(inline_entry.num_dex_registers,
+ *inline_entry.live_dex_registers_mask));
+ next_dex_register_map_offset += register_region.size();
+ DexRegisterMap dex_register_map(register_region);
+ inline_info.SetDexRegisterMapOffsetAtDepth(
+ depth, register_region.start() - dex_register_locations_region.start());
+
+ FillInDexRegisterMap(dex_register_map,
+ inline_entry.num_dex_registers,
+ *inline_entry.live_dex_registers_mask,
+ inline_entry.dex_register_locations_start_index);
+ }
+ }
+ } else {
+ if (inline_info_size_ != 0) {
+ stack_map.SetInlineDescriptorOffset(code_info, StackMap::kNoInlineInfo);
+ }
+ }
+ }
+}
+
+void StackMapStream::FillInDexRegisterMap(DexRegisterMap dex_register_map,
+ uint32_t num_dex_registers,
+ const BitVector& live_dex_registers_mask,
+ uint32_t start_index_in_dex_register_locations) const {
+ dex_register_map.SetLiveBitMask(num_dex_registers, live_dex_registers_mask);
+ // Set the dex register location mapping data.
+ for (size_t dex_register_number = 0, index_in_dex_register_locations = 0;
+ dex_register_number < num_dex_registers;
+ ++dex_register_number) {
+ if (live_dex_registers_mask.IsBitSet(dex_register_number)) {
+ size_t location_catalog_entry_index = dex_register_locations_.Get(
+ start_index_in_dex_register_locations + index_in_dex_register_locations);
+ dex_register_map.SetLocationCatalogEntryIndex(
+ index_in_dex_register_locations,
+ location_catalog_entry_index,
+ num_dex_registers,
+ location_catalog_entries_.Size());
+ ++index_in_dex_register_locations;
+ }
+ }
+}
+
+size_t StackMapStream::FindEntryWithTheSameDexMap() {
+ size_t current_entry_index = stack_maps_.Size();
+ auto entries_it = dex_map_hash_to_stack_map_indices_.find(current_entry_.dex_register_map_hash);
+ if (entries_it == dex_map_hash_to_stack_map_indices_.end()) {
+ // We don't have a perfect hash function, so we need a list to collect all
+ // stack maps that might have the same dex register map.
+ GrowableArray<uint32_t> stack_map_indices(allocator_, 1);
+ stack_map_indices.Add(current_entry_index);
+ dex_map_hash_to_stack_map_indices_.Put(current_entry_.dex_register_map_hash, stack_map_indices);
+ return kNoSameDexMapFound;
+ }
+
+ // We might have collisions, so we need to check whether or not we really have a match.
+ for (size_t i = 0; i < entries_it->second.Size(); i++) {
+ size_t test_entry_index = entries_it->second.Get(i);
+ if (HaveTheSameDexMaps(stack_maps_.Get(test_entry_index), current_entry_)) {
+ return test_entry_index;
+ }
+ }
+ entries_it->second.Add(current_entry_index);
+ return kNoSameDexMapFound;
+}
+
+bool StackMapStream::HaveTheSameDexMaps(const StackMapEntry& a, const StackMapEntry& b) const {
+ if (a.live_dex_registers_mask == nullptr && b.live_dex_registers_mask == nullptr) {
+ return true;
+ }
+ if (a.live_dex_registers_mask == nullptr || b.live_dex_registers_mask == nullptr) {
+ return false;
+ }
+ if (a.num_dex_registers != b.num_dex_registers) {
+ return false;
+ }
+
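+ // Same register count; compare liveness bit-by-bit and, for each live
+ // register, the catalog index it maps to.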
+ int index_in_dex_register_locations = 0;
+ for (uint32_t i = 0; i < a.num_dex_registers; i++) {
+ if (a.live_dex_registers_mask->IsBitSet(i) != b.live_dex_registers_mask->IsBitSet(i)) {
+ return false;
+ }
+ if (a.live_dex_registers_mask->IsBitSet(i)) {
+ size_t a_loc = dex_register_locations_.Get(
+ a.dex_register_locations_start_index + index_in_dex_register_locations);
+ size_t b_loc = dex_register_locations_.Get(
+ b.dex_register_locations_start_index + index_in_dex_register_locations);
+ if (a_loc != b_loc) {
+ return false;
+ }
+ ++index_in_dex_register_locations;
+ }
+ }
+ return true;
+}
+
+} // namespace art
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 9a9e068..0af983b 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -70,13 +70,21 @@
native_pc_offset_max_(0),
register_mask_max_(0),
number_of_stack_maps_with_inline_info_(0),
- dex_map_hash_to_stack_map_indices_(std::less<uint32_t>(), allocator->Adapter()) {}
-
- // Compute bytes needed to encode a mask with the given maximum element.
- static uint32_t StackMaskEncodingSize(int max_element) {
- int number_of_bits = max_element + 1; // Need room for max element too.
- return RoundUp(number_of_bits, kBitsPerByte) / kBitsPerByte;
- }
+ dex_map_hash_to_stack_map_indices_(std::less<uint32_t>(), allocator->Adapter()),
+ current_entry_(),
+ current_inline_info_(),
+ stack_mask_size_(0),
+ inline_info_size_(0),
+ dex_register_maps_size_(0),
+ stack_maps_size_(0),
+ dex_register_location_catalog_size_(0),
+ dex_register_location_catalog_start_(0),
+ stack_maps_start_(0),
+ dex_register_maps_start_(0),
+ inline_infos_start_(0),
+ needed_size_(0),
+ current_dex_register_(0),
+ in_inline_frame_(false) {}
// See runtime/stack_map.h to know what these fields contain.
struct StackMapEntry {
@@ -90,380 +98,54 @@
size_t inline_infos_start_index;
BitVector* live_dex_registers_mask;
uint32_t dex_register_map_hash;
+ size_t same_dex_register_map_as_;
};
struct InlineInfoEntry {
+ uint32_t dex_pc;
uint32_t method_index;
+ InvokeType invoke_type;
+ uint32_t num_dex_registers;
+ BitVector* live_dex_registers_mask;
+ size_t dex_register_locations_start_index;
};
- void AddStackMapEntry(uint32_t dex_pc,
- uint32_t native_pc_offset,
- uint32_t register_mask,
- BitVector* sp_mask,
- uint32_t num_dex_registers,
- uint8_t inlining_depth) {
- StackMapEntry entry;
- entry.dex_pc = dex_pc;
- entry.native_pc_offset = native_pc_offset;
- entry.register_mask = register_mask;
- entry.sp_mask = sp_mask;
- entry.num_dex_registers = num_dex_registers;
- entry.inlining_depth = inlining_depth;
- entry.dex_register_locations_start_index = dex_register_locations_.Size();
- entry.inline_infos_start_index = inline_infos_.Size();
- entry.dex_register_map_hash = 0;
- if (num_dex_registers != 0) {
- entry.live_dex_registers_mask =
- new (allocator_) ArenaBitVector(allocator_, num_dex_registers, true);
- } else {
- entry.live_dex_registers_mask = nullptr;
- }
- stack_maps_.Add(entry);
+ void BeginStackMapEntry(uint32_t dex_pc,
+ uint32_t native_pc_offset,
+ uint32_t register_mask,
+ BitVector* sp_mask,
+ uint32_t num_dex_registers,
+ uint8_t inlining_depth);
+ void EndStackMapEntry();
- if (sp_mask != nullptr) {
- stack_mask_max_ = std::max(stack_mask_max_, sp_mask->GetHighestBitSet());
- }
- if (inlining_depth > 0) {
- number_of_stack_maps_with_inline_info_++;
- }
+ void AddDexRegisterEntry(DexRegisterLocation::Kind kind, int32_t value);
- dex_pc_max_ = std::max(dex_pc_max_, dex_pc);
- native_pc_offset_max_ = std::max(native_pc_offset_max_, native_pc_offset);
- register_mask_max_ = std::max(register_mask_max_, register_mask);
- }
+ void BeginInlineInfoEntry(uint32_t method_index,
+ uint32_t dex_pc,
+ InvokeType invoke_type,
+ uint32_t num_dex_registers);
+ void EndInlineInfoEntry();
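+
+ // The expected sequence of calls to build one stack map is:
+ //   BeginStackMapEntry(...)
+ //     AddDexRegisterEntry(...)    // Once per dex register of the method.
+ //     BeginInlineInfoEntry(...)   // Once per inline frame.
+ //       AddDexRegisterEntry(...)  // Once per dex register of that frame.
+ //     EndInlineInfoEntry()
+ //   EndStackMapEntry()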
- void AddInlineInfoEntry(uint32_t method_index) {
- InlineInfoEntry entry;
- entry.method_index = method_index;
- inline_infos_.Add(entry);
- }
-
- size_t ComputeNeededSize() {
- size_t size = CodeInfo::kFixedSize
- + ComputeDexRegisterLocationCatalogSize()
- + ComputeStackMapsSize()
- + ComputeDexRegisterMapsSize()
- + ComputeInlineInfoSize();
- // Note: use RoundUp to word-size here if you want CodeInfo objects to be word aligned.
- return size;
- }
-
- size_t ComputeStackMaskSize() const {
- return StackMaskEncodingSize(stack_mask_max_);
- }
-
- size_t ComputeStackMapsSize() {
- return stack_maps_.Size() * StackMap::ComputeStackMapSize(
- ComputeStackMaskSize(),
- ComputeInlineInfoSize(),
- ComputeDexRegisterMapsSize(),
- dex_pc_max_,
- native_pc_offset_max_,
- register_mask_max_);
- }
-
- // Compute the size of the Dex register location catalog of `entry`.
- size_t ComputeDexRegisterLocationCatalogSize() const {
- size_t size = DexRegisterLocationCatalog::kFixedSize;
- for (size_t location_catalog_entry_index = 0;
- location_catalog_entry_index < location_catalog_entries_.Size();
- ++location_catalog_entry_index) {
- DexRegisterLocation dex_register_location =
- location_catalog_entries_.Get(location_catalog_entry_index);
- size += DexRegisterLocationCatalog::EntrySize(dex_register_location);
- }
- return size;
- }
-
- size_t ComputeDexRegisterMapSize(const StackMapEntry& entry) const {
- // Size of the map in bytes.
- size_t size = DexRegisterMap::kFixedSize;
- // Add the live bit mask for the Dex register liveness.
- size += DexRegisterMap::GetLiveBitMaskSize(entry.num_dex_registers);
- // Compute the size of the set of live Dex register entries.
- size_t number_of_live_dex_registers = 0;
- for (size_t dex_register_number = 0;
- dex_register_number < entry.num_dex_registers;
- ++dex_register_number) {
- if (entry.live_dex_registers_mask->IsBitSet(dex_register_number)) {
- ++number_of_live_dex_registers;
- }
- }
- size_t map_entries_size_in_bits =
- DexRegisterMap::SingleEntrySizeInBits(location_catalog_entries_.Size())
- * number_of_live_dex_registers;
- size_t map_entries_size_in_bytes =
- RoundUp(map_entries_size_in_bits, kBitsPerByte) / kBitsPerByte;
- size += map_entries_size_in_bytes;
- return size;
- }
-
- // Compute the size of all the Dex register maps.
- size_t ComputeDexRegisterMapsSize() {
- size_t size = 0;
- for (size_t i = 0; i < stack_maps_.Size(); ++i) {
- if (FindEntryWithTheSameDexMap(i) == kNoSameDexMapFound) {
- // Entries with the same dex map will have the same offset.
- size += ComputeDexRegisterMapSize(stack_maps_.Get(i));
- }
- }
- return size;
- }
-
- // Compute the size of all the inline information pieces.
- size_t ComputeInlineInfoSize() const {
- return inline_infos_.Size() * InlineInfo::SingleEntrySize()
- // For encoding the depth.
- + (number_of_stack_maps_with_inline_info_ * InlineInfo::kFixedSize);
- }
-
- size_t ComputeDexRegisterLocationCatalogStart() const {
- return CodeInfo::kFixedSize;
- }
-
- size_t ComputeStackMapsStart() const {
- return ComputeDexRegisterLocationCatalogStart() + ComputeDexRegisterLocationCatalogSize();
- }
-
- size_t ComputeDexRegisterMapsStart() {
- return ComputeStackMapsStart() + ComputeStackMapsSize();
- }
-
- size_t ComputeInlineInfoStart() {
- return ComputeDexRegisterMapsStart() + ComputeDexRegisterMapsSize();
- }
-
- void FillIn(MemoryRegion region) {
- CodeInfo code_info(region);
- DCHECK_EQ(region.size(), ComputeNeededSize());
- code_info.SetOverallSize(region.size());
-
- size_t stack_mask_size = ComputeStackMaskSize();
-
- size_t dex_register_map_size = ComputeDexRegisterMapsSize();
- size_t inline_info_size = ComputeInlineInfoSize();
-
- MemoryRegion dex_register_locations_region = region.Subregion(
- ComputeDexRegisterMapsStart(),
- dex_register_map_size);
-
- MemoryRegion inline_infos_region = region.Subregion(
- ComputeInlineInfoStart(),
- inline_info_size);
-
- code_info.SetEncoding(inline_info_size,
- dex_register_map_size,
- dex_pc_max_,
- native_pc_offset_max_,
- register_mask_max_);
- code_info.SetNumberOfStackMaps(stack_maps_.Size());
- code_info.SetStackMaskSize(stack_mask_size);
- DCHECK_EQ(code_info.GetStackMapsSize(), ComputeStackMapsSize());
-
- // Set the Dex register location catalog.
- code_info.SetNumberOfDexRegisterLocationCatalogEntries(
- location_catalog_entries_.Size());
- MemoryRegion dex_register_location_catalog_region = region.Subregion(
- ComputeDexRegisterLocationCatalogStart(),
- ComputeDexRegisterLocationCatalogSize());
- DexRegisterLocationCatalog dex_register_location_catalog(dex_register_location_catalog_region);
- // Offset in `dex_register_location_catalog` where to store the next
- // register location.
- size_t location_catalog_offset = DexRegisterLocationCatalog::kFixedSize;
- for (size_t i = 0, e = location_catalog_entries_.Size(); i < e; ++i) {
- DexRegisterLocation dex_register_location = location_catalog_entries_.Get(i);
- dex_register_location_catalog.SetRegisterInfo(location_catalog_offset, dex_register_location);
- location_catalog_offset += DexRegisterLocationCatalog::EntrySize(dex_register_location);
- }
- // Ensure we reached the end of the Dex registers location_catalog.
- DCHECK_EQ(location_catalog_offset, dex_register_location_catalog_region.size());
-
- uintptr_t next_dex_register_map_offset = 0;
- uintptr_t next_inline_info_offset = 0;
- for (size_t i = 0, e = stack_maps_.Size(); i < e; ++i) {
- StackMap stack_map = code_info.GetStackMapAt(i);
- StackMapEntry entry = stack_maps_.Get(i);
-
- stack_map.SetDexPc(code_info, entry.dex_pc);
- stack_map.SetNativePcOffset(code_info, entry.native_pc_offset);
- stack_map.SetRegisterMask(code_info, entry.register_mask);
- if (entry.sp_mask != nullptr) {
- stack_map.SetStackMask(code_info, *entry.sp_mask);
- }
-
- if (entry.num_dex_registers == 0) {
- // No dex map available.
- stack_map.SetDexRegisterMapOffset(code_info, StackMap::kNoDexRegisterMap);
- } else {
- // Search for an entry with the same dex map.
- size_t entry_with_same_map = FindEntryWithTheSameDexMap(i);
- if (entry_with_same_map != kNoSameDexMapFound) {
- // If we have a hit reuse the offset.
- stack_map.SetDexRegisterMapOffset(code_info,
- code_info.GetStackMapAt(entry_with_same_map).GetDexRegisterMapOffset(code_info));
- } else {
- // New dex registers maps should be added to the stack map.
- MemoryRegion register_region =
- dex_register_locations_region.Subregion(
- next_dex_register_map_offset,
- ComputeDexRegisterMapSize(entry));
- next_dex_register_map_offset += register_region.size();
- DexRegisterMap dex_register_map(register_region);
- stack_map.SetDexRegisterMapOffset(
- code_info, register_region.start() - dex_register_locations_region.start());
-
- // Set the live bit mask.
- dex_register_map.SetLiveBitMask(entry.num_dex_registers, *entry.live_dex_registers_mask);
-
- // Set the dex register location mapping data.
- for (size_t dex_register_number = 0, index_in_dex_register_locations = 0;
- dex_register_number < entry.num_dex_registers;
- ++dex_register_number) {
- if (entry.live_dex_registers_mask->IsBitSet(dex_register_number)) {
- size_t location_catalog_entry_index =
- dex_register_locations_.Get(entry.dex_register_locations_start_index
- + index_in_dex_register_locations);
- dex_register_map.SetLocationCatalogEntryIndex(
- index_in_dex_register_locations,
- location_catalog_entry_index,
- entry.num_dex_registers,
- location_catalog_entries_.Size());
- ++index_in_dex_register_locations;
- }
- }
- }
- }
-
- // Set the inlining info.
- if (entry.inlining_depth != 0) {
- MemoryRegion inline_region = inline_infos_region.Subregion(
- next_inline_info_offset,
- InlineInfo::kFixedSize + entry.inlining_depth * InlineInfo::SingleEntrySize());
- next_inline_info_offset += inline_region.size();
- InlineInfo inline_info(inline_region);
-
- // Currently relative to the dex register map.
- stack_map.SetInlineDescriptorOffset(
- code_info, inline_region.start() - dex_register_locations_region.start());
-
- inline_info.SetDepth(entry.inlining_depth);
- for (size_t j = 0; j < entry.inlining_depth; ++j) {
- InlineInfoEntry inline_entry = inline_infos_.Get(j + entry.inline_infos_start_index);
- inline_info.SetMethodReferenceIndexAtDepth(j, inline_entry.method_index);
- }
- } else {
- if (inline_info_size != 0) {
- stack_map.SetInlineDescriptorOffset(code_info, StackMap::kNoInlineInfo);
- }
- }
- }
- }
-
- void AddDexRegisterEntry(uint16_t dex_register, DexRegisterLocation::Kind kind, int32_t value) {
- StackMapEntry entry = stack_maps_.Get(stack_maps_.Size() - 1);
- DCHECK_LT(dex_register, entry.num_dex_registers);
-
- if (kind != DexRegisterLocation::Kind::kNone) {
- // Ensure we only use non-compressed location kind at this stage.
- DCHECK(DexRegisterLocation::IsShortLocationKind(kind))
- << DexRegisterLocation::PrettyDescriptor(kind);
- DexRegisterLocation location(kind, value);
-
- // Look for Dex register `location` in the location catalog (using the
- // companion hash map of locations to indices). Use its index if it
- // is already in the location catalog. If not, insert it (in the
- // location catalog and the hash map) and use the newly created index.
- auto it = location_catalog_entries_indices_.Find(location);
- if (it != location_catalog_entries_indices_.end()) {
- // Retrieve the index from the hash map.
- dex_register_locations_.Add(it->second);
- } else {
- // Create a new entry in the location catalog and the hash map.
- size_t index = location_catalog_entries_.Size();
- location_catalog_entries_.Add(location);
- dex_register_locations_.Add(index);
- location_catalog_entries_indices_.Insert(std::make_pair(location, index));
- }
-
- entry.live_dex_registers_mask->SetBit(dex_register);
- entry.dex_register_map_hash +=
- (1 << (dex_register % (sizeof(entry.dex_register_map_hash) * kBitsPerByte)));
- entry.dex_register_map_hash += static_cast<uint32_t>(value);
- entry.dex_register_map_hash += static_cast<uint32_t>(kind);
- stack_maps_.Put(stack_maps_.Size() - 1, entry);
- }
- }
+ // Prepares the stream to fill in a memory region. Must be called before FillIn.
+ // Returns the size (in bytes) needed to store this stream.
+ size_t PrepareForFillIn();
+ void FillIn(MemoryRegion region);
private:
- // Returns the index of an entry with the same dex register map
+ size_t ComputeDexRegisterLocationCatalogSize() const;
+ size_t ComputeDexRegisterMapSize(uint32_t num_dex_registers,
+ const BitVector& live_dex_registers_mask) const;
+ size_t ComputeDexRegisterMapsSize() const;
+ size_t ComputeInlineInfoSize() const;
+
+ // Returns the index of an entry with the same dex register map as the current entry,
// or kNoSameDexMapFound if no such entry exists.
- size_t FindEntryWithTheSameDexMap(size_t entry_index) {
- StackMapEntry entry = stack_maps_.Get(entry_index);
- auto entries_it = dex_map_hash_to_stack_map_indices_.find(entry.dex_register_map_hash);
- if (entries_it == dex_map_hash_to_stack_map_indices_.end()) {
- // We don't have a perfect hash functions so we need a list to collect all stack maps
- // which might have the same dex register map.
- GrowableArray<uint32_t> stack_map_indices(allocator_, 1);
- stack_map_indices.Add(entry_index);
- dex_map_hash_to_stack_map_indices_.Put(entry.dex_register_map_hash, stack_map_indices);
- return kNoSameDexMapFound;
- }
-
- // TODO: We don't need to add ourselves to the map if we can guarantee that
- // FindEntryWithTheSameDexMap is called just once per stack map entry.
- // A good way to do this is to cache the offset in the stack map entry. This
- // is easier to do if we add markers when the stack map constructions begins
- // and when it ends.
-
- // We might have collisions, so we need to check whether or not we should
- // add the entry to the map. `needs_to_be_added` keeps track of this.
- bool needs_to_be_added = true;
- size_t result = kNoSameDexMapFound;
- for (size_t i = 0; i < entries_it->second.Size(); i++) {
- size_t test_entry_index = entries_it->second.Get(i);
- if (test_entry_index == entry_index) {
- needs_to_be_added = false;
- } else if (HaveTheSameDexMaps(stack_maps_.Get(test_entry_index), entry)) {
- result = test_entry_index;
- needs_to_be_added = false;
- break;
- }
- }
- if (needs_to_be_added) {
- entries_it->second.Add(entry_index);
- }
- return result;
- }
-
- bool HaveTheSameDexMaps(const StackMapEntry& a, const StackMapEntry& b) const {
- if (a.live_dex_registers_mask == nullptr && b.live_dex_registers_mask == nullptr) {
- return true;
- }
- if (a.live_dex_registers_mask == nullptr || b.live_dex_registers_mask == nullptr) {
- return false;
- }
- if (a.num_dex_registers != b.num_dex_registers) {
- return false;
- }
-
- int index_in_dex_register_locations = 0;
- for (uint32_t i = 0; i < a.num_dex_registers; i++) {
- if (a.live_dex_registers_mask->IsBitSet(i) != b.live_dex_registers_mask->IsBitSet(i)) {
- return false;
- }
- if (a.live_dex_registers_mask->IsBitSet(i)) {
- size_t a_loc = dex_register_locations_.Get(
- a.dex_register_locations_start_index + index_in_dex_register_locations);
- size_t b_loc = dex_register_locations_.Get(
- b.dex_register_locations_start_index + index_in_dex_register_locations);
- if (a_loc != b_loc) {
- return false;
- }
- ++index_in_dex_register_locations;
- }
- }
- return true;
- }
+ size_t FindEntryWithTheSameDexMap();
+ bool HaveTheSameDexMaps(const StackMapEntry& a, const StackMapEntry& b) const;
+ void FillInDexRegisterMap(DexRegisterMap dex_register_map,
+ uint32_t num_dex_registers,
+ const BitVector& live_dex_registers_mask,
+ uint32_t start_index_in_dex_register_locations) const;
ArenaAllocator* allocator_;
GrowableArray<StackMapEntry> stack_maps_;
@@ -476,8 +158,7 @@
DexRegisterLocationHashFn> LocationCatalogEntriesIndices;
LocationCatalogEntriesIndices location_catalog_entries_indices_;
- // A set of concatenated maps of Dex register locations indices to
- // `location_catalog_entries_`.
+ // A set of concatenated maps of Dex register location indices into `location_catalog_entries_`.
GrowableArray<size_t> dex_register_locations_;
GrowableArray<InlineInfoEntry> inline_infos_;
int stack_mask_max_;
@@ -488,6 +169,21 @@
ArenaSafeMap<uint32_t, GrowableArray<uint32_t>> dex_map_hash_to_stack_map_indices_;
+ StackMapEntry current_entry_;
+ InlineInfoEntry current_inline_info_;
+ size_t stack_mask_size_;
+ size_t inline_info_size_;
+ size_t dex_register_maps_size_;
+ size_t stack_maps_size_;
+ size_t dex_register_location_catalog_size_;
+ size_t dex_register_location_catalog_start_;
+ size_t stack_maps_start_;
+ size_t dex_register_maps_start_;
+ size_t inline_infos_start_;
+ size_t needed_size_;
+ uint32_t current_dex_register_;
+ bool in_inline_frame_;
+
static constexpr uint32_t kNoSameDexMapFound = -1;
DISALLOW_COPY_AND_ASSIGN(StackMapStream);
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 8d160bc..98e14ea 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -40,11 +40,12 @@
ArenaBitVector sp_mask(&arena, 0, false);
size_t number_of_dex_registers = 2;
- stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
- stream.AddDexRegisterEntry(0, Kind::kInStack, 0); // Short location.
- stream.AddDexRegisterEntry(1, Kind::kConstant, -2); // Short location.
+ stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
+ stream.AddDexRegisterEntry(Kind::kInStack, 0); // Short location.
+ stream.AddDexRegisterEntry(Kind::kConstant, -2); // Short location.
+ stream.EndStackMapEntry();
- size_t size = stream.ComputeNeededSize();
+ size_t size = stream.PrepareForFillIn();
void* memory = arena.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
stream.FillIn(region);
@@ -123,20 +124,25 @@
sp_mask1.SetBit(2);
sp_mask1.SetBit(4);
size_t number_of_dex_registers = 2;
- stream.AddStackMapEntry(0, 64, 0x3, &sp_mask1, number_of_dex_registers, 2);
- stream.AddDexRegisterEntry(0, Kind::kInStack, 0); // Short location.
- stream.AddDexRegisterEntry(1, Kind::kConstant, -2); // Large location.
- stream.AddInlineInfoEntry(42);
- stream.AddInlineInfoEntry(82);
+ size_t number_of_dex_registers_in_inline_info = 0;
+ stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask1, number_of_dex_registers, 2);
+ stream.AddDexRegisterEntry(Kind::kInStack, 0); // Short location.
+ stream.AddDexRegisterEntry(Kind::kConstant, -2); // Large location.
+ stream.BeginInlineInfoEntry(82, 3, kDirect, number_of_dex_registers_in_inline_info);
+ stream.EndInlineInfoEntry();
+ stream.BeginInlineInfoEntry(42, 2, kStatic, number_of_dex_registers_in_inline_info);
+ stream.EndInlineInfoEntry();
+ stream.EndStackMapEntry();
ArenaBitVector sp_mask2(&arena, 0, true);
sp_mask2.SetBit(3);
sp_mask1.SetBit(8);
- stream.AddStackMapEntry(1, 128, 0xFF, &sp_mask2, number_of_dex_registers, 0);
- stream.AddDexRegisterEntry(0, Kind::kInRegister, 18); // Short location.
- stream.AddDexRegisterEntry(1, Kind::kInFpuRegister, 3); // Short location.
+ stream.BeginStackMapEntry(1, 128, 0xFF, &sp_mask2, number_of_dex_registers, 0);
+ stream.AddDexRegisterEntry(Kind::kInRegister, 18); // Short location.
+ stream.AddDexRegisterEntry(Kind::kInFpuRegister, 3); // Short location.
+ stream.EndStackMapEntry();
- size_t size = stream.ComputeNeededSize();
+ size_t size = stream.PrepareForFillIn();
void* memory = arena.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
stream.FillIn(region);
@@ -208,8 +214,12 @@
ASSERT_TRUE(stack_map.HasInlineInfo(code_info));
InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
ASSERT_EQ(2u, inline_info.GetDepth());
- ASSERT_EQ(42u, inline_info.GetMethodReferenceIndexAtDepth(0));
- ASSERT_EQ(82u, inline_info.GetMethodReferenceIndexAtDepth(1));
+ ASSERT_EQ(82u, inline_info.GetMethodIndexAtDepth(0));
+ ASSERT_EQ(42u, inline_info.GetMethodIndexAtDepth(1));
+ ASSERT_EQ(3u, inline_info.GetDexPcAtDepth(0));
+ ASSERT_EQ(2u, inline_info.GetDexPcAtDepth(1));
+ ASSERT_EQ(kDirect, inline_info.GetInvokeTypeAtDepth(0));
+ ASSERT_EQ(kStatic, inline_info.GetInvokeTypeAtDepth(1));
}
// Second stack map.
@@ -273,11 +283,12 @@
ArenaBitVector sp_mask(&arena, 0, false);
uint32_t number_of_dex_registers = 2;
- stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
- stream.AddDexRegisterEntry(0, Kind::kNone, 0); // No location.
- stream.AddDexRegisterEntry(1, Kind::kConstant, -2); // Large location.
+ stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
+ stream.AddDexRegisterEntry(Kind::kNone, 0); // No location.
+ stream.AddDexRegisterEntry(Kind::kConstant, -2); // Large location.
+ stream.EndStackMapEntry();
- size_t size = stream.ComputeNeededSize();
+ size_t size = stream.PrepareForFillIn();
void* memory = arena.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
stream.FillIn(region);
@@ -353,22 +364,24 @@
ArenaBitVector sp_mask(&arena, 0, false);
uint32_t number_of_dex_registers = 1024;
// Create the first stack map (and its Dex register map).
- stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
+ stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
uint32_t number_of_dex_live_registers_in_dex_register_map_0 = number_of_dex_registers - 8;
for (uint32_t i = 0; i < number_of_dex_live_registers_in_dex_register_map_0; ++i) {
// Use two different Dex register locations to populate this map,
// as using a single value (in the whole CodeInfo object) would
// make this Dex register mapping data empty (see
// art::DexRegisterMap::SingleEntrySizeInBits).
- stream.AddDexRegisterEntry(i, Kind::kConstant, i % 2); // Short location.
+ stream.AddDexRegisterEntry(Kind::kConstant, i % 2); // Short location.
}
+ stream.EndStackMapEntry();
// Create the second stack map (and its Dex register map).
- stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
+ stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
for (uint32_t i = 0; i < number_of_dex_registers; ++i) {
- stream.AddDexRegisterEntry(i, Kind::kConstant, 0); // Short location.
+ stream.AddDexRegisterEntry(Kind::kConstant, 0); // Short location.
}
+ stream.EndStackMapEntry();
- size_t size = stream.ComputeNeededSize();
+ size_t size = stream.PrepareForFillIn();
void* memory = arena.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
stream.FillIn(region);
@@ -413,19 +426,22 @@
ArenaBitVector sp_mask(&arena, 0, false);
uint32_t number_of_dex_registers = 2;
// First stack map.
- stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
- stream.AddDexRegisterEntry(0, Kind::kInRegister, 0); // Short location.
- stream.AddDexRegisterEntry(1, Kind::kConstant, -2); // Large location.
+ stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
+ stream.AddDexRegisterEntry(Kind::kInRegister, 0); // Short location.
+ stream.AddDexRegisterEntry(Kind::kConstant, -2); // Large location.
+ stream.EndStackMapEntry();
// Second stack map, which should share the same dex register map.
- stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
- stream.AddDexRegisterEntry(0, Kind::kInRegister, 0); // Short location.
- stream.AddDexRegisterEntry(1, Kind::kConstant, -2); // Large location.
+ stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
+ stream.AddDexRegisterEntry(Kind::kInRegister, 0); // Short location.
+ stream.AddDexRegisterEntry(Kind::kConstant, -2); // Large location.
+ stream.EndStackMapEntry();
// Third stack map (doesn't share the dex register map).
- stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
- stream.AddDexRegisterEntry(0, Kind::kInRegister, 2); // Short location.
- stream.AddDexRegisterEntry(1, Kind::kConstant, -2); // Large location.
+ stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
+ stream.AddDexRegisterEntry(Kind::kInRegister, 2); // Short location.
+ stream.AddDexRegisterEntry(Kind::kConstant, -2); // Large location.
+ stream.EndStackMapEntry();
- size_t size = stream.ComputeNeededSize();
+ size_t size = stream.PrepareForFillIn();
void* memory = arena.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
stream.FillIn(region);
@@ -462,9 +478,10 @@
ArenaBitVector sp_mask(&arena, 0, false);
uint32_t number_of_dex_registers = 0;
- stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
+ stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
+ stream.EndStackMapEntry();
- size_t size = stream.ComputeNeededSize();
+ size_t size = stream.PrepareForFillIn();
void* memory = arena.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
stream.FillIn(region);
@@ -490,4 +507,175 @@
ASSERT_FALSE(stack_map.HasInlineInfo(code_info));
}
+TEST(StackMapTest, InlineTest) {
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+ StackMapStream stream(&arena);
+
+ ArenaBitVector sp_mask1(&arena, 0, true);
+ sp_mask1.SetBit(2);
+ sp_mask1.SetBit(4);
+
+ // First stack map.
+ stream.BeginStackMapEntry(0, 64, 0x3, &sp_mask1, 2, 2);
+ stream.AddDexRegisterEntry(Kind::kInStack, 0);
+ stream.AddDexRegisterEntry(Kind::kConstant, 4);
+
+ stream.BeginInlineInfoEntry(42, 2, kStatic, 1);
+ stream.AddDexRegisterEntry(Kind::kInStack, 8);
+ stream.EndInlineInfoEntry();
+ stream.BeginInlineInfoEntry(82, 3, kStatic, 3);
+ stream.AddDexRegisterEntry(Kind::kInStack, 16);
+ stream.AddDexRegisterEntry(Kind::kConstant, 20);
+ stream.AddDexRegisterEntry(Kind::kInRegister, 15);
+ stream.EndInlineInfoEntry();
+
+ stream.EndStackMapEntry();
+
+ // Second stack map.
+ stream.BeginStackMapEntry(2, 22, 0x3, &sp_mask1, 2, 3);
+ stream.AddDexRegisterEntry(Kind::kInStack, 56);
+ stream.AddDexRegisterEntry(Kind::kConstant, 0);
+
+ stream.BeginInlineInfoEntry(42, 2, kDirect, 1);
+ stream.AddDexRegisterEntry(Kind::kInStack, 12);
+ stream.EndInlineInfoEntry();
+ stream.BeginInlineInfoEntry(82, 3, kStatic, 3);
+ stream.AddDexRegisterEntry(Kind::kInStack, 80);
+ stream.AddDexRegisterEntry(Kind::kConstant, 10);
+ stream.AddDexRegisterEntry(Kind::kInRegister, 5);
+ stream.EndInlineInfoEntry();
+ stream.BeginInlineInfoEntry(52, 5, kVirtual, 0);
+ stream.EndInlineInfoEntry();
+
+ stream.EndStackMapEntry();
+
+ // Third stack map.
+ stream.BeginStackMapEntry(4, 56, 0x3, &sp_mask1, 2, 0);
+ stream.AddDexRegisterEntry(Kind::kNone, 0);
+ stream.AddDexRegisterEntry(Kind::kConstant, 4);
+ stream.EndStackMapEntry();
+
+ // Fourth stack map.
+ stream.BeginStackMapEntry(6, 78, 0x3, &sp_mask1, 2, 3);
+ stream.AddDexRegisterEntry(Kind::kInStack, 56);
+ stream.AddDexRegisterEntry(Kind::kConstant, 0);
+
+ stream.BeginInlineInfoEntry(42, 2, kVirtual, 0);
+ stream.EndInlineInfoEntry();
+ stream.BeginInlineInfoEntry(52, 5, kInterface, 1);
+ stream.AddDexRegisterEntry(Kind::kInRegister, 2);
+ stream.EndInlineInfoEntry();
+ stream.BeginInlineInfoEntry(52, 10, kStatic, 2);
+ stream.AddDexRegisterEntry(Kind::kNone, 0);
+ stream.AddDexRegisterEntry(Kind::kInRegister, 3);
+ stream.EndInlineInfoEntry();
+
+ stream.EndStackMapEntry();
+
+ size_t size = stream.PrepareForFillIn();
+ void* memory = arena.Alloc(size, kArenaAllocMisc);
+ MemoryRegion region(memory, size);
+ stream.FillIn(region);
+
+ CodeInfo ci(region);
+
+ {
+ // Verify first stack map.
+ StackMap sm0 = ci.GetStackMapAt(0);
+
+ DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm0, 2);
+ ASSERT_EQ(0, dex_registers0.GetStackOffsetInBytes(0, 2, ci));
+ ASSERT_EQ(4, dex_registers0.GetConstant(1, 2, ci));
+
+ InlineInfo if0 = ci.GetInlineInfoOf(sm0);
+ ASSERT_EQ(2u, if0.GetDepth());
+ ASSERT_EQ(2u, if0.GetDexPcAtDepth(0));
+ ASSERT_EQ(42u, if0.GetMethodIndexAtDepth(0));
+ ASSERT_EQ(kStatic, if0.GetInvokeTypeAtDepth(0));
+ ASSERT_EQ(3u, if0.GetDexPcAtDepth(1));
+ ASSERT_EQ(82u, if0.GetMethodIndexAtDepth(1));
+ ASSERT_EQ(kStatic, if0.GetInvokeTypeAtDepth(1));
+
+ DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(0, if0, 1);
+ ASSERT_EQ(8, dex_registers1.GetStackOffsetInBytes(0, 1, ci));
+
+ DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(1, if0, 3);
+ ASSERT_EQ(16, dex_registers2.GetStackOffsetInBytes(0, 3, ci));
+ ASSERT_EQ(20, dex_registers2.GetConstant(1, 3, ci));
+ ASSERT_EQ(15, dex_registers2.GetMachineRegister(2, 3, ci));
+ }
+
+ {
+ // Verify second stack map.
+ StackMap sm1 = ci.GetStackMapAt(1);
+
+ DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm1, 2);
+ ASSERT_EQ(56, dex_registers0.GetStackOffsetInBytes(0, 2, ci));
+ ASSERT_EQ(0, dex_registers0.GetConstant(1, 2, ci));
+
+ InlineInfo if1 = ci.GetInlineInfoOf(sm1);
+ ASSERT_EQ(3u, if1.GetDepth());
+ ASSERT_EQ(2u, if1.GetDexPcAtDepth(0));
+ ASSERT_EQ(42u, if1.GetMethodIndexAtDepth(0));
+ ASSERT_EQ(kDirect, if1.GetInvokeTypeAtDepth(0));
+ ASSERT_EQ(3u, if1.GetDexPcAtDepth(1));
+ ASSERT_EQ(82u, if1.GetMethodIndexAtDepth(1));
+ ASSERT_EQ(kStatic, if1.GetInvokeTypeAtDepth(1));
+ ASSERT_EQ(5u, if1.GetDexPcAtDepth(2));
+ ASSERT_EQ(52u, if1.GetMethodIndexAtDepth(2));
+ ASSERT_EQ(kVirtual, if1.GetInvokeTypeAtDepth(2));
+
+ DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(0, if1, 1);
+ ASSERT_EQ(12, dex_registers1.GetStackOffsetInBytes(0, 1, ci));
+
+ DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(1, if1, 3);
+ ASSERT_EQ(80, dex_registers2.GetStackOffsetInBytes(0, 3, ci));
+ ASSERT_EQ(10, dex_registers2.GetConstant(1, 3, ci));
+ ASSERT_EQ(5, dex_registers2.GetMachineRegister(2, 3, ci));
+
+ ASSERT_FALSE(if1.HasDexRegisterMapAtDepth(2));
+ }
+
+ {
+ // Verify third stack map.
+ StackMap sm2 = ci.GetStackMapAt(2);
+
+ DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm2, 2);
+ ASSERT_FALSE(dex_registers0.IsDexRegisterLive(0));
+ ASSERT_EQ(4, dex_registers0.GetConstant(1, 2, ci));
+ ASSERT_FALSE(sm2.HasInlineInfo(ci));
+ }
+
+ {
+ // Verify fourth stack map.
+ StackMap sm3 = ci.GetStackMapAt(3);
+
+ DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm3, 2);
+ ASSERT_EQ(56, dex_registers0.GetStackOffsetInBytes(0, 2, ci));
+ ASSERT_EQ(0, dex_registers0.GetConstant(1, 2, ci));
+
+ InlineInfo if2 = ci.GetInlineInfoOf(sm3);
+ ASSERT_EQ(3u, if2.GetDepth());
+ ASSERT_EQ(2u, if2.GetDexPcAtDepth(0));
+ ASSERT_EQ(42u, if2.GetMethodIndexAtDepth(0));
+ ASSERT_EQ(kVirtual, if2.GetInvokeTypeAtDepth(0));
+ ASSERT_EQ(5u, if2.GetDexPcAtDepth(1));
+ ASSERT_EQ(52u, if2.GetMethodIndexAtDepth(1));
+ ASSERT_EQ(kInterface, if2.GetInvokeTypeAtDepth(1));
+ ASSERT_EQ(10u, if2.GetDexPcAtDepth(2));
+ ASSERT_EQ(52u, if2.GetMethodIndexAtDepth(2));
+ ASSERT_EQ(kStatic, if2.GetInvokeTypeAtDepth(2));
+
+ ASSERT_FALSE(if2.HasDexRegisterMapAtDepth(0));
+
+ DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(1, if2, 1);
+ ASSERT_EQ(2, dex_registers1.GetMachineRegister(0, 1, ci));
+
+ DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(2, if2, 2);
+ ASSERT_FALSE(dex_registers2.IsDexRegisterLive(0));
+ ASSERT_EQ(3, dex_registers2.GetMachineRegister(1, 2, ci));
+ }
+}
+
} // namespace art
diff --git a/compiler/optimizing/suspend_check_test.cc b/compiler/optimizing/suspend_check_test.cc
index a5a0eb2..5ca66a1 100644
--- a/compiler/optimizing/suspend_check_test.cc
+++ b/compiler/optimizing/suspend_check_test.cc
@@ -30,7 +30,7 @@
static void TestCode(const uint16_t* data) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = new (&allocator) HGraph(&allocator);
+ HGraph* graph = CreateGraph(&allocator);
HGraphBuilder builder(graph);
const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
bool graph_built = builder.BuildGraph(*item);
diff --git a/compiler/output_stream_test.cc b/compiler/output_stream_test.cc
index bba9892..fbc9d0d 100644
--- a/compiler/output_stream_test.cc
+++ b/compiler/output_stream_test.cc
@@ -66,7 +66,7 @@
SetOutputStream(output_stream);
GenerateTestOutput();
std::unique_ptr<File> in(OS::OpenFileForReading(tmp.GetFilename().c_str()));
- EXPECT_TRUE(in.get() != NULL);
+ EXPECT_TRUE(in.get() != nullptr);
std::vector<uint8_t> actual(in->GetLength());
bool readSuccess = in->ReadFully(&actual[0], actual.size());
EXPECT_TRUE(readSuccess);
@@ -76,12 +76,12 @@
TEST_F(OutputStreamTest, Buffered) {
ScratchFile tmp;
std::unique_ptr<FileOutputStream> file_output_stream(new FileOutputStream(tmp.GetFile()));
- CHECK(file_output_stream.get() != NULL);
+ CHECK(file_output_stream.get() != nullptr);
BufferedOutputStream buffered_output_stream(file_output_stream.release());
SetOutputStream(buffered_output_stream);
GenerateTestOutput();
std::unique_ptr<File> in(OS::OpenFileForReading(tmp.GetFilename().c_str()));
- EXPECT_TRUE(in.get() != NULL);
+ EXPECT_TRUE(in.get() != nullptr);
std::vector<uint8_t> actual(in->GetLength());
bool readSuccess = in->ReadFully(&actual[0], actual.size());
EXPECT_TRUE(readSuccess);
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index c410660..eca6f5a 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -860,8 +860,6 @@
// Set up call to Thread::Current()->pDeliverException.
__ LoadFromOffset(kLoadWord, R12, TR, QUICK_ENTRYPOINT_OFFSET(4, pDeliverException).Int32Value());
__ blx(R12);
- // Call never returns.
- __ bkpt(0);
#undef __
}
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index dd0dba2..dee8287 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -398,6 +398,8 @@
Condition cond = AL) = 0;
virtual void mls(Register rd, Register rn, Register rm, Register ra,
Condition cond = AL) = 0;
+ virtual void smull(Register rd_lo, Register rd_hi, Register rn, Register rm,
+ Condition cond = AL) = 0;
virtual void umull(Register rd_lo, Register rd_hi, Register rn, Register rm,
Condition cond = AL) = 0;
@@ -739,17 +741,17 @@
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_reg to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
- // NULL.
- void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
- bool null_allowed) OVERRIDE;
+ // null.
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
+ ManagedRegister in_reg, bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_off to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister scratch,
- bool null_allowed) OVERRIDE;
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
+ ManagedRegister scratch, bool null_allowed) OVERRIDE;
// src holds a handle scope entry (Object**); load it into dst
void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
diff --git a/compiler/utils/arm/assembler_arm32.cc b/compiler/utils/arm/assembler_arm32.cc
index 9579691..6e165fc 100644
--- a/compiler/utils/arm/assembler_arm32.cc
+++ b/compiler/utils/arm/assembler_arm32.cc
@@ -200,6 +200,13 @@
}
+void Arm32Assembler::smull(Register rd_lo, Register rd_hi, Register rn,
+ Register rm, Condition cond) {
+ // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
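+ // B23 | B22 selects the signed variant of the long multiply encoding.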
+ EmitMulOp(cond, B23 | B22, rd_lo, rd_hi, rn, rm);
+}
+
+
void Arm32Assembler::umull(Register rd_lo, Register rd_hi, Register rn,
Register rm, Condition cond) {
// Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
diff --git a/compiler/utils/arm/assembler_arm32.h b/compiler/utils/arm/assembler_arm32.h
index b922d66..55ec7b4 100644
--- a/compiler/utils/arm/assembler_arm32.h
+++ b/compiler/utils/arm/assembler_arm32.h
@@ -90,6 +90,8 @@
Condition cond = AL) OVERRIDE;
void mls(Register rd, Register rn, Register rm, Register ra,
Condition cond = AL) OVERRIDE;
+ void smull(Register rd_lo, Register rd_hi, Register rn, Register rm,
+ Condition cond = AL) OVERRIDE;
void umull(Register rd_lo, Register rd_hi, Register rn, Register rm,
Condition cond = AL) OVERRIDE;
diff --git a/compiler/utils/arm/assembler_arm32_test.cc b/compiler/utils/arm/assembler_arm32_test.cc
index 4a0ae0b..efd517b 100644
--- a/compiler/utils/arm/assembler_arm32_test.cc
+++ b/compiler/utils/arm/assembler_arm32_test.cc
@@ -293,12 +293,29 @@
f();
}
+ // NOTE: Only supports simple filter expressions of the form "aaa=bbb".
+ bool EvalFilterString(std::string filter) {
+ if (filter.compare("") == 0) {
+ return false;
+ }
+
+ size_t equal_sign_index = filter.find('=');
+ if (equal_sign_index == std::string::npos) {
+ EXPECT_TRUE(false) << "Unsupported filter string.";
+ }
+
+ std::string lhs = filter.substr(0, equal_sign_index);
+ std::string rhs = filter.substr(equal_sign_index + 1, std::string::npos);
+ return lhs.compare(rhs) == 0;
+ }
+
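+ // The filter string undergoes the same token substitutions as fmt; when the
+ // expanded filter evaluates to true, that operand combination is skipped.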
void TemplateHelper(std::function<void(arm::Register)> f, int depth ATTRIBUTE_UNUSED,
- bool without_pc,
- std::string fmt, std::ostringstream& oss) {
+ bool without_pc, std::string fmt, std::string filter,
+ std::ostringstream& oss) {
std::vector<arm::Register*> registers = without_pc ? GetRegistersWithoutPC() : GetRegisters();
for (auto reg : registers) {
std::string after_reg = fmt;
+ std::string after_reg_filter = filter;
std::string reg_string = GetRegName<RegisterView::kUsePrimaryName>(*reg);
size_t reg_index;
@@ -308,14 +325,23 @@
after_reg.replace(reg_index, strlen(reg_token), reg_string);
}
+ while ((reg_index = after_reg_filter.find(reg_token)) != std::string::npos) {
+ after_reg_filter.replace(reg_index, strlen(reg_token), reg_string);
+ }
+ if (EvalFilterString(after_reg_filter)) {
+ continue;
+ }
+
ExecuteAndPrint([&] () { f(*reg); }, after_reg, oss);
}
}
void TemplateHelper(std::function<void(const arm::ShifterOperand&)> f, int depth ATTRIBUTE_UNUSED,
- bool without_pc ATTRIBUTE_UNUSED, std::string fmt, std::ostringstream& oss) {
+ bool without_pc ATTRIBUTE_UNUSED, std::string fmt, std::string filter,
+ std::ostringstream& oss) {
for (const arm::ShifterOperand& shift : GetShiftOperands()) {
std::string after_shift = fmt;
+ std::string after_shift_filter = filter;
std::string shift_string = GetShiftString(shift);
size_t shift_index;
@@ -323,30 +349,48 @@
after_shift.replace(shift_index, ConstexprStrLen(SHIFT_TOKEN), shift_string);
}
+ while ((shift_index = after_shift_filter.find(SHIFT_TOKEN)) != std::string::npos) {
+ after_shift_filter.replace(shift_index, ConstexprStrLen(SHIFT_TOKEN), shift_string);
+ }
+ if (EvalFilterString(after_shift_filter)) {
+ continue;
+ }
+
ExecuteAndPrint([&] () { f(shift); }, after_shift, oss);
}
}
void TemplateHelper(std::function<void(arm::Condition)> f, int depth ATTRIBUTE_UNUSED,
- bool without_pc ATTRIBUTE_UNUSED, std::string fmt, std::ostringstream& oss) {
+ bool without_pc ATTRIBUTE_UNUSED, std::string fmt, std::string filter,
+ std::ostringstream& oss) {
for (arm::Condition c : GetConditions()) {
std::string after_cond = fmt;
+ std::string after_cond_filter = filter;
size_t cond_index = after_cond.find(COND_TOKEN);
if (cond_index != std::string::npos) {
after_cond.replace(cond_index, ConstexprStrLen(IMM1_TOKEN), GetConditionString(c));
}
+ cond_index = after_cond_filter.find(COND_TOKEN);
+ if (cond_index != std::string::npos) {
+ after_cond_filter.replace(cond_index, ConstexprStrLen(IMM1_TOKEN), GetConditionString(c));
+ }
+ if (EvalFilterString(after_cond_filter)) {
+ continue;
+ }
+
ExecuteAndPrint([&] () { f(c); }, after_cond, oss);
}
}
template <typename... Args>
void TemplateHelper(std::function<void(arm::Register, Args...)> f, int depth, bool without_pc,
- std::string fmt, std::ostringstream& oss) {
+ std::string fmt, std::string filter, std::ostringstream& oss) {
std::vector<arm::Register*> registers = without_pc ? GetRegistersWithoutPC() : GetRegisters();
for (auto reg : registers) {
std::string after_reg = fmt;
+ std::string after_reg_filter = filter;
std::string reg_string = GetRegName<RegisterView::kUsePrimaryName>(*reg);
size_t reg_index;
@@ -356,17 +400,26 @@
after_reg.replace(reg_index, strlen(reg_token), reg_string);
}
+ while ((reg_index = after_reg_filter.find(reg_token)) != std::string::npos) {
+ after_reg_filter.replace(reg_index, strlen(reg_token), reg_string);
+ }
+ if (EvalFilterString(after_reg_filter)) {
+ continue;
+ }
+
auto lambda = [&] (Args... args) { f(*reg, args...); }; // NOLINT [readability/braces] [4]
TemplateHelper(std::function<void(Args...)>(lambda), depth + 1, without_pc,
- after_reg, oss);
+ after_reg, after_reg_filter, oss);
}
}
template <typename... Args>
void TemplateHelper(std::function<void(const arm::ShifterOperand&, Args...)> f, int depth,
- bool without_pc, std::string fmt, std::ostringstream& oss) {
+ bool without_pc, std::string fmt, std::string filter,
+ std::ostringstream& oss) {
for (const arm::ShifterOperand& shift : GetShiftOperands()) {
std::string after_shift = fmt;
+ std::string after_shift_filter = filter;
std::string shift_string = GetShiftString(shift);
size_t shift_index;
@@ -374,26 +427,42 @@
after_shift.replace(shift_index, ConstexprStrLen(SHIFT_TOKEN), shift_string);
}
+ while ((shift_index = after_shift_filter.find(SHIFT_TOKEN)) != std::string::npos) {
+ after_shift_filter.replace(shift_index, ConstexprStrLen(SHIFT_TOKEN), shift_string);
+ }
+ if (EvalFilterString(after_shift_filter)) {
+ continue;
+ }
+
auto lambda = [&] (Args... args) { f(shift, args...); }; // NOLINT [readability/braces] [4]
TemplateHelper(std::function<void(Args...)>(lambda), depth, without_pc,
- after_shift, oss);
+ after_shift, after_shift_filter, oss);
}
}
template <typename... Args>
void TemplateHelper(std::function<void(arm::Condition, Args...)> f, int depth, bool without_pc,
- std::string fmt, std::ostringstream& oss) {
+ std::string fmt, std::string filter, std::ostringstream& oss) {
for (arm::Condition c : GetConditions()) {
std::string after_cond = fmt;
+ std::string after_cond_filter = filter;
size_t cond_index = after_cond.find(COND_TOKEN);
if (cond_index != std::string::npos) {
after_cond.replace(cond_index, ConstexprStrLen(IMM1_TOKEN), GetConditionString(c));
}
+ cond_index = after_cond_filter.find(COND_TOKEN);
+ if (cond_index != std::string::npos) {
+ after_cond_filter.replace(cond_index, ConstexprStrLen(IMM1_TOKEN), GetConditionString(c));
+ }
+ if (EvalFilterString(after_cond_filter)) {
+ continue;
+ }
+
auto lambda = [&] (Args... args) { f(c, args...); }; // NOLINT [readability/braces] [4]
TemplateHelper(std::function<void(Args...)>(lambda), depth, without_pc,
- after_cond, oss);
+ after_cond, after_cond_filter, oss);
}
}
@@ -421,13 +490,13 @@
template <typename... Args>
void GenericTemplateHelper(std::function<void(Args...)> f, bool without_pc,
- std::string fmt, std::string test_name) {
+ std::string fmt, std::string test_name, std::string filter) {
first_ = false;
WarnOnCombinations(CountHelper<Args...>(without_pc));
std::ostringstream oss;
- TemplateHelper(f, 0, without_pc, fmt, oss);
+ TemplateHelper(f, 0, without_pc, fmt, filter, oss);
oss << "\n"; // Trailing newline.
@@ -436,26 +505,26 @@
template <typename... Args>
void T2Helper(void (arm::Arm32Assembler::*f)(Args...), bool without_pc, std::string fmt,
- std::string test_name) {
- GenericTemplateHelper(GetBoundFunction2(f), without_pc, fmt, test_name);
+ std::string test_name, std::string filter = "") {
+ GenericTemplateHelper(GetBoundFunction2(f), without_pc, fmt, test_name, filter);
}
template <typename... Args>
void T3Helper(void (arm::Arm32Assembler::*f)(Args...), bool without_pc, std::string fmt,
- std::string test_name) {
- GenericTemplateHelper(GetBoundFunction3(f), without_pc, fmt, test_name);
+ std::string test_name, std::string filter = "") {
+ GenericTemplateHelper(GetBoundFunction3(f), without_pc, fmt, test_name, filter);
}
template <typename... Args>
void T4Helper(void (arm::Arm32Assembler::*f)(Args...), bool without_pc, std::string fmt,
- std::string test_name) {
- GenericTemplateHelper(GetBoundFunction4(f), without_pc, fmt, test_name);
+ std::string test_name, std::string filter = "") {
+ GenericTemplateHelper(GetBoundFunction4(f), without_pc, fmt, test_name, filter);
}
template <typename... Args>
void T5Helper(void (arm::Arm32Assembler::*f)(Args...), bool without_pc, std::string fmt,
- std::string test_name) {
- GenericTemplateHelper(GetBoundFunction5(f), without_pc, fmt, test_name);
+ std::string test_name, std::string filter = "") {
+ GenericTemplateHelper(GetBoundFunction5(f), without_pc, fmt, test_name, filter);
}
private:
@@ -565,15 +634,18 @@
}
TEST_F(AssemblerArm32Test, Mla) {
- T5Helper(&arm::Arm32Assembler::mla, true, "mla{cond} {reg1}, {reg2}, {reg3}, {reg4}", "mul");
+ T5Helper(&arm::Arm32Assembler::mla, true, "mla{cond} {reg1}, {reg2}, {reg3}, {reg4}", "mla");
}
-/* TODO: Needs support to filter out register combinations, as rdhi must not be equal to rdlo.
TEST_F(AssemblerArm32Test, Umull) {
T5Helper(&arm::Arm32Assembler::umull, true, "umull{cond} {reg1}, {reg2}, {reg3}, {reg4}",
- "umull");
+ "umull", "{reg1}={reg2}"); // Skip the cases where reg1 == reg2.
}
-*/
+
+TEST_F(AssemblerArm32Test, Smull) {
+ T5Helper(&arm::Arm32Assembler::smull, true, "smull{cond} {reg1}, {reg2}, {reg3}, {reg4}",
+ "smull", "{reg1}={reg2}"); // Skip the cases where reg1 == reg2.
+}
TEST_F(AssemblerArm32Test, Sdiv) {
T4Helper(&arm::Arm32Assembler::sdiv, true, "sdiv{cond} {reg1}, {reg2}, {reg3}", "sdiv");
@@ -655,9 +727,10 @@
T4Helper(&arm::Arm32Assembler::rsc, true, "rsc{cond} {reg1}, {reg2}, {shift}", "rsc");
}
-/* TODO: Needs support to filter out register combinations, as reg1 must not be equal to reg3.
+/* TODO: Need better filter support.
TEST_F(AssemblerArm32Test, Strex) {
- RRRCWithoutPCHelper(&arm::Arm32Assembler::strex, "strex{cond} {reg1}, {reg2}, [{reg3}]", "strex");
+ T4Helper(&arm::Arm32Assembler::strex, "strex{cond} {reg1}, {reg2}, [{reg3}]", "strex",
+ "{reg1}={reg2}||{reg1}={reg3}"); // Skip the cases where reg1 == reg2 || reg1 == reg3.
}
*/
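To make the filter mini-language concrete (this walkthrough is mine, not part of the patch): the template helpers substitute register names into the filter string exactly as they do into the format string, so once every token is replaced the filter is a plain lhs=rhs comparison, and a true result skips that combination. That is how the UNPREDICTABLE rdhi == rdlo cases of umull/smull are avoided:

#include <string>

// Minimal restatement of the check EvalFilterString performs after all
// token substitution (sketch; the fixture's version also rejects
// malformed filters via EXPECT_TRUE(false)).
bool SkipCombination(const std::string& filter) {
  size_t eq = filter.find('=');
  return eq != std::string::npos &&
         filter.substr(0, eq) == filter.substr(eq + 1);
}

// "{reg1}={reg2}" with r0 substituted for both tokens becomes "r0=r0",
// which is skipped; "r0=r1" evaluates false, so that case is assembled
// and compared as usual.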
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 3b42f63..75f2b77 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -238,6 +238,24 @@
}
+void Thumb2Assembler::smull(Register rd_lo, Register rd_hi, Register rn,
+ Register rm, Condition cond) {
+ CheckCondition(cond);
+
+ uint32_t op1 = 0U /* 0b000 */;
+ uint32_t op2 = 0U /* 0b0000 */;
+ int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 | B23 |
+ op1 << 20 |
+ op2 << 4 |
+ static_cast<uint32_t>(rd_lo) << 12 |
+ static_cast<uint32_t>(rd_hi) << 8 |
+ static_cast<uint32_t>(rn) << 16 |
+ static_cast<uint32_t>(rm);
+
+ Emit32(encoding);
+}
+
+
void Thumb2Assembler::umull(Register rd_lo, Register rd_hi, Register rn,
Register rm, Condition cond) {
CheckCondition(cond);
@@ -740,13 +758,6 @@
return true;
}
- // Check for MOV with an ROR.
- if (opcode == MOV && so.IsRegister() && so.IsShift() && so.GetShift() == ROR) {
- if (so.GetImmediate() != 0) {
- return true;
- }
- }
-
bool rn_is_valid = true;
// Check for single operand instructions and ADD/SUB.
@@ -792,6 +803,19 @@
}
}
+ // Check for register shift operand.
+ if (so.IsRegister() && so.IsShift()) {
+ if (opcode != MOV) {
+ return true;
+ }
+ // Check for MOV with an ROR.
+ if (so.GetShift() == ROR) {
+ if (so.GetImmediate() != 0) {
+ return true;
+ }
+ }
+ }
+
// The instruction can be encoded in 16 bits.
return false;
}
@@ -1614,7 +1638,6 @@
// branch the size may change if it so happens that other branches change size that change
// the distance to the target and that distance puts this branch over the limit for 16 bits.
if (size == Branch::k16Bit) {
- DCHECK(!force_32bit_branches_);
Emit16(0); // Space for a 16 bit branch.
} else {
Emit32(0); // Space for a 32 bit branch.
@@ -1622,7 +1645,7 @@
} else {
// Branch is to an unbound label. Emit space for it.
uint16_t branch_id = AddBranch(branch_type, pc, cond); // Unresolved branch.
- if (force_32bit_branches_ || force_32bit_) {
+ if (!CanRelocateBranches() || force_32bit_) {
Emit16(static_cast<uint16_t>(label->position_)); // Emit current label link.
Emit16(0); // another 16 bits.
} else {
@@ -2258,7 +2281,7 @@
uint32_t branch_location = branch->GetLocation();
uint16_t next = buffer_.Load<uint16_t>(branch_location); // Get next in chain.
if (changed) {
- DCHECK(!force_32bit_branches_);
+ DCHECK(CanRelocateBranches());
MakeHoleForBranch(branch->GetLocation(), 2);
if (branch->IsCompareAndBranch()) {
// A cbz/cbnz instruction has changed size. There is no valid encoding for
@@ -2718,21 +2741,21 @@
void Thumb2Assembler::CompareAndBranchIfZero(Register r, Label* label) {
- if (force_32bit_branches_) {
+ if (CanRelocateBranches()) {
+ cbz(r, label);
+ } else {
cmp(r, ShifterOperand(0));
b(label, EQ);
- } else {
- cbz(r, label);
}
}
void Thumb2Assembler::CompareAndBranchIfNonZero(Register r, Label* label) {
- if (force_32bit_branches_) {
+ if (CanRelocateBranches()) {
+ cbnz(r, label);
+ } else {
cmp(r, ShifterOperand(0));
b(label, NE);
- } else {
- cbnz(r, label);
}
}
} // namespace arm
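The bit soup in Thumb2Assembler::smull above is the T1 encoding 11111011 1000 Rn | RdLo RdHi 0000 Rm: the B31..B23 disjunction is just 0xFB800000, and op1 and op2 are both zero. A standalone check (my sketch, not ART code):

#include <cstdint>

// T32 SMULL (encoding T1), as assembled by Thumb2Assembler::smull.
constexpr uint32_t EncodeSmullT1(uint32_t rd_lo, uint32_t rd_hi,
                                 uint32_t rn, uint32_t rm) {
  return 0xFB800000u | (rn << 16) | (rd_lo << 12) | (rd_hi << 8) | rm;
}

// smull r0, r1, r2, r3 -> halfwords 0xFB82 0x0103.
static_assert(EncodeSmullT1(0, 1, 2, 3) == 0xFB820103u, "encoding check");

The CompareAndBranchIfZero/IfNonZero rewrite above makes the intent of the renamed flag visible: cbz/cbnz are compact but can only branch forward over a small range and cannot be widened in place, so they are only chosen when the assembler is still allowed to relocate branches; otherwise the cmp-plus-conditional-branch sequence is emitted up front.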
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index e33c240..90d489f 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -31,8 +31,8 @@
class Thumb2Assembler FINAL : public ArmAssembler {
public:
- explicit Thumb2Assembler(bool force_32bit_branches = false)
- : force_32bit_branches_(force_32bit_branches),
+ explicit Thumb2Assembler(bool can_relocate_branches = true)
+ : can_relocate_branches_(can_relocate_branches),
force_32bit_(false),
it_cond_index_(kNoItCondition),
next_condition_(AL) {
@@ -52,8 +52,8 @@
return force_32bit_;
}
- bool IsForced32BitBranches() const {
- return force_32bit_branches_;
+ bool CanRelocateBranches() const {
+ return can_relocate_branches_;
}
void FinalizeInstructions(const MemoryRegion& region) OVERRIDE {
@@ -112,6 +112,8 @@
Condition cond = AL) OVERRIDE;
void mls(Register rd, Register rn, Register rm, Register ra,
Condition cond = AL) OVERRIDE;
+ void smull(Register rd_lo, Register rd_hi, Register rn, Register rm,
+ Condition cond = AL) OVERRIDE;
void umull(Register rd_lo, Register rd_hi, Register rn, Register rm,
Condition cond = AL) OVERRIDE;
@@ -437,8 +439,12 @@
void EmitShift(Register rd, Register rm, Shift shift, uint8_t amount, bool setcc = false);
void EmitShift(Register rd, Register rn, Shift shift, Register rm, bool setcc = false);
- bool force_32bit_branches_; // Force the assembler to use 32 bit branch instructions.
- bool force_32bit_; // Force the assembler to use 32 bit thumb2 instructions.
+ // Whether the assembler can relocate branches. If false, unresolved branches will be
+ // emitted as 32-bit instructions.
+ bool can_relocate_branches_;
+
+ // Force the assembler to use 32 bit thumb2 instructions.
+ bool force_32bit_;
// IfThen conditions. Used to check that conditional instructions match the preceding IT.
Condition it_conditions_[4];
@@ -554,12 +560,21 @@
// size of the branch to change return true. Otherwise return false.
bool Resolve(uint32_t target) {
target_ = target;
- Size newsize = CalculateSize();
- if (size_ != newsize) {
- size_ = newsize;
- return true;
+ if (assembler_->CanRelocateBranches()) {
+ Size new_size = CalculateSize();
+ if (size_ != new_size) {
+ size_ = new_size;
+ return true;
+ }
+ return false;
+ } else {
+ if (kIsDebugBuild) {
+ Size new_size = CalculateSize();
+ // Check that the size has not increased.
+ DCHECK(!(new_size == k32Bit && size_ == k16Bit));
+ }
+ return false;
}
- return false;
}
// Move a cbz/cbnz branch. This is always forward.
@@ -575,6 +590,7 @@
// size of the branch instruction. It returns true if the branch
// has changed size.
bool Relocate(uint32_t oldlocation, int32_t delta) {
+ DCHECK(assembler_->CanRelocateBranches());
if (location_ > oldlocation) {
location_ += delta;
}
@@ -587,9 +603,9 @@
}
// Calculate the new size.
- Size newsize = CalculateSize();
- if (size_ != newsize) {
- size_ = newsize;
+ Size new_size = CalculateSize();
+ if (size_ != new_size) {
+ size_ = new_size;
return true;
}
return false;
@@ -631,15 +647,13 @@
private:
// Calculate the size of the branch instruction based on its type and offset.
Size CalculateSize() const {
- if (assembler_->IsForced32BitBranches()) {
- return k32Bit;
- }
if (target_ == kUnresolved) {
if (assembler_->IsForced32Bit() && (type_ == kUnconditional || type_ == kConditional)) {
return k32Bit;
}
- return k16Bit;
+ return assembler_->CanRelocateBranches() ? k16Bit : k32Bit;
}
+ // When the target is resolved, we know the best encoding for it.
int32_t delta = target_ - location_ - 4;
if (delta < 0) {
delta = -delta;
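The Resolve() split above encodes an invariant that is easy to miss: when relocation is off, CalculateSize() starts every unresolved branch at 32 bits, so a later Resolve() may find that a shorter encoding would have sufficed, but it can never need to grow the branch; growing would mean moving already-emitted code, which is exactly what the assembler was told it may not do. A sketch of that invariant (names are mine, not ART's):

// Why Resolve() can unconditionally return false when relocation is off.
enum Size { k16Bit, k32Bit };

Size InitialUnresolvedSize(bool can_relocate) {
  // Reserve the worst case up front if code may not be moved later.
  return can_relocate ? k16Bit : k32Bit;
}

// With can_relocate == false the reserved size is already k32Bit, so
// "new_size == k32Bit && size_ == k16Bit" is impossible; the DCHECK in
// the debug branch of Resolve() asserts precisely that.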
diff --git a/compiler/utils/arm/assembler_thumb2_test.cc b/compiler/utils/arm/assembler_thumb2_test.cc
index 5f5561a..733441b 100644
--- a/compiler/utils/arm/assembler_thumb2_test.cc
+++ b/compiler/utils/arm/assembler_thumb2_test.cc
@@ -89,23 +89,24 @@
EXPECT_TRUE(CheckTools());
}
+#define __ GetAssembler()->
TEST_F(AssemblerThumb2Test, Sbfx) {
- GetAssembler()->sbfx(arm::R0, arm::R1, 0, 1);
- GetAssembler()->sbfx(arm::R0, arm::R1, 0, 8);
- GetAssembler()->sbfx(arm::R0, arm::R1, 0, 16);
- GetAssembler()->sbfx(arm::R0, arm::R1, 0, 32);
+ __ sbfx(arm::R0, arm::R1, 0, 1);
+ __ sbfx(arm::R0, arm::R1, 0, 8);
+ __ sbfx(arm::R0, arm::R1, 0, 16);
+ __ sbfx(arm::R0, arm::R1, 0, 32);
- GetAssembler()->sbfx(arm::R0, arm::R1, 8, 1);
- GetAssembler()->sbfx(arm::R0, arm::R1, 8, 8);
- GetAssembler()->sbfx(arm::R0, arm::R1, 8, 16);
- GetAssembler()->sbfx(arm::R0, arm::R1, 8, 24);
+ __ sbfx(arm::R0, arm::R1, 8, 1);
+ __ sbfx(arm::R0, arm::R1, 8, 8);
+ __ sbfx(arm::R0, arm::R1, 8, 16);
+ __ sbfx(arm::R0, arm::R1, 8, 24);
- GetAssembler()->sbfx(arm::R0, arm::R1, 16, 1);
- GetAssembler()->sbfx(arm::R0, arm::R1, 16, 8);
- GetAssembler()->sbfx(arm::R0, arm::R1, 16, 16);
+ __ sbfx(arm::R0, arm::R1, 16, 1);
+ __ sbfx(arm::R0, arm::R1, 16, 8);
+ __ sbfx(arm::R0, arm::R1, 16, 16);
- GetAssembler()->sbfx(arm::R0, arm::R1, 31, 1);
+ __ sbfx(arm::R0, arm::R1, 31, 1);
const char* expected =
"sbfx r0, r1, #0, #1\n"
@@ -127,21 +128,21 @@
}
TEST_F(AssemblerThumb2Test, Ubfx) {
- GetAssembler()->ubfx(arm::R0, arm::R1, 0, 1);
- GetAssembler()->ubfx(arm::R0, arm::R1, 0, 8);
- GetAssembler()->ubfx(arm::R0, arm::R1, 0, 16);
- GetAssembler()->ubfx(arm::R0, arm::R1, 0, 32);
+ __ ubfx(arm::R0, arm::R1, 0, 1);
+ __ ubfx(arm::R0, arm::R1, 0, 8);
+ __ ubfx(arm::R0, arm::R1, 0, 16);
+ __ ubfx(arm::R0, arm::R1, 0, 32);
- GetAssembler()->ubfx(arm::R0, arm::R1, 8, 1);
- GetAssembler()->ubfx(arm::R0, arm::R1, 8, 8);
- GetAssembler()->ubfx(arm::R0, arm::R1, 8, 16);
- GetAssembler()->ubfx(arm::R0, arm::R1, 8, 24);
+ __ ubfx(arm::R0, arm::R1, 8, 1);
+ __ ubfx(arm::R0, arm::R1, 8, 8);
+ __ ubfx(arm::R0, arm::R1, 8, 16);
+ __ ubfx(arm::R0, arm::R1, 8, 24);
- GetAssembler()->ubfx(arm::R0, arm::R1, 16, 1);
- GetAssembler()->ubfx(arm::R0, arm::R1, 16, 8);
- GetAssembler()->ubfx(arm::R0, arm::R1, 16, 16);
+ __ ubfx(arm::R0, arm::R1, 16, 1);
+ __ ubfx(arm::R0, arm::R1, 16, 8);
+ __ ubfx(arm::R0, arm::R1, 16, 16);
- GetAssembler()->ubfx(arm::R0, arm::R1, 31, 1);
+ __ ubfx(arm::R0, arm::R1, 31, 1);
const char* expected =
"ubfx r0, r1, #0, #1\n"
@@ -163,7 +164,7 @@
}
TEST_F(AssemblerThumb2Test, Vmstat) {
- GetAssembler()->vmstat();
+ __ vmstat();
const char* expected = "vmrs APSR_nzcv, FPSCR\n";
@@ -171,10 +172,10 @@
}
TEST_F(AssemblerThumb2Test, ldrexd) {
- GetAssembler()->ldrexd(arm::R0, arm::R1, arm::R0);
- GetAssembler()->ldrexd(arm::R0, arm::R1, arm::R1);
- GetAssembler()->ldrexd(arm::R0, arm::R1, arm::R2);
- GetAssembler()->ldrexd(arm::R5, arm::R3, arm::R7);
+ __ ldrexd(arm::R0, arm::R1, arm::R0);
+ __ ldrexd(arm::R0, arm::R1, arm::R1);
+ __ ldrexd(arm::R0, arm::R1, arm::R2);
+ __ ldrexd(arm::R5, arm::R3, arm::R7);
const char* expected =
"ldrexd r0, r1, [r0]\n"
@@ -185,10 +186,10 @@
}
TEST_F(AssemblerThumb2Test, strexd) {
- GetAssembler()->strexd(arm::R9, arm::R0, arm::R1, arm::R0);
- GetAssembler()->strexd(arm::R9, arm::R0, arm::R1, arm::R1);
- GetAssembler()->strexd(arm::R9, arm::R0, arm::R1, arm::R2);
- GetAssembler()->strexd(arm::R9, arm::R5, arm::R3, arm::R7);
+ __ strexd(arm::R9, arm::R0, arm::R1, arm::R0);
+ __ strexd(arm::R9, arm::R0, arm::R1, arm::R1);
+ __ strexd(arm::R9, arm::R0, arm::R1, arm::R2);
+ __ strexd(arm::R9, arm::R5, arm::R3, arm::R7);
const char* expected =
"strexd r9, r0, r1, [r0]\n"
@@ -199,9 +200,9 @@
}
TEST_F(AssemblerThumb2Test, LdrdStrd) {
- GetAssembler()->ldrd(arm::R0, arm::Address(arm::R2, 8));
- GetAssembler()->ldrd(arm::R0, arm::Address(arm::R12));
- GetAssembler()->strd(arm::R0, arm::Address(arm::R2, 8));
+ __ ldrd(arm::R0, arm::Address(arm::R2, 8));
+ __ ldrd(arm::R0, arm::Address(arm::R12));
+ __ strd(arm::R0, arm::Address(arm::R2, 8));
const char* expected =
"ldrd r0, r1, [r2, #8]\n"
@@ -211,7 +212,6 @@
}
TEST_F(AssemblerThumb2Test, eor) {
-#define __ GetAssembler()->
__ eor(arm::R1, arm::R1, arm::ShifterOperand(arm::R0));
__ eor(arm::R1, arm::R0, arm::ShifterOperand(arm::R1));
__ eor(arm::R1, arm::R8, arm::ShifterOperand(arm::R0));
@@ -230,23 +230,47 @@
TEST_F(AssemblerThumb2Test, sub) {
__ subs(arm::R1, arm::R0, arm::ShifterOperand(42));
__ sub(arm::R1, arm::R0, arm::ShifterOperand(42));
+ __ subs(arm::R1, arm::R0, arm::ShifterOperand(arm::R2, arm::ASR, 31));
+ __ sub(arm::R1, arm::R0, arm::ShifterOperand(arm::R2, arm::ASR, 31));
const char* expected =
"subs r1, r0, #42\n"
- "subw r1, r0, #42\n";
+ "subw r1, r0, #42\n"
+ "subs r1, r0, r2, asr #31\n"
+ "sub r1, r0, r2, asr #31\n";
DriverStr(expected, "sub");
}
TEST_F(AssemblerThumb2Test, add) {
__ adds(arm::R1, arm::R0, arm::ShifterOperand(42));
__ add(arm::R1, arm::R0, arm::ShifterOperand(42));
+ __ adds(arm::R1, arm::R0, arm::ShifterOperand(arm::R2, arm::ASR, 31));
+ __ add(arm::R1, arm::R0, arm::ShifterOperand(arm::R2, arm::ASR, 31));
const char* expected =
"adds r1, r0, #42\n"
- "addw r1, r0, #42\n";
+ "addw r1, r0, #42\n"
+ "adds r1, r0, r2, asr #31\n"
+ "add r1, r0, r2, asr #31\n";
DriverStr(expected, "add");
}
+TEST_F(AssemblerThumb2Test, umull) {
+ __ umull(arm::R0, arm::R1, arm::R2, arm::R3);
+
+ const char* expected =
+ "umull r0, r1, r2, r3\n";
+ DriverStr(expected, "umull");
+}
+
+TEST_F(AssemblerThumb2Test, smull) {
+ __ smull(arm::R0, arm::R1, arm::R2, arm::R3);
+
+ const char* expected =
+ "smull r0, r1, r2, r3\n";
+ DriverStr(expected, "smull");
+}
+
TEST_F(AssemblerThumb2Test, StoreWordToThumbOffset) {
arm::StoreOperandType type = arm::kStoreWord;
int32_t offset = 4092;
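The new asr #31 cases in the sub/add tests pin down the encoding rule introduced in assembler_thumb2.cc: a register-shifted operand forces the 32-bit encoding for everything except the MOV special cases. Restated as a sketch (my paraphrase; in the real code this is one clause of a much larger 16-bit-eligibility check):

// Paraphrase of the new shifted-operand clause (sketch only).
bool ShiftedOperandNeedsWideEncoding(Opcode opcode, const ShifterOperand& so) {
  if (so.IsRegister() && so.IsShift()) {
    if (opcode != MOV) {
      return true;  // e.g. sub r1, r0, r2, asr #31 has no 16-bit form
    }
    if (so.GetShift() == ROR && so.GetImmediate() != 0) {
      return true;  // mov with a nonzero ROR is also 32-bit only
    }
  }
  return false;
}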
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index b7715af..e47b531 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -149,14 +149,14 @@
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_reg to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
- // NULL.
+ // null.
void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
ManagedRegister in_reg, bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_off to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed.
void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
ManagedRegister scratch, bool null_allowed) OVERRIDE;
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
index 36342c6..b016e74 100644
--- a/compiler/utils/assembler.cc
+++ b/compiler/utils/assembler.cc
@@ -41,8 +41,8 @@
contents_ = NewContents(kInitialBufferCapacity);
cursor_ = contents_;
limit_ = ComputeLimit(contents_, kInitialBufferCapacity);
- fixup_ = NULL;
- slow_path_ = NULL;
+ fixup_ = nullptr;
+ slow_path_ = nullptr;
#ifndef NDEBUG
has_ensured_capacity_ = false;
fixups_processed_ = false;
@@ -61,7 +61,7 @@
void AssemblerBuffer::ProcessFixups(const MemoryRegion& region) {
AssemblerFixup* fixup = fixup_;
- while (fixup != NULL) {
+ while (fixup != nullptr) {
fixup->Process(region, fixup->position());
fixup = fixup->previous();
}
@@ -127,7 +127,7 @@
return new x86_64::X86_64Assembler();
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
- return NULL;
+ return nullptr;
}
}
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index ebafd3d..2e3a47b 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -156,7 +156,7 @@
// Parent of all queued slow paths, emitted during finalization
class SlowPath {
public:
- SlowPath() : next_(NULL) {}
+ SlowPath() : next_(nullptr) {}
virtual ~SlowPath() {}
Label* Continuation() { return &continuation_; }
@@ -216,20 +216,20 @@
}
void EnqueueSlowPath(SlowPath* slowpath) {
- if (slow_path_ == NULL) {
+ if (slow_path_ == nullptr) {
slow_path_ = slowpath;
} else {
SlowPath* cur = slow_path_;
- for ( ; cur->next_ != NULL ; cur = cur->next_) {}
+ for ( ; cur->next_ != nullptr ; cur = cur->next_) {}
cur->next_ = slowpath;
}
}
void EmitSlowPaths(Assembler* sp_asm) {
SlowPath* cur = slow_path_;
- SlowPath* next = NULL;
- slow_path_ = NULL;
- for ( ; cur != NULL ; cur = next) {
+ SlowPath* next = nullptr;
+ slow_path_ = nullptr;
+ for ( ; cur != nullptr ; cur = next) {
cur->Emit(sp_asm);
next = cur->next_;
delete cur;
@@ -489,14 +489,14 @@
virtual void GetCurrentThread(FrameOffset dest_offset,
ManagedRegister scratch) = 0;
- // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_reg to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
- // NULL.
+ // null.
virtual void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
ManagedRegister in_reg, bool null_allowed) = 0;
- // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_off to hold a Object** into the handle scope, or to be null if the
// value is null and null_allowed.
virtual void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
ManagedRegister scratch, bool null_allowed) = 0;
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index 3fe1a31..a339633 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -19,6 +19,7 @@
#include "assembler.h"
+#include "assembler_test_base.h"
#include "common_runtime_test.h" // For ScratchFile
#include <cstdio>
@@ -29,19 +30,11 @@
namespace art {
-// If you want to take a look at the differences between the ART assembler and GCC, set this flag
-// to true. The disassembled files will then remain in the tmp directory.
-static constexpr bool kKeepDisassembledFiles = false;
-
// Helper for a constexpr string length.
constexpr size_t ConstexprStrLen(char const* str, size_t count = 0) {
return ('\0' == str[0]) ? count : ConstexprStrLen(str+1, count+1);
}
-// Use a glocal static variable to keep the same name for all test data. Else we'll just spam the
-// temp directory.
-static std::string tmpnam_;
-
enum class RegisterView { // private
kUsePrimaryName,
kUseSecondaryName,
@@ -59,12 +52,12 @@
typedef std::string (*TestFn)(AssemblerTest* assembler_test, Ass* assembler);
void DriverFn(TestFn f, std::string test_name) {
- Driver(f(this, assembler_.get()), test_name);
+ DriverWrapper(f(this, assembler_.get()), test_name);
}
// This driver assumes the assembler has already been called.
void DriverStr(std::string assembly_string, std::string test_name) {
- Driver(assembly_string, test_name);
+ DriverWrapper(assembly_string, test_name);
}
std::string RepeatR(void (Ass::*f)(Reg), std::string fmt) {
@@ -212,28 +205,7 @@
// This is intended to be run as a test.
bool CheckTools() {
- if (!FileExists(FindTool(GetAssemblerCmdName()))) {
- return false;
- }
- LOG(INFO) << "Chosen assembler command: " << GetAssemblerCommand();
-
- if (!FileExists(FindTool(GetObjdumpCmdName()))) {
- return false;
- }
- LOG(INFO) << "Chosen objdump command: " << GetObjdumpCommand();
-
- // Disassembly is optional.
- std::string disassembler = GetDisassembleCommand();
- if (disassembler.length() != 0) {
- if (!FileExists(FindTool(GetDisassembleCmdName()))) {
- return false;
- }
- LOG(INFO) << "Chosen disassemble command: " << GetDisassembleCommand();
- } else {
- LOG(INFO) << "No disassembler given.";
- }
-
- return true;
+ return test_helper_->CheckTools();
}
// The following functions are public so that TestFn can use them...
@@ -272,17 +244,21 @@
void SetUp() OVERRIDE {
assembler_.reset(new Ass());
-
- // Fake a runtime test for ScratchFile
- CommonRuntimeTest::SetUpAndroidData(android_data_);
+ test_helper_.reset(
+ new AssemblerTestInfrastructure(GetArchitectureString(),
+ GetAssemblerCmdName(),
+ GetAssemblerParameters(),
+ GetObjdumpCmdName(),
+ GetObjdumpParameters(),
+ GetDisassembleCmdName(),
+ GetDisassembleParameters(),
+ GetAssemblyHeader()));
SetUpHelpers();
}
void TearDown() OVERRIDE {
- // We leave temporaries in case this failed so we can debug issues.
- CommonRuntimeTest::TearDownAndroidData(android_data_, false);
- tmpnam_ = "";
+ test_helper_.reset(); // Clean up the helper.
}
// Override this to set up any architecture-specific things, e.g., register vectors.
@@ -301,23 +277,6 @@
return "";
}
- // Return the host assembler command for this test.
- virtual std::string GetAssemblerCommand() {
- // Already resolved it once?
- if (resolved_assembler_cmd_.length() != 0) {
- return resolved_assembler_cmd_;
- }
-
- std::string line = FindTool(GetAssemblerCmdName());
- if (line.length() == 0) {
- return line;
- }
-
- resolved_assembler_cmd_ = line + GetAssemblerParameters();
-
- return resolved_assembler_cmd_;
- }
-
// Get the name of the objdump, e.g., "objdump" by default.
virtual std::string GetObjdumpCmdName() {
return "objdump";
@@ -328,23 +287,6 @@
return " -h";
}
- // Return the host objdump command for this test.
- virtual std::string GetObjdumpCommand() {
- // Already resolved it once?
- if (resolved_objdump_cmd_.length() != 0) {
- return resolved_objdump_cmd_;
- }
-
- std::string line = FindTool(GetObjdumpCmdName());
- if (line.length() == 0) {
- return line;
- }
-
- resolved_objdump_cmd_ = line + GetObjdumpParameters();
-
- return resolved_objdump_cmd_;
- }
-
// Get the name of the objdump, e.g., "objdump" by default.
virtual std::string GetDisassembleCmdName() {
return "objdump";
@@ -354,23 +296,6 @@
// such to objdump, so it's architecture-specific and there is no default.
virtual std::string GetDisassembleParameters() = 0;
- // Return the host disassembler command for this test.
- virtual std::string GetDisassembleCommand() {
- // Already resolved it once?
- if (resolved_disassemble_cmd_.length() != 0) {
- return resolved_disassemble_cmd_;
- }
-
- std::string line = FindTool(GetDisassembleCmdName());
- if (line.length() == 0) {
- return line;
- }
-
- resolved_disassemble_cmd_ = line + GetDisassembleParameters();
-
- return resolved_disassemble_cmd_;
- }
-
// Create a couple of immediate values up to the number of bytes given.
virtual std::vector<int64_t> CreateImmediateValues(size_t imm_bytes, bool as_uint = false) {
std::vector<int64_t> res;
@@ -618,395 +543,18 @@
return str;
}
- // Driver() assembles and compares the results. If the results are not equal and we have a
- // disassembler, disassemble both and check whether they have the same mnemonics (in which case
- // we just warn).
- void Driver(std::string assembly_text, std::string test_name) {
- EXPECT_NE(assembly_text.length(), 0U) << "Empty assembly";
-
- NativeAssemblerResult res;
- Compile(assembly_text, &res, test_name);
-
- EXPECT_TRUE(res.ok) << res.error_msg;
- if (!res.ok) {
- // No way of continuing.
- return;
- }
-
+ void DriverWrapper(std::string assembly_text, std::string test_name) {
size_t cs = assembler_->CodeSize();
std::unique_ptr<std::vector<uint8_t>> data(new std::vector<uint8_t>(cs));
MemoryRegion code(&(*data)[0], data->size());
assembler_->FinalizeInstructions(code);
-
- if (*data == *res.code) {
- Clean(&res);
- } else {
- if (DisassembleBinaries(*data, *res.code, test_name)) {
- if (data->size() > res.code->size()) {
- // Fail this test with a fancy colored warning being printed.
- EXPECT_TRUE(false) << "Assembly code is not identical, but disassembly of machine code "
- "is equal: this implies sub-optimal encoding! Our code size=" << data->size() <<
- ", gcc size=" << res.code->size();
- } else {
- // Otherwise just print an info message and clean up.
- LOG(INFO) << "GCC chose a different encoding than ours, but the overall length is the "
- "same.";
- Clean(&res);
- }
- } else {
- // This will output the assembly.
- EXPECT_EQ(*res.code, *data) << "Outputs (and disassembly) not identical.";
- }
- }
- }
-
- // Structure to store intermediates and results.
- struct NativeAssemblerResult {
- bool ok;
- std::string error_msg;
- std::string base_name;
- std::unique_ptr<std::vector<uint8_t>> code;
- uintptr_t length;
- };
-
- // Compile the assembly file from_file to a binary file to_file. Returns true on success.
- bool Assemble(const char* from_file, const char* to_file, std::string* error_msg) {
- bool have_assembler = FileExists(FindTool(GetAssemblerCmdName()));
- EXPECT_TRUE(have_assembler) << "Cannot find assembler:" << GetAssemblerCommand();
- if (!have_assembler) {
- return false;
- }
-
- std::vector<std::string> args;
-
- // Encaspulate the whole command line in a single string passed to
- // the shell, so that GetAssemblerCommand() may contain arguments
- // in addition to the program name.
- args.push_back(GetAssemblerCommand());
- args.push_back("-o");
- args.push_back(to_file);
- args.push_back(from_file);
- std::string cmd = Join(args, ' ');
-
- args.clear();
- args.push_back("/bin/sh");
- args.push_back("-c");
- args.push_back(cmd);
-
- bool success = Exec(args, error_msg);
- if (!success) {
- LOG(INFO) << "Assembler command line:";
- for (std::string arg : args) {
- LOG(INFO) << arg;
- }
- }
- return success;
- }
-
- // Runs objdump -h on the binary file and extracts the first line with .text.
- // Returns "" on failure.
- std::string Objdump(std::string file) {
- bool have_objdump = FileExists(FindTool(GetObjdumpCmdName()));
- EXPECT_TRUE(have_objdump) << "Cannot find objdump: " << GetObjdumpCommand();
- if (!have_objdump) {
- return "";
- }
-
- std::string error_msg;
- std::vector<std::string> args;
-
- // Encaspulate the whole command line in a single string passed to
- // the shell, so that GetObjdumpCommand() may contain arguments
- // in addition to the program name.
- args.push_back(GetObjdumpCommand());
- args.push_back(file);
- args.push_back(">");
- args.push_back(file+".dump");
- std::string cmd = Join(args, ' ');
-
- args.clear();
- args.push_back("/bin/sh");
- args.push_back("-c");
- args.push_back(cmd);
-
- if (!Exec(args, &error_msg)) {
- EXPECT_TRUE(false) << error_msg;
- }
-
- std::ifstream dump(file+".dump");
-
- std::string line;
- bool found = false;
- while (std::getline(dump, line)) {
- if (line.find(".text") != line.npos) {
- found = true;
- break;
- }
- }
-
- dump.close();
-
- if (found) {
- return line;
- } else {
- return "";
- }
- }
-
- // Disassemble both binaries and compare the text.
- bool DisassembleBinaries(std::vector<uint8_t>& data, std::vector<uint8_t>& as,
- std::string test_name) {
- std::string disassembler = GetDisassembleCommand();
- if (disassembler.length() == 0) {
- LOG(WARNING) << "No dissassembler command.";
- return false;
- }
-
- std::string data_name = WriteToFile(data, test_name + ".ass");
- std::string error_msg;
- if (!DisassembleBinary(data_name, &error_msg)) {
- LOG(INFO) << "Error disassembling: " << error_msg;
- std::remove(data_name.c_str());
- return false;
- }
-
- std::string as_name = WriteToFile(as, test_name + ".gcc");
- if (!DisassembleBinary(as_name, &error_msg)) {
- LOG(INFO) << "Error disassembling: " << error_msg;
- std::remove(data_name.c_str());
- std::remove((data_name + ".dis").c_str());
- std::remove(as_name.c_str());
- return false;
- }
-
- bool result = CompareFiles(data_name + ".dis", as_name + ".dis");
-
- if (!kKeepDisassembledFiles) {
- std::remove(data_name.c_str());
- std::remove(as_name.c_str());
- std::remove((data_name + ".dis").c_str());
- std::remove((as_name + ".dis").c_str());
- }
-
- return result;
- }
-
- bool DisassembleBinary(std::string file, std::string* error_msg) {
- std::vector<std::string> args;
-
- // Encaspulate the whole command line in a single string passed to
- // the shell, so that GetDisassembleCommand() may contain arguments
- // in addition to the program name.
- args.push_back(GetDisassembleCommand());
- args.push_back(file);
- args.push_back("| sed -n \'/<.data>/,$p\' | sed -e \'s/.*://\'");
- args.push_back(">");
- args.push_back(file+".dis");
- std::string cmd = Join(args, ' ');
-
- args.clear();
- args.push_back("/bin/sh");
- args.push_back("-c");
- args.push_back(cmd);
-
- return Exec(args, error_msg);
- }
-
- std::string WriteToFile(std::vector<uint8_t>& buffer, std::string test_name) {
- std::string file_name = GetTmpnam() + std::string("---") + test_name;
- const char* data = reinterpret_cast<char*>(buffer.data());
- std::ofstream s_out(file_name + ".o");
- s_out.write(data, buffer.size());
- s_out.close();
- return file_name + ".o";
- }
-
- bool CompareFiles(std::string f1, std::string f2) {
- std::ifstream f1_in(f1);
- std::ifstream f2_in(f2);
-
- bool result = std::equal(std::istreambuf_iterator<char>(f1_in),
- std::istreambuf_iterator<char>(),
- std::istreambuf_iterator<char>(f2_in));
-
- f1_in.close();
- f2_in.close();
-
- return result;
- }
-
- // Compile the given assembly code and extract the binary, if possible. Put result into res.
- bool Compile(std::string assembly_code, NativeAssemblerResult* res, std::string test_name) {
- res->ok = false;
- res->code.reset(nullptr);
-
- res->base_name = GetTmpnam() + std::string("---") + test_name;
-
- // TODO: Lots of error checking.
-
- std::ofstream s_out(res->base_name + ".S");
- const char* header = GetAssemblyHeader();
- if (header != nullptr) {
- s_out << header;
- }
- s_out << assembly_code;
- s_out.close();
-
- if (!Assemble((res->base_name + ".S").c_str(), (res->base_name + ".o").c_str(),
- &res->error_msg)) {
- res->error_msg = "Could not compile.";
- return false;
- }
-
- std::string odump = Objdump(res->base_name + ".o");
- if (odump.length() == 0) {
- res->error_msg = "Objdump failed.";
- return false;
- }
-
- std::istringstream iss(odump);
- std::istream_iterator<std::string> start(iss);
- std::istream_iterator<std::string> end;
- std::vector<std::string> tokens(start, end);
-
- if (tokens.size() < OBJDUMP_SECTION_LINE_MIN_TOKENS) {
- res->error_msg = "Objdump output not recognized: too few tokens.";
- return false;
- }
-
- if (tokens[1] != ".text") {
- res->error_msg = "Objdump output not recognized: .text not second token.";
- return false;
- }
-
- std::string lengthToken = "0x" + tokens[2];
- std::istringstream(lengthToken) >> std::hex >> res->length;
-
- std::string offsetToken = "0x" + tokens[5];
- uintptr_t offset;
- std::istringstream(offsetToken) >> std::hex >> offset;
-
- std::ifstream obj(res->base_name + ".o");
- obj.seekg(offset);
- res->code.reset(new std::vector<uint8_t>(res->length));
- obj.read(reinterpret_cast<char*>(&(*res->code)[0]), res->length);
- obj.close();
-
- res->ok = true;
- return true;
- }
-
- // Remove temporary files.
- void Clean(const NativeAssemblerResult* res) {
- std::remove((res->base_name + ".S").c_str());
- std::remove((res->base_name + ".o").c_str());
- std::remove((res->base_name + ".o.dump").c_str());
- }
-
- // Check whether file exists. Is used for commands, so strips off any parameters: anything after
- // the first space. We skip to the last slash for this, so it should work with directories with
- // spaces.
- static bool FileExists(std::string file) {
- if (file.length() == 0) {
- return false;
- }
-
- // Need to strip any options.
- size_t last_slash = file.find_last_of('/');
- if (last_slash == std::string::npos) {
- // No slash, start looking at the start.
- last_slash = 0;
- }
- size_t space_index = file.find(' ', last_slash);
-
- if (space_index == std::string::npos) {
- std::ifstream infile(file.c_str());
- return infile.good();
- } else {
- std::string copy = file.substr(0, space_index - 1);
-
- struct stat buf;
- return stat(copy.c_str(), &buf) == 0;
- }
- }
-
- static std::string GetGCCRootPath() {
- return "prebuilts/gcc/linux-x86";
- }
-
- static std::string GetRootPath() {
- // 1) Check ANDROID_BUILD_TOP
- char* build_top = getenv("ANDROID_BUILD_TOP");
- if (build_top != nullptr) {
- return std::string(build_top) + "/";
- }
-
- // 2) Do cwd
- char temp[1024];
- return getcwd(temp, 1024) ? std::string(temp) + "/" : std::string("");
- }
-
- std::string FindTool(std::string tool_name) {
- // Find the current tool. Wild-card pattern is "arch-string*tool-name".
- std::string gcc_path = GetRootPath() + GetGCCRootPath();
- std::vector<std::string> args;
- args.push_back("find");
- args.push_back(gcc_path);
- args.push_back("-name");
- args.push_back(GetArchitectureString() + "*" + tool_name);
- args.push_back("|");
- args.push_back("sort");
- args.push_back("|");
- args.push_back("tail");
- args.push_back("-n");
- args.push_back("1");
- std::string tmp_file = GetTmpnam();
- args.push_back(">");
- args.push_back(tmp_file);
- std::string sh_args = Join(args, ' ');
-
- args.clear();
- args.push_back("/bin/sh");
- args.push_back("-c");
- args.push_back(sh_args);
-
- std::string error_msg;
- if (!Exec(args, &error_msg)) {
- EXPECT_TRUE(false) << error_msg;
- return "";
- }
-
- std::ifstream in(tmp_file.c_str());
- std::string line;
- if (!std::getline(in, line)) {
- in.close();
- std::remove(tmp_file.c_str());
- return "";
- }
- in.close();
- std::remove(tmp_file.c_str());
- return line;
- }
-
- // Use a consistent tmpnam, so store it.
- std::string GetTmpnam() {
- if (tmpnam_.length() == 0) {
- ScratchFile tmp;
- tmpnam_ = tmp.GetFilename() + "asm";
- }
- return tmpnam_;
+ test_helper_->Driver(*data, assembly_text, test_name);
}
static constexpr size_t kWarnManyCombinationsThreshold = 500;
- static constexpr size_t OBJDUMP_SECTION_LINE_MIN_TOKENS = 6;
std::unique_ptr<Ass> assembler_;
-
- std::string resolved_assembler_cmd_;
- std::string resolved_objdump_cmd_;
- std::string resolved_disassemble_cmd_;
-
- std::string android_data_;
+ std::unique_ptr<AssemblerTestInfrastructure> test_helper_;
DISALLOW_COPY_AND_ASSIGN(AssemblerTest);
};
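With the refactoring above, an architecture-specific fixture only supplies tool names and flags; SetUp() forwards them into the shared AssemblerTestInfrastructure. A hypothetical fixture body for some architecture (the override names come from this file; the returned values are invented placeholders):

// Hypothetical overrides wired into the helper by SetUp() (sketch).
std::string GetArchitectureString() OVERRIDE { return "myarch"; }  // tool name prefix
std::string GetAssemblerCmdName() OVERRIDE { return "as"; }
std::string GetAssemblerParameters() OVERRIDE { return " -march=myarch"; }
std::string GetDisassembleParameters() OVERRIDE { return " -D -b binary -m myarch"; }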
diff --git a/compiler/utils/assembler_test_base.h b/compiler/utils/assembler_test_base.h
new file mode 100644
index 0000000..3341151
--- /dev/null
+++ b/compiler/utils/assembler_test_base.h
@@ -0,0 +1,544 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_ASSEMBLER_TEST_BASE_H_
+#define ART_COMPILER_UTILS_ASSEMBLER_TEST_BASE_H_
+
+#include "common_runtime_test.h" // For ScratchFile
+
+#include <cstdio>
+#include <cstdlib>
+#include <fstream>
+#include <iterator>
+#include <sys/stat.h>
+
+namespace art {
+
+// If you want to take a look at the differences between the ART assembler and GCC, set this flag
+// to true. The disassembled files will then remain in the tmp directory.
+static constexpr bool kKeepDisassembledFiles = false;
+
+// Use a global static variable to keep the same name for all test data. Else we'll just spam the
+// temp directory.
+static std::string tmpnam_;
+
+// We put this into a class because gtests are self-contained, so this helper needs to be in an h-file.
+class AssemblerTestInfrastructure {
+ public:
+ AssemblerTestInfrastructure(std::string architecture,
+ std::string as,
+ std::string as_params,
+ std::string objdump,
+ std::string objdump_params,
+ std::string disasm,
+ std::string disasm_params,
+ const char* asm_header) :
+ architecture_string_(architecture),
+ asm_header_(asm_header),
+ assembler_cmd_name_(as),
+ assembler_parameters_(as_params),
+ objdump_cmd_name_(objdump),
+ objdump_parameters_(objdump_params),
+ disassembler_cmd_name_(disasm),
+ disassembler_parameters_(disasm_params) {
+ // Fake a runtime test for ScratchFile
+ CommonRuntimeTest::SetUpAndroidData(android_data_);
+ }
+
+ virtual ~AssemblerTestInfrastructure() {
+ // We leave temporaries in case this failed so we can debug issues.
+ CommonRuntimeTest::TearDownAndroidData(android_data_, false);
+ tmpnam_ = "";
+ }
+
+ // This is intended to be run as a test.
+ bool CheckTools() {
+ if (!FileExists(FindTool(assembler_cmd_name_))) {
+ return false;
+ }
+ LOG(INFO) << "Chosen assembler command: " << GetAssemblerCommand();
+
+ if (!FileExists(FindTool(objdump_cmd_name_))) {
+ return false;
+ }
+ LOG(INFO) << "Chosen objdump command: " << GetObjdumpCommand();
+
+ // Disassembly is optional.
+ std::string disassembler = GetDisassembleCommand();
+ if (disassembler.length() != 0) {
+ if (!FileExists(FindTool(disassembler_cmd_name_))) {
+ return false;
+ }
+ LOG(INFO) << "Chosen disassemble command: " << GetDisassembleCommand();
+ } else {
+ LOG(INFO) << "No disassembler given.";
+ }
+
+ return true;
+ }
+
+ // Driver() assembles and compares the results. If the results are not equal and we have a
+ // disassembler, disassemble both and check whether they have the same mnemonics (in which case
+ // we just warn).
+ void Driver(const std::vector<uint8_t>& data, std::string assembly_text, std::string test_name) {
+ EXPECT_NE(assembly_text.length(), 0U) << "Empty assembly";
+
+ NativeAssemblerResult res;
+ Compile(assembly_text, &res, test_name);
+
+ EXPECT_TRUE(res.ok) << res.error_msg;
+ if (!res.ok) {
+ // No way of continuing.
+ return;
+ }
+
+ if (data == *res.code) {
+ Clean(&res);
+ } else {
+ if (DisassembleBinaries(data, *res.code, test_name)) {
+ if (data.size() > res.code->size()) {
+ // Fail this test with a fancy colored warning being printed.
+ EXPECT_TRUE(false) << "Assembly code is not identical, but disassembly of machine code "
+ "is equal: this implies sub-optimal encoding! Our code size=" << data.size() <<
+ ", gcc size=" << res.code->size();
+ } else {
+ // Otherwise just print an info message and clean up.
+ LOG(INFO) << "GCC chose a different encoding than ours, but the overall length is the "
+ "same.";
+ Clean(&res);
+ }
+ } else {
+ // This will output the assembly.
+ EXPECT_EQ(*res.code, data) << "Outputs (and disassembly) not identical.";
+ }
+ }
+ }
+
+ protected:
+ // Return the host assembler command for this test.
+ virtual std::string GetAssemblerCommand() {
+ // Already resolved it once?
+ if (resolved_assembler_cmd_.length() != 0) {
+ return resolved_assembler_cmd_;
+ }
+
+ std::string line = FindTool(assembler_cmd_name_);
+ if (line.length() == 0) {
+ return line;
+ }
+
+ resolved_assembler_cmd_ = line + assembler_parameters_;
+
+ return resolved_assembler_cmd_;
+ }
+
+ // Return the host objdump command for this test.
+ virtual std::string GetObjdumpCommand() {
+ // Already resolved it once?
+ if (resolved_objdump_cmd_.length() != 0) {
+ return resolved_objdump_cmd_;
+ }
+
+ std::string line = FindTool(objdump_cmd_name_);
+ if (line.length() == 0) {
+ return line;
+ }
+
+ resolved_objdump_cmd_ = line + objdump_parameters_;
+
+ return resolved_objdump_cmd_;
+ }
+
+ // Return the host disassembler command for this test.
+ virtual std::string GetDisassembleCommand() {
+ // Already resolved it once?
+ if (resolved_disassemble_cmd_.length() != 0) {
+ return resolved_disassemble_cmd_;
+ }
+
+ std::string line = FindTool(disassembler_cmd_name_);
+ if (line.length() == 0) {
+ return line;
+ }
+
+ resolved_disassemble_cmd_ = line + disassembler_parameters_;
+
+ return resolved_disassemble_cmd_;
+ }
+
+ private:
+ // Structure to store intermediates and results.
+ struct NativeAssemblerResult {
+ bool ok;
+ std::string error_msg;
+ std::string base_name;
+ std::unique_ptr<std::vector<uint8_t>> code;
+ uintptr_t length;
+ };
+
+ // Compile the assembly file from_file to a binary file to_file. Returns true on success.
+ bool Assemble(const char* from_file, const char* to_file, std::string* error_msg) {
+ bool have_assembler = FileExists(FindTool(assembler_cmd_name_));
+ EXPECT_TRUE(have_assembler) << "Cannot find assembler:" << GetAssemblerCommand();
+ if (!have_assembler) {
+ return false;
+ }
+
+ std::vector<std::string> args;
+
+ // Encapsulate the whole command line in a single string passed to
+ // the shell, so that GetAssemblerCommand() may contain arguments
+ // in addition to the program name.
+ args.push_back(GetAssemblerCommand());
+ args.push_back("-o");
+ args.push_back(to_file);
+ args.push_back(from_file);
+ std::string cmd = Join(args, ' ');
+
+ args.clear();
+ args.push_back("/bin/sh");
+ args.push_back("-c");
+ args.push_back(cmd);
+
+ bool success = Exec(args, error_msg);
+ if (!success) {
+ LOG(INFO) << "Assembler command line:";
+ for (std::string arg : args) {
+ LOG(INFO) << arg;
+ }
+ }
+ return success;
+ }
+
+ // Runs objdump -h on the binary file and extracts the first line with .text.
+ // Returns "" on failure.
+ std::string Objdump(std::string file) {
+ bool have_objdump = FileExists(FindTool(objdump_cmd_name_));
+ EXPECT_TRUE(have_objdump) << "Cannot find objdump: " << GetObjdumpCommand();
+ if (!have_objdump) {
+ return "";
+ }
+
+ std::string error_msg;
+ std::vector<std::string> args;
+
+ // Encapsulate the whole command line in a single string passed to
+ // the shell, so that GetObjdumpCommand() may contain arguments
+ // in addition to the program name.
+ args.push_back(GetObjdumpCommand());
+ args.push_back(file);
+ args.push_back(">");
+ args.push_back(file+".dump");
+ std::string cmd = Join(args, ' ');
+
+ args.clear();
+ args.push_back("/bin/sh");
+ args.push_back("-c");
+ args.push_back(cmd);
+
+ if (!Exec(args, &error_msg)) {
+ EXPECT_TRUE(false) << error_msg;
+ }
+
+ std::ifstream dump(file+".dump");
+
+ std::string line;
+ bool found = false;
+ while (std::getline(dump, line)) {
+ if (line.find(".text") != line.npos) {
+ found = true;
+ break;
+ }
+ }
+
+ dump.close();
+
+ if (found) {
+ return line;
+ } else {
+ return "";
+ }
+ }
+
+ // Disassemble both binaries and compare the text.
+ bool DisassembleBinaries(const std::vector<uint8_t>& data, const std::vector<uint8_t>& as,
+ std::string test_name) {
+ std::string disassembler = GetDisassembleCommand();
+ if (disassembler.length() == 0) {
+ LOG(WARNING) << "No dissassembler command.";
+ return false;
+ }
+
+ std::string data_name = WriteToFile(data, test_name + ".ass");
+ std::string error_msg;
+ if (!DisassembleBinary(data_name, &error_msg)) {
+ LOG(INFO) << "Error disassembling: " << error_msg;
+ std::remove(data_name.c_str());
+ return false;
+ }
+
+ std::string as_name = WriteToFile(as, test_name + ".gcc");
+ if (!DisassembleBinary(as_name, &error_msg)) {
+ LOG(INFO) << "Error disassembling: " << error_msg;
+ std::remove(data_name.c_str());
+ std::remove((data_name + ".dis").c_str());
+ std::remove(as_name.c_str());
+ return false;
+ }
+
+ bool result = CompareFiles(data_name + ".dis", as_name + ".dis");
+
+ if (!kKeepDisassembledFiles) {
+ std::remove(data_name.c_str());
+ std::remove(as_name.c_str());
+ std::remove((data_name + ".dis").c_str());
+ std::remove((as_name + ".dis").c_str());
+ }
+
+ return result;
+ }
+
+ bool DisassembleBinary(std::string file, std::string* error_msg) {
+ std::vector<std::string> args;
+
+ // Encapsulate the whole command line in a single string passed to
+ // the shell, so that GetDisassembleCommand() may contain arguments
+ // in addition to the program name.
+ args.push_back(GetDisassembleCommand());
+ args.push_back(file);
+ args.push_back("| sed -n \'/<.data>/,$p\' | sed -e \'s/.*://\'");
+ args.push_back(">");
+ args.push_back(file+".dis");
+ std::string cmd = Join(args, ' ');
+
+ args.clear();
+ args.push_back("/bin/sh");
+ args.push_back("-c");
+ args.push_back(cmd);
+
+ return Exec(args, error_msg);
+ }
+
+ std::string WriteToFile(const std::vector<uint8_t>& buffer, std::string test_name) {
+ std::string file_name = GetTmpnam() + std::string("---") + test_name;
+ const char* data = reinterpret_cast<const char*>(buffer.data());
+ std::ofstream s_out(file_name + ".o");
+ s_out.write(data, buffer.size());
+ s_out.close();
+ return file_name + ".o";
+ }
+
+ bool CompareFiles(std::string f1, std::string f2) {
+ std::ifstream f1_in(f1);
+ std::ifstream f2_in(f2);
+
+ bool result = std::equal(std::istreambuf_iterator<char>(f1_in),
+ std::istreambuf_iterator<char>(),
+ std::istreambuf_iterator<char>(f2_in));
+
+ f1_in.close();
+ f2_in.close();
+
+ return result;
+ }
+
+ // Compile the given assembly code and extract the binary, if possible. Put result into res.
+ bool Compile(std::string assembly_code, NativeAssemblerResult* res, std::string test_name) {
+ res->ok = false;
+ res->code.reset(nullptr);
+
+ res->base_name = GetTmpnam() + std::string("---") + test_name;
+
+ // TODO: Lots of error checking.
+
+ std::ofstream s_out(res->base_name + ".S");
+ if (asm_header_ != nullptr) {
+ s_out << asm_header_;
+ }
+ s_out << assembly_code;
+ s_out.close();
+
+ if (!Assemble((res->base_name + ".S").c_str(), (res->base_name + ".o").c_str(),
+ &res->error_msg)) {
+ res->error_msg = "Could not compile.";
+ return false;
+ }
+
+ std::string odump = Objdump(res->base_name + ".o");
+ if (odump.length() == 0) {
+ res->error_msg = "Objdump failed.";
+ return false;
+ }
+
+ std::istringstream iss(odump);
+ std::istream_iterator<std::string> start(iss);
+ std::istream_iterator<std::string> end;
+ std::vector<std::string> tokens(start, end);
+
+ if (tokens.size() < OBJDUMP_SECTION_LINE_MIN_TOKENS) {
+ res->error_msg = "Objdump output not recognized: too few tokens.";
+ return false;
+ }
+
+ if (tokens[1] != ".text") {
+ res->error_msg = "Objdump output not recognized: .text not second token.";
+ return false;
+ }
+
+ std::string lengthToken = "0x" + tokens[2];
+ std::istringstream(lengthToken) >> std::hex >> res->length;
+
+ std::string offsetToken = "0x" + tokens[5];
+ uintptr_t offset;
+ std::istringstream(offsetToken) >> std::hex >> offset;
+
+ std::ifstream obj(res->base_name + ".o");
+ obj.seekg(offset);
+ res->code.reset(new std::vector<uint8_t>(res->length));
+ obj.read(reinterpret_cast<char*>(&(*res->code)[0]), res->length);
+ obj.close();
+
+ res->ok = true;
+ return true;
+ }
+
+ // Remove temporary files.
+ void Clean(const NativeAssemblerResult* res) {
+ std::remove((res->base_name + ".S").c_str());
+ std::remove((res->base_name + ".o").c_str());
+ std::remove((res->base_name + ".o.dump").c_str());
+ }
+
+ // Check whether the file exists. This is used for commands, so any parameters (anything after
+ // the first space) are stripped off. We skip to the last slash first, so it should work with
+ // directories containing spaces.
+ static bool FileExists(std::string file) {
+ if (file.length() == 0) {
+ return false;
+ }
+
+ // Need to strip any options.
+ size_t last_slash = file.find_last_of('/');
+ if (last_slash == std::string::npos) {
+ // No slash, start looking at the start.
+ last_slash = 0;
+ }
+ size_t space_index = file.find(' ', last_slash);
+
+ if (space_index == std::string::npos) {
+ std::ifstream infile(file.c_str());
+ return infile.good();
+ } else {
+ std::string copy = file.substr(0, space_index - 1);
+
+ struct stat buf;
+ return stat(copy.c_str(), &buf) == 0;
+ }
+ }
+
+ static std::string GetGCCRootPath() {
+ return "prebuilts/gcc/linux-x86";
+ }
+
+ static std::string GetRootPath() {
+ // 1) Check ANDROID_BUILD_TOP
+ char* build_top = getenv("ANDROID_BUILD_TOP");
+ if (build_top != nullptr) {
+ return std::string(build_top) + "/";
+ }
+
+ // 2) Do cwd
+ char temp[1024];
+ return getcwd(temp, 1024) ? std::string(temp) + "/" : std::string("");
+ }
+
+ std::string FindTool(std::string tool_name) {
+ // Find the current tool. Wild-card pattern is "arch-string*tool-name".
+ std::string gcc_path = GetRootPath() + GetGCCRootPath();
+ std::vector<std::string> args;
+ args.push_back("find");
+ args.push_back(gcc_path);
+ args.push_back("-name");
+ args.push_back(architecture_string_ + "*" + tool_name);
+ args.push_back("|");
+ args.push_back("sort");
+ args.push_back("|");
+ args.push_back("tail");
+ args.push_back("-n");
+ args.push_back("1");
+ std::string tmp_file = GetTmpnam();
+ args.push_back(">");
+ args.push_back(tmp_file);
+ std::string sh_args = Join(args, ' ');
+
+ args.clear();
+ args.push_back("/bin/sh");
+ args.push_back("-c");
+ args.push_back(sh_args);
+
+ std::string error_msg;
+ if (!Exec(args, &error_msg)) {
+ EXPECT_TRUE(false) << error_msg;
+ return "";
+ }
+
+ std::ifstream in(tmp_file.c_str());
+ std::string line;
+ if (!std::getline(in, line)) {
+ in.close();
+ std::remove(tmp_file.c_str());
+ return "";
+ }
+ in.close();
+ std::remove(tmp_file.c_str());
+ return line;
+ }
+
+ // Use a consistent tmpnam, so store it.
+ std::string GetTmpnam() {
+ if (tmpnam_.length() == 0) {
+ ScratchFile tmp;
+ tmpnam_ = tmp.GetFilename() + "asm";
+ }
+ return tmpnam_;
+ }
+
+ static constexpr size_t OBJDUMP_SECTION_LINE_MIN_TOKENS = 6;
+
+ std::string architecture_string_;
+ const char* asm_header_;
+
+ std::string assembler_cmd_name_;
+ std::string assembler_parameters_;
+
+ std::string objdump_cmd_name_;
+ std::string objdump_parameters_;
+
+ std::string disassembler_cmd_name_;
+ std::string disassembler_parameters_;
+
+ std::string resolved_assembler_cmd_;
+ std::string resolved_objdump_cmd_;
+ std::string resolved_disassemble_cmd_;
+
+ std::string android_data_;
+
+ DISALLOW_COPY_AND_ASSIGN(AssemblerTestInfrastructure);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_ASSEMBLER_TEST_BASE_H_
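To make the token arithmetic in Compile() concrete, this is the shape of the objdump -h line the .text search latches onto (sample output; spacing and values are illustrative and vary across binutils versions):

  0 .text         00000018  00000000  00000000  00000034  2**2

Whitespace-splitting that line yields at least the six tokens OBJDUMP_SECTION_LINE_MIN_TOKENS demands: tokens[1] is ".text", tokens[2] is the section size, and tokens[5] is the file offset, the two hex values read back through std::hex above.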
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 772fa9a..7738627 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -15,9 +15,11 @@
*/
#include <dirent.h>
+#include <errno.h>
#include <fstream>
-#include <sys/types.h>
#include <map>
+#include <string.h>
+#include <sys/types.h>
#include "gtest/gtest.h"
#include "utils/arm/assembler_thumb2.h"
@@ -105,12 +107,14 @@
// Assemble the .S
snprintf(cmd, sizeof(cmd), "%sas %s -o %s.o", toolsdir.c_str(), filename, filename);
- system(cmd);
+ int cmd_result = system(cmd);
+ ASSERT_EQ(cmd_result, 0) << strerror(errno);
// Remove the $d symbols to prevent the disassembler dumping the instructions
// as .word
snprintf(cmd, sizeof(cmd), "%sobjcopy -N '$d' %s.o %s.oo", toolsdir.c_str(), filename, filename);
- system(cmd);
+ int cmd_result2 = system(cmd);
+ ASSERT_EQ(cmd_result2, 0) << strerror(errno);
// Disassemble.
@@ -119,7 +123,8 @@
if (kPrintResults) {
// Print the results only, don't check. This is used to generate new output for inserting
// into the .inc file.
- system(cmd);
+ int cmd_result3 = system(cmd);
+ ASSERT_EQ(cmd_result3, 0) << strerror(errno);
} else {
// Check the results match the appropriate results in the .inc file.
FILE *fp = popen(cmd, "r");
diff --git a/compiler/utils/dedupe_set.h b/compiler/utils/dedupe_set.h
index b062a2a..a9a5781 100644
--- a/compiler/utils/dedupe_set.h
+++ b/compiler/utils/dedupe_set.h
@@ -40,8 +40,8 @@
struct HashedKey {
StoreKey* store_ptr;
union {
- HashType store_hash; // Valid if store_ptr != nullptr.
- const HashedInKey* in_key; // Valid if store_ptr == nullptr.
+ HashType store_hash; // Valid if store_ptr != null.
+ const HashedInKey* in_key; // Valid if store_ptr == null.
};
};
diff --git a/compiler/utils/growable_array.h b/compiler/utils/growable_array.h
index 821e28b..e4b1e7d 100644
--- a/compiler/utils/growable_array.h
+++ b/compiler/utils/growable_array.h
@@ -46,6 +46,14 @@
}
}
+ bool Contains(T value) const {
+ for (size_t i = 0; i < num_used_; ++i) {
+ if (elem_list_[i] == value) {
+ return true;
+ }
+ }
+ return false;
+ }
// Expand the list size to at least new length.
void Resize(size_t new_length) {
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 216cb41..d4acf03 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -238,17 +238,17 @@
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_reg to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
- // NULL.
- void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
- bool null_allowed) OVERRIDE;
+ // null.
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
+ ManagedRegister in_reg, bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_off to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister mscratch,
- bool null_allowed) OVERRIDE;
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
+ ManagedRegister mscratch, bool null_allowed) OVERRIDE;
// src holds a handle scope entry (Object**) load this into dst
void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 282ab96..5e9653d 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -272,6 +272,10 @@
EmitI(0x25, rs, rt, imm16);
}
+void Mips64Assembler::Lwu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
+ EmitI(0x27, rs, rt, imm16);
+}
+
void Mips64Assembler::Lui(GpuRegister rt, uint16_t imm16) {
EmitI(0xf, static_cast<GpuRegister>(0), rt, imm16);
}
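
The new Lwu reuses the standard MIPS I-type layout through EmitI; a minimal sketch of that
field packing, assuming the architectural field positions (EncodeIType is illustrative):

    #include <cstdint>

    // MIPS I-type: opcode(6) | rs(5) | rt(5) | imm(16).
    static uint32_t EncodeIType(uint32_t opcode, uint32_t rs, uint32_t rt, uint16_t imm16) {
      return (opcode << 26) | (rs << 21) | (rt << 16) | imm16;
    }

    int main() {
      // lwu $8, 16($29): opcode 0x27, base register 29 (sp), target register 8 (t0).
      uint32_t insn = EncodeIType(0x27, 29, 8, 16);
      return insn == 0x9fa80010u ? 0 : 1;
    }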
@@ -480,6 +484,9 @@
case kLoadWord:
Lw(reg, base, offset);
break;
+ case kLoadUnsignedWord:
+ Lwu(reg, base, offset);
+ break;
case kLoadDoubleword:
// TODO: alignment issues ???
Ld(reg, base, offset);
@@ -512,7 +519,6 @@
CHECK_EQ(0u, size) << dst;
} else if (dst.IsGpuRegister()) {
if (size == 4) {
- CHECK_EQ(4u, size) << dst;
LoadFromOffset(kLoadWord, dst.AsGpuRegister(), src_register, src_offset);
} else if (size == 8) {
CHECK_EQ(8u, size) << dst;
@@ -740,14 +746,13 @@
void Mips64Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
Mips64ManagedRegister dest = mdest.AsMips64();
CHECK(dest.IsGpuRegister());
- LoadFromOffset(kLoadWord, dest.AsGpuRegister(), SP, src.Int32Value());
+ LoadFromOffset(kLoadUnsignedWord, dest.AsGpuRegister(), SP, src.Int32Value());
}
-void Mips64Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base,
- MemberOffset offs) {
+void Mips64Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs) {
Mips64ManagedRegister dest = mdest.AsMips64();
- CHECK(dest.IsGpuRegister() && dest.IsGpuRegister());
- LoadFromOffset(kLoadWord, dest.AsGpuRegister(),
+ CHECK(dest.IsGpuRegister() && base.AsMips64().IsGpuRegister());
+ LoadFromOffset(kLoadUnsignedWord, dest.AsGpuRegister(),
base.AsMips64().AsGpuRegister(), offs.Int32Value());
if (kPoisonHeapReferences) {
Subu(dest.AsGpuRegister(), ZERO, dest.AsGpuRegister());
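
The kLoadWord -> kLoadUnsignedWord switches throughout this file matter because MIPS64 lw
sign-extends a 32-bit value into the 64-bit register, while 32-bit heap references must be
zero-extended; a small sketch of the difference:

    #include <cstdint>

    int main() {
      uint32_t ref = 0x80000000u;  // A 32-bit reference with the top bit set.
      // lw-style load: sign-extension corrupts the upper half of the register.
      int64_t sign_extended = static_cast<int32_t>(ref);
      // lwu-style load: zero-extension preserves the 32-bit value.
      uint64_t zero_extended = ref;
      return (sign_extended == -0x80000000LL && zero_extended == 0x80000000ULL) ? 0 : 1;
    }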
@@ -921,7 +926,7 @@
// the address in the handle scope holding the reference.
// e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
if (in_reg.IsNoRegister()) {
- LoadFromOffset(kLoadWord, out_reg.AsGpuRegister(),
+ LoadFromOffset(kLoadUnsignedWord, out_reg.AsGpuRegister(),
SP, handle_scope_offset.Int32Value());
in_reg = out_reg;
}
@@ -944,7 +949,7 @@
CHECK(scratch.IsGpuRegister()) << scratch;
if (null_allowed) {
Label null_arg;
- LoadFromOffset(kLoadWord, scratch.AsGpuRegister(), SP,
+ LoadFromOffset(kLoadUnsignedWord, scratch.AsGpuRegister(), SP,
handle_scope_offset.Int32Value());
// Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
// the address in the handle scope holding the reference.
@@ -998,7 +1003,7 @@
Mips64ManagedRegister scratch = mscratch.AsMips64();
CHECK(scratch.IsGpuRegister()) << scratch;
// Call *(*(SP + base) + offset)
- LoadFromOffset(kLoadWord, scratch.AsGpuRegister(),
+ LoadFromOffset(kLoadUnsignedWord, scratch.AsGpuRegister(),
SP, base.Int32Value());
LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
scratch.AsGpuRegister(), offset.Int32Value());
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 36e74d7..2d7c661 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -36,6 +36,7 @@
kLoadSignedHalfword,
kLoadUnsignedHalfword,
kLoadWord,
+ kLoadUnsignedWord,
kLoadDoubleword
};
@@ -85,6 +86,7 @@
void Ld(GpuRegister rt, GpuRegister rs, uint16_t imm16);
void Lbu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
void Lhu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
+ void Lwu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
void Lui(GpuRegister rt, uint16_t imm16);
void Mfhi(GpuRegister rd);
void Mflo(GpuRegister rd);
@@ -235,14 +237,14 @@
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_reg to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
- // NULL.
+ // null.
void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
ManagedRegister in_reg, bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_off to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed.
void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister
mscratch, bool null_allowed) OVERRIDE;
diff --git a/compiler/utils/test_dex_file_builder.h b/compiler/utils/test_dex_file_builder.h
new file mode 100644
index 0000000..ab039aa
--- /dev/null
+++ b/compiler/utils/test_dex_file_builder.h
@@ -0,0 +1,372 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_TEST_DEX_FILE_BUILDER_H_
+#define ART_COMPILER_UTILS_TEST_DEX_FILE_BUILDER_H_
+
+#include <cstring>
+#include <set>
+#include <map>
+#include <vector>
+
+#include "dex_file.h"
+#include "utils.h"
+
+namespace art {
+
+class TestDexFileBuilder {
+ public:
+ TestDexFileBuilder()
+ : strings_(), types_(), fields_(), protos_(), dex_file_data_() {
+ }
+
+ void AddString(const std::string& str) {
+ CHECK(dex_file_data_.empty());
+ auto it = strings_.emplace(str, IdxAndDataOffset()).first;
+ CHECK_LT(it->first.length(), 128u); // Don't allow multi-byte length in uleb128.
+ }
+
+ void AddType(const std::string& descriptor) {
+ CHECK(dex_file_data_.empty());
+ AddString(descriptor);
+ types_.emplace(descriptor, 0u);
+ }
+
+ void AddField(const std::string& class_descriptor, const std::string& type,
+ const std::string& name) {
+ CHECK(dex_file_data_.empty());
+ AddType(class_descriptor);
+ AddType(type);
+ AddString(name);
+ FieldKey key = { class_descriptor, type, name };
+ fields_.emplace(key, 0u);
+ }
+
+ void AddMethod(const std::string& class_descriptor, const std::string& signature,
+ const std::string& name) {
+ CHECK(dex_file_data_.empty());
+ AddType(class_descriptor);
+ AddString(name);
+
+ ProtoKey proto_key = CreateProtoKey(signature);
+ AddString(proto_key.shorty);
+ AddType(proto_key.return_type);
+ for (const auto& arg_type : proto_key.args) {
+ AddType(arg_type);
+ }
+ auto it = protos_.emplace(proto_key, IdxAndDataOffset()).first;
+ const ProtoKey* proto = &it->first; // Valid as long as the element remains in protos_.
+
+ MethodKey method_key = {
+ class_descriptor, name, proto
+ };
+ methods_.emplace(method_key, 0u);
+ }
+
+ // NOTE: The builder holds the actual data, so it must live as long as the dex file.
+ std::unique_ptr<const DexFile> Build(const std::string& dex_location) {
+ CHECK(dex_file_data_.empty());
+ union {
+ uint8_t data[sizeof(DexFile::Header)];
+ uint64_t force_alignment;
+ } header_data;
+ std::memset(header_data.data, 0, sizeof(header_data.data));
+ DexFile::Header* header = reinterpret_cast<DexFile::Header*>(&header_data.data);
+ std::copy_n(DexFile::kDexMagic, 4u, header->magic_);
+ std::copy_n(DexFile::kDexMagicVersion, 4u, header->magic_ + 4u);
+ header->header_size_ = sizeof(DexFile::Header);  // Not sizeof(header): that is a pointer.
+ header->endian_tag_ = DexFile::kDexEndianConstant;
+ header->link_size_ = 0u; // Unused.
+ header->link_off_ = 0u; // Unused.
+ header->map_off_ = 0u; // Unused.
+
+ uint32_t data_section_size = 0u;
+
+ uint32_t string_ids_offset = sizeof(DexFile::Header);
+ uint32_t string_idx = 0u;
+ for (auto& entry : strings_) {
+ entry.second.idx = string_idx;
+ string_idx += 1u;
+ entry.second.data_offset = data_section_size;
+ data_section_size += entry.first.length() + 1u /* length */ + 1u /* null-terminator */;
+ }
+ header->string_ids_size_ = strings_.size();
+ header->string_ids_off_ = strings_.empty() ? 0u : string_ids_offset;
+
+ uint32_t type_ids_offset = string_ids_offset + strings_.size() * sizeof(DexFile::StringId);
+ uint32_t type_idx = 0u;
+ for (auto& entry : types_) {
+ entry.second = type_idx;
+ type_idx += 1u;
+ }
+ header->type_ids_size_ = types_.size();
+ header->type_ids_off_ = types_.empty() ? 0u : type_ids_offset;
+
+ uint32_t proto_ids_offset = type_ids_offset + types_.size() * sizeof(DexFile::TypeId);
+ uint32_t proto_idx = 0u;
+ for (auto& entry : protos_) {
+ entry.second.idx = proto_idx;
+ proto_idx += 1u;
+ size_t num_args = entry.first.args.size();
+ if (num_args != 0u) {
+ entry.second.data_offset = RoundUp(data_section_size, 4u);
+ data_section_size = entry.second.data_offset + 4u + num_args * sizeof(DexFile::TypeItem);
+ } else {
+ entry.second.data_offset = 0u;
+ }
+ }
+ header->proto_ids_size_ = protos_.size();
+ header->proto_ids_off_ = protos_.empty() ? 0u : proto_ids_offset;
+
+ uint32_t field_ids_offset = proto_ids_offset + protos_.size() * sizeof(DexFile::ProtoId);
+ uint32_t field_idx = 0u;
+ for (auto& entry : fields_) {
+ entry.second = field_idx;
+ field_idx += 1u;
+ }
+ header->field_ids_size_ = fields_.size();
+ header->field_ids_off_ = fields_.empty() ? 0u : field_ids_offset;
+
+ uint32_t method_ids_offset = field_ids_offset + fields_.size() * sizeof(DexFile::FieldId);
+ uint32_t method_idx = 0u;
+ for (auto& entry : methods_) {
+ entry.second = method_idx;
+ method_idx += 1u;
+ }
+ header->method_ids_size_ = methods_.size();
+ header->method_ids_off_ = methods_.empty() ? 0u : method_ids_offset;
+
+ // No class defs.
+ header->class_defs_size_ = 0u;
+ header->class_defs_off_ = 0u;
+
+ uint32_t data_section_offset = method_ids_offset + methods_.size() * sizeof(DexFile::MethodId);
+ header->data_size_ = data_section_size;
+ header->data_off_ = (data_section_size != 0u) ? data_section_offset : 0u;
+
+ uint32_t total_size = data_section_offset + data_section_size;
+
+ dex_file_data_.resize(total_size);
+ std::memcpy(&dex_file_data_[0], header_data.data, sizeof(DexFile::Header));
+
+ for (const auto& entry : strings_) {
+ CHECK_LT(entry.first.size(), 128u);
+ uint32_t raw_offset = data_section_offset + entry.second.data_offset;
+ dex_file_data_[raw_offset] = static_cast<uint8_t>(entry.first.size());
+ std::memcpy(&dex_file_data_[raw_offset + 1], entry.first.c_str(), entry.first.size() + 1);
+ Write32(string_ids_offset + entry.second.idx * sizeof(DexFile::StringId), raw_offset);
+ }
+
+ for (const auto& entry : types_) {
+ Write32(type_ids_offset + entry.second * sizeof(DexFile::TypeId), GetStringIdx(entry.first));
+ }
+
+ for (const auto& entry : protos_) {
+ size_t num_args = entry.first.args.size();
+ uint32_t type_list_offset =
+ (num_args != 0u) ? data_section_offset + entry.second.data_offset : 0u;
+ uint32_t raw_offset = proto_ids_offset + entry.second.idx * sizeof(DexFile::ProtoId);
+ Write32(raw_offset + 0u, GetStringIdx(entry.first.shorty));
+ Write16(raw_offset + 4u, GetTypeIdx(entry.first.return_type));
+ Write32(raw_offset + 8u, type_list_offset);
+ if (num_args != 0u) {
+ CHECK_NE(entry.second.data_offset, 0u);
+ Write32(type_list_offset, num_args);
+ for (size_t i = 0; i != num_args; ++i) {
+ Write16(type_list_offset + 4u + i * sizeof(DexFile::TypeItem),
+ GetTypeIdx(entry.first.args[i]));
+ }
+ }
+ }
+
+ for (const auto& entry : fields_) {
+ uint32_t raw_offset = field_ids_offset + entry.second * sizeof(DexFile::FieldId);
+ Write16(raw_offset + 0u, GetTypeIdx(entry.first.class_descriptor));
+ Write16(raw_offset + 2u, GetTypeIdx(entry.first.type));
+ Write32(raw_offset + 4u, GetStringIdx(entry.first.name));
+ }
+
+ for (const auto& entry : methods_) {
+ uint32_t raw_offset = method_ids_offset + entry.second * sizeof(DexFile::MethodId);
+ Write16(raw_offset + 0u, GetTypeIdx(entry.first.class_descriptor));
+ auto it = protos_.find(*entry.first.proto);
+ CHECK(it != protos_.end());
+ Write16(raw_offset + 2u, it->second.idx);
+ Write32(raw_offset + 4u, GetStringIdx(entry.first.name));
+ }
+
+ // Leave checksum and signature as zeros.
+
+ std::string error_msg;
+ std::unique_ptr<const DexFile> dex_file(DexFile::Open(
+ &dex_file_data_[0], dex_file_data_.size(), dex_location, 0u, nullptr, &error_msg));
+ CHECK(dex_file != nullptr) << error_msg;
+ return dex_file;  // Local unique_ptr: implicitly moved, no std::move needed.
+ }
+
+ uint32_t GetStringIdx(const std::string& type) {
+ auto it = strings_.find(type);
+ CHECK(it != strings_.end());
+ return it->second.idx;
+ }
+
+ uint32_t GetTypeIdx(const std::string& type) {
+ auto it = types_.find(type);
+ CHECK(it != types_.end());
+ return it->second;
+ }
+
+ uint32_t GetFieldIdx(const std::string& class_descriptor, const std::string& type,
+ const std::string& name) {
+ FieldKey key = { class_descriptor, type, name };
+ auto it = fields_.find(key);
+ CHECK(it != fields_.end());
+ return it->second;
+ }
+
+ uint32_t GetMethodIdx(const std::string& class_descriptor, const std::string& signature,
+ const std::string& name) {
+ ProtoKey proto_key = CreateProtoKey(signature);
+ MethodKey method_key = { class_descriptor, name, &proto_key };
+ auto it = methods_.find(method_key);
+ CHECK(it != methods_.end());
+ return it->second;
+ }
+
+ private:
+ struct IdxAndDataOffset {
+ uint32_t idx;
+ uint32_t data_offset;
+ };
+
+ struct FieldKey {
+ const std::string class_descriptor;
+ const std::string type;
+ const std::string name;
+ };
+ struct FieldKeyComparator {
+ bool operator()(const FieldKey& lhs, const FieldKey& rhs) const {
+ if (lhs.class_descriptor != rhs.class_descriptor) {
+ return lhs.class_descriptor < rhs.class_descriptor;
+ }
+ if (lhs.name != rhs.name) {
+ return lhs.name < rhs.name;
+ }
+ return lhs.type < rhs.type;
+ }
+ };
+
+ struct ProtoKey {
+ std::string shorty;
+ std::string return_type;
+ std::vector<std::string> args;
+ };
+ struct ProtoKeyComparator {
+ bool operator()(const ProtoKey& lhs, const ProtoKey& rhs) const {
+ if (lhs.return_type != rhs.return_type) {
+ return lhs.return_type < rhs.return_type;
+ }
+ size_t min_args = std::min(lhs.args.size(), rhs.args.size());
+ for (size_t i = 0; i != min_args; ++i) {
+ if (lhs.args[i] != rhs.args[i]) {
+ return lhs.args[i] < rhs.args[i];
+ }
+ }
+ return lhs.args.size() < rhs.args.size();
+ }
+ };
+
+ struct MethodKey {
+ std::string class_descriptor;
+ std::string name;
+ const ProtoKey* proto;
+ };
+ struct MethodKeyComparator {
+ bool operator()(const MethodKey& lhs, const MethodKey& rhs) const {
+ if (lhs.class_descriptor != rhs.class_descriptor) {
+ return lhs.class_descriptor < rhs.class_descriptor;
+ }
+ if (lhs.name != rhs.name) {
+ return lhs.name < rhs.name;
+ }
+ return ProtoKeyComparator()(*lhs.proto, *rhs.proto);
+ }
+ };
+
+ ProtoKey CreateProtoKey(const std::string& signature) {
+ CHECK_EQ(signature[0], '(');
+ const char* args = signature.c_str() + 1;
+ const char* args_end = std::strchr(args, ')');
+ CHECK(args_end != nullptr);
+ const char* return_type = args_end + 1;
+
+ ProtoKey key = {
+ std::string() + ((*return_type == '[') ? 'L' : *return_type),
+ return_type,
+ std::vector<std::string>()
+ };
+ while (args != args_end) {
+ key.shorty += (*args == '[') ? 'L' : *args;
+ const char* arg_start = args;
+ while (*args == '[') {
+ ++args;
+ }
+ if (*args == 'L') {
+ do {
+ ++args;
+ CHECK_NE(args, args_end);
+ } while (*args != ';');
+ }
+ ++args;
+ key.args.emplace_back(arg_start, args);
+ }
+ return key;
+ }
+
+ void Write32(size_t offset, uint32_t value) {
+ CHECK_LE(offset + 4u, dex_file_data_.size());
+ CHECK_EQ(dex_file_data_[offset + 0], 0u);
+ CHECK_EQ(dex_file_data_[offset + 1], 0u);
+ CHECK_EQ(dex_file_data_[offset + 2], 0u);
+ CHECK_EQ(dex_file_data_[offset + 3], 0u);
+ dex_file_data_[offset + 0] = static_cast<uint8_t>(value >> 0);
+ dex_file_data_[offset + 1] = static_cast<uint8_t>(value >> 8);
+ dex_file_data_[offset + 2] = static_cast<uint8_t>(value >> 16);
+ dex_file_data_[offset + 3] = static_cast<uint8_t>(value >> 24);
+ }
+
+ void Write16(size_t offset, uint32_t value) {
+ CHECK_LE(value, 0xffffu);
+ CHECK_LE(offset + 2u, dex_file_data_.size());
+ CHECK_EQ(dex_file_data_[offset + 0], 0u);
+ CHECK_EQ(dex_file_data_[offset + 1], 0u);
+ dex_file_data_[offset + 0] = static_cast<uint8_t>(value >> 0);
+ dex_file_data_[offset + 1] = static_cast<uint8_t>(value >> 8);
+ }
+
+ std::map<std::string, IdxAndDataOffset> strings_;
+ std::map<std::string, uint32_t> types_;
+ std::map<FieldKey, uint32_t, FieldKeyComparator> fields_;
+ std::map<ProtoKey, IdxAndDataOffset, ProtoKeyComparator> protos_;
+ std::map<MethodKey, uint32_t, MethodKeyComparator> methods_;
+
+ std::vector<uint8_t> dex_file_data_;
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_TEST_DEX_FILE_BUILDER_H_
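
The least obvious part of the builder above is CreateProtoKey's shorty computation, where
array and class argument types both collapse to 'L'; a standalone sketch of that folding
(ShortyOf is illustrative, not ART code):

    #include <cassert>
    #include <string>

    // Return type first, then one character per argument; '[' and 'L' fold to 'L'.
    static std::string ShortyOf(const std::string& signature) {
      size_t close = signature.find(')');
      std::string shorty(1, signature[close + 1] == '[' ? 'L' : signature[close + 1]);
      for (size_t i = 1; i < close; ++i) {
        char c = signature[i];
        shorty += (c == '[' || c == 'L') ? 'L' : c;
        while (signature[i] == '[') {
          ++i;  // Skip array dimensions.
        }
        if (signature[i] == 'L') {
          i = signature.find(';', i);  // Skip the class name; the loop's ++i moves past ';'.
        }
      }
      return shorty;
    }

    int main() {
      assert(ShortyOf("(Ljava/lang/Object;[Ljava/lang/Object;)LTestClass;") == "LLL");
      assert(ShortyOf("()I") == "I");
      return 0;
    }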
diff --git a/compiler/utils/test_dex_file_builder_test.cc b/compiler/utils/test_dex_file_builder_test.cc
new file mode 100644
index 0000000..ee6e35d
--- /dev/null
+++ b/compiler/utils/test_dex_file_builder_test.cc
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "test_dex_file_builder.h"
+
+#include "dex_file-inl.h"
+#include "gtest/gtest.h"
+
+namespace art {
+
+TEST(TestDexFileBuilderTest, SimpleTest) {
+ TestDexFileBuilder builder;
+ builder.AddString("Arbitrary string");
+ builder.AddType("Ljava/lang/Class;");
+ builder.AddField("LTestClass;", "[I", "intField");
+ builder.AddMethod("LTestClass;", "()I", "foo");
+ builder.AddMethod("LTestClass;", "(Ljava/lang/Object;[Ljava/lang/Object;)LTestClass;", "bar");
+ const char* dex_location = "TestDexFileBuilder/SimpleTest";
+ std::unique_ptr<const DexFile> dex_file(builder.Build(dex_location));
+ ASSERT_TRUE(dex_file != nullptr);
+ EXPECT_STREQ(dex_location, dex_file->GetLocation().c_str());
+
+ static const char* const expected_strings[] = {
+ "Arbitrary string",
+ "I",
+ "LLL", // shorty
+ "LTestClass;",
+ "Ljava/lang/Class;",
+ "Ljava/lang/Object;",
+ "[I",
+ "[Ljava/lang/Object;",
+ "bar",
+ "foo",
+ "intField",
+ };
+ ASSERT_EQ(arraysize(expected_strings), dex_file->NumStringIds());
+ for (size_t i = 0; i != arraysize(expected_strings); ++i) {
+ EXPECT_STREQ(expected_strings[i], dex_file->GetStringData(dex_file->GetStringId(i))) << i;
+ }
+
+ static const char* const expected_types[] = {
+ "I",
+ "LTestClass;",
+ "Ljava/lang/Class;",
+ "Ljava/lang/Object;",
+ "[I",
+ "[Ljava/lang/Object;",
+ };
+ ASSERT_EQ(arraysize(expected_types), dex_file->NumTypeIds());
+ for (size_t i = 0; i != arraysize(expected_types); ++i) {
+ EXPECT_STREQ(expected_types[i], dex_file->GetTypeDescriptor(dex_file->GetTypeId(i))) << i;
+ }
+
+ ASSERT_EQ(1u, dex_file->NumFieldIds());
+ EXPECT_STREQ("[I TestClass.intField", PrettyField(0u, *dex_file).c_str());
+
+ ASSERT_EQ(2u, dex_file->NumProtoIds());
+ ASSERT_EQ(2u, dex_file->NumMethodIds());
+ EXPECT_STREQ("TestClass TestClass.bar(java.lang.Object, java.lang.Object[])",
+ PrettyMethod(0u, *dex_file).c_str());
+ EXPECT_STREQ("int TestClass.foo()",
+ PrettyMethod(1u, *dex_file).c_str());
+
+ EXPECT_EQ(0u, builder.GetStringIdx("Arbitrary string"));
+ EXPECT_EQ(2u, builder.GetTypeIdx("Ljava/lang/Class;"));
+ EXPECT_EQ(0u, builder.GetFieldIdx("LTestClass;", "[I", "intField"));
+ EXPECT_EQ(1u, builder.GetMethodIdx("LTestClass;", "()I", "foo"));
+ EXPECT_EQ(0u, builder.GetMethodIdx("LTestClass;", "(Ljava/lang/Object;[Ljava/lang/Object;)LTestClass;", "bar"));
+}
+
+} // namespace art
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 329698c..7e75200 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1292,32 +1292,62 @@
void X86Assembler::shll(Register reg, const Immediate& imm) {
- EmitGenericShift(4, reg, imm);
+ EmitGenericShift(4, Operand(reg), imm);
}
void X86Assembler::shll(Register operand, Register shifter) {
- EmitGenericShift(4, operand, shifter);
+ EmitGenericShift(4, Operand(operand), shifter);
+}
+
+
+void X86Assembler::shll(const Address& address, const Immediate& imm) {
+ EmitGenericShift(4, address, imm);
+}
+
+
+void X86Assembler::shll(const Address& address, Register shifter) {
+ EmitGenericShift(4, address, shifter);
}
void X86Assembler::shrl(Register reg, const Immediate& imm) {
- EmitGenericShift(5, reg, imm);
+ EmitGenericShift(5, Operand(reg), imm);
}
void X86Assembler::shrl(Register operand, Register shifter) {
- EmitGenericShift(5, operand, shifter);
+ EmitGenericShift(5, Operand(operand), shifter);
+}
+
+
+void X86Assembler::shrl(const Address& address, const Immediate& imm) {
+ EmitGenericShift(5, address, imm);
+}
+
+
+void X86Assembler::shrl(const Address& address, Register shifter) {
+ EmitGenericShift(5, address, shifter);
}
void X86Assembler::sarl(Register reg, const Immediate& imm) {
- EmitGenericShift(7, reg, imm);
+ EmitGenericShift(7, Operand(reg), imm);
}
void X86Assembler::sarl(Register operand, Register shifter) {
- EmitGenericShift(7, operand, shifter);
+ EmitGenericShift(7, Operand(operand), shifter);
+}
+
+
+void X86Assembler::sarl(const Address& address, const Immediate& imm) {
+ EmitGenericShift(7, address, imm);
+}
+
+
+void X86Assembler::sarl(const Address& address, Register shifter) {
+ EmitGenericShift(7, address, shifter);
}
@@ -1330,6 +1360,15 @@
}
+void X86Assembler::shld(Register dst, Register src, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xA4);
+ EmitRegisterOperand(src, dst);
+ EmitUint8(imm.value() & 0xFF);
+}
+
+
void X86Assembler::shrd(Register dst, Register src, Register shifter) {
DCHECK_EQ(ECX, shifter);
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -1339,6 +1378,15 @@
}
+void X86Assembler::shrd(Register dst, Register src, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xAC);
+ EmitRegisterOperand(src, dst);
+ EmitUint8(imm.value() & 0xFF);
+}
+
+
void X86Assembler::negl(Register reg) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF7);
@@ -1459,6 +1507,14 @@
}
+void X86Assembler::repne_scasw() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0xF2);
+ EmitUint8(0xAF);
+}
+
+
X86Assembler* X86Assembler::lock() {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF0);
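
The new repne_scasw is pure opcode spelling: 0x66 (operand-size override selecting the
16-bit scasw form), 0xF2 (REPNE), 0xAF (SCAS); the x86-64 assembler later in this change
emits the same three bytes. A trivial byte-level sketch:

    #include <cstdint>

    int main() {
      // repne scasw: operand-size override + REPNE prefix + SCAS opcode.
      const uint8_t kRepneScasw[3] = {0x66, 0xF2, 0xAF};
      return (kRepneScasw[0] == 0x66 && kRepneScasw[1] == 0xF2 && kRepneScasw[2] == 0xAF)
                 ? 0 : 1;
    }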
@@ -1622,28 +1678,28 @@
void X86Assembler::EmitGenericShift(int reg_or_opcode,
- Register reg,
+ const Operand& operand,
const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
CHECK(imm.is_int8());
if (imm.value() == 1) {
EmitUint8(0xD1);
- EmitOperand(reg_or_opcode, Operand(reg));
+ EmitOperand(reg_or_opcode, operand);
} else {
EmitUint8(0xC1);
- EmitOperand(reg_or_opcode, Operand(reg));
+ EmitOperand(reg_or_opcode, operand);
EmitUint8(imm.value() & 0xFF);
}
}
void X86Assembler::EmitGenericShift(int reg_or_opcode,
- Register operand,
+ const Operand& operand,
Register shifter) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
CHECK_EQ(shifter, ECX);
EmitUint8(0xD3);
- EmitOperand(reg_or_opcode, Operand(operand));
+ EmitOperand(reg_or_opcode, operand);
}
static dwarf::Reg DWARFReg(Register reg) {
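
In EmitGenericShift the first argument (4 = shl, 5 = shr, 7 = sar) is not a register but the
opcode extension carried in the ModRM reg field; a sketch of the register-direct encoding
path (EncodeShift is illustrative):

    #include <cstdint>
    #include <vector>

    // shl/shr/sar r32: 0xD1 /n when the count is 1 (implicit), else 0xC1 /n ib.
    static std::vector<uint8_t> EncodeShift(int reg_or_opcode, int reg, int imm) {
      uint8_t modrm = 0xC0 | (reg_or_opcode << 3) | reg;  // mod=11: register direct.
      if (imm == 1) {
        return {0xD1, modrm};
      }
      return {0xC1, modrm, static_cast<uint8_t>(imm)};
    }

    int main() {
      // sarl $5, %eax -> C1 F8 05 (extension 7, EAX = 0).
      std::vector<uint8_t> bytes = EncodeShift(7, 0, 5);
      return (bytes[0] == 0xC1 && bytes[1] == 0xF8 && bytes[2] == 0x05) ? 0 : 1;
    }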
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index a933474..136b0cb 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -430,12 +430,20 @@
void shll(Register reg, const Immediate& imm);
void shll(Register operand, Register shifter);
+ void shll(const Address& address, const Immediate& imm);
+ void shll(const Address& address, Register shifter);
void shrl(Register reg, const Immediate& imm);
void shrl(Register operand, Register shifter);
+ void shrl(const Address& address, const Immediate& imm);
+ void shrl(const Address& address, Register shifter);
void sarl(Register reg, const Immediate& imm);
void sarl(Register operand, Register shifter);
+ void sarl(const Address& address, const Immediate& imm);
+ void sarl(const Address& address, Register shifter);
void shld(Register dst, Register src, Register shifter);
+ void shld(Register dst, Register src, const Immediate& imm);
void shrd(Register dst, Register src, Register shifter);
+ void shrd(Register dst, Register src, const Immediate& imm);
void negl(Register reg);
void notl(Register reg);
@@ -456,6 +464,8 @@
void jmp(const Address& address);
void jmp(Label* label);
+ void repne_scasw();
+
X86Assembler* lock();
void cmpxchgl(const Address& address, Register reg);
void cmpxchg8b(const Address& address);
@@ -576,17 +586,17 @@
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_reg to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
- // NULL.
- void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
- bool null_allowed) OVERRIDE;
+ // null.
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
+ ManagedRegister in_reg, bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_off to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister scratch,
- bool null_allowed) OVERRIDE;
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
+ ManagedRegister scratch, bool null_allowed) OVERRIDE;
// src holds a handle scope entry (Object**) load this into dst
void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
@@ -620,8 +630,8 @@
void EmitLabelLink(Label* label);
void EmitNearLabelLink(Label* label);
- void EmitGenericShift(int rm, Register reg, const Immediate& imm);
- void EmitGenericShift(int rm, Register operand, Register shifter);
+ void EmitGenericShift(int rm, const Operand& operand, const Immediate& imm);
+ void EmitGenericShift(int rm, const Operand& operand, Register shifter);
DISALLOW_COPY_AND_ASSIGN(X86Assembler);
};
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index f326e49..aacc57b 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -190,4 +190,10 @@
DriverStr(expected, "FPUIntegerStore");
}
+TEST_F(AssemblerX86Test, Repnescasw) {
+ GetAssembler()->repne_scasw();
+ const char* expected = "repne scasw\n";
+ DriverStr(expected, "Repnescasw");
+}
+
} // namespace art
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 32204a9..feceeca 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -128,6 +128,16 @@
}
+void X86_64Assembler::movq(const Address& dst, const Immediate& imm) {
+ CHECK(imm.is_int32());
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(dst);
+ EmitUint8(0xC7);
+ EmitOperand(0, dst);
+ EmitImmediate(imm);
+}
+
+
void X86_64Assembler::movq(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
// 0x89 is movq r/m64 <- r64, with op1 in r/m and op2 in reg: so reverse EmitRex64
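
The CHECK(imm.is_int32()) in the new movq(Address, Immediate) overload mirrors the ISA:
REX.W C7 /0 carries only a 32-bit immediate that the CPU sign-extends to 64 bits, so values
outside the int32_t range cannot be encoded this way. A minimal sketch of that extension:

    #include <cstdint>

    int main() {
      // movq $-5, mem encodes the immediate as FB FF FF FF (little-endian int32)...
      int32_t imm32 = -5;
      // ...and the hardware sign-extends it to the full 64-bit store value.
      int64_t stored = imm32;
      return stored == -5 ? 0 : 1;
    }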
@@ -388,7 +398,7 @@
void X86_64Assembler::movsxd(CpuRegister dst, const Address& src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitRex64(dst);
+ EmitRex64(dst, src);
EmitUint8(0x63);
EmitOperand(dst.LowBits(), src);
}
@@ -652,6 +662,21 @@
}
+void X86_64Assembler::cvtsi2ss(XmmRegister dst, const Address& src, bool is64bit) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ if (is64bit) {
+ // Emit a REX.W prefix if the operand size is 64 bits.
+ EmitRex64(dst, src);
+ } else {
+ EmitOptionalRex32(dst, src);
+ }
+ EmitUint8(0x0F);
+ EmitUint8(0x2A);
+ EmitOperand(dst.LowBits(), src);
+}
+
+
void X86_64Assembler::cvtsi2sd(XmmRegister dst, CpuRegister src) {
cvtsi2sd(dst, src, false);
}
@@ -672,6 +697,21 @@
}
+void X86_64Assembler::cvtsi2sd(XmmRegister dst, const Address& src, bool is64bit) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ if (is64bit) {
+ // Emit a REX.W prefix if the operand size is 64 bits.
+ EmitRex64(dst, src);
+ } else {
+ EmitOptionalRex32(dst, src);
+ }
+ EmitUint8(0x0F);
+ EmitUint8(0x2A);
+ EmitOperand(dst.LowBits(), src);
+}
+
+
void X86_64Assembler::cvtss2si(CpuRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF3);
@@ -692,6 +732,16 @@
}
+void X86_64Assembler::cvtss2sd(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x5A);
+ EmitOperand(dst.LowBits(), src);
+}
+
+
void X86_64Assembler::cvtsd2si(CpuRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF2);
@@ -752,6 +802,16 @@
}
+void X86_64Assembler::cvtsd2ss(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitOptionalRex32(dst, src);
+ EmitUint8(0x0F);
+ EmitUint8(0x5A);
+ EmitOperand(dst.LowBits(), src);
+}
+
+
void X86_64Assembler::cvtdq2pd(XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xF3);
@@ -771,6 +831,15 @@
}
+void X86_64Assembler::comiss(XmmRegister a, const Address& b) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(a, b);
+ EmitUint8(0x0F);
+ EmitUint8(0x2F);
+ EmitOperand(a.LowBits(), b);
+}
+
+
void X86_64Assembler::comisd(XmmRegister a, XmmRegister b) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
@@ -780,6 +849,17 @@
EmitXmmRegisterOperand(a.LowBits(), b);
}
+
+void X86_64Assembler::comisd(XmmRegister a, const Address& b) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(a, b);
+ EmitUint8(0x0F);
+ EmitUint8(0x2F);
+ EmitOperand(a.LowBits(), b);
+}
+
+
void X86_64Assembler::ucomiss(XmmRegister a, XmmRegister b) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(a, b);
@@ -789,6 +869,15 @@
}
+void X86_64Assembler::ucomiss(XmmRegister a, const Address& b) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOptionalRex32(a, b);
+ EmitUint8(0x0F);
+ EmitUint8(0x2E);
+ EmitOperand(a.LowBits(), b);
+}
+
+
void X86_64Assembler::ucomisd(XmmRegister a, XmmRegister b) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
@@ -799,6 +888,16 @@
}
+void X86_64Assembler::ucomisd(XmmRegister a, const Address& b) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitOptionalRex32(a, b);
+ EmitUint8(0x0F);
+ EmitUint8(0x2E);
+ EmitOperand(a.LowBits(), b);
+}
+
+
void X86_64Assembler::roundsd(XmmRegister dst, XmmRegister src, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
@@ -1161,7 +1260,7 @@
void X86_64Assembler::cmpq(CpuRegister reg, const Address& address) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitRex64(reg);
+ EmitRex64(reg, address);
EmitUint8(0x3B);
EmitOperand(reg.LowBits(), address);
}
@@ -1243,7 +1342,7 @@
void X86_64Assembler::testq(CpuRegister reg, const Address& address) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitRex64(reg);
+ EmitRex64(reg, address);
EmitUint8(0x85);
EmitOperand(reg.LowBits(), address);
}
@@ -1288,6 +1387,14 @@
}
+void X86_64Assembler::andq(CpuRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(dst, src);
+ EmitUint8(0x23);
+ EmitOperand(dst.LowBits(), src);
+}
+
+
void X86_64Assembler::orl(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(dst, src);
@@ -1327,6 +1434,14 @@
}
+void X86_64Assembler::orq(CpuRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(dst, src);
+ EmitUint8(0x0B);
+ EmitOperand(dst.LowBits(), src);
+}
+
+
void X86_64Assembler::xorl(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(dst, src);
@@ -1365,6 +1480,14 @@
EmitComplex(6, Operand(dst), imm);
}
+void X86_64Assembler::xorq(CpuRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitRex64(dst, src);
+ EmitUint8(0x33);
+ EmitOperand(dst.LowBits(), src);
+}
+
+
#if 0
void X86_64Assembler::rex(bool force, bool w, Register* r, Register* x, Register* b) {
// REX.WRXB
@@ -1435,7 +1558,7 @@
void X86_64Assembler::addq(CpuRegister dst, const Address& address) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitRex64(dst);
+ EmitRex64(dst, address);
EmitUint8(0x03);
EmitOperand(dst.LowBits(), address);
}
@@ -1498,7 +1621,7 @@
void X86_64Assembler::subq(CpuRegister reg, const Address& address) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- EmitRex64(reg);
+ EmitRex64(reg, address);
EmitUint8(0x2B);
EmitOperand(reg.LowBits() & 7, address);
}
@@ -1942,6 +2065,14 @@
}
+void X86_64Assembler::repne_scasw() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0xF2);
+ EmitUint8(0xAF);
+}
+
+
void X86_64Assembler::LoadDoubleConstant(XmmRegister dst, double value) {
// TODO: Need to have a code constants table.
int64_t constant = bit_cast<int64_t, double>(value);
@@ -2182,9 +2313,15 @@
if (dst.NeedsRex()) {
rex |= 0x44; // REX.0R00
}
- if (rex != 0) {
- EmitUint8(rex);
+ EmitUint8(rex);
+}
+
+void X86_64Assembler::EmitRex64(XmmRegister dst, const Operand& operand) {
+ uint8_t rex = 0x48 | operand.rex(); // REX.W000
+ if (dst.NeedsRex()) {
+ rex |= 0x44; // REX.0R00
}
+ EmitUint8(rex);
}
void X86_64Assembler::EmitOptionalByteRegNormalizingRex32(CpuRegister dst, CpuRegister src) {
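
The new EmitRex64(XmmRegister, const Operand&) builds the REX byte from fixed fields:
0x48 is 0100 1000 (the REX marker with W set for 64-bit operand size), the Operand supplies
the X and B bits for an extended index or base register, and R is added when the XMM register
is xmm8-xmm15. A sketch of that composition (Rex64 is illustrative):

    #include <cstdint>

    // REX layout: 0100WRXB.
    static uint8_t Rex64(bool reg_needs_rex, uint8_t operand_rex_bits) {
      uint8_t rex = 0x48 | operand_rex_bits;  // REX marker with W set.
      if (reg_needs_rex) {
        rex |= 0x04;  // REX.R: the reg field names xmm8-xmm15.
      }
      return rex;
    }

    int main() {
      // e.g. cvtsi2sdq (%rax), %xmm12: REX.W + REX.R = 0x4C.
      return Rex64(/*reg_needs_rex=*/true, /*operand_rex_bits=*/0) == 0x4C ? 0 : 1;
    }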
@@ -2622,7 +2759,7 @@
X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
X86_64ManagedRegister in_reg = min_reg.AsX86_64();
if (in_reg.IsNoRegister()) { // TODO(64): && null_allowed
- // Use out_reg as indicator of NULL
+ // Use out_reg as indicator of null.
in_reg = out_reg;
// TODO: movzwl
movl(in_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 16ef70b..162714a 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -328,6 +328,7 @@
void movq(CpuRegister dst, const Address& src);
void movl(CpuRegister dst, const Address& src);
void movq(const Address& dst, CpuRegister src);
+ void movq(const Address& dst, const Immediate& src);
void movl(const Address& dst, CpuRegister src);
void movl(const Address& dst, const Immediate& imm);
@@ -391,14 +392,18 @@
void cvtsi2ss(XmmRegister dst, CpuRegister src); // Note: this is the r/m32 version.
void cvtsi2ss(XmmRegister dst, CpuRegister src, bool is64bit);
+ void cvtsi2ss(XmmRegister dst, const Address& src, bool is64bit);
void cvtsi2sd(XmmRegister dst, CpuRegister src); // Note: this is the r/m32 version.
void cvtsi2sd(XmmRegister dst, CpuRegister src, bool is64bit);
+ void cvtsi2sd(XmmRegister dst, const Address& src, bool is64bit);
void cvtss2si(CpuRegister dst, XmmRegister src); // Note: this is the r32 version.
void cvtss2sd(XmmRegister dst, XmmRegister src);
+ void cvtss2sd(XmmRegister dst, const Address& src);
void cvtsd2si(CpuRegister dst, XmmRegister src); // Note: this is the r32 version.
void cvtsd2ss(XmmRegister dst, XmmRegister src);
+ void cvtsd2ss(XmmRegister dst, const Address& src);
void cvttss2si(CpuRegister dst, XmmRegister src); // Note: this is the r32 version.
void cvttss2si(CpuRegister dst, XmmRegister src, bool is64bit);
@@ -408,9 +413,13 @@
void cvtdq2pd(XmmRegister dst, XmmRegister src);
void comiss(XmmRegister a, XmmRegister b);
+ void comiss(XmmRegister a, const Address& b);
void comisd(XmmRegister a, XmmRegister b);
+ void comisd(XmmRegister a, const Address& b);
void ucomiss(XmmRegister a, XmmRegister b);
+ void ucomiss(XmmRegister a, const Address& b);
void ucomisd(XmmRegister a, XmmRegister b);
+ void ucomisd(XmmRegister a, const Address& b);
void roundsd(XmmRegister dst, XmmRegister src, const Immediate& imm);
void roundss(XmmRegister dst, XmmRegister src, const Immediate& imm);
@@ -487,18 +496,21 @@
void andl(CpuRegister reg, const Address& address);
void andq(CpuRegister dst, const Immediate& imm);
void andq(CpuRegister dst, CpuRegister src);
+ void andq(CpuRegister reg, const Address& address);
void orl(CpuRegister dst, const Immediate& imm);
void orl(CpuRegister dst, CpuRegister src);
void orl(CpuRegister reg, const Address& address);
void orq(CpuRegister dst, CpuRegister src);
void orq(CpuRegister dst, const Immediate& imm);
+ void orq(CpuRegister reg, const Address& address);
void xorl(CpuRegister dst, CpuRegister src);
void xorl(CpuRegister dst, const Immediate& imm);
void xorl(CpuRegister reg, const Address& address);
void xorq(CpuRegister dst, const Immediate& imm);
void xorq(CpuRegister dst, CpuRegister src);
+ void xorq(CpuRegister reg, const Address& address);
void addl(CpuRegister dst, CpuRegister src);
void addl(CpuRegister reg, const Immediate& imm);
@@ -589,6 +601,8 @@
void bswapl(CpuRegister dst);
void bswapq(CpuRegister dst);
+ void repne_scasw();
+
//
// Macros for High-level operations.
//
@@ -699,17 +713,17 @@
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_reg to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the handle scope entry to see if the value is
- // NULL.
- void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
- bool null_allowed) OVERRIDE;
+ // null.
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
+ ManagedRegister in_reg, bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+ // Set up out_off to hold an Object** into the handle scope, or to be null if the
// value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister scratch,
- bool null_allowed) OVERRIDE;
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
+ ManagedRegister scratch, bool null_allowed) OVERRIDE;
// src holds a handle scope entry (Object**) load this into dst
virtual void LoadReferenceFromHandleScope(ManagedRegister dst,
@@ -789,6 +803,7 @@
void EmitRex64(const Operand& operand);
void EmitRex64(CpuRegister dst, CpuRegister src);
void EmitRex64(CpuRegister dst, const Operand& operand);
+ void EmitRex64(XmmRegister dst, const Operand& operand);
void EmitRex64(XmmRegister dst, CpuRegister src);
void EmitRex64(CpuRegister dst, XmmRegister src);
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 5ca0373..0be4d63 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -667,6 +667,135 @@
DriverStr(expected, "movw");
}
+TEST_F(AssemblerX86_64Test, MovqAddrImm) {
+ GetAssembler()->movq(x86_64::Address(x86_64::CpuRegister(x86_64::RAX), 0),
+ x86_64::Immediate(-5));
+ const char* expected = "movq $-5, 0(%RAX)\n";
+ DriverStr(expected, "movq");
+}
+
+TEST_F(AssemblerX86_64Test, Cvtsi2ssAddr) {
+ GetAssembler()->cvtsi2ss(x86_64::XmmRegister(x86_64::XMM0),
+ x86_64::Address(x86_64::CpuRegister(x86_64::RAX), 0),
+ false);
+ GetAssembler()->cvtsi2ss(x86_64::XmmRegister(x86_64::XMM0),
+ x86_64::Address(x86_64::CpuRegister(x86_64::RAX), 0),
+ true);
+ const char* expected = "cvtsi2ss 0(%RAX), %xmm0\n"
+ "cvtsi2ssq 0(%RAX), %xmm0\n";
+ DriverStr(expected, "cvtsi2ss");
+}
+
+TEST_F(AssemblerX86_64Test, Cvtsi2sdAddr) {
+ GetAssembler()->cvtsi2sd(x86_64::XmmRegister(x86_64::XMM0),
+ x86_64::Address(x86_64::CpuRegister(x86_64::RAX), 0),
+ false);
+ GetAssembler()->cvtsi2sd(x86_64::XmmRegister(x86_64::XMM0),
+ x86_64::Address(x86_64::CpuRegister(x86_64::RAX), 0),
+ true);
+ const char* expected = "cvtsi2sd 0(%RAX), %xmm0\n"
+ "cvtsi2sdq 0(%RAX), %xmm0\n";
+ DriverStr(expected, "cvtsi2sd");
+}
+
+TEST_F(AssemblerX86_64Test, CmpqAddr) {
+ GetAssembler()->cmpq(x86_64::CpuRegister(x86_64::R12),
+ x86_64::Address(x86_64::CpuRegister(x86_64::R9), 0));
+ const char* expected = "cmpq 0(%R9), %R12\n";
+ DriverStr(expected, "cmpq");
+}
+
+TEST_F(AssemblerX86_64Test, MovsxdAddr) {
+ GetAssembler()->movsxd(x86_64::CpuRegister(x86_64::R12),
+ x86_64::Address(x86_64::CpuRegister(x86_64::R9), 0));
+ const char* expected = "movslq 0(%R9), %R12\n";
+ DriverStr(expected, "movsxd");
+}
+
+TEST_F(AssemblerX86_64Test, TestqAddr) {
+ GetAssembler()->testq(x86_64::CpuRegister(x86_64::R12),
+ x86_64::Address(x86_64::CpuRegister(x86_64::R9), 0));
+ const char* expected = "testq 0(%R9), %R12\n";
+ DriverStr(expected, "testq");
+}
+
+TEST_F(AssemblerX86_64Test, AddqAddr) {
+ GetAssembler()->addq(x86_64::CpuRegister(x86_64::R12),
+ x86_64::Address(x86_64::CpuRegister(x86_64::R9), 0));
+ const char* expected = "addq 0(%R9), %R12\n";
+ DriverStr(expected, "addq");
+}
+
+TEST_F(AssemblerX86_64Test, SubqAddr) {
+ GetAssembler()->subq(x86_64::CpuRegister(x86_64::R12),
+ x86_64::Address(x86_64::CpuRegister(x86_64::R9), 0));
+ const char* expected = "subq 0(%R9), %R12\n";
+ DriverStr(expected, "subq");
+}
+
+TEST_F(AssemblerX86_64Test, Cvtss2sdAddr) {
+ GetAssembler()->cvtss2sd(x86_64::XmmRegister(x86_64::XMM0),
+ x86_64::Address(x86_64::CpuRegister(x86_64::RAX), 0));
+ const char* expected = "cvtss2sd 0(%RAX), %xmm0\n";
+ DriverStr(expected, "cvtss2sd");
+}
+
+TEST_F(AssemblerX86_64Test, Cvtsd2ssAddr) {
+ GetAssembler()->cvtsd2ss(x86_64::XmmRegister(x86_64::XMM0),
+ x86_64::Address(x86_64::CpuRegister(x86_64::RAX), 0));
+ const char* expected = "cvtsd2ss 0(%RAX), %xmm0\n";
+ DriverStr(expected, "cvtsd2ss");
+}
+
+TEST_F(AssemblerX86_64Test, ComissAddr) {
+ GetAssembler()->comiss(x86_64::XmmRegister(x86_64::XMM14),
+ x86_64::Address(x86_64::CpuRegister(x86_64::RAX), 0));
+ const char* expected = "comiss 0(%RAX), %xmm14\n";
+ DriverStr(expected, "comiss");
+}
+
+TEST_F(AssemblerX86_64Test, ComisdAddr) {
+ GetAssembler()->comisd(x86_64::XmmRegister(x86_64::XMM0),
+ x86_64::Address(x86_64::CpuRegister(x86_64::R9), 0));
+ const char* expected = "comisd 0(%R9), %xmm0\n";
+ DriverStr(expected, "comisd");
+}
+
+TEST_F(AssemblerX86_64Test, UComissAddr) {
+ GetAssembler()->ucomiss(x86_64::XmmRegister(x86_64::XMM0),
+ x86_64::Address(x86_64::CpuRegister(x86_64::RAX), 0));
+ const char* expected = "ucomiss 0(%RAX), %xmm0\n";
+ DriverStr(expected, "ucomiss");
+}
+
+TEST_F(AssemblerX86_64Test, UComisdAddr) {
+ GetAssembler()->ucomisd(x86_64::XmmRegister(x86_64::XMM0),
+ x86_64::Address(x86_64::CpuRegister(x86_64::RAX), 0));
+ const char* expected = "ucomisd 0(%RAX), %xmm0\n";
+ DriverStr(expected, "ucomisd");
+}
+
+TEST_F(AssemblerX86_64Test, Andq) {
+ GetAssembler()->andq(x86_64::CpuRegister(x86_64::R9),
+ x86_64::Address(x86_64::CpuRegister(x86_64::RAX), 0));
+ const char* expected = "andq 0(%RAX), %r9\n";
+ DriverStr(expected, "andq");
+}
+
+TEST_F(AssemblerX86_64Test, Orq) {
+ GetAssembler()->orq(x86_64::CpuRegister(x86_64::R9),
+ x86_64::Address(x86_64::CpuRegister(x86_64::RAX), 0));
+ const char* expected = "orq 0(%RAX), %r9\n";
+ DriverStr(expected, "orq");
+}
+
+TEST_F(AssemblerX86_64Test, Xorq) {
+ GetAssembler()->xorq(x86_64::CpuRegister(x86_64::R9),
+ x86_64::Address(x86_64::CpuRegister(x86_64::RAX), 0));
+ const char* expected = "xorq 0(%RAX), %r9\n";
+ DriverStr(expected, "xorq");
+}
+
TEST_F(AssemblerX86_64Test, Movsxd) {
DriverStr(RepeatRr(&x86_64::X86_64Assembler::movsxd, "movsxd %{reg2}, %{reg1}"), "movsxd");
}
@@ -1086,4 +1215,10 @@
DriverStr(Repeatrb(&x86_64::X86_64Assembler::movsxb, "movsbl %{reg2}, %{reg1}"), "movsxb");
}
+TEST_F(AssemblerX86_64Test, Repnescasw) {
+ GetAssembler()->repne_scasw();
+ const char* expected = "repne scasw\n";
+ DriverStr(expected, "Repnescasw");
+}
+
} // namespace art
diff --git a/dalvikvm/dalvikvm.cc b/dalvikvm/dalvikvm.cc
index 7839aa8..85debe4 100644
--- a/dalvikvm/dalvikvm.cc
+++ b/dalvikvm/dalvikvm.cc
@@ -31,25 +31,26 @@
// Determine whether or not the specified method is public.
static bool IsMethodPublic(JNIEnv* env, jclass c, jmethodID method_id) {
ScopedLocalRef<jobject> reflected(env, env->ToReflectedMethod(c, method_id, JNI_FALSE));
- if (reflected.get() == NULL) {
+ if (reflected.get() == nullptr) {
fprintf(stderr, "Failed to get reflected method\n");
return false;
}
// We now have a Method instance. We need to call its
// getModifiers() method.
jclass method_class = env->FindClass("java/lang/reflect/Method");
- if (method_class == NULL) {
+ if (method_class == nullptr) {
fprintf(stderr, "Failed to find class java.lang.reflect.Method\n");
return false;
}
jmethodID mid = env->GetMethodID(method_class, "getModifiers", "()I");
- if (mid == NULL) {
+ if (mid == nullptr) {
fprintf(stderr, "Failed to find java.lang.reflect.Method.getModifiers\n");
return false;
}
int modifiers = env->CallIntMethod(reflected.get(), mid);
static const int PUBLIC = 0x0001; // java.lang.reflect.Modifiers.PUBLIC
if ((modifiers & PUBLIC) == 0) {
+ fprintf(stderr, "Modifiers mismatch\n");
return false;
}
return true;
@@ -60,7 +61,7 @@
// it. Create an array and populate it. Note argv[0] is not
// included.
ScopedLocalRef<jobjectArray> args(env, toStringArray(env, argv + 1));
- if (args.get() == NULL) {
+ if (args.get() == nullptr) {
env->ExceptionDescribe();
return EXIT_FAILURE;
}
@@ -72,14 +73,14 @@
std::replace(class_name.begin(), class_name.end(), '.', '/');
ScopedLocalRef<jclass> klass(env, env->FindClass(class_name.c_str()));
- if (klass.get() == NULL) {
+ if (klass.get() == nullptr) {
fprintf(stderr, "Unable to locate class '%s'\n", class_name.c_str());
env->ExceptionDescribe();
return EXIT_FAILURE;
}
jmethodID method = env->GetStaticMethodID(klass.get(), "main", "([Ljava/lang/String;)V");
- if (method == NULL) {
+ if (method == nullptr) {
fprintf(stderr, "Unable to find static main(String[]) in '%s'\n", class_name.c_str());
env->ExceptionDescribe();
return EXIT_FAILURE;
@@ -105,7 +106,7 @@
// Parse arguments. Most of it just gets passed through to the runtime.
// The JNI spec defines a handful of standard arguments.
static int dalvikvm(int argc, char** argv) {
- setvbuf(stdout, NULL, _IONBF, 0);
+ setvbuf(stdout, nullptr, _IONBF, 0);
// Skip over argv[0].
argv++;
@@ -124,8 +125,8 @@
//
// [Do we need to catch & handle "-jar" here?]
bool need_extra = false;
- const char* lib = NULL;
- const char* what = NULL;
+ const char* lib = nullptr;
+ const char* what = nullptr;
int curr_opt, arg_idx;
for (curr_opt = arg_idx = 0; arg_idx < argc; arg_idx++) {
if (argv[arg_idx][0] != '-' && !need_extra) {
@@ -171,8 +172,8 @@
init_args.ignoreUnrecognized = JNI_FALSE;
// Start the runtime. The current thread becomes the main thread.
- JavaVM* vm = NULL;
- JNIEnv* env = NULL;
+ JavaVM* vm = nullptr;
+ JNIEnv* env = nullptr;
if (JNI_CreateJavaVM(&vm, &env, &init_args) != JNI_OK) {
fprintf(stderr, "Failed to initialize runtime (check log for details)\n");
return EXIT_FAILURE;
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 70b4213..43bec37 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
@@ -324,26 +325,19 @@
return nullptr;
}
- static void Message(char severity, const std::string& message) {
- // TODO: Remove when we switch to LOG when we can guarantee it won't prevent shutdown in error
- // cases.
- fprintf(stderr, "dex2oat%s %c %d %d %s\n",
- kIsDebugBuild ? "d" : "",
- severity,
- getpid(),
- GetTid(),
- message.c_str());
- }
-
NO_RETURN static void Fatal(const std::string& message) {
- Message('F', message);
+ // TODO: When we can guarantee it won't prevent shutdown in error cases, move to LOG. However,
+ // it's rather easy to hang in unwinding.
+ // LogLine also avoids ART logging lock issues, as it's really only a wrapper around
+ // logcat logging or stderr output.
+ LogMessage::LogLine(__FILE__, __LINE__, LogSeverity::FATAL, message.c_str());
exit(1);
}
void Wait() {
// TODO: tune the multiplier for GC verification, the following is just to make the timeout
// large.
- int64_t multiplier = kVerifyObjectSupport > kVerifyObjectModeFast ? 100 : 1;
+ constexpr int64_t multiplier = kVerifyObjectSupport > kVerifyObjectModeFast ? 100 : 1;
timespec timeout_ts;
InitTimeSpec(true, CLOCK_REALTIME, multiplier * kWatchDogTimeoutSeconds * 1000, 0, &timeout_ts);
const char* reason = "dex2oat watch dog thread waiting";
@@ -351,7 +345,8 @@
while (!shutting_down_) {
int rc = TEMP_FAILURE_RETRY(pthread_cond_timedwait(&cond_, &mutex_, &timeout_ts));
if (rc == ETIMEDOUT) {
- Fatal(StringPrintf("dex2oat did not finish after %d seconds", kWatchDogTimeoutSeconds));
+ Fatal(StringPrintf("dex2oat did not finish after %" PRId64 " seconds",
+ kWatchDogTimeoutSeconds));
} else if (rc != 0) {
std::string message(StringPrintf("pthread_cond_timedwait failed: %s",
strerror(errno)));
@@ -363,10 +358,10 @@
// When setting timeouts, keep in mind that the build server may not be as fast as your desktop.
// Debug builds are slower so they have larger timeouts.
- static const unsigned int kSlowdownFactor = kIsDebugBuild ? 5U : 1U;
+ static constexpr int64_t kSlowdownFactor = kIsDebugBuild ? 5U : 1U;
- // 6 minutes scaled by kSlowdownFactor.
- static const unsigned int kWatchDogTimeoutSeconds = kSlowdownFactor * 6 * 60;
+ // 10 minutes scaled by kSlowdownFactor.
+ static constexpr int64_t kWatchDogTimeoutSeconds = kSlowdownFactor * 10 * 60;
bool is_watch_dog_enabled_;
bool shutting_down_;
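
With these constants the watchdog now fires after 10 * 60 = 600 seconds (10 minutes) on
release builds and 5 * 600 = 3000 seconds (50 minutes) on debug builds, up from 6 and 30
minutes before this change.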
@@ -445,6 +440,8 @@
image_classes_filename_(nullptr),
compiled_classes_zip_filename_(nullptr),
compiled_classes_filename_(nullptr),
+ compiled_methods_zip_filename_(nullptr),
+ compiled_methods_filename_(nullptr),
image_(false),
is_host_(false),
dump_stats_(false),
@@ -564,6 +561,10 @@
compiled_classes_filename_ = option.substr(strlen("--compiled-classes=")).data();
} else if (option.starts_with("--compiled-classes-zip=")) {
compiled_classes_zip_filename_ = option.substr(strlen("--compiled-classes-zip=")).data();
+ } else if (option.starts_with("--compiled-methods=")) {
+ compiled_methods_filename_ = option.substr(strlen("--compiled-methods=")).data();
+ } else if (option.starts_with("--compiled-methods-zip=")) {
+ compiled_methods_zip_filename_ = option.substr(strlen("--compiled-methods-zip=")).data();
} else if (option.starts_with("--base=")) {
const char* image_base_str = option.substr(strlen("--base=")).data();
char* end;
@@ -690,6 +691,8 @@
include_cfi = false;
} else if (option == "--debuggable") {
debuggable = true;
+ include_debug_symbols = true;
+ include_cfi = true;
} else if (option.starts_with("--profile-file=")) {
profile_file_ = option.substr(strlen("--profile-file=")).data();
VLOG(compiler) << "dex2oat: profile file is " << profile_file_;
@@ -976,7 +979,10 @@
oss.str(""); // Reset.
oss << kRuntimeISA;
key_value_store_->Put(OatHeader::kDex2OatHostKey, oss.str());
- key_value_store_->Put(OatHeader::kPicKey, compile_pic ? "true" : "false");
+ key_value_store_->Put(OatHeader::kPicKey,
+ compile_pic ? OatHeader::kTrueValue : OatHeader::kFalseValue);
+ key_value_store_->Put(OatHeader::kDebuggableKey,
+ debuggable ? OatHeader::kTrueValue : OatHeader::kFalseValue);
}
}
@@ -1092,8 +1098,8 @@
std::string error_msg;
if (image_classes_zip_filename_ != nullptr) {
image_classes_.reset(ReadImageClassesFromZip(image_classes_zip_filename_,
- image_classes_filename_,
- &error_msg));
+ image_classes_filename_,
+ &error_msg));
} else {
image_classes_.reset(ReadImageClassesFromFile(image_classes_filename_));
}
@@ -1121,9 +1127,29 @@
<< compiled_classes_filename_ << "': " << error_msg;
return false;
}
- } else if (image_) {
+ } else {
compiled_classes_.reset(nullptr); // By default compile everything.
}
+ // If --compiled-methods was specified, read the methods to compile from the given file(s).
+ if (compiled_methods_filename_ != nullptr) {
+ std::string error_msg;
+ if (compiled_methods_zip_filename_ != nullptr) {
+ compiled_methods_.reset(ReadCommentedInputFromZip(compiled_methods_zip_filename_,
+ compiled_methods_filename_,
+ nullptr, // No post-processing.
+ &error_msg));
+ } else {
+ compiled_methods_.reset(ReadCommentedInputFromFile(compiled_methods_filename_,
+ nullptr)); // No post-processing.
+ }
+ if (compiled_methods_.get() == nullptr) {
+ LOG(ERROR) << "Failed to create list of compiled methods from '"
+ << compiled_methods_filename_ << "': " << error_msg;
+ return false;
+ }
+ } else {
+ compiled_methods_.reset(nullptr); // By default compile everything.
+ }
if (boot_image_option_.empty()) {
dex_files_ = Runtime::Current()->GetClassLinker()->GetBootClassPath();
@@ -1191,9 +1217,9 @@
if (!UseSwap(image_, dex_files_)) {
close(swap_fd_);
swap_fd_ = -1;
- LOG(INFO) << "Decided to run without swap.";
+ VLOG(compiler) << "Decided to run without swap.";
} else {
- LOG(INFO) << "Accepted running with swap.";
+ LOG(INFO) << "Large app, accepted running with swap.";
}
}
// Note that dex2oat won't close the swap_fd_. The compiler driver's swap space will do that.
@@ -1258,6 +1284,7 @@
image_,
image_classes_.release(),
compiled_classes_.release(),
+ nullptr,
thread_count_,
dump_stats_,
dump_passes_,
@@ -1491,7 +1518,7 @@
static size_t OpenDexFiles(const std::vector<const char*>& dex_filenames,
const std::vector<const char*>& dex_locations,
std::vector<std::unique_ptr<const DexFile>>* dex_files) {
- DCHECK(dex_files != nullptr) << "OpenDexFiles out-param is NULL";
+ DCHECK(dex_files != nullptr) << "OpenDexFiles out-param is nullptr";
size_t failure_count = 0;
for (size_t i = 0; i < dex_filenames.size(); i++) {
const char* dex_filename = dex_filenames[i];
@@ -1532,7 +1559,7 @@
static void OpenClassPathFiles(const std::string& class_path,
std::vector<const DexFile*> dex_files,
std::vector<std::unique_ptr<const DexFile>>* opened_dex_files) {
- DCHECK(opened_dex_files != nullptr) << "OpenClassPathFiles out-param is NULL";
+ DCHECK(opened_dex_files != nullptr) << "OpenClassPathFiles out-param is nullptr";
std::vector<std::string> parsed;
Split(class_path, ':', &parsed);
// Take Locks::mutator_lock_ so that lock ordering on the ClassLinker::dex_lock_ is maintained.
@@ -1567,7 +1594,7 @@
// Initialize maps for unstarted runtime. This needs to be here, as running clinits needs this
// set up.
- interpreter::UnstartedRuntimeInitialize();
+ interpreter::UnstartedRuntime::Initialize();
runtime->GetClassLinker()->RunRootClinits();
runtime_ = runtime;
@@ -1618,59 +1645,86 @@
// Reads the class names (java.lang.Object) and returns a set of descriptors (Ljava/lang/Object;)
static std::unordered_set<std::string>* ReadImageClassesFromFile(
const char* image_classes_filename) {
- std::unique_ptr<std::ifstream> image_classes_file(new std::ifstream(image_classes_filename,
- std::ifstream::in));
- if (image_classes_file.get() == nullptr) {
- LOG(ERROR) << "Failed to open image classes file " << image_classes_filename;
- return nullptr;
- }
- std::unique_ptr<std::unordered_set<std::string>> result(ReadImageClasses(*image_classes_file));
- image_classes_file->close();
- return result.release();
- }
-
- static std::unordered_set<std::string>* ReadImageClasses(std::istream& image_classes_stream) {
- std::unique_ptr<std::unordered_set<std::string>> image_classes(
- new std::unordered_set<std::string>);
- while (image_classes_stream.good()) {
- std::string dot;
- std::getline(image_classes_stream, dot);
- if (StartsWith(dot, "#") || dot.empty()) {
- continue;
- }
- std::string descriptor(DotToDescriptor(dot.c_str()));
- image_classes->insert(descriptor);
- }
- return image_classes.release();
+ std::function<std::string(const char*)> process = DotToDescriptor;
+ return ReadCommentedInputFromFile(image_classes_filename, &process);
}
// Reads the class names (java.lang.Object) and returns a set of descriptors (Ljava/lang/Object;)
static std::unordered_set<std::string>* ReadImageClassesFromZip(
+ const char* zip_filename,
+ const char* image_classes_filename,
+ std::string* error_msg) {
+ std::function<std::string(const char*)> process = DotToDescriptor;
+ return ReadCommentedInputFromZip(zip_filename, image_classes_filename, &process, error_msg);
+ }
+
+ // Read lines from the given file, dropping comments and empty lines. Post-process each line with
+ // the given function.
+ static std::unordered_set<std::string>* ReadCommentedInputFromFile(
+ const char* input_filename, std::function<std::string(const char*)>* process) {
+ std::unique_ptr<std::ifstream> input_file(new std::ifstream(input_filename, std::ifstream::in));
+ if (input_file.get() == nullptr) {
+ LOG(ERROR) << "Failed to open input file " << input_filename;
+ return nullptr;
+ }
+ std::unique_ptr<std::unordered_set<std::string>> result(
+ ReadCommentedInputStream(*input_file, process));
+ input_file->close();
+ return result.release();
+ }
+
+ // Read lines from the given file inside the given zip file, dropping comments and empty lines.
+ // Post-process each line with the given function.
+ static std::unordered_set<std::string>* ReadCommentedInputFromZip(
const char* zip_filename,
- const char* image_classes_filename,
+ const char* input_filename,
+ std::function<std::string(const char*)>* process,
std::string* error_msg) {
std::unique_ptr<ZipArchive> zip_archive(ZipArchive::Open(zip_filename, error_msg));
if (zip_archive.get() == nullptr) {
return nullptr;
}
- std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(image_classes_filename, error_msg));
+ std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(input_filename, error_msg));
if (zip_entry.get() == nullptr) {
- *error_msg = StringPrintf("Failed to find '%s' within '%s': %s", image_classes_filename,
+ *error_msg = StringPrintf("Failed to find '%s' within '%s': %s", input_filename,
zip_filename, error_msg->c_str());
return nullptr;
}
- std::unique_ptr<MemMap> image_classes_file(zip_entry->ExtractToMemMap(zip_filename,
- image_classes_filename,
- error_msg));
- if (image_classes_file.get() == nullptr) {
- *error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", image_classes_filename,
+ std::unique_ptr<MemMap> input_file(zip_entry->ExtractToMemMap(zip_filename,
+ input_filename,
+ error_msg));
+ if (input_file.get() == nullptr) {
+ *error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", input_filename,
zip_filename, error_msg->c_str());
return nullptr;
}
- const std::string image_classes_string(reinterpret_cast<char*>(image_classes_file->Begin()),
- image_classes_file->Size());
- std::istringstream image_classes_stream(image_classes_string);
- return ReadImageClasses(image_classes_stream);
+ const std::string input_string(reinterpret_cast<char*>(input_file->Begin()),
+ input_file->Size());
+ std::istringstream input_stream(input_string);
+ return ReadCommentedInputStream(input_stream, process);
+ }
+
+ // Read lines from the given stream, dropping comments and empty lines. Post-process each line
+ // with the given function.
+ static std::unordered_set<std::string>* ReadCommentedInputStream(
+ std::istream& in_stream,
+ std::function<std::string(const char*)>* process) {
+ std::unique_ptr<std::unordered_set<std::string>> image_classes(
+ new std::unordered_set<std::string>);
+ while (in_stream.good()) {
+ std::string dot;
+ std::getline(in_stream, dot);
+ if (StartsWith(dot, "#") || dot.empty()) {
+ continue;
+ }
+ if (process != nullptr) {
+ std::string descriptor((*process)(dot.c_str()));
+ image_classes->insert(descriptor);
+ } else {
+ image_classes->insert(dot);
+ }
+ }
+ return image_classes.release();
}
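The new ReadCommentedInput* helpers generalize the old image-classes reader: '#'-prefixed and empty lines are skipped, and an optional per-line transform (DotToDescriptor for class lists, none for --compiled-methods lists) is applied. The core stream logic, as a self-contained sketch using only the standard library:

    #include <functional>
    #include <sstream>
    #include <string>
    #include <unordered_set>

    static std::unordered_set<std::string> ReadCommentedLines(
        std::istream& in, const std::function<std::string(const char*)>* process) {
      std::unordered_set<std::string> result;
      std::string line;
      while (std::getline(in, line)) {
        if (line.empty() || line[0] == '#') {
          continue;  // Drop blank lines and comments.
        }
        result.insert(process != nullptr ? (*process)(line.c_str()) : line);
      }
      return result;
    }

    // Usage with a --compiled-methods style list:
    //   std::istringstream in("# skipped\nLFoo;->bar()V\n");
    //   auto methods = ReadCommentedLines(in, nullptr);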
void LogCompletionTime() {
@@ -1724,8 +1778,11 @@
const char* image_classes_filename_;
const char* compiled_classes_zip_filename_;
const char* compiled_classes_filename_;
+ const char* compiled_methods_zip_filename_;
+ const char* compiled_methods_filename_;
std::unique_ptr<std::unordered_set<std::string>> image_classes_;
std::unique_ptr<std::unordered_set<std::string>> compiled_classes_;
+ std::unique_ptr<std::unordered_set<std::string>> compiled_methods_;
bool image_;
std::unique_ptr<ImageWriter> image_writer_;
bool is_host_;
@@ -1749,8 +1806,6 @@
DISALLOW_IMPLICIT_CONSTRUCTORS(Dex2Oat);
};
-const unsigned int WatchDog::kWatchDogTimeoutSeconds;
-
static void b13564922() {
#if defined(__linux__) && defined(__arm__)
int major, minor;
diff --git a/disassembler/disassembler.cc b/disassembler/disassembler.cc
index c05c3ed..6334717 100644
--- a/disassembler/disassembler.cc
+++ b/disassembler/disassembler.cc
@@ -42,7 +42,7 @@
return new x86::DisassemblerX86(options, true);
} else {
UNIMPLEMENTED(FATAL) << "no disassembler for " << instruction_set;
- return NULL;
+ return nullptr;
}
}
diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index ba0c0bd..2ead4a2 100644
--- a/disassembler/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
@@ -587,6 +587,14 @@
src_reg_file = SSE;
immediate_bytes = 1;
break;
+ case 0x15:
+ opcode1 = "pextrw";
+ prefix[2] = 0;
+ has_modrm = true;
+ store = true;
+ src_reg_file = SSE;
+ immediate_bytes = 1;
+ break;
case 0x16:
opcode1 = "pextrd";
prefix[2] = 0;
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index 34a4c14..1056fe1 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -97,7 +97,8 @@
{
struct stat sts;
- std::string proc_pid_str = StringPrintf("/proc/%ld", static_cast<long>(image_diff_pid)); // NOLINT [runtime/int]
+ std::string proc_pid_str =
+ StringPrintf("/proc/%ld", static_cast<long>(image_diff_pid)); // NOLINT [runtime/int]
if (stat(proc_pid_str.c_str(), &sts) == -1) {
os << "Process does not exist";
return false;
@@ -144,7 +145,8 @@
const size_t pointer_size = InstructionSetPointerSize(
Runtime::Current()->GetInstructionSet());
- std::string file_name = StringPrintf("/proc/%ld/mem", static_cast<long>(image_diff_pid)); // NOLINT [runtime/int]
+ std::string file_name =
+ StringPrintf("/proc/%ld/mem", static_cast<long>(image_diff_pid)); // NOLINT [runtime/int]
size_t boot_map_size = boot_map.end - boot_map.start;
@@ -197,8 +199,8 @@
return false;
}
- std::string page_map_file_name = StringPrintf("/proc/%ld/pagemap",
- static_cast<long>(image_diff_pid)); // NOLINT [runtime/int]
+ std::string page_map_file_name = StringPrintf(
+ "/proc/%ld/pagemap", static_cast<long>(image_diff_pid)); // NOLINT [runtime/int]
auto page_map_file = std::unique_ptr<File>(OS::OpenFileForReading(page_map_file_name.c_str()));
if (page_map_file == nullptr) {
os << "Failed to open " << page_map_file_name << " for reading: " << strerror(errno);
@@ -226,8 +228,10 @@
return false;
}
- std::set<size_t> dirty_page_set_remote; // Set of the remote virtual page indices that are dirty
- std::set<size_t> dirty_page_set_local; // Set of the local virtual page indices that are dirty
+ // Set of the remote virtual page indices that are dirty
+ std::set<size_t> dirty_page_set_remote;
+ // Set of the local virtual page indices that are dirty
+ std::set<size_t> dirty_page_set_local;
size_t different_int32s = 0;
size_t different_bytes = 0;
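The dirty-page bookkeeping above is driven by /proc/&lt;pid&gt;/pagemap, which exposes one 64-bit entry per virtual page of the target process. A hedged sketch of that lookup; only the seek/read mechanics are shown, and the entry's bit layout (present, swapped, page frame number) is kernel-defined:

    #include <cstdint>
    #include <cstdio>
    #include <sys/types.h>
    #include <unistd.h>

    // Reads the raw 64-bit pagemap entry covering vaddr in the given process.
    static bool ReadPagemapEntry(pid_t pid, uintptr_t vaddr, uint64_t* entry) {
      char path[64];
      snprintf(path, sizeof(path), "/proc/%ld/pagemap", static_cast<long>(pid));  // NOLINT [runtime/int]
      FILE* f = fopen(path, "rb");
      if (f == nullptr) {
        return false;
      }
      const long page_size = sysconf(_SC_PAGESIZE);
      const long offset = static_cast<long>(vaddr / page_size) * sizeof(*entry);
      const bool ok = fseek(f, offset, SEEK_SET) == 0 &&
                      fread(entry, sizeof(*entry), 1, f) == 1;
      fclose(f);
      return ok;
    }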
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index a36e5b1..949c2cb 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -76,42 +76,38 @@
"kClassRoots",
};
-class OatSymbolizer FINAL : public CodeOutput {
+class OatSymbolizer FINAL {
public:
- explicit OatSymbolizer(const OatFile* oat_file, const std::string& output_name) :
- oat_file_(oat_file), builder_(nullptr), elf_output_(nullptr),
- output_name_(output_name.empty() ? "symbolized.oat" : output_name) {
- }
+ class RodataWriter FINAL : public CodeOutput {
+ public:
+ explicit RodataWriter(const OatFile* oat_file) : oat_file_(oat_file) {}
- bool Init() {
- Elf32_Word oat_data_size = oat_file_->GetOatHeader().GetExecutableOffset();
-
- uint32_t diff = static_cast<uint32_t>(oat_file_->End() - oat_file_->Begin());
- uint32_t oat_exec_size = diff - oat_data_size;
- uint32_t oat_bss_size = oat_file_->BssSize();
-
- elf_output_ = OS::CreateEmptyFile(output_name_.c_str());
-
- builder_.reset(new ElfBuilder<Elf32_Word, Elf32_Sword, Elf32_Addr, Elf32_Dyn,
- Elf32_Sym, Elf32_Ehdr, Elf32_Phdr, Elf32_Shdr>(
- this,
- elf_output_,
- oat_file_->GetOatHeader().GetInstructionSet(),
- 0,
- oat_data_size,
- oat_data_size,
- oat_exec_size,
- RoundUp(oat_data_size + oat_exec_size, kPageSize),
- oat_bss_size,
- true,
- false));
-
- if (!builder_->Init()) {
- builder_.reset(nullptr);
- return false;
+ bool Write(OutputStream* out) OVERRIDE {
+ const size_t rodata_size = oat_file_->GetOatHeader().GetExecutableOffset();
+ return out->WriteFully(oat_file_->Begin(), rodata_size);
}
- return true;
+ private:
+ const OatFile* oat_file_;
+ };
+
+ class TextWriter FINAL : public CodeOutput {
+ public:
+ explicit TextWriter(const OatFile* oat_file) : oat_file_(oat_file) {}
+
+ bool Write(OutputStream* out) OVERRIDE {
+ const size_t rodata_size = oat_file_->GetOatHeader().GetExecutableOffset();
+ const uint8_t* text_begin = oat_file_->Begin() + rodata_size;
+ return out->WriteFully(text_begin, oat_file_->End() - text_begin);
+ }
+
+ private:
+ const OatFile* oat_file_;
+ };
+
+ explicit OatSymbolizer(const OatFile* oat_file, const std::string& output_name) :
+ oat_file_(oat_file), builder_(nullptr),
+ output_name_(output_name.empty() ? "symbolized.oat" : output_name) {
}
typedef void (OatSymbolizer::*Callback)(const DexFile::ClassDef&,
@@ -123,9 +119,17 @@
uint32_t);
bool Symbolize() {
- if (builder_.get() == nullptr) {
- return false;
- }
+ Elf32_Word rodata_size = oat_file_->GetOatHeader().GetExecutableOffset();
+ uint32_t size = static_cast<uint32_t>(oat_file_->End() - oat_file_->Begin());
+ uint32_t text_size = size - rodata_size;
+ uint32_t bss_size = oat_file_->BssSize();
+ RodataWriter rodata_writer(oat_file_);
+ TextWriter text_writer(oat_file_);
+ builder_.reset(new ElfBuilder<ElfTypes32>(
+ oat_file_->GetOatHeader().GetInstructionSet(),
+ rodata_size, &rodata_writer,
+ text_size, &text_writer,
+ bss_size));
Walk(&art::OatSymbolizer::RegisterForDedup);
@@ -133,10 +137,11 @@
Walk(&art::OatSymbolizer::AddSymbol);
- bool result = builder_->Write();
+ File* elf_output = OS::CreateEmptyFile(output_name_.c_str());
+ bool result = builder_->Write(elf_output);
// Ignore I/O errors.
- UNUSED(elf_output_->FlushClose());
+ UNUSED(elf_output->FlushClose());
return result;
}
@@ -145,7 +150,7 @@
std::vector<const OatFile::OatDexFile*> oat_dex_files = oat_file_->GetOatDexFiles();
for (size_t i = 0; i < oat_dex_files.size(); i++) {
const OatFile::OatDexFile* oat_dex_file = oat_dex_files[i];
- CHECK(oat_dex_file != NULL);
+ CHECK(oat_dex_file != nullptr);
WalkOatDexFile(oat_dex_file, callback);
}
}
@@ -270,25 +275,14 @@
pretty_name = "[Dedup]" + pretty_name;
}
- ElfSymtabBuilder<Elf32_Word, Elf32_Sword, Elf32_Addr,
- Elf32_Sym, Elf32_Shdr>* symtab = builder_->GetSymtabBuilder();
+ auto* symtab = builder_->GetSymtab();
- symtab->AddSymbol(pretty_name, &builder_->GetTextBuilder(),
+ symtab->AddSymbol(pretty_name, builder_->GetText(),
oat_method.GetCodeOffset() - oat_file_->GetOatHeader().GetExecutableOffset(),
true, oat_method.GetQuickCodeSize(), STB_GLOBAL, STT_FUNC);
}
}
- // Set oat data offset. Required by ElfBuilder/CodeOutput.
- void SetCodeOffset(size_t offset ATTRIBUTE_UNUSED) {
- // Nothing to do.
- }
-
- // Write oat code. Required by ElfBuilder/CodeOutput.
- bool Write(OutputStream* out) {
- return out->WriteFully(oat_file_->Begin(), oat_file_->End() - oat_file_->Begin());
- }
-
private:
static void SkipAllFields(ClassDataItemIterator* it) {
while (it->HasNextStaticField()) {
@@ -300,9 +294,7 @@
}
const OatFile* oat_file_;
- std::unique_ptr<ElfBuilder<Elf32_Word, Elf32_Sword, Elf32_Addr, Elf32_Dyn,
- Elf32_Sym, Elf32_Ehdr, Elf32_Phdr, Elf32_Shdr> > builder_;
- File* elf_output_;
+ std::unique_ptr<ElfBuilder<ElfTypes32> > builder_;
std::unordered_map<uint32_t, uint32_t> state_;
const std::string output_name_;
};
@@ -2100,7 +2092,7 @@
gc::space::ImageSpace& image_space_;
const ImageHeader& image_header_;
std::unique_ptr<OatDumper> oat_dumper_;
- std::unique_ptr<OatDumperOptions> oat_dumper_options_;
+ OatDumperOptions* oat_dumper_options_;
DISALLOW_COPY_AND_ASSIGN(ImageDumper);
};
@@ -2206,10 +2198,6 @@
}
OatSymbolizer oat_symbolizer(oat_file, output_name);
- if (!oat_symbolizer.Init()) {
- fprintf(stderr, "Failed to initialize symbolizer\n");
- return EXIT_FAILURE;
- }
if (!oat_symbolizer.Symbolize()) {
fprintf(stderr, "Failed to symbolize\n");
return EXIT_FAILURE;
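The rework above inverts the old design: instead of OatSymbolizer itself implementing CodeOutput, each output section gets a small writer that knows only its own byte range, and ElfBuilder is handed one writer per section. The pattern in isolation (interface and names simplified; this is not ART's exact API):

    #include <cstddef>
    #include <cstdint>

    // Simplified stand-in for an output stream interface.
    class Output {
     public:
      virtual ~Output() {}
      virtual bool WriteFully(const void* data, size_t size) = 0;
    };

    // One writer per ELF section, each covering a fixed byte range.
    class SectionWriter {
     public:
      SectionWriter(const uint8_t* begin, size_t size) : begin_(begin), size_(size) {}
      bool Write(Output* out) const { return out->WriteFully(begin_, size_); }

     private:
      const uint8_t* const begin_;
      const size_t size_;
    };

    // A builder can then be given one writer for [begin, begin + rodata_size) and
    // another for [begin + rodata_size, end) without knowing anything about oat files.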
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 4dc0967..ef84a17 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -650,29 +650,34 @@
template <typename ElfFileImpl>
bool PatchOat::PatchElf(ElfFileImpl* oat_file) {
TimingLogger::ScopedTiming t("Fixup Elf Text Section", timings_);
+
+ // Fix up absolute references to locations within the boot image.
if (!oat_file->ApplyOatPatchesTo(".text", delta_)) {
return false;
}
+ // Update the OatHeader fields referencing the boot image.
if (!PatchOatHeader<ElfFileImpl>(oat_file)) {
return false;
}
- bool need_fixup = false;
+ bool need_boot_oat_fixup = true;
for (unsigned int i = 0; i < oat_file->GetProgramHeaderNum(); ++i) {
auto hdr = oat_file->GetProgramHeader(i);
- if ((hdr->p_vaddr != 0 && hdr->p_vaddr != hdr->p_offset) ||
- (hdr->p_paddr != 0 && hdr->p_paddr != hdr->p_offset)) {
- need_fixup = true;
+ if (hdr->p_type == PT_LOAD && hdr->p_vaddr == 0u) {
+ need_boot_oat_fixup = false;
break;
}
}
- if (!need_fixup) {
- // This was never passed through ElfFixup so all headers/symbols just have their offset as
- // their addr. Therefore we do not need to update these parts.
+ if (!need_boot_oat_fixup) {
+ // This is an app oat file that can be loaded at an arbitrary address in memory.
+ // Boot image references were patched above and there's nothing else to do.
return true;
}
+ // This is a boot oat file that's loaded at a particular address and we need
+ // to patch all absolute addresses, starting with ELF program headers.
+
t.NewTiming("Fixup Elf Headers");
// Fixup Phdr's
oat_file->FixupProgramHeaders(delta_);
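The rewritten check above distinguishes the two kinds of oat files by their program headers: a relocatable app oat has a PT_LOAD segment with p_vaddr == 0, while a boot oat is linked at a fixed address and needs its headers and symbols patched too. The same test in isolation, using standard ELF types:

    #include <elf.h>
    #include <vector>

    // Returns true for a fixed-address (boot) oat whose absolute addresses must be patched.
    static bool NeedsBootOatFixup(const std::vector<Elf32_Phdr>& program_headers) {
      for (const Elf32_Phdr& hdr : program_headers) {
        if (hdr.p_type == PT_LOAD && hdr.p_vaddr == 0u) {
          return false;  // Relocatable app oat: the .text patching above suffices.
        }
      }
      return true;  // Boot oat: go on to fix up program headers, symbols, etc.
    }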
diff --git a/runtime/Android.mk b/runtime/Android.mk
index d3488fc..ece9d4b 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -100,11 +100,13 @@
linear_alloc.cc \
mem_map.cc \
memory_region.cc \
+ mirror/abstract_method.cc \
mirror/art_method.cc \
mirror/array.cc \
mirror/class.cc \
mirror/dex_cache.cc \
mirror/field.cc \
+ mirror/method.cc \
mirror/object.cc \
mirror/reference.cc \
mirror/stack_trace_element.cc \
@@ -122,6 +124,7 @@
native/java_lang_Object.cc \
native/java_lang_Runtime.cc \
native/java_lang_String.cc \
+ native/java_lang_StringFactory.cc \
native/java_lang_System.cc \
native/java_lang_Thread.cc \
native/java_lang_Throwable.cc \
@@ -134,6 +137,7 @@
native/java_lang_reflect_Method.cc \
native/java_lang_reflect_Proxy.cc \
native/java_util_concurrent_atomic_AtomicLong.cc \
+ native/libcore_util_CharsetUtils.cc \
native/org_apache_harmony_dalvik_ddmc_DdmServer.cc \
native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc \
native/sun_misc_Unsafe.cc \
@@ -464,7 +468,7 @@
ifeq ($$(art_target_or_host),target)
LOCAL_SHARED_LIBRARIES += libdl
# ZipArchive support, the order matters here to get all symbols.
- LOCAL_STATIC_LIBRARIES := libziparchive libz
+ LOCAL_STATIC_LIBRARIES := libziparchive libz libbase
# For android::FileMap used by libziparchive.
LOCAL_SHARED_LIBRARIES += libutils
# For liblog, atrace, properties, ashmem, set_sched_policy and socket_peer_is_trusted.
diff --git a/runtime/arch/arm/context_arm.h b/runtime/arch/arm/context_arm.h
index 5bdeda7..a58aecb 100644
--- a/runtime/arch/arm/context_arm.h
+++ b/runtime/arch/arm/context_arm.h
@@ -80,7 +80,7 @@
NO_RETURN void DoLongJump() OVERRIDE;
private:
- // Pointers to register locations, initialized to NULL or the specific registers below.
+ // Pointers to register locations, initialized to null or the specific registers below.
uintptr_t* gprs_[kNumberOfCoreRegisters];
uint32_t* fprs_[kNumberOfSRegisters];
// Hold values for sp and pc if they are not located within a stack frame.
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index 055b5ab..cafc868 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -143,11 +143,16 @@
qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
- qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
- qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
- qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
- qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
- qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+ qpoints->pInvokeDirectTrampolineWithAccessCheck =
+ art_quick_invoke_direct_trampoline_with_access_check;
+ qpoints->pInvokeInterfaceTrampolineWithAccessCheck =
+ art_quick_invoke_interface_trampoline_with_access_check;
+ qpoints->pInvokeStaticTrampolineWithAccessCheck =
+ art_quick_invoke_static_trampoline_with_access_check;
+ qpoints->pInvokeSuperTrampolineWithAccessCheck =
+ art_quick_invoke_super_trampoline_with_access_check;
+ qpoints->pInvokeVirtualTrampolineWithAccessCheck =
+ art_quick_invoke_virtual_trampoline_with_access_check;
// Thread
qpoints->pTestSuspend = art_quick_test_suspend;
@@ -161,6 +166,9 @@
qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
qpoints->pDeoptimize = art_quick_deoptimize;
+
+ // Read barrier
+ qpoints->pReadBarrierJni = ReadBarrierJni;
}
} // namespace art
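Each per-architecture init function above fills the same table of quick entrypoints; the new read-barrier support is just one more function-pointer slot, assigned alongside the rest. The shape of the pattern, reduced to a sketch with illustrative names and empty stand-in stubs:

    // Stand-ins for the real assembly stub and C++ entrypoint.
    extern "C" void art_quick_test_suspend() {}
    extern "C" void ReadBarrierJni() {}

    struct QuickEntryPoints {
      void (*pTestSuspend)();
      void (*pReadBarrierJni)();
      // ... one pointer per quick entrypoint ...
    };

    static void InitEntryPoints(QuickEntryPoints* qpoints) {
      // Thread
      qpoints->pTestSuspend = art_quick_test_suspend;
      // Read barrier
      qpoints->pReadBarrierJni = ReadBarrierJni;
    }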
diff --git a/runtime/arch/arm/fault_handler_arm.cc b/runtime/arch/arm/fault_handler_arm.cc
index 3e8b367..d84cb53 100644
--- a/runtime/arch/arm/fault_handler_arm.cc
+++ b/runtime/arch/arm/fault_handler_arm.cc
@@ -56,7 +56,7 @@
struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
Thread* self = Thread::Current();
- CHECK(self != nullptr); // This will cause a SIGABRT if self is nullptr.
+ CHECK(self != nullptr); // This will cause a SIGABRT if self is null.
sc->arm_r0 = reinterpret_cast<uintptr_t>(*self->GetNestedSignalState());
sc->arm_r1 = 1;
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 9bd8ba7..3c145d7 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -313,14 +313,13 @@
/*
* All generated callsites for interface invokes and invocation slow paths will load arguments
* as usual - except instead of loading arg0/r0 with the target Method*, arg0/r0 will contain
- * the method_idx. This wrapper will save arg1-arg3, load the caller's Method*, align the
- * stack and call the appropriate C helper.
+ * the method_idx. This wrapper will save arg1-arg3, and call the appropriate C helper.
* NOTE: "this" is first visible argument of the target, and so can be found in arg1/r1.
*
* The helper will attempt to locate the target and return a 64-bit result in r0/r1 consisting
* of the target Method* in r0 and method->code_ in r1.
*
- * If unsuccessful, the helper will return NULL/NULL. There will bea pending exception in the
+ * If unsuccessful, the helper will return null/null. There will be a pending exception in the
* thread and we branch to another stub to deliver it.
*
* On success this wrapper will restore arguments and *jump* to the target, leaving the lr
@@ -330,13 +329,10 @@
.extern \cxx_name
ENTRY \c_name
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case allocation triggers GC
- ldr r2, [sp, #FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE] @ pass caller Method*
- mov r3, r9 @ pass Thread::Current
- mov r12, sp
- str r12, [sp, #-16]! @ expand the frame and pass SP
+ mov r2, r9 @ pass Thread::Current
+ mov r3, sp
.cfi_adjust_cfa_offset 16
bl \cxx_name @ (method_idx, this, caller, Thread*, SP)
- add sp, #16 @ strip the extra frame
.cfi_adjust_cfa_offset -16
mov r12, r1 @ save Method*->code_
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
@@ -359,7 +355,7 @@
* Quick invocation stub internal.
* On entry:
* r0 = method pointer
- * r1 = argument array or NULL for no argument methods
+ * r1 = argument array or null for no argument methods
* r2 = size of argument array in bytes
* r3 = (managed) thread pointer
* [sp] = JValue* result
@@ -409,7 +405,7 @@
add r0, sp, #4 @ pass stack pointer + method ptr as dest for memcpy
bl memcpy @ memcpy (dest, src, bytes)
mov ip, #0 @ set ip to 0
- str ip, [sp] @ store NULL for method* at bottom of frame
+ str ip, [sp] @ store null for method* at bottom of frame
ldr ip, [r11, #48] @ load fp register argument array pointer
vldm ip, {s0-s15} @ copy s0 - s15
@@ -669,6 +665,18 @@
END art_quick_aput_obj
// Macro to facilitate adding new allocation entrypoints.
+.macro ONE_ARG_DOWNCALL name, entrypoint, return
+ .extern \entrypoint
+ENTRY \name
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2 @ save callee saves in case of GC
+ mov r1, r9 @ pass Thread::Current
+ bl \entrypoint @ (uint32_t type_idx, Method* method, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ \return
+END \name
+.endm
+
+// Macro to facilitate adding new allocation entrypoints.
.macro TWO_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
@@ -693,10 +701,25 @@
END \name
.endm
-TWO_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+// Macro to facilitate adding new allocation entrypoints.
+.macro FOUR_ARG_DOWNCALL name, entrypoint, return
+ .extern \entrypoint
+ENTRY \name
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r3, r12 @ save callee saves in case of GC
+ str r9, [sp, #-16]! @ expand the frame and pass Thread::Current
+ .pad #16
+ .cfi_adjust_cfa_offset 16
+ bl \entrypoint
+ add sp, #16 @ strip the extra frame
+ .cfi_adjust_cfa_offset -16
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ \return
+END \name
+.endm
-TWO_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-TWO_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
/*
* Called by managed code to resolve a static field and load a non-wide value.
@@ -805,11 +828,10 @@
/*
* Entry from managed code to resolve a string, this stub will allocate a String and deliver an
- * exception on error. On success the String is returned. R0 holds the referring method,
- * R1 holds the string index. The fast path check for hit in strings cache has already been
- * performed.
+ * exception on error. On success the String is returned. R0 holds the string index. The fast
+ * path check for hit in strings cache has already been performed.
*/
-TWO_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Generate the allocation entrypoints for each allocator.
GENERATE_ALL_ALLOC_ENTRYPOINTS
@@ -1178,8 +1200,7 @@
.cfi_rel_offset r11, 8
.cfi_rel_offset lr, 12
ldr r3, [r0, #MIRROR_STRING_COUNT_OFFSET]
- ldr r12, [r0, #MIRROR_STRING_OFFSET_OFFSET]
- ldr r0, [r0, #MIRROR_STRING_VALUE_OFFSET]
+ add r0, #MIRROR_STRING_VALUE_OFFSET
/* Clamp start to [0..count] */
cmp r2, #0
@@ -1189,10 +1210,6 @@
it gt
movgt r2, r3
- /* Build a pointer to the start of string data */
- add r0, #MIRROR_CHAR_ARRAY_DATA_OFFSET
- add r0, r0, r12, lsl #1
-
/* Save a copy in r12 to later compute result */
mov r12, r0
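The indexof/string_compareto edits in this file (and their arm64/mips counterparts below) track a mirror::String layout change: character data now lives inline in the String object at MIRROR_STRING_VALUE_OFFSET, so the old chain through a separate char array plus an offset field disappears. The address computation, as a pseudo-C sketch with a placeholder offset value:

    #include <cstddef>
    #include <cstdint>

    static constexpr size_t kStringValueOffset = 16;  // Placeholder, not the real offset.

    static const uint16_t* StringData(const uint8_t* str) {
      // Old: chars = char_array + kCharArrayDataOffset + 2 * offset_field
      // New: chars = str + kStringValueOffset
      return reinterpret_cast<const uint16_t*>(str + kStringValueOffset);
    }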
@@ -1298,12 +1315,10 @@
.cfi_rel_offset r12, 24
.cfi_rel_offset lr, 28
- ldr r4, [r2, #MIRROR_STRING_OFFSET_OFFSET]
- ldr r9, [r1, #MIRROR_STRING_OFFSET_OFFSET]
ldr r7, [r2, #MIRROR_STRING_COUNT_OFFSET]
ldr r10, [r1, #MIRROR_STRING_COUNT_OFFSET]
- ldr r2, [r2, #MIRROR_STRING_VALUE_OFFSET]
- ldr r1, [r1, #MIRROR_STRING_VALUE_OFFSET]
+ add r2, #MIRROR_STRING_VALUE_OFFSET
+ add r1, #MIRROR_STRING_VALUE_OFFSET
/*
* At this point, we have:
@@ -1318,15 +1333,12 @@
it ls
movls r10, r7
- /* Now, build pointers to the string data */
- add r2, r2, r4, lsl #1
- add r1, r1, r9, lsl #1
/*
* Note: data pointers point to previous element so we can use pre-index
* mode with base writeback.
*/
- add r2, #MIRROR_CHAR_ARRAY_DATA_OFFSET-2 @ offset to contents[-1]
- add r1, #MIRROR_CHAR_ARRAY_DATA_OFFSET-2 @ offset to contents[-1]
+ subs r2, #2 @ offset to contents[-1]
+ subs r1, #2 @ offset to contents[-1]
/*
* At this point we have:
diff --git a/runtime/arch/arm64/context_arm64.h b/runtime/arch/arm64/context_arm64.h
index f486779..0383ad6 100644
--- a/runtime/arch/arm64/context_arm64.h
+++ b/runtime/arch/arm64/context_arm64.h
@@ -80,7 +80,7 @@
NO_RETURN void DoLongJump() OVERRIDE;
private:
- // Pointers to register locations, initialized to NULL or the specific registers below.
+ // Pointers to register locations, initialized to null or the specific registers below.
uintptr_t* gprs_[kNumberOfXRegisters];
uint64_t * fprs_[kNumberOfDRegisters];
// Hold values for sp and pc if they are not located within a stack frame.
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index 6c787e3..8c8f8d5 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -105,7 +105,7 @@
qpoints->pUnlockObject = art_quick_unlock_object;
// Math
- // TODO nullptr entrypoints not needed for ARM64 - generate inline.
+ // TODO null entrypoints not needed for ARM64 - generate inline.
qpoints->pCmpgDouble = nullptr;
qpoints->pCmpgFloat = nullptr;
qpoints->pCmplDouble = nullptr;
@@ -135,11 +135,16 @@
qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
- qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
- qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
- qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
- qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
- qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+ qpoints->pInvokeDirectTrampolineWithAccessCheck =
+ art_quick_invoke_direct_trampoline_with_access_check;
+ qpoints->pInvokeInterfaceTrampolineWithAccessCheck =
+ art_quick_invoke_interface_trampoline_with_access_check;
+ qpoints->pInvokeStaticTrampolineWithAccessCheck =
+ art_quick_invoke_static_trampoline_with_access_check;
+ qpoints->pInvokeSuperTrampolineWithAccessCheck =
+ art_quick_invoke_super_trampoline_with_access_check;
+ qpoints->pInvokeVirtualTrampolineWithAccessCheck =
+ art_quick_invoke_virtual_trampoline_with_access_check;
// Thread
qpoints->pTestSuspend = art_quick_test_suspend;
@@ -154,6 +159,9 @@
// Deoptimize
qpoints->pDeoptimize = art_quick_deoptimize;
+
+ // Read barrier
+ qpoints->pReadBarrierJni = ReadBarrierJni;
};
} // namespace art
diff --git a/runtime/arch/arm64/fault_handler_arm64.cc b/runtime/arch/arm64/fault_handler_arm64.cc
index c914d85..0448c76 100644
--- a/runtime/arch/arm64/fault_handler_arm64.cc
+++ b/runtime/arch/arm64/fault_handler_arm64.cc
@@ -45,7 +45,7 @@
struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
Thread* self = Thread::Current();
- CHECK(self != nullptr); // This will cause a SIGABRT if self is nullptr.
+ CHECK(self != nullptr); // This will cause a SIGABRT if self is null.
sc->regs[0] = reinterpret_cast<uintptr_t>(*self->GetNestedSignalState());
sc->regs[1] = 1;
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 4079436..6b16a2e5 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -182,7 +182,7 @@
// Restore xSELF as it might be scratched.
mov xSELF, xETR
// ETR
- ldr xETR, [sp, #16]
+ ldr xETR, [sp, #32]
.cfi_restore x21
add sp, sp, #112
@@ -459,14 +459,13 @@
/*
* All generated callsites for interface invokes and invocation slow paths will load arguments
* as usual - except instead of loading arg0/x0 with the target Method*, arg0/x0 will contain
- * the method_idx. This wrapper will save arg1-arg3, load the caller's Method*, align the
- * stack and call the appropriate C helper.
+ * the method_idx. This wrapper will save arg1-arg3, and call the appropriate C helper.
* NOTE: "this" is first visible argument of the target, and so can be found in arg1/x1.
*
* The helper will attempt to locate the target and return a 128-bit result in x0/x1 consisting
* of the target Method* in x0 and method->code_ in x1.
*
- * If unsuccessful, the helper will return NULL/????. There will be a pending exception in the
+ * If unsuccessful, the helper will return null/????. There will be a pending exception in the
* thread and we branch to another stub to deliver it.
*
* On success this wrapper will restore arguments and *jump* to the target, leaving the lr
@@ -483,10 +482,9 @@
// Helper signature is always
// (method_idx, *this_object, *caller_method, *self, sp)
- ldr w2, [sp, #FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE] // pass caller Method*
- mov x3, xSELF // pass Thread::Current
- mov x4, sp
- bl \cxx_name // (method_idx, this, caller, Thread*, SP)
+ mov x2, xSELF // pass Thread::Current
+ mov x3, sp
+ bl \cxx_name // (method_idx, this, Thread*, SP)
mov xIP0, x1 // save Method*->code_
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
cbz x0, 1f // did we find the target? if not go to exception delivery
@@ -565,7 +563,7 @@
// W2 - args length
// X9 - destination address.
// W10 - temporary
- add x9, sp, #4 // Destination address is bottom of stack + NULL.
+ add x9, sp, #4 // Destination address is bottom of stack + null.
// Use \@ to differentiate between macro invocations.
.LcopyParams\@:
@@ -579,7 +577,7 @@
.LendCopyParams\@:
- // Store NULL into StackReference<Method>* at bottom of frame.
+ // Store null into StackReference<Method>* at bottom of frame.
str wzr, [sp]
#if (STACK_REFERENCE_SIZE != 4)
@@ -1261,10 +1259,22 @@
END art_quick_aput_obj
// Macro to facilitate adding new allocation entrypoints.
+.macro ONE_ARG_DOWNCALL name, entrypoint, return
+ .extern \entrypoint
+ENTRY \name
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
+ mov x1, xSELF // pass Thread::Current
+ bl \entrypoint // (uint32_t type_idx, Method* method, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ \return
+END \name
+.endm
+
+// Macro to facilitate adding new allocation entrypoints.
.macro TWO_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
mov x2, xSELF // pass Thread::Current
bl \entrypoint // (uint32_t type_idx, Method* method, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
@@ -1272,11 +1282,11 @@
END \name
.endm
-// Macro to facilitate adding new array allocation entrypoints.
+// Macro to facilitate adding new allocation entrypoints.
.macro THREE_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
mov x3, xSELF // pass Thread::Current
bl \entrypoint
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
@@ -1284,6 +1294,19 @@
END \name
.endm
+// Macro to facilitate adding new allocation entrypoints.
+.macro FOUR_ARG_DOWNCALL name, entrypoint, return
+ .extern \entrypoint
+ENTRY \name
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
+ mov x4, xSELF // pass Thread::Current
+ bl \entrypoint //
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ \return
+ DELIVER_PENDING_EXCEPTION
+END \name
+.endm
+
// Macros taking opportunity of code similarities for downcalls with referrer.
.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
@@ -1339,10 +1362,10 @@
* initializer and deliver the exception on error. On success the static storage base is
* returned.
*/
-TWO_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-TWO_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-TWO_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
@@ -1386,11 +1409,10 @@
/*
* Entry from managed code to resolve a string, this stub will allocate a String and deliver an
- * exception on error. On success the String is returned. x0 holds the referring method,
- * w1 holds the string index. The fast path check for hit in strings cache has already been
- * performed.
+ * exception on error. On success the String is returned. w0 holds the string index. The fast
+ * path check for hit in strings cache has already been performed.
*/
-TWO_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Generate the allocation entrypoints for each allocator.
GENERATE_ALL_ALLOC_ENTRYPOINTS
@@ -1714,8 +1736,7 @@
*/
ENTRY art_quick_indexof
ldr w3, [x0, #MIRROR_STRING_COUNT_OFFSET]
- ldr w4, [x0, #MIRROR_STRING_OFFSET_OFFSET]
- ldr w0, [x0, #MIRROR_STRING_VALUE_OFFSET] // x0 ?
+ add x0, x0, #MIRROR_STRING_VALUE_OFFSET
/* Clamp start to [0..count] */
cmp w2, #0
@@ -1723,10 +1744,6 @@
cmp w2, w3
csel w2, w3, w2, gt
- /* Build a pointer to the start of the string data */
- add x0, x0, #MIRROR_CHAR_ARRAY_DATA_OFFSET
- add x0, x0, x4, lsl #1
-
/* Save a copy to compute result */
mov x5, x0
@@ -1818,17 +1835,15 @@
ret
1: // Different string objects.
- ldr w6, [x2, #MIRROR_STRING_OFFSET_OFFSET]
- ldr w5, [x1, #MIRROR_STRING_OFFSET_OFFSET]
ldr w4, [x2, #MIRROR_STRING_COUNT_OFFSET]
ldr w3, [x1, #MIRROR_STRING_COUNT_OFFSET]
- ldr w2, [x2, #MIRROR_STRING_VALUE_OFFSET]
- ldr w1, [x1, #MIRROR_STRING_VALUE_OFFSET]
+ add x2, x2, #MIRROR_STRING_VALUE_OFFSET
+ add x1, x1, #MIRROR_STRING_VALUE_OFFSET
/*
- * Now: CharArray* Offset Count
- * first arg x2 w6 w4
- * second arg x1 w5 w3
+ * Now: Data* Count
+ * first arg x2 w4
+ * second arg x1 w3
*/
// x0 := str1.length(w4) - str2.length(w3). ldr zero-extended w3/w4 into x3/x4.
@@ -1836,16 +1851,6 @@
// Min(count1, count2) into w3.
csel x3, x3, x4, ge
- // Build pointer into string data.
-
- // Add offset in array (substr etc.) (sign extend and << 1).
- add x2, x2, w6, sxtw #1
- add x1, x1, w5, sxtw #1
-
- // Add offset in CharArray to array.
- add x2, x2, #MIRROR_CHAR_ARRAY_DATA_OFFSET
- add x1, x1, #MIRROR_CHAR_ARRAY_DATA_OFFSET
-
// TODO: Tune this value.
// Check for long string, do memcmp16 for them.
cmp w3, #28 // Constant from arm32.
diff --git a/runtime/arch/mips/context_mips.h b/runtime/arch/mips/context_mips.h
index cbad3f963..d01b95e 100644
--- a/runtime/arch/mips/context_mips.h
+++ b/runtime/arch/mips/context_mips.h
@@ -79,7 +79,7 @@
NO_RETURN void DoLongJump() OVERRIDE;
private:
- // Pointers to registers in the stack, initialized to NULL except for the special cases below.
+ // Pointers to registers in the stack, initialized to null except for the special cases below.
uintptr_t* gprs_[kNumberOfCoreRegisters];
uint32_t* fprs_[kNumberOfFRegisters];
// Hold values for sp and ra (return address) if they are not located within a stack frame.
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index e3ec27c..ff04106 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -199,7 +199,7 @@
static_assert(IsDirectEntrypoint(kQuickD2iz), "Direct C stub not marked direct.");
qpoints->pF2iz = art_f2i;
static_assert(IsDirectEntrypoint(kQuickF2iz), "Direct C stub not marked direct.");
- qpoints->pIdivmod = NULL;
+ qpoints->pIdivmod = nullptr;
qpoints->pD2l = art_d2l;
static_assert(IsDirectEntrypoint(kQuickD2l), "Direct C stub not marked direct.");
qpoints->pF2l = art_f2l;
@@ -228,19 +228,24 @@
qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
- qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
+ qpoints->pInvokeDirectTrampolineWithAccessCheck =
+ art_quick_invoke_direct_trampoline_with_access_check;
static_assert(!IsDirectEntrypoint(kQuickInvokeDirectTrampolineWithAccessCheck),
"Non-direct C stub marked direct.");
- qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
+ qpoints->pInvokeInterfaceTrampolineWithAccessCheck =
+ art_quick_invoke_interface_trampoline_with_access_check;
static_assert(!IsDirectEntrypoint(kQuickInvokeInterfaceTrampolineWithAccessCheck),
"Non-direct C stub marked direct.");
- qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
+ qpoints->pInvokeStaticTrampolineWithAccessCheck =
+ art_quick_invoke_static_trampoline_with_access_check;
static_assert(!IsDirectEntrypoint(kQuickInvokeStaticTrampolineWithAccessCheck),
"Non-direct C stub marked direct.");
- qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
+ qpoints->pInvokeSuperTrampolineWithAccessCheck =
+ art_quick_invoke_super_trampoline_with_access_check;
static_assert(!IsDirectEntrypoint(kQuickInvokeSuperTrampolineWithAccessCheck),
"Non-direct C stub marked direct.");
- qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+ qpoints->pInvokeVirtualTrampolineWithAccessCheck =
+ art_quick_invoke_virtual_trampoline_with_access_check;
static_assert(!IsDirectEntrypoint(kQuickInvokeVirtualTrampolineWithAccessCheck),
"Non-direct C stub marked direct.");
@@ -267,6 +272,9 @@
static_assert(IsDirectEntrypoint(kQuickA64Load), "Non-direct C stub marked direct.");
qpoints->pA64Store = QuasiAtomic::Write64;
static_assert(IsDirectEntrypoint(kQuickA64Store), "Non-direct C stub marked direct.");
+
+ qpoints->pReadBarrierJni = ReadBarrierJni;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierJni), "Non-direct C stub marked direct.");
};
} // namespace art
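The MIPS init code above pairs every assignment with a static_assert against IsDirectEntrypoint, so each stub's direct/non-direct classification is verified at compile time. A minimal self-contained instance of that pattern (the predicate's policy here is an illustrative stand-in):

    enum QuickEntrypoint {
      kQuickIdivmod,
      kQuickReadBarrierJni,
    };

    // Illustrative policy: which entrypoints are direct C stubs on this target.
    static constexpr bool IsDirectEntrypoint(QuickEntrypoint ep) {
      return ep == kQuickIdivmod;
    }

    static_assert(IsDirectEntrypoint(kQuickIdivmod), "Direct C stub not marked direct.");
    static_assert(!IsDirectEntrypoint(kQuickReadBarrierJni),
                  "Non-direct C stub marked direct.");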
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 0c2250e..92b180e 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -439,14 +439,13 @@
/*
* All generated callsites for interface invokes and invocation slow paths will load arguments
* as usual - except instead of loading arg0/$a0 with the target Method*, arg0/$a0 will contain
- * the method_idx. This wrapper will save arg1-arg3, load the caller's Method*, align the
- * stack and call the appropriate C helper.
+ * the method_idx. This wrapper will save arg1-arg3, and call the appropriate C helper.
* NOTE: "this" is first visable argument of the target, and so can be found in arg1/$a1.
*
* The helper will attempt to locate the target and return a 64-bit result in $v0/$v1 consisting
* of the target Method* in $v0 and method->code_ in $v1.
*
- * If unsuccessful, the helper will return NULL/NULL. There will be a pending exception in the
+ * If unsuccessful, the helper will return null/null. There will be a pending exception in the
* thread and we branch to another stub to deliver it.
*
* On success this wrapper will restore arguments and *jump* to the target, leaving the lr
@@ -456,15 +455,13 @@
.extern \cxx_name
ENTRY \c_name
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME # save callee saves in case allocation triggers GC
- lw $a2, FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE+ARG_SLOT_SIZE($sp) # pass caller Method*
- addiu $t0, $sp, ARG_SLOT_SIZE # save $sp (remove arg slots)
- move $a3, rSELF # pass Thread::Current
- jal \cxx_name # (method_idx, this, caller, Thread*, $sp)
- sw $t0, 16($sp) # pass $sp
- move $a0, $v0 # save target Method*
+ move $a2, rSELF # pass Thread::Current
+ jal \cxx_name # (method_idx, this, Thread*, $sp)
+ addiu $a3, $sp, ARG_SLOT_SIZE # pass $sp (remove arg slots)
+ move $a0, $v0 # save target Method*
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
beqz $v0, 1f
- move $t9, $v1 # save $v0->code_
+ move $t9, $v1 # save $v0->code_
jalr $zero, $t9
nop
1:
@@ -484,7 +481,7 @@
* Invocation stub for quick code.
* On entry:
* a0 = method pointer
- * a1 = argument array or NULL for no argument methods
+ * a1 = argument array or null for no argument methods
* a2 = size of argument array in bytes
* a3 = (managed) thread pointer
* [sp + 16] = JValue* result
@@ -520,7 +517,7 @@
lw $a3, 12($sp) # copy arg value for a3
lw $t9, MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32($a0) # get pointer to the code
jalr $t9 # call the method
- sw $zero, 0($sp) # store NULL for method* at bottom of frame
+ sw $zero, 0($sp) # store null for method* at bottom of frame
move $sp, $fp # restore the stack
lw $s0, 0($sp)
.cfi_restore 16
@@ -983,6 +980,16 @@
END art_quick_set_obj_instance
// Macro to facilitate adding new allocation entrypoints.
+.macro ONE_ARG_DOWNCALL name, entrypoint, return
+ .extern \entrypoint
+ENTRY \name
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ jal \entrypoint
+ move $a1, rSELF # pass Thread::Current
+ \return
+END \name
+.endm
+
.macro TWO_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
@@ -1003,34 +1010,43 @@
END \name
.endm
+.macro FOUR_ARG_DOWNCALL name, entrypoint, return
+ .extern \entrypoint
+ENTRY \name
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ jal \entrypoint
+ sw rSELF, 16($sp) # pass Thread::Current
+ \return
+END \name
+.endm
+
// Generate the allocation entrypoints for each allocator.
GENERATE_ALL_ALLOC_ENTRYPOINTS
/*
* Entry from managed code to resolve a string, this stub will allocate a String and deliver an
- * exception on error. On success the String is returned. R0 holds the referring method,
- * R1 holds the string index. The fast path check for hit in strings cache has already been
- * performed.
+ * exception on error. On success the String is returned. A0 holds the string index. The fast
+ * path check for hit in strings cache has already been performed.
*/
-TWO_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
/*
* Entry from managed code when uninitialized static storage, this stub will run the class
* initializer and deliver the exception on error. On success the static storage base is
* returned.
*/
-TWO_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
/*
* Entry from managed code when dex cache misses for a type_idx.
*/
-TWO_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
/*
* Entry from managed code when type_idx needs to be checked for access and dex cache may also
* miss.
*/
-TWO_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
/*
* Called by managed code when the value in rSUSPEND has been decremented to 0.
diff --git a/runtime/arch/mips64/asm_support_mips64.S b/runtime/arch/mips64/asm_support_mips64.S
index 10976bb..2613777 100644
--- a/runtime/arch/mips64/asm_support_mips64.S
+++ b/runtime/arch/mips64/asm_support_mips64.S
@@ -27,7 +27,8 @@
#define rSELF $s1
- // Declare a function called name, sets up $gp.
+ // Declare a function called name, sets up $gp.
+ // This macro modifies t8.
.macro ENTRY name
.type \name, %function
.global \name
@@ -35,10 +36,11 @@
.balign 16
\name:
.cfi_startproc
+ // Set up $gp and store the previous $gp value to $t8. It will be pushed to the
+ // stack after the frame has been constructed.
+ .cpsetup $t9, $t8, \name
// Ensure we get a sane starting CFA.
.cfi_def_cfa $sp,0
- // Load $gp. We expect that ".set noreorder" is in effect.
- .cpload $t9
// Declare a local convenience label to be branched to when $gp is already set up.
.L\name\()_gp_set:
.endm
diff --git a/runtime/arch/mips64/context_mips64.cc b/runtime/arch/mips64/context_mips64.cc
index ce99b40..6b3f4c9 100644
--- a/runtime/arch/mips64/context_mips64.cc
+++ b/runtime/arch/mips64/context_mips64.cc
@@ -18,7 +18,7 @@
#include "mirror/art_method-inl.h"
#include "quick/quick_method_frame_info.h"
-#include "util.h"
+#include "utils.h"
namespace art {
namespace mips64 {
diff --git a/runtime/arch/mips64/context_mips64.h b/runtime/arch/mips64/context_mips64.h
index 2cc2b8d..ebc036c 100644
--- a/runtime/arch/mips64/context_mips64.h
+++ b/runtime/arch/mips64/context_mips64.h
@@ -79,7 +79,7 @@
NO_RETURN void DoLongJump() OVERRIDE;
private:
- // Pointers to registers in the stack, initialized to NULL except for the special cases below.
+ // Pointers to registers in the stack, initialized to null except for the special cases below.
uintptr_t* gprs_[kNumberOfGpuRegisters];
uint64_t* fprs_[kNumberOfFpuRegisters];
// Hold values for sp and ra (return address) if they are not located within a stack frame.
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
index 4a3bf02..321c27b 100644
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ b/runtime/arch/mips64/entrypoints_init_mips64.cc
@@ -135,15 +135,15 @@
qpoints->pL2f = art_l2f;
qpoints->pD2iz = art_d2i;
qpoints->pF2iz = art_f2i;
- qpoints->pIdivmod = NULL;
+ qpoints->pIdivmod = nullptr;
qpoints->pD2l = art_d2l;
qpoints->pF2l = art_f2l;
qpoints->pLdiv = artLdiv;
qpoints->pLmod = artLmod;
qpoints->pLmul = artLmul;
- qpoints->pShlLong = NULL;
- qpoints->pShrLong = NULL;
- qpoints->pUshrLong = NULL;
+ qpoints->pShlLong = nullptr;
+ qpoints->pShrLong = nullptr;
+ qpoints->pUshrLong = nullptr;
// Intrinsics
qpoints->pIndexOf = art_quick_indexof;
@@ -154,11 +154,16 @@
qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
- qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
- qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
- qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
- qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
- qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+ qpoints->pInvokeDirectTrampolineWithAccessCheck =
+ art_quick_invoke_direct_trampoline_with_access_check;
+ qpoints->pInvokeInterfaceTrampolineWithAccessCheck =
+ art_quick_invoke_interface_trampoline_with_access_check;
+ qpoints->pInvokeStaticTrampolineWithAccessCheck =
+ art_quick_invoke_static_trampoline_with_access_check;
+ qpoints->pInvokeSuperTrampolineWithAccessCheck =
+ art_quick_invoke_super_trampoline_with_access_check;
+ qpoints->pInvokeVirtualTrampolineWithAccessCheck =
+ art_quick_invoke_virtual_trampoline_with_access_check;
// Thread
qpoints->pTestSuspend = art_quick_test_suspend;
@@ -175,6 +180,9 @@
// Atomic 64-bit load/store
qpoints->pA64Load = QuasiAtomic::Read64;
qpoints->pA64Store = QuasiAtomic::Write64;
+
+ // Read barrier
+ qpoints->pReadBarrierJni = ReadBarrierJni;
};
} // namespace art
diff --git a/runtime/arch/mips64/jni_entrypoints_mips64.S b/runtime/arch/mips64/jni_entrypoints_mips64.S
index 1085666..70d7d97 100644
--- a/runtime/arch/mips64/jni_entrypoints_mips64.S
+++ b/runtime/arch/mips64/jni_entrypoints_mips64.S
@@ -44,8 +44,11 @@
.cfi_rel_offset 5, 8
sd $a0, 0($sp)
.cfi_rel_offset 4, 0
- jal artFindNativeMethod # (Thread*)
move $a0, $s1 # pass Thread::Current()
+ jal artFindNativeMethod # (Thread*)
+ .cpreturn # Restore gp from t8 in branch delay slot. gp is not used
+ # anymore, and t8 may be clobbered in artFindNativeMethod.
+
ld $a0, 0($sp) # restore registers from stack
.cfi_restore 4
ld $a1, 8($sp)
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 3d502e6..b7320a6 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -27,6 +27,19 @@
.extern artDeliverPendingExceptionFromCode
/*
+ * Macro that sets up $gp and stores the previous $gp value to $t8.
+ * This macro modifies v1 and t8.
+ */
+.macro SETUP_GP
+ move $v1, $ra
+ bal 1f
+ nop
+1:
+ .cpsetup $ra, $t8, 1b
+ move $ra, $v1
+.endm
+
+ /*
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kSaveAll)
* callee-save: padding + $f24-$f31 + $s0-$s7 + $gp + $ra + $s8 = 19 total + 1x8 bytes padding
@@ -44,8 +57,8 @@
.cfi_rel_offset 31, 152
sd $s8, 144($sp)
.cfi_rel_offset 30, 144
- sd $gp, 136($sp)
- .cfi_rel_offset 28, 136
+ sd $t8, 136($sp) # t8 holds caller's gp, now save it to the stack.
+ .cfi_rel_offset 28, 136 # Value from gp is pushed, so set the cfi offset accordingly.
sd $s7, 128($sp)
.cfi_rel_offset 23, 128
sd $s6, 120($sp)
@@ -102,8 +115,8 @@
.cfi_rel_offset 31, 72
sd $s8, 64($sp)
.cfi_rel_offset 30, 64
- sd $gp, 56($sp)
- .cfi_rel_offset 28, 56
+ sd $t8, 56($sp) # t8 holds caller's gp, now save it to the stack.
+ .cfi_rel_offset 28, 56 # Value from gp is pushed, so set the cfi offset accordingly.
sd $s7, 48($sp)
.cfi_rel_offset 23, 48
sd $s6, 40($sp)
@@ -130,7 +143,7 @@
.cfi_restore 31
ld $s8, 64($sp)
.cfi_restore 30
- ld $gp, 56($sp)
+ ld $t8, 56($sp) # Restore gp back to its temp storage.
.cfi_restore 28
ld $s7, 48($sp)
.cfi_restore 23
@@ -146,6 +159,7 @@
.cfi_restore 18
daddiu $sp, $sp, 80
.cfi_adjust_cfa_offset -80
+ .cpreturn
.endm
.macro RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
@@ -153,7 +167,7 @@
.cfi_restore 31
ld $s8, 64($sp)
.cfi_restore 30
- ld $gp, 56($sp)
+ ld $t8, 56($sp) # Restore gp back to its temp storage.
.cfi_restore 28
ld $s7, 48($sp)
.cfi_restore 23
@@ -167,6 +181,7 @@
.cfi_restore 19
ld $s2, 8($sp)
.cfi_restore 18
+ .cpreturn
jalr $zero, $ra
daddiu $sp, $sp, 80
.cfi_adjust_cfa_offset -80
@@ -175,12 +190,6 @@
// This assumes the top part of these stack frame types are identical.
#define REFS_AND_ARGS_MINUS_REFS_SIZE (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE - FRAME_SIZE_REFS_ONLY_CALLEE_SAVE)
- /*
- * Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kRefsAndArgs). Restoration assumes
- * non-moving GC.
- * callee-save: padding + $f12-$f19 + $a1-$a7 + $s2-$s7 + $gp + $ra + $s8 = 24 total + 1 words padding + Method*
- */
.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
daddiu $sp, $sp, -208
.cfi_adjust_cfa_offset 208
@@ -194,8 +203,8 @@
.cfi_rel_offset 31, 200
sd $s8, 192($sp)
.cfi_rel_offset 30, 192
- sd $gp, 184($sp)
- .cfi_rel_offset 28, 184
+ sd $t8, 184($sp) # t8 holds caller's gp, now save it to the stack.
+ .cfi_rel_offset 28, 184 # Value from gp is pushed, so set the cfi offset accordingly.
sd $s7, 176($sp)
.cfi_rel_offset 23, 176
sd $s6, 168($sp)
@@ -232,16 +241,15 @@
s.d $f14, 32($sp)
s.d $f13, 24($sp) # = kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset
s.d $f12, 16($sp) # This isn't necessary to store.
-
- # 1x8 bytes paddig + Method*
- ld $v0, %got(_ZN3art7Runtime9instance_E)($gp)
- ld $v0, 0($v0)
- THIS_LOAD_REQUIRES_READ_BARRIER
- lwu $v0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET($v0)
- sw $v0, 0($sp) # Place Method* at bottom of stack.
- sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
+ # 1x8 bytes padding + Method*
.endm
+ /*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kRefsAndArgs). Restoration assumes
+ * non-moving GC.
+ * callee-save: padding + $f12-$f19 + $a1-$a7 + $s2-$s7 + $gp + $ra + $s8 = 24 total + 1 word of padding + Method*
+ */
.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
# load appropriate callee-save-method
@@ -253,12 +261,18 @@
sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
.endm
+.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_A0
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
+ sw $a0, 0($sp) # Place Method* at bottom of stack.
+ sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame.
+.endm
+
.macro RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
ld $ra, 200($sp)
.cfi_restore 31
ld $s8, 192($sp)
.cfi_restore 30
- ld $gp, 184($sp)
+ ld $t8, 184($sp) # Restore gp back to its temp storage.
.cfi_restore 28
ld $s7, 176($sp)
.cfi_restore 23
@@ -297,6 +311,7 @@
l.d $f13, 24($sp)
l.d $f12, 16($sp)
+ .cpreturn
daddiu $sp, $sp, 208
.cfi_adjust_cfa_offset -208
.endm
@@ -307,6 +322,7 @@
* exception is Thread::Current()->exception_
*/
.macro DELIVER_PENDING_EXCEPTION
+ SETUP_GP
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME # save callee saves for throw
dla $t9, artDeliverPendingExceptionFromCode
jalr $zero, $t9 # artDeliverPendingExceptionFromCode(Thread*)
@@ -348,7 +364,7 @@
* On entry $a0 is uint32_t* gprs_ and $a1 is uint32_t* fprs_
* FIXME: just guessing about the shape of the jmpbuf. Where will pc be?
*/
-ENTRY art_quick_do_long_jump
+ENTRY_NO_GP art_quick_do_long_jump
l.d $f0, 0($a1)
l.d $f1, 8($a1)
l.d $f2, 16($a1)
@@ -503,7 +519,7 @@
* The helper will attempt to locate the target and return a 128-bit result in $v0/$v1 consisting
* of the target Method* in $v0 and method->code_ in $v1.
*
- * If unsuccessful, the helper will return NULL/NULL. There will be a pending exception in the
+ * If unsuccessful, the helper will return null/null. There will be a pending exception in the
* thread and we branch to another stub to deliver it.
*
* On success this wrapper will restore arguments and *jump* to the target, leaving the ra
@@ -513,10 +529,9 @@
.extern \cxx_name
ENTRY \c_name
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME # save callee saves in case allocation triggers GC
- lwu $a2, FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE($sp) # pass caller Method*
- move $a3, rSELF # pass Thread::Current
- jal \cxx_name # (method_idx, this, caller, Thread*, $sp)
- move $a4, $sp # pass $sp
+ move $a2, rSELF # pass Thread::Current
+ jal \cxx_name # (method_idx, this, Thread*, $sp)
+ move $a3, $sp # pass $sp
move $a0, $v0 # save target Method*
move $t9, $v1 # save $v0->code_
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
@@ -605,7 +620,7 @@
* a4 = JValue* result
* a5 = shorty
*/
-ENTRY art_quick_invoke_stub
+ENTRY_NO_GP art_quick_invoke_stub
# push a4, a5, s0(rSUSPEND), s1(rSELF), s8, ra onto the stack
daddiu $sp, $sp, -48
.cfi_adjust_cfa_offset 48
@@ -656,7 +671,7 @@
# call method (a0 and a1 have been untouched)
lwu $a1, 0($a1) # make a1 = this ptr
sw $a1, 4($sp) # copy this ptr (skip 4 bytes for method*)
- sw $zero, 0($sp) # store NULL for method* at bottom of frame
+ sw $zero, 0($sp) # store null for method* at bottom of frame
ld $t9, MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64($a0) # get pointer to the code
jalr $t9 # call the method
nop
@@ -707,7 +722,7 @@
* a4 = JValue* result
* a5 = shorty
*/
-ENTRY art_quick_invoke_static_stub
+ENTRY_NO_GP art_quick_invoke_static_stub
# push a4, a5, s0(rSUSPEND), s1(rSELF), s8, ra, onto the stack
daddiu $sp, $sp, -48
@@ -758,7 +773,7 @@
call_sfn:
# call method (a0 has been untouched)
- sw $zero, 0($sp) # store NULL for method* at bottom of frame
+ sw $zero, 0($sp) # store null for method* at bottom of frame
ld $t9, MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64($a0) # get pointer to the code
jalr $t9 # call the method
nop
@@ -851,7 +866,8 @@
sd $a1, 8($sp)
sd $a0, 0($sp)
jal artIsAssignableFromCode
- nop
+ .cpreturn # Restore gp from t8 in branch delay slot.
+ # t8 may be clobbered in artIsAssignableFromCode.
beq $v0, $zero, .Lthrow_class_cast_exception
ld $ra, 24($sp)
jalr $zero, $ra
@@ -863,6 +879,7 @@
ld $a0, 0($sp)
daddiu $sp, $sp, 32
.cfi_adjust_cfa_offset -32
+ SETUP_GP
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
dla $t9, artThrowClassCastException
jalr $zero, $t9 # artThrowClassCastException (Class*, Class*, Thread*)
@@ -908,13 +925,13 @@
daddu $t1, $t1, $t0
sb $t0, ($t1)
jalr $zero, $ra
- nop
+ .cpreturn # Restore gp from t8 in branch delay slot.
.Ldo_aput_null:
dsll $a1, $a1, 2
daddu $t0, $a0, $a1
sw $a2, MIRROR_OBJECT_ARRAY_DATA_OFFSET($t0)
jalr $zero, $ra
- nop
+ .cpreturn # Restore gp from t8 in branch delay slot.
.Lcheck_assignability:
daddiu $sp, $sp, -64
.cfi_adjust_cfa_offset 64
@@ -927,7 +944,8 @@
move $a1, $t1
move $a0, $t0
jal artIsAssignableFromCode # (Class*, Class*)
- nop
+ .cpreturn # Restore gp from t8 in branch delay slot.
+ # t8 may be clobbered in artIsAssignableFromCode.
ld $ra, 56($sp)
ld $t9, 24($sp)
ld $a2, 16($sp)
@@ -935,6 +953,7 @@
ld $a0, 0($sp)
daddiu $sp, $sp, 64
.cfi_adjust_cfa_offset -64
+ SETUP_GP
bne $v0, $zero, .Ldo_aput
nop
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
@@ -945,45 +964,6 @@
END art_quick_aput_obj
/*
- * Entry from managed code when uninitialized static storage, this stub will run the class
- * initializer and deliver the exception on error. On success the static storage base is
- * returned.
- */
- .extern artInitializeStaticStorageFromCode
-ENTRY art_quick_initialize_static_storage
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- # artInitializeStaticStorageFromCode(uint32_t type_idx, Method* referrer, Thread*)
- jal artInitializeStaticStorageFromCode
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_initialize_static_storage
-
- /*
- * Entry from managed code when dex cache misses for a type_idx.
- */
- .extern artInitializeTypeFromCode
-ENTRY art_quick_initialize_type
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- # artInitializeTypeFromCode(uint32_t type_idx, Method* referrer, Thread*)
- jal artInitializeTypeFromCode
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_initialize_type
-
- /*
- * Entry from managed code when type_idx needs to be checked for access and dex cache may also
- * miss.
- */
- .extern artInitializeTypeAndVerifyAccessFromCode
-ENTRY art_quick_initialize_type_and_verify_access
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- # artInitializeTypeFromCode(uint32_t type_idx, Method* referrer, Thread*)
- jal artInitializeTypeAndVerifyAccessFromCode
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_initialize_type_and_verify_access
-
- /*
* Called by managed code to resolve a static field and load a boolean primitive value.
*/
.extern artGetBooleanStaticFromCode
@@ -1272,20 +1252,16 @@
RETURN_IF_ZERO
END art_quick_set_obj_instance
- /*
- * Entry from managed code to resolve a string, this stub will allocate a String and deliver an
- * exception on error. On success the String is returned. R0 holds the referring method,
- * R1 holds the string index. The fast path check for hit in strings cache has already been
- * performed.
- */
- .extern artResolveStringFromCode
-ENTRY art_quick_resolve_string
+// Macro to facilitate adding new allocation entrypoints.
+.macro ONE_ARG_DOWNCALL name, entrypoint, return
+ .extern \entrypoint
+ENTRY \name
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- # artResolveStringFromCode(Method* referrer, uint32_t string_idx, Thread*, $sp)
- jal artResolveStringFromCode
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_resolve_string
+ jal \entrypoint
+ move $a1, rSELF # pass Thread::Current
+ \return
+END \name
+.endm
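
The downcall macros above are GAS macros, but the generation pattern is easier to see in C++ preprocessor terms: a single macro stamps out one thin wrapper per entrypoint, passing the managed arguments through and supplying Thread::Current() as the trailing parameter itself. A hedged sketch with hypothetical names, not the real ART plumbing:

    #include <cstdio>

    struct Thread {
      static Thread* Current() { static Thread t; return &t; }
    };

    // Analogue of ONE_ARG_DOWNCALL: the wrapper forwards arg0 and appends the
    // current thread, mirroring "move $a1, rSELF" before the jal.
    #define ONE_ARG_DOWNCALL_SKETCH(name, entrypoint) \
      void* name(unsigned arg0) { return entrypoint(arg0, Thread::Current()); }

    void* artResolveStringSketch(unsigned string_idx, Thread* self) {
      std::printf("resolve string_idx=%u on thread %p\n", string_idx,
                  static_cast<void*>(self));
      return nullptr;
    }

    ONE_ARG_DOWNCALL_SKETCH(art_quick_resolve_string_sketch, artResolveStringSketch)

    int main() { art_quick_resolve_string_sketch(42); }
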
// Macro to facilitate adding new allocation entrypoints.
.macro TWO_ARG_DOWNCALL name, entrypoint, return
@@ -1308,10 +1284,45 @@
END \name
.endm
+.macro FOUR_ARG_DOWNCALL name, entrypoint, return
+ .extern \entrypoint
+ENTRY \name
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ jal \entrypoint
+ move $a4, rSELF # pass Thread::Current
+ \return
+END \name
+.endm
+
// Generate the allocation entrypoints for each allocator.
GENERATE_ALL_ALLOC_ENTRYPOINTS
/*
+ * Entry from managed code to resolve a string; this stub will allocate a String and deliver an
+ * exception on error. On success the String is returned. A0 holds the string index. The fast
+ * path check for hit in strings cache has already been performed.
+ */
+ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+
+ /*
+ * Entry from managed code when static storage is uninitialized; this stub will run the class
+ * initializer and deliver the exception on error. On success the static storage base is
+ * returned.
+ */
+ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+
+ /*
+ * Entry from managed code when dex cache misses for a type_idx.
+ */
+ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+
+ /*
+ * Entry from managed code when type_idx needs to be checked for access and dex cache may also
+ * miss.
+ */
+ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+
+ /*
* Called by managed code when the value in rSUSPEND has been decremented to 0.
*/
.extern artTestSuspendFromCode
@@ -1320,7 +1331,7 @@
bne $a0, $zero, 1f
daddiu rSUSPEND, $zero, SUSPEND_CHECK_INTERVAL # reset rSUSPEND to SUSPEND_CHECK_INTERVAL
jalr $zero, $ra
- nop
+ .cpreturn # Restore gp from t8 in branch delay slot.
1:
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves for stack crawl
jal artTestSuspendFromCode # (Thread*)
@@ -1334,8 +1345,7 @@
*/
.extern artQuickProxyInvokeHandler
ENTRY art_quick_proxy_invoke_handler
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME
- sd $a0, 0($sp) # place proxy method at bottom of frame
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_A0
move $a2, rSELF # pass Thread::Current
jal artQuickProxyInvokeHandler # (Method* proxy method, receiver, Thread*, SP)
move $a3, $sp # pass $sp
@@ -1360,6 +1370,7 @@
dsll $t0, 2 # convert target method offset to bytes
daddu $a0, $t0 # get address of target method
dla $t9, art_quick_invoke_interface_trampoline
+ .cpreturn
jalr $zero, $t9
lwu $a0, MIRROR_OBJECT_ARRAY_DATA_OFFSET($a0) # load the target method
END art_quick_imt_conflict_trampoline
@@ -1385,8 +1396,7 @@
.extern artQuickGenericJniTrampoline
.extern artQuickGenericJniEndTrampoline
ENTRY art_quick_generic_jni_trampoline
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
- sd $a0, 0($sp) # store native ArtMethod* to bottom of stack
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_A0
move $s8, $sp # save $sp
# prepare for call to artQuickGenericJniTrampoline(Thread*, SP)
@@ -1489,8 +1499,7 @@
.global art_quick_instrumentation_exit
art_quick_instrumentation_exit:
.cfi_startproc
- daddiu $t9, $ra, 4 # put current address into $t9 to rebuild $gp
- .cpload $t9
+ SETUP_GP
move $ra, $zero # link register is to here, so clobber with 0 for later checks
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
move $t0, $sp # remember bottom of caller's frame
@@ -1502,8 +1511,11 @@
mov.d $f15, $f0 # pass fpr result
move $a2, $v0 # pass gpr result
move $a1, $t0 # pass $sp
- jal artInstrumentationMethodExitFromCode # (Thread*, SP, gpr_res, fpr_res)
move $a0, rSELF # pass Thread::Current
+ jal artInstrumentationMethodExitFromCode # (Thread*, SP, gpr_res, fpr_res)
+ .cpreturn # Restore gp from t8 in branch delay slot. gp is not used anymore,
+ # and t8 may be clobbered in artInstrumentationMethodExitFromCode.
+
move $t9, $v0 # set aside returned link register
move $ra, $v1 # set link register for deoptimization
ld $v0, 0($sp) # restore return values
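
Taken together, the mips64 edits in this file implement a single discipline: SETUP_GP rebuilds $gp from the current pc and parks the caller's $gp in $t8, the frame macros spill and reload $t8 in the slot that previously held $gp, and .cpreturn moves $t8 back into $gp, frequently from a branch delay slot. A conceptual register-level model in C++, illustrative only and not ART code:

    #include <cassert>
    #include <cstdint>

    struct Regs { uintptr_t gp; uintptr_t t8; };

    // What .cpsetup $ra, $t8, 1b does inside SETUP_GP: stash the caller's gp
    // and derive the new one from the captured pc plus a link-time displacement.
    void SetupGp(Regs& r, uintptr_t pc, uintptr_t gp_disp) {
      r.t8 = r.gp;
      r.gp = pc + gp_disp;
    }

    // What .cpreturn does: put the caller's gp back.
    void CpReturn(Regs& r) { r.gp = r.t8; }

    int main() {
      Regs r{0x1000, 0};
      SetupGp(r, 0x4000, 0x8000);
      assert(r.gp == 0xC000 && r.t8 == 0x1000);
      CpReturn(r);
      assert(r.gp == 0x1000);
    }
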
diff --git a/runtime/arch/quick_alloc_entrypoints.S b/runtime/arch/quick_alloc_entrypoints.S
index 037c26e..fe04bf5 100644
--- a/runtime/arch/quick_alloc_entrypoints.S
+++ b/runtime/arch/quick_alloc_entrypoints.S
@@ -35,6 +35,12 @@
THREE_ARG_DOWNCALL art_quick_check_and_alloc_array\c_suffix, artCheckAndAllocArrayFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY.
THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_with_access_check\c_suffix, artCheckAndAllocArrayFromCodeWithAccessCheck\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+// Called by managed code to allocate a string from bytes
+FOUR_ARG_DOWNCALL art_quick_alloc_string_from_bytes\c_suffix, artAllocStringFromBytesFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+// Called by managed code to allocate a string from chars
+THREE_ARG_DOWNCALL art_quick_alloc_string_from_chars\c_suffix, artAllocStringFromCharsFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+// Called by managed code to allocate a string from string
+ONE_ARG_DOWNCALL art_quick_alloc_string_from_string\c_suffix, artAllocStringFromStringFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
.endm
.macro GENERATE_ALL_ALLOC_ENTRYPOINTS
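
The downcall arities chosen above pin down the shape of the new string allocation helpers: from-bytes carries four managed arguments, from-chars three, from-string one, and each downcall appends Thread::Current(). The declarations below are a hedged reconstruction for illustration only; the parameter names and exact types are assumptions, not copied from the runtime headers:

    #include <cstdint>

    struct Thread;
    struct ByteArraySketch;
    struct CharArraySketch;
    struct StringSketch;

    // Four managed args + Thread* (FOUR_ARG_DOWNCALL).
    StringSketch* artAllocStringFromBytesSketch(ByteArraySketch* bytes, int32_t high,
                                                int32_t offset, int32_t byte_count,
                                                Thread* self);
    // Three managed args + Thread* (THREE_ARG_DOWNCALL).
    StringSketch* artAllocStringFromCharsSketch(int32_t offset, int32_t char_count,
                                                CharArraySketch* chars, Thread* self);
    // One managed arg + Thread* (ONE_ARG_DOWNCALL).
    StringSketch* artAllocStringFromStringSketch(StringSketch* to_copy, Thread* self);

    int main() {}  // declarations only; the bodies live in the runtime
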
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 9cccf7c..a7d24b8 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -117,7 +117,7 @@
"add sp, sp, #20\n\t"
"blx r3\n\t" // Call the stub
- "add sp, sp, #12\n\t" // Pop nullptr and padding
+ "add sp, sp, #12\n\t" // Pop null and padding
".cfi_adjust_cfa_offset -12\n\t"
"pop {r1-r12, lr}\n\t" // Restore state
".cfi_adjust_cfa_offset -52\n\t"
@@ -261,6 +261,132 @@
"d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
"d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
"memory"); // clobber.
+#elif defined(__mips__) && !defined(__LP64__)
+ __asm__ __volatile__ (
+ // Spill a0-a3 and t0-t7 which we say we don't clobber. May contain args.
+ "addiu $sp, $sp, -64\n\t"
+ "sw $a0, 0($sp)\n\t"
+ "sw $a1, 4($sp)\n\t"
+ "sw $a2, 8($sp)\n\t"
+ "sw $a3, 12($sp)\n\t"
+ "sw $t0, 16($sp)\n\t"
+ "sw $t1, 20($sp)\n\t"
+ "sw $t2, 24($sp)\n\t"
+ "sw $t3, 28($sp)\n\t"
+ "sw $t4, 32($sp)\n\t"
+ "sw $t5, 36($sp)\n\t"
+ "sw $t6, 40($sp)\n\t"
+ "sw $t7, 44($sp)\n\t"
+ // Spill gp register since it is caller save.
+ "sw $gp, 52($sp)\n\t"
+
+ "addiu $sp, $sp, -16\n\t" // Reserve stack space, 16B aligned.
+ "sw %[referrer], 0($sp)\n\t"
+
+ // Push everything on the stack, so we don't rely on the order.
+ "addiu $sp, $sp, -20\n\t"
+ "sw %[arg0], 0($sp)\n\t"
+ "sw %[arg1], 4($sp)\n\t"
+ "sw %[arg2], 8($sp)\n\t"
+ "sw %[code], 12($sp)\n\t"
+ "sw %[self], 16($sp)\n\t"
+
+ // Load call params into the right registers.
+ "lw $a0, 0($sp)\n\t"
+ "lw $a1, 4($sp)\n\t"
+ "lw $a2, 8($sp)\n\t"
+ "lw $t9, 12($sp)\n\t"
+ "lw $s1, 16($sp)\n\t"
+ "addiu $sp, $sp, 20\n\t"
+
+ "jalr $t9\n\t" // Call the stub.
+ "nop\n\t"
+ "addiu $sp, $sp, 16\n\t" // Drop the quick "frame".
+
+ // Restore stuff not named clobbered.
+ "lw $a0, 0($sp)\n\t"
+ "lw $a1, 4($sp)\n\t"
+ "lw $a2, 8($sp)\n\t"
+ "lw $a3, 12($sp)\n\t"
+ "lw $t0, 16($sp)\n\t"
+ "lw $t1, 20($sp)\n\t"
+ "lw $t2, 24($sp)\n\t"
+ "lw $t3, 28($sp)\n\t"
+ "lw $t4, 32($sp)\n\t"
+ "lw $t5, 36($sp)\n\t"
+ "lw $t6, 40($sp)\n\t"
+ "lw $t7, 44($sp)\n\t"
+ // Restore gp.
+ "lw $gp, 52($sp)\n\t"
+ "addiu $sp, $sp, 64\n\t" // Free stack space, now sp as on entry.
+
+ "move %[result], $v0\n\t" // Store the call result.
+ : [result] "=r" (result)
+ : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
+ [referrer] "r"(referrer)
+ : "at", "v0", "v1", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "t8", "t9", "k0", "k1",
+ "fp", "ra",
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13",
+ "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25", "f26",
+ "f27", "f28", "f29", "f30", "f31",
+ "memory"); // clobber.
+#elif defined(__mips__) && defined(__LP64__)
+ __asm__ __volatile__ (
+ // Spill a0-a7 which we say we don't clobber. May contain args.
+ "daddiu $sp, $sp, -64\n\t"
+ "sd $a0, 0($sp)\n\t"
+ "sd $a1, 8($sp)\n\t"
+ "sd $a2, 16($sp)\n\t"
+ "sd $a3, 24($sp)\n\t"
+ "sd $a4, 32($sp)\n\t"
+ "sd $a5, 40($sp)\n\t"
+ "sd $a6, 48($sp)\n\t"
+ "sd $a7, 56($sp)\n\t"
+
+ "daddiu $sp, $sp, -16\n\t" // Reserve stack space, 16B aligned.
+ "sd %[referrer], 0($sp)\n\t"
+
+ // Push everything on the stack, so we don't rely on the order.
+ "daddiu $sp, $sp, -40\n\t"
+ "sd %[arg0], 0($sp)\n\t"
+ "sd %[arg1], 8($sp)\n\t"
+ "sd %[arg2], 16($sp)\n\t"
+ "sd %[code], 24($sp)\n\t"
+ "sd %[self], 32($sp)\n\t"
+
+ // Load call params into the right registers.
+ "ld $a0, 0($sp)\n\t"
+ "ld $a1, 8($sp)\n\t"
+ "ld $a2, 16($sp)\n\t"
+ "ld $t9, 24($sp)\n\t"
+ "ld $s1, 32($sp)\n\t"
+ "daddiu $sp, $sp, 40\n\t"
+
+ "jalr $t9\n\t" // Call the stub.
+ "nop\n\t"
+ "daddiu $sp, $sp, 16\n\t" // Drop the quick "frame".
+
+ // Restore stuff not named clobbered.
+ "ld $a0, 0($sp)\n\t"
+ "ld $a1, 8($sp)\n\t"
+ "ld $a2, 16($sp)\n\t"
+ "ld $a3, 24($sp)\n\t"
+ "ld $a4, 32($sp)\n\t"
+ "ld $a5, 40($sp)\n\t"
+ "ld $a6, 48($sp)\n\t"
+ "ld $a7, 56($sp)\n\t"
+ "daddiu $sp, $sp, 64\n\t"
+
+ "move %[result], $v0\n\t" // Store the call result.
+ : [result] "=r" (result)
+ : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
+ [referrer] "r"(referrer)
+ : "at", "v0", "v1", "t0", "t1", "t2", "t3", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ "t8", "t9", "k0", "k1", "fp", "ra",
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13",
+ "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25", "f26",
+ "f27", "f28", "f29", "f30", "f31",
+ "memory"); // clobber.
#elif defined(__x86_64__) && !defined(__APPLE__) && defined(__clang__)
// Note: Uses the native convention
// TODO: Set the thread?
@@ -269,7 +395,7 @@
"pushq (%%rsp)\n\t" // & 16B alignment padding
".cfi_adjust_cfa_offset 16\n\t"
"call *%%rax\n\t" // Call the stub
- "addq $16, %%rsp\n\t" // Pop nullptr and padding
+ "addq $16, %%rsp\n\t" // Pop null and padding
".cfi_adjust_cfa_offset -16\n\t"
: "=a" (result)
// Use the result from rax
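
Stripped of the register spilling and clobber bookkeeping, every one of these per-arch blocks performs the same operation: treat code as a raw function address, put three size_t arguments and the Thread* where the quick ABI expects them, call, and capture the integer result. A portable, non-asm sketch of just that core, under the assumption that the callee follows the native calling convention; the real test needs inline assembly precisely because quick stubs do not:

    #include <cstdint>
    #include <cstdio>

    using StubFn = size_t (*)(size_t, size_t, size_t);

    size_t Invoke3Portable(size_t arg0, size_t arg1, size_t arg2, uintptr_t code) {
      // The asm version additionally pins Thread* into a fixed register
      // ($s1 on mips) and materializes a referrer "frame"; both omitted here.
      return reinterpret_cast<StubFn>(code)(arg0, arg1, arg2);
    }

    size_t DemoStub(size_t a, size_t b, size_t c) { return a + b + c; }

    int main() {
      size_t r = Invoke3Portable(1, 2, 3, reinterpret_cast<uintptr_t>(&DemoStub));
      std::printf("result = %zu\n", r);  // prints 6
    }
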
@@ -344,7 +470,7 @@
"add sp, sp, #24\n\t"
"blx r3\n\t" // Call the stub
- "add sp, sp, #12\n\t" // Pop nullptr and padding
+ "add sp, sp, #12\n\t" // Pop null and padding
".cfi_adjust_cfa_offset -12\n\t"
"pop {r1-r12, lr}\n\t" // Restore state
".cfi_adjust_cfa_offset -52\n\t"
@@ -487,6 +613,136 @@
"d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
"d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
"memory"); // clobber.
+#elif defined(__mips__) && !defined(__LP64__)
+ __asm__ __volatile__ (
+ // Spill a0-a3 and t0-t7 which we say we don't clobber. May contain args.
+ "addiu $sp, $sp, -64\n\t"
+ "sw $a0, 0($sp)\n\t"
+ "sw $a1, 4($sp)\n\t"
+ "sw $a2, 8($sp)\n\t"
+ "sw $a3, 12($sp)\n\t"
+ "sw $t0, 16($sp)\n\t"
+ "sw $t1, 20($sp)\n\t"
+ "sw $t2, 24($sp)\n\t"
+ "sw $t3, 28($sp)\n\t"
+ "sw $t4, 32($sp)\n\t"
+ "sw $t5, 36($sp)\n\t"
+ "sw $t6, 40($sp)\n\t"
+ "sw $t7, 44($sp)\n\t"
+ // Spill gp register since it is caller save.
+ "sw $gp, 52($sp)\n\t"
+
+ "addiu $sp, $sp, -16\n\t" // Reserve stack space, 16B aligned.
+ "sw %[referrer], 0($sp)\n\t"
+
+ // Push everything on the stack, so we don't rely on the order.
+ "addiu $sp, $sp, -24\n\t"
+ "sw %[arg0], 0($sp)\n\t"
+ "sw %[arg1], 4($sp)\n\t"
+ "sw %[arg2], 8($sp)\n\t"
+ "sw %[code], 12($sp)\n\t"
+ "sw %[self], 16($sp)\n\t"
+ "sw %[hidden], 20($sp)\n\t"
+
+ // Load call params into the right registers.
+ "lw $a0, 0($sp)\n\t"
+ "lw $a1, 4($sp)\n\t"
+ "lw $a2, 8($sp)\n\t"
+ "lw $t9, 12($sp)\n\t"
+ "lw $s1, 16($sp)\n\t"
+ "lw $t0, 20($sp)\n\t"
+ "addiu $sp, $sp, 24\n\t"
+
+ "jalr $t9\n\t" // Call the stub.
+ "nop\n\t"
+ "addiu $sp, $sp, 16\n\t" // Drop the quick "frame".
+
+ // Restore stuff not named clobbered.
+ "lw $a0, 0($sp)\n\t"
+ "lw $a1, 4($sp)\n\t"
+ "lw $a2, 8($sp)\n\t"
+ "lw $a3, 12($sp)\n\t"
+ "lw $t0, 16($sp)\n\t"
+ "lw $t1, 20($sp)\n\t"
+ "lw $t2, 24($sp)\n\t"
+ "lw $t3, 28($sp)\n\t"
+ "lw $t4, 32($sp)\n\t"
+ "lw $t5, 36($sp)\n\t"
+ "lw $t6, 40($sp)\n\t"
+ "lw $t7, 44($sp)\n\t"
+ // Restore gp.
+ "lw $gp, 52($sp)\n\t"
+ "addiu $sp, $sp, 64\n\t" // Free stack space, now sp as on entry.
+
+ "move %[result], $v0\n\t" // Store the call result.
+ : [result] "=r" (result)
+ : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
+ [referrer] "r"(referrer), [hidden] "r"(hidden)
+ : "at", "v0", "v1", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "t8", "t9", "k0", "k1",
+ "fp", "ra",
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13",
+ "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25", "f26",
+ "f27", "f28", "f29", "f30", "f31",
+ "memory"); // clobber.
+#elif defined(__mips__) && defined(__LP64__)
+ __asm__ __volatile__ (
+ // Spill a0-a7 which we say we don't clobber. May contain args.
+ "daddiu $sp, $sp, -64\n\t"
+ "sd $a0, 0($sp)\n\t"
+ "sd $a1, 8($sp)\n\t"
+ "sd $a2, 16($sp)\n\t"
+ "sd $a3, 24($sp)\n\t"
+ "sd $a4, 32($sp)\n\t"
+ "sd $a5, 40($sp)\n\t"
+ "sd $a6, 48($sp)\n\t"
+ "sd $a7, 56($sp)\n\t"
+
+ "daddiu $sp, $sp, -16\n\t" // Reserve stack space, 16B aligned.
+ "sd %[referrer], 0($sp)\n\t"
+
+ // Push everything on the stack, so we don't rely on the order.
+ "daddiu $sp, $sp, -48\n\t"
+ "sd %[arg0], 0($sp)\n\t"
+ "sd %[arg1], 8($sp)\n\t"
+ "sd %[arg2], 16($sp)\n\t"
+ "sd %[code], 24($sp)\n\t"
+ "sd %[self], 32($sp)\n\t"
+ "sd %[hidden], 40($sp)\n\t"
+
+ // Load call params into the right registers.
+ "ld $a0, 0($sp)\n\t"
+ "ld $a1, 8($sp)\n\t"
+ "ld $a2, 16($sp)\n\t"
+ "ld $t9, 24($sp)\n\t"
+ "ld $s1, 32($sp)\n\t"
+ "ld $t0, 40($sp)\n\t"
+ "daddiu $sp, $sp, 48\n\t"
+
+ "jalr $t9\n\t" // Call the stub.
+ "nop\n\t"
+ "daddiu $sp, $sp, 16\n\t" // Drop the quick "frame".
+
+ // Restore stuff not named clobbered.
+ "ld $a0, 0($sp)\n\t"
+ "ld $a1, 8($sp)\n\t"
+ "ld $a2, 16($sp)\n\t"
+ "ld $a3, 24($sp)\n\t"
+ "ld $a4, 32($sp)\n\t"
+ "ld $a5, 40($sp)\n\t"
+ "ld $a6, 48($sp)\n\t"
+ "ld $a7, 56($sp)\n\t"
+ "daddiu $sp, $sp, 64\n\t"
+
+ "move %[result], $v0\n\t" // Store the call result.
+ : [result] "=r" (result)
+ : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
+ [referrer] "r"(referrer), [hidden] "r"(hidden)
+ : "at", "v0", "v1", "t0", "t1", "t2", "t3", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ "t8", "t9", "k0", "k1", "fp", "ra",
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13",
+ "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25", "f26",
+ "f27", "f28", "f29", "f30", "f31",
+ "memory"); // clobber.
#elif defined(__x86_64__) && !defined(__APPLE__) && defined(__clang__)
// Note: Uses the native convention
// TODO: Set the thread?
@@ -495,7 +751,7 @@
"pushq (%%rsp)\n\t" // & 16B alignment padding
".cfi_adjust_cfa_offset 16\n\t"
"call *%%rbx\n\t" // Call the stub
- "addq $16, %%rsp\n\t" // Pop nullptr and padding
+ "addq $16, %%rsp\n\t" // Pop null and padding
".cfi_adjust_cfa_offset -16\n\t"
: "=a" (result)
// Use the result from rax
@@ -521,7 +777,8 @@
// Method with 32b arg0, 64b arg1
size_t Invoke3UWithReferrer(size_t arg0, uint64_t arg1, uintptr_t code, Thread* self,
mirror::ArtMethod* referrer) {
-#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
+#if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \
+ defined(__aarch64__)
// Just pass through.
return Invoke3WithReferrer(arg0, arg1, 0U, code, self, referrer);
#else
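
The widened #if works because on an LP64 target a uint64_t occupies a single argument register, so this 32b/64b variant can forward to the generic three-argument invoker unchanged; on 32-bit targets (the #else branch) the 64-bit value must be split across two slots. A small sketch of the pass-through, with hypothetical helper names:

    #include <cstdint>
    #include <cstdio>

    size_t Invoke3Sketch(size_t a0, size_t a1, size_t a2) {
      std::printf("a0=%zx a1=%zx a2=%zx\n", a0, a1, a2);
      return a0;
    }

    size_t Invoke3USketch(size_t arg0, uint64_t arg1) {
      // LP64 assumption: size_t is wide enough that arg1 needs no splitting.
      static_assert(sizeof(size_t) >= sizeof(uint64_t), "needs a 64-bit target");
      return Invoke3Sketch(arg0, static_cast<size_t>(arg1), 0u);
    }

    int main() { Invoke3USketch(1, 0x123456789abcdef0ull); }
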
@@ -549,7 +806,7 @@
TEST_F(StubTest, Memcpy) {
-#if defined(__i386__) || (defined(__x86_64__) && !defined(__APPLE__))
+#if defined(__i386__) || (defined(__x86_64__) && !defined(__APPLE__)) || defined(__mips__)
Thread* self = Thread::Current();
uint32_t orig[20];
@@ -586,7 +843,8 @@
}
TEST_F(StubTest, LockObject) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+ (defined(__x86_64__) && !defined(__APPLE__))
static constexpr size_t kThinLockLoops = 100;
Thread* self = Thread::Current();
@@ -659,7 +917,8 @@
// NO_THREAD_SAFETY_ANALYSIS as we do not want to grab exclusive mutator lock for MonitorInfo.
static void TestUnlockObject(StubTest* test) NO_THREAD_SAFETY_ANALYSIS {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+ (defined(__x86_64__) && !defined(__APPLE__))
static constexpr size_t kThinLockLoops = 100;
Thread* self = Thread::Current();
@@ -809,12 +1068,14 @@
TestUnlockObject(this);
}
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+ (defined(__x86_64__) && !defined(__APPLE__))
extern "C" void art_quick_check_cast(void);
#endif
TEST_F(StubTest, CheckCast) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+ (defined(__x86_64__) && !defined(__APPLE__))
Thread* self = Thread::Current();
const uintptr_t art_quick_check_cast = StubTest::GetEntrypoint(self, kQuickCheckCast);
@@ -865,7 +1126,8 @@
TEST_F(StubTest, APutObj) {
TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+ (defined(__x86_64__) && !defined(__APPLE__))
Thread* self = Thread::Current();
// Do not check non-checked ones, we'd need handlers and stuff...
@@ -998,7 +1260,8 @@
TEST_F(StubTest, AllocObject) {
TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+ (defined(__x86_64__) && !defined(__APPLE__))
// This will lead to OOM error messages in the log.
ScopedLogSeverity sls(LogSeverity::FATAL);
@@ -1032,7 +1295,7 @@
}
{
- // We can use nullptr in the second argument as we do not need a method here (not used in
+ // We can use null in the second argument as we do not need a method here (not used in
// resolved/initialized cases)
size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 0U,
StubTest::GetEntrypoint(self, kQuickAllocObjectResolved),
@@ -1046,7 +1309,7 @@
}
{
- // We can use nullptr in the second argument as we do not need a method here (not used in
+ // We can use null in the second argument as we do not need a method here (not used in
// resolved/initialized cases)
size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 0U,
StubTest::GetEntrypoint(self, kQuickAllocObjectInitialized),
@@ -1123,7 +1386,8 @@
TEST_F(StubTest, AllocObjectArray) {
TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+ (defined(__x86_64__) && !defined(__APPLE__))
// TODO: Check the "Unresolved" allocation stubs
// This will lead to OOM error messages in the log.
@@ -1166,7 +1430,7 @@
}
{
- // We can use nullptr in the second argument as we do not need a method here (not used in
+ // We can use null in the second argument as we do not need a method here (not used in
// resolved/initialized cases)
size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 10U,
reinterpret_cast<size_t>(nullptr),
@@ -1229,32 +1493,15 @@
"aacaacaacaacaacaacaacaacaacaacaacaac", // This one's over.
"aacaacaacaacaacaacaacaacaacaacaacaaca" }; // As is this one. We need a separate one to
// defeat object-equal optimizations.
- static constexpr size_t kBaseStringCount = arraysize(c);
- static constexpr size_t kStringCount = 2 * kBaseStringCount;
+ static constexpr size_t kStringCount = arraysize(c);
StackHandleScope<kStringCount> hs(self);
Handle<mirror::String> s[kStringCount];
- for (size_t i = 0; i < kBaseStringCount; ++i) {
+ for (size_t i = 0; i < kStringCount; ++i) {
s[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), c[i]));
}
- RandGen r(0x1234);
-
- for (size_t i = kBaseStringCount; i < kStringCount; ++i) {
- s[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), c[i - kBaseStringCount]));
- int32_t length = s[i]->GetLength();
- if (length > 1) {
- // Set a random offset and length.
- int32_t new_offset = 1 + (r.next() % (length - 1));
- int32_t rest = length - new_offset - 1;
- int32_t new_length = 1 + (rest > 0 ? r.next() % rest : 0);
-
- s[i]->SetField32<false>(mirror::String::CountOffset(), new_length);
- s[i]->SetField32<false>(mirror::String::OffsetOffset(), new_offset);
- }
- }
-
// TODO: wide characters
// Matrix of expectations. First component is first parameter. Note we only check against the
@@ -1309,7 +1556,8 @@
static void GetSetBooleanStatic(ArtField* f, Thread* self,
mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+ (defined(__x86_64__) && !defined(__APPLE__))
constexpr size_t num_values = 5;
uint8_t values[num_values] = { 0, 1, 2, 128, 0xFF };
@@ -1339,7 +1587,8 @@
static void GetSetByteStatic(ArtField* f, Thread* self, mirror::ArtMethod* referrer,
StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+ (defined(__x86_64__) && !defined(__APPLE__))
int8_t values[] = { -128, -64, 0, 64, 127 };
for (size_t i = 0; i < arraysize(values); ++i) {
@@ -1369,7 +1618,8 @@
static void GetSetBooleanInstance(Handle<mirror::Object>* obj, ArtField* f, Thread* self,
mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+ (defined(__x86_64__) && !defined(__APPLE__))
uint8_t values[] = { 0, true, 2, 128, 0xFF };
for (size_t i = 0; i < arraysize(values); ++i) {
@@ -1403,7 +1653,8 @@
static void GetSetByteInstance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+ (defined(__x86_64__) && !defined(__APPLE__))
int8_t values[] = { -128, -64, 0, 64, 127 };
for (size_t i = 0; i < arraysize(values); ++i) {
@@ -1437,7 +1688,8 @@
static void GetSetCharStatic(ArtField* f, Thread* self, mirror::ArtMethod* referrer,
StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+ (defined(__x86_64__) && !defined(__APPLE__))
uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };
for (size_t i = 0; i < arraysize(values); ++i) {
@@ -1466,7 +1718,8 @@
static void GetSetShortStatic(ArtField* f, Thread* self,
mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+ (defined(__x86_64__) && !defined(__APPLE__))
int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };
for (size_t i = 0; i < arraysize(values); ++i) {
@@ -1496,7 +1749,8 @@
static void GetSetCharInstance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+ (defined(__x86_64__) && !defined(__APPLE__))
uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };
for (size_t i = 0; i < arraysize(values); ++i) {
@@ -1529,7 +1783,8 @@
static void GetSetShortInstance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+ (defined(__x86_64__) && !defined(__APPLE__))
int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };
for (size_t i = 0; i < arraysize(values); ++i) {
@@ -1563,7 +1818,8 @@
static void GetSet32Static(ArtField* f, Thread* self, mirror::ArtMethod* referrer,
StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+ (defined(__x86_64__) && !defined(__APPLE__))
uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };
for (size_t i = 0; i < arraysize(values); ++i) {
@@ -1580,7 +1836,11 @@
self,
referrer);
+#if defined(__mips__) && defined(__LP64__)
+ EXPECT_EQ(static_cast<uint32_t>(res), values[i]) << "Iteration " << i;
+#else
EXPECT_EQ(res, values[i]) << "Iteration " << i;
+#endif
}
#else
UNUSED(f, self, referrer, test);
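
The mips64-only cast above exists because the n64 ABI keeps 32-bit values sign-extended in 64-bit registers: a stored field value of 0xFFFFFFFF comes back from the stub as 0xFFFFFFFFFFFFFFFF in the size_t result, so truncating to uint32_t before comparing makes the expectation hold on both conventions. A self-contained illustration:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t value = 0xFFFFFFFFu;
      // Model the n64 return path: a 32-bit result sign-extended to 64 bits.
      uint64_t res = static_cast<uint64_t>(
          static_cast<int64_t>(static_cast<int32_t>(value)));
      assert(res == 0xFFFFFFFFFFFFFFFFull);         // raw equality would fail
      assert(static_cast<uint32_t>(res) == value);  // truncation recovers it
    }
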
@@ -1594,7 +1854,8 @@
static void GetSet32Instance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+ (defined(__x86_64__) && !defined(__APPLE__))
uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };
for (size_t i = 0; i < arraysize(values); ++i) {
@@ -1628,7 +1889,8 @@
}
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+ (defined(__x86_64__) && !defined(__APPLE__))
static void set_and_check_static(uint32_t f_idx, mirror::Object* val, Thread* self,
mirror::ArtMethod* referrer, StubTest* test)
@@ -1653,7 +1915,8 @@
static void GetSetObjStatic(ArtField* f, Thread* self, mirror::ArtMethod* referrer,
StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+ (defined(__x86_64__) && !defined(__APPLE__))
set_and_check_static(f->GetDexFieldIndex(), nullptr, self, referrer, test);
// Allocate a string object for simplicity.
@@ -1670,7 +1933,8 @@
}
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+ (defined(__x86_64__) && !defined(__APPLE__))
static void set_and_check_instance(ArtField* f, mirror::Object* trg,
mirror::Object* val, Thread* self, mirror::ArtMethod* referrer,
StubTest* test)
@@ -1698,7 +1962,8 @@
static void GetSetObjInstance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+ (defined(__x86_64__) && !defined(__APPLE__))
set_and_check_instance(f, obj->Get(), nullptr, self, referrer, test);
// Allocate a string object for simplicity.
@@ -1720,7 +1985,8 @@
static void GetSet64Static(ArtField* f, Thread* self, mirror::ArtMethod* referrer,
StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
+#if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \
+ defined(__aarch64__)
uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
for (size_t i = 0; i < arraysize(values); ++i) {
@@ -1750,7 +2016,8 @@
static void GetSet64Instance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
+#if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \
+ defined(__aarch64__)
uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
for (size_t i = 0; i < arraysize(values); ++i) {
@@ -1788,9 +2055,9 @@
JNIEnv* env = Thread::Current()->GetJniEnv();
jclass jc = env->FindClass("AllFields");
- CHECK(jc != NULL);
+ CHECK(jc != nullptr);
jobject o = env->AllocObject(jc);
- CHECK(o != NULL);
+ CHECK(o != nullptr);
ScopedObjectAccess soa(self);
StackHandleScope<4> hs(self);
@@ -1950,7 +2217,8 @@
}
TEST_F(StubTest, IMT) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+ (defined(__x86_64__) && !defined(__APPLE__))
TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
Thread* self = Thread::Current();
diff --git a/runtime/arch/x86/context_x86.h b/runtime/arch/x86/context_x86.h
index ace4670..a783d48 100644
--- a/runtime/arch/x86/context_x86.h
+++ b/runtime/arch/x86/context_x86.h
@@ -92,7 +92,7 @@
XMM7_0, XMM7_1,
kNumberOfFloatRegisters};
- // Pointers to register locations. Values are initialized to NULL or the special registers below.
+ // Pointers to register locations. Values are initialized to null or the special registers below.
uintptr_t* gprs_[kNumberOfCpuRegisters];
uint32_t* fprs_[kNumberOfFloatRegisters];
// Hold values for esp and eip if they are not located within a stack frame. EIP is somewhat
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index c012173..737f4d1 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -96,17 +96,6 @@
qpoints->pUnlockObject = art_quick_unlock_object;
// Math
- // points->pCmpgDouble = NULL; // Not needed on x86.
- // points->pCmpgFloat = NULL; // Not needed on x86.
- // points->pCmplDouble = NULL; // Not needed on x86.
- // points->pCmplFloat = NULL; // Not needed on x86.
- // qpoints->pFmod = NULL; // Not needed on x86.
- // qpoints->pL2d = NULL; // Not needed on x86.
- // qpoints->pFmodf = NULL; // Not needed on x86.
- // qpoints->pL2f = NULL; // Not needed on x86.
- // points->pD2iz = NULL; // Not needed on x86.
- // points->pF2iz = NULL; // Not needed on x86.
- // qpoints->pIdivmod = NULL; // Not needed on x86.
qpoints->pD2l = art_quick_d2l;
qpoints->pF2l = art_quick_f2l;
qpoints->pLdiv = art_quick_ldiv;
@@ -125,11 +114,16 @@
qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
- qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
- qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
- qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
- qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
- qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+ qpoints->pInvokeDirectTrampolineWithAccessCheck =
+ art_quick_invoke_direct_trampoline_with_access_check;
+ qpoints->pInvokeInterfaceTrampolineWithAccessCheck =
+ art_quick_invoke_interface_trampoline_with_access_check;
+ qpoints->pInvokeStaticTrampolineWithAccessCheck =
+ art_quick_invoke_static_trampoline_with_access_check;
+ qpoints->pInvokeSuperTrampolineWithAccessCheck =
+ art_quick_invoke_super_trampoline_with_access_check;
+ qpoints->pInvokeVirtualTrampolineWithAccessCheck =
+ art_quick_invoke_virtual_trampoline_with_access_check;
// Thread
qpoints->pTestSuspend = art_quick_test_suspend;
@@ -144,6 +138,9 @@
// Deoptimize
qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_slow_path;
+
+ // Read barrier
+ qpoints->pReadBarrierJni = ReadBarrierJni;
};
} // namespace art
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
index 27a4adf..2de69aa 100644
--- a/runtime/arch/x86/fault_handler_x86.cc
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -191,6 +191,7 @@
break;
case 0x81: // group 1, word immediate.
+ case 0xc7: // mov
modrm = *pc++;
has_modrm = true;
immediate_size = operand_size_prefix ? 2 : 4;
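
Context for the 0xc7 addition: the fault handler computes the faulting instruction's length so it can skip past it, and mov r/m, imm carries the same ModRM byte plus 16/32-bit immediate shape as the group-1 0x81 opcodes. A simplified sketch of just the immediate-size portion of that decoding, deliberately ignoring the SIB and displacement bytes the real decoder also accounts for:

    #include <cstdint>
    #include <cstdio>

    uint32_t ImmediateSize(uint8_t opcode, bool operand_size_prefix) {
      switch (opcode) {
        case 0x81:  // group 1, word immediate
        case 0xc7:  // mov r/m, imm (the newly handled case)
          return operand_size_prefix ? 2 : 4;  // 0x66 prefix shrinks the imm
        default:
          return 0;
      }
    }

    int main() {
      std::printf("0xc7 immediate: %u bytes, %u with a 0x66 prefix\n",
                  ImmediateSize(0xc7, false), ImmediateSize(0xc7, true));
    }
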
@@ -239,7 +240,7 @@
// this code the same for both 32 and 64 bit.
Thread* self = Thread::Current();
- CHECK(self != nullptr); // This will cause a SIGABRT if self is nullptr.
+ CHECK(self != nullptr); // This will cause a SIGABRT if self is null.
struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
uc->CTX_JMP_BUF = reinterpret_cast<uintptr_t>(*self->GetNestedSignalState());
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index c5a020a..d62c1bc 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -278,14 +278,13 @@
/*
* All generated callsites for interface invokes and invocation slow paths will load arguments
* as usual - except instead of loading arg0/r0 with the target Method*, arg0/r0 will contain
- * the method_idx. This wrapper will save arg1-arg3, load the caller's Method*, align the
- * stack and call the appropriate C helper.
+ * the method_idx. This wrapper will save arg1-arg3 and call the appropriate C helper.
* NOTE: "this" is first visible argument of the target, and so can be found in arg1/r1.
*
* The helper will attempt to locate the target and return a 64-bit result in r0/r1 consisting
* of the target Method* in r0 and method->code_ in r1.
*
- * If unsuccessful, the helper will return NULL/NULL. There will bea pending exception in the
+ * If unsuccessful, the helper will return null/null. There will be a pending exception in the
* thread and we branch to another stub to deliver it.
*
* On success this wrapper will restore arguments and *jump* to the target, leaving the lr
@@ -297,19 +296,15 @@
movl %esp, %edx // remember SP
// Outgoing argument set up
- subl MACRO_LITERAL(12), %esp // alignment padding
- CFI_ADJUST_CFA_OFFSET(12)
PUSH edx // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- pushl 32+32(%edx) // pass caller Method*
- CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass arg2
PUSH eax // pass arg1
call VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, Thread*, SP)
movl %edx, %edi // save code pointer in EDI
- addl MACRO_LITERAL(36), %esp // Pop arguments skip eax
- CFI_ADJUST_CFA_OFFSET(-36)
+ addl MACRO_LITERAL(20), %esp // Pop arguments, skipping eax
+ CFI_ADJUST_CFA_OFFSET(-20)
// Restore FPRs.
movsd 0(%esp), %xmm0
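
This mirrors the mips64 trampoline change earlier in the patch: the invoke-with-access-check helpers no longer receive the caller Method* as an explicit argument and instead recover it from the saved frame through the SP parameter, which is why both the pushl of 32+32(%edx) and the matching stack adjustment disappear. A hedged sketch of the signature shift, with illustrative types only:

    #include <cstdint>
    #include <utility>

    struct ArtMethodSketch;
    struct ThreadSketch;
    using MethodAndCode = std::pair<ArtMethodSketch*, const void*>;

    // Before: MethodAndCode Find(uint32_t method_idx, void* this_object,
    //                            ArtMethodSketch* caller, ThreadSketch* self,
    //                            void** sp);
    // After: the caller is derived from sp inside the helper.
    MethodAndCode FindMethodSketch(uint32_t method_idx, void* this_object,
                                   ThreadSketch* self, void** sp);

    int main() {}  // declaration-only sketch of the new argument list
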
@@ -408,7 +403,7 @@
* On entry:
* [sp] = return address
* [sp + 4] = method pointer
- * [sp + 8] = argument array or NULL for no argument methods
+ * [sp + 8] = argument array or null for no argument methods
* [sp + 12] = size of argument array in bytes
* [sp + 16] = (managed) thread pointer
* [sp + 20] = JValue* result
@@ -442,7 +437,7 @@
subl LITERAL(20), %ebx // remove space for return address, ebx, ebp, esi and edi
subl %ebx, %esp // reserve stack space for argument array
- movl LITERAL(0), (%esp) // store NULL for method*
+ movl LITERAL(0), (%esp) // store null for method*
// Copy arg array into stack.
movl 28(%ebp), %ecx // ECX = size of args
@@ -506,7 +501,7 @@
* On entry:
* [sp] = return address
* [sp + 4] = method pointer
- * [sp + 8] = argument array or NULL for no argument methods
+ * [sp + 8] = argument array or null for no argument methods
* [sp + 12] = size of argument array in bytes
* [sp + 16] = (managed) thread pointer
* [sp + 20] = JValue* result
@@ -539,7 +534,7 @@
subl LITERAL(20), %ebx // remove space for return address, ebx, ebp, esi and edi
subl %ebx, %esp // reserve stack space for argument array
- movl LITERAL(0), (%esp) // store NULL for method*
+ movl LITERAL(0), (%esp) // store null for method*
// Copy arg array into stack.
movl 28(%ebp), %ecx // ECX = size of args
@@ -685,6 +680,26 @@
END_FUNCTION RAW_VAR(c_name, 0)
END_MACRO
+MACRO3(FOUR_ARG_DOWNCALL, c_name, cxx_name, return_macro)
+ DEFINE_FUNCTION RAW_VAR(c_name, 0)
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
+ // Outgoing argument set up
+ subl MACRO_LITERAL(12), %esp // alignment padding
+ CFI_ADJUST_CFA_OFFSET(12)
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ CFI_ADJUST_CFA_OFFSET(4)
+ PUSH ebx // pass arg4
+ PUSH edx // pass arg3
+ PUSH ecx // pass arg2
+ PUSH eax // pass arg1
+ call VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, arg4, Thread*)
+ addl MACRO_LITERAL(32), %esp // pop arguments
+ CFI_ADJUST_CFA_OFFSET(-32)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ CALL_MACRO(return_macro, 2) // return or deliver exception
+ END_FUNCTION RAW_VAR(c_name, 0)
+END_MACRO
+
MACRO3(ONE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION RAW_VAR(c_name, 0)
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
@@ -789,6 +804,12 @@
THREE_ARG_DOWNCALL art_quick_check_and_alloc_array ## c_suffix, artCheckAndAllocArrayFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
#define GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(c_suffix, cxx_suffix) \
THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_with_access_check ## c_suffix, artCheckAndAllocArrayFromCodeWithAccessCheck ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(c_suffix, cxx_suffix) \
+ FOUR_ARG_DOWNCALL art_quick_alloc_string_from_bytes ## c_suffix, artAllocStringFromBytesFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(c_suffix, cxx_suffix) \
+ THREE_ARG_DOWNCALL art_quick_alloc_string_from_chars ## c_suffix, artAllocStringFromCharsFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(c_suffix, cxx_suffix) \
+ ONE_ARG_DOWNCALL art_quick_alloc_string_from_string ## c_suffix, artAllocStringFromStringFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
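
The ## pasting above means each allocator suffix pair mints a distinct symbol: GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_dlmalloc, DlMalloc), for example, expands to a FOUR_ARG_DOWNCALL over art_quick_alloc_string_from_bytes_dlmalloc and artAllocStringFromBytesFromCodeDlMalloc. The same mechanism in plain C++, with hypothetical names:

    #include <cstdio>

    // One macro parameterized by a suffix pair generates a family of uniquely
    // named functions, one per allocator variant.
    #define GENERATE_ALLOC_SKETCH(c_suffix, cxx_suffix)               \
      void alloc_string##c_suffix() {                                 \
        std::printf("would call artAllocFromCode%s\n", #cxx_suffix);  \
      }

    GENERATE_ALLOC_SKETCH(_dlmalloc, DlMalloc)  // defines alloc_string_dlmalloc
    GENERATE_ALLOC_SKETCH(_rosalloc, RosAlloc)  // defines alloc_string_rosalloc

    int main() {
      alloc_string_dlmalloc();
      alloc_string_rosalloc();
    }
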
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_dlmalloc, DlMalloc)
@@ -799,6 +820,9 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_dlmalloc_instrumented, DlMallocInstrumented)
@@ -809,6 +833,9 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
@@ -819,6 +846,9 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc_instrumented, RosAllocInstrumented)
@@ -829,6 +859,9 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_bump_pointer, BumpPointer)
@@ -839,6 +872,9 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_bump_pointer_instrumented, BumpPointerInstrumented)
@@ -849,6 +885,9 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
@@ -859,6 +898,9 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab_instrumented, TLABInstrumented)
@@ -869,6 +911,9 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region, Region)
@@ -879,6 +924,9 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region, Region)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region, Region)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region, Region)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_instrumented, RegionInstrumented)
@@ -889,6 +937,9 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_instrumented, RegionInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_instrumented, RegionInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_instrumented, RegionInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
@@ -899,6 +950,9 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab_instrumented, RegionTLABInstrumented)
@@ -909,11 +963,14 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab_instrumented, RegionTLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab_instrumented, RegionTLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab_instrumented, RegionTLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab_instrumented, RegionTLABInstrumented)
-TWO_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO
-TWO_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO
-TWO_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO
-TWO_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO
+ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO
+ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO
+ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO
+ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO
TWO_ARG_REF_DOWNCALL art_quick_handle_fill_data, artHandleFillArrayDataFromCode, RETURN_IF_EAX_ZERO
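
Both entrypoint files (this x86 one and the x86-64 one below) make the same TWO_ARG_DOWNCALL to ONE_ARG_DOWNCALL switch for the resolve/initialize stubs. The dropped argument is the referring method: presumably the helpers now recover it from the callee-save frame instead of receiving it from compiled code. Schematically, with illustrative names and parameter order that are not shown in this diff:

    // Illustrative signatures only; the _old/_new suffixes are not real.
    extern "C" mirror::String* artResolveStringFromCode_old(
        uint32_t string_idx, mirror::ArtMethod* referrer, Thread* self);  // TWO_ARG era
    extern "C" mirror::String* artResolveStringFromCode_new(
        uint32_t string_idx, Thread* self);  // ONE_ARG era: referrer found via the frame
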
@@ -1352,7 +1409,7 @@
call SYMBOL(artQuickResolutionTrampoline) // (Method* called, receiver, Thread*, SP)
movl %eax, %edi // remember code pointer in EDI
addl LITERAL(16), %esp // pop arguments
- test %eax, %eax // if code pointer is NULL goto deliver pending exception
+ test %eax, %eax // if code pointer is null goto deliver pending exception
jz 1f
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME_AND_JUMP
1:
@@ -1567,13 +1624,8 @@
PUSH edi // push callee save reg
mov MIRROR_STRING_COUNT_OFFSET(%eax), %edx
mov MIRROR_STRING_COUNT_OFFSET(%ecx), %ebx
- mov MIRROR_STRING_VALUE_OFFSET(%eax), %esi
- mov MIRROR_STRING_VALUE_OFFSET(%ecx), %edi
- mov MIRROR_STRING_OFFSET_OFFSET(%eax), %eax
- mov MIRROR_STRING_OFFSET_OFFSET(%ecx), %ecx
- /* Build pointers to the start of string data */
- lea MIRROR_CHAR_ARRAY_DATA_OFFSET(%esi, %eax, 2), %esi
- lea MIRROR_CHAR_ARRAY_DATA_OFFSET(%edi, %ecx, 2), %edi
+ lea MIRROR_STRING_VALUE_OFFSET(%eax), %esi
+ lea MIRROR_STRING_VALUE_OFFSET(%ecx), %edi
/* Calculate min length and count diff */
mov %edx, %ecx
mov %edx, %eax
diff --git a/runtime/arch/x86_64/context_x86_64.h b/runtime/arch/x86_64/context_x86_64.h
index d03aa45..c9b0ff6 100644
--- a/runtime/arch/x86_64/context_x86_64.h
+++ b/runtime/arch/x86_64/context_x86_64.h
@@ -79,7 +79,7 @@
NO_RETURN void DoLongJump() OVERRIDE;
private:
- // Pointers to register locations. Values are initialized to NULL or the special registers below.
+ // Pointers to register locations. Values are initialized to null or the special registers below.
uintptr_t* gprs_[kNumberOfCpuRegisters];
uint64_t* fprs_[kNumberOfFloatRegisters];
// Hold values for rsp and rip if they are not located within a stack frame. RIP is somewhat
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index 3bc0dc4..d0ab9d5 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -101,17 +101,6 @@
qpoints->pUnlockObject = art_quick_unlock_object;
// Math
- // points->pCmpgDouble = NULL; // Not needed on x86.
- // points->pCmpgFloat = NULL; // Not needed on x86.
- // points->pCmplDouble = NULL; // Not needed on x86.
- // points->pCmplFloat = NULL; // Not needed on x86.
- // qpoints->pFmod = NULL; // Not needed on x86.
- // qpoints->pL2d = NULL; // Not needed on x86.
- // qpoints->pFmodf = NULL; // Not needed on x86.
- // qpoints->pL2f = NULL; // Not needed on x86.
- // points->pD2iz = NULL; // Not needed on x86.
- // points->pF2iz = NULL; // Not needed on x86.
- // qpoints->pIdivmod = NULL; // Not needed on x86.
qpoints->pD2l = art_d2l;
qpoints->pF2l = art_f2l;
qpoints->pLdiv = art_quick_ldiv;
@@ -122,7 +111,6 @@
qpoints->pUshrLong = art_quick_lushr;
// Intrinsics
- // qpoints->pIndexOf = NULL; // Not needed on x86.
qpoints->pStringCompareTo = art_quick_string_compareto;
qpoints->pMemcpy = art_quick_memcpy;
@@ -130,11 +118,16 @@
qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
- qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
- qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
- qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
- qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
- qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+ qpoints->pInvokeDirectTrampolineWithAccessCheck =
+ art_quick_invoke_direct_trampoline_with_access_check;
+ qpoints->pInvokeInterfaceTrampolineWithAccessCheck =
+ art_quick_invoke_interface_trampoline_with_access_check;
+ qpoints->pInvokeStaticTrampolineWithAccessCheck =
+ art_quick_invoke_static_trampoline_with_access_check;
+ qpoints->pInvokeSuperTrampolineWithAccessCheck =
+ art_quick_invoke_super_trampoline_with_access_check;
+ qpoints->pInvokeVirtualTrampolineWithAccessCheck =
+ art_quick_invoke_virtual_trampoline_with_access_check;
// Thread
qpoints->pTestSuspend = art_quick_test_suspend;
@@ -149,6 +142,9 @@
// Deoptimize
qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_slow_path;
+
+ // Read barrier
+ qpoints->pReadBarrierJni = ReadBarrierJni;
#endif // __APPLE__
};
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index ce21f01..ddeb5b8 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -341,14 +341,13 @@
/*
* All generated callsites for interface invokes and invocation slow paths will load arguments
* as usual - except instead of loading arg0/rdi with the target Method*, arg0/rdi will contain
- * the method_idx. This wrapper will save arg1-arg3, load the caller's Method*, align the
- * stack and call the appropriate C helper.
+ * the method_idx. This wrapper will save arg1-arg3, and call the appropriate C helper.
* NOTE: "this" is first visible argument of the target, and so can be found in arg1/rsi.
*
* The helper will attempt to locate the target and return a 128-bit result in rax/rdx consisting
* of the target Method* in rax and method->code_ in rdx.
*
- * If unsuccessful, the helper will return NULL/????. There will be a pending exception in the
+ * If unsuccessful, the helper will return null/????. There will be a pending exception in the
* thread and we branch to another stub to deliver it.
*
* On success this wrapper will restore arguments and *jump* to the target, leaving the return
@@ -362,11 +361,10 @@
// Helper signature is always
// (method_idx, *this_object, *caller_method, *self, sp)
- movl FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE(%rsp), %edx // pass caller Method*
- movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread
- movq %rsp, %r8 // pass SP
+ movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread
+ movq %rsp, %rcx // pass SP
- call VAR(cxx_name, 1) // cxx_name(arg1, arg2, caller method*, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg1, arg2, Thread*, SP)
// save the code pointer
movq %rax, %rdi
movq %rdx, %rax
@@ -506,7 +504,7 @@
#if (STACK_REFERENCE_SIZE != 4)
#error "STACK_REFERENCE_SIZE(X86_64) size not as expected."
#endif
- movl LITERAL(0), (%rsp) // Store NULL for method*
+ movl LITERAL(0), (%rsp) // Store null for method*
movl %r10d, %ecx // Place size of args in rcx.
movq %rdi, %rax // rax := method to be called
@@ -554,7 +552,7 @@
* On entry:
* [sp] = return address
* rdi = method pointer
- * rsi = argument array or NULL if no arguments.
+ * rsi = argument array or null if no arguments.
* rdx = size of argument array in bytes
* rcx = (managed) thread pointer
* r8 = JValue* result
@@ -600,7 +598,7 @@
#if (STACK_REFERENCE_SIZE != 4)
#error "STACK_REFERENCE_SIZE(X86_64) size not as expected."
#endif
- movl LITERAL(0), (%rsp) // Store NULL for method*
+ movl LITERAL(0), (%rsp) // Store null for method*
movl %r10d, %ecx // Place size of args in rcx.
movq %rdi, %rax // rax := method to be called
@@ -738,6 +736,17 @@
END_FUNCTION VAR(c_name, 0)
END_MACRO
+MACRO3(FOUR_ARG_DOWNCALL, c_name, cxx_name, return_macro)
+ DEFINE_FUNCTION VAR(c_name, 0)
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ // Outgoing argument set up
+ movq %gs:THREAD_SELF_OFFSET, %r8 // pass Thread::Current()
+ call VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, arg4, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ CALL_MACRO(return_macro, 2) // return or deliver exception
+ END_FUNCTION VAR(c_name, 0)
+END_MACRO
+
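
The new FOUR_ARG_DOWNCALL leans on the SysV x86-64 calling convention, where the first five integer arguments travel in rdi, rsi, rdx, rcx and r8: compiled code has already placed the four managed arguments in rdi through rcx, so the macro only needs to load Thread::Current() into r8 before the call. A helper matching the cxx_name(arg1, arg2, arg3, arg4, Thread*) comment would look roughly like the sketch below; the parameter types are an assumption based on the String(byte[], int, int, int) constructor and are not shown in this diff.

    // Hypothetical C signature for a FOUR_ARG_DOWNCALL target.
    extern "C" mirror::String* artAllocStringFromBytesFromCodeTLAB(
        mirror::ByteArray* byte_array,  // arg1, in rdi
        int32_t high,                   // arg2, in rsi
        int32_t offset,                 // arg3, in rdx
        int32_t byte_count,             // arg4, in rcx
        Thread* self);                  // loaded into r8 by the macro
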
MACRO3(ONE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name, 0)
movl 8(%rsp), %esi // pass referrer
@@ -822,6 +831,12 @@
THREE_ARG_DOWNCALL art_quick_check_and_alloc_array ## c_suffix, artCheckAndAllocArrayFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
#define GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(c_suffix, cxx_suffix) \
THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_with_access_check ## c_suffix, artCheckAndAllocArrayFromCodeWithAccessCheck ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(c_suffix, cxx_suffix) \
+ FOUR_ARG_DOWNCALL art_quick_alloc_string_from_bytes ## c_suffix, artAllocStringFromBytesFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(c_suffix, cxx_suffix) \
+ THREE_ARG_DOWNCALL art_quick_alloc_string_from_chars ## c_suffix, artAllocStringFromCharsFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(c_suffix, cxx_suffix) \
+ ONE_ARG_DOWNCALL art_quick_alloc_string_from_string ## c_suffix, artAllocStringFromStringFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
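
Because c_suffix and cxx_suffix are token-pasted with ##, each GENERATE_* line below stamps out one stub per allocator. For instance, GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab, TLAB) expands to:

    FOUR_ARG_DOWNCALL art_quick_alloc_string_from_bytes_tlab, artAllocStringFromBytesFromCodeTLAB, RETURN_IF_RESULT_IS_NON_ZERO
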
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_dlmalloc, DlMalloc)
@@ -832,6 +847,9 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_dlmalloc_instrumented, DlMallocInstrumented)
@@ -842,6 +860,9 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
@@ -852,6 +873,9 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc_instrumented, RosAllocInstrumented)
@@ -862,6 +886,9 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_bump_pointer, BumpPointer)
@@ -872,6 +899,9 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_bump_pointer_instrumented, BumpPointerInstrumented)
@@ -882,6 +912,9 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_bump_pointer_instrumented, BumpPointerInstrumented)
DEFINE_FUNCTION art_quick_alloc_object_tlab
// Fast path tlab allocation.
@@ -929,6 +962,9 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab_instrumented, TLABInstrumented)
@@ -939,6 +975,9 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region, Region)
@@ -949,6 +988,9 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region, Region)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region, Region)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region, Region)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_instrumented, RegionInstrumented)
@@ -959,6 +1001,9 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_instrumented, RegionInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_instrumented, RegionInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_instrumented, RegionInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
@@ -969,6 +1014,9 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab_instrumented, RegionTLABInstrumented)
@@ -979,11 +1027,14 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab_instrumented, RegionTLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab_instrumented, RegionTLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab_instrumented, RegionTLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab_instrumented, RegionTLABInstrumented)
-TWO_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO
-TWO_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO
-TWO_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO
-TWO_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO
+ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO
+ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO
+ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO
+ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO
TWO_ARG_REF_DOWNCALL art_quick_handle_fill_data, artHandleFillArrayDataFromCode, RETURN_IF_EAX_ZERO
@@ -1302,7 +1353,7 @@
movq %rax, %r10 // Remember returned code pointer in R10.
movq (%rsp), %rdi // Load called method into RDI.
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
- testq %r10, %r10 // If code pointer is NULL goto deliver pending exception.
+ testq %r10, %r10 // If code pointer is null goto deliver pending exception.
jz 1f
jmp *%r10 // Tail call into method.
1:
@@ -1622,13 +1673,9 @@
DEFINE_FUNCTION art_quick_string_compareto
movl MIRROR_STRING_COUNT_OFFSET(%edi), %r8d
movl MIRROR_STRING_COUNT_OFFSET(%esi), %r9d
- movl MIRROR_STRING_VALUE_OFFSET(%edi), %r10d
- movl MIRROR_STRING_VALUE_OFFSET(%esi), %r11d
- movl MIRROR_STRING_OFFSET_OFFSET(%edi), %eax
- movl MIRROR_STRING_OFFSET_OFFSET(%esi), %ecx
/* Build pointers to the start of string data */
- leal MIRROR_CHAR_ARRAY_DATA_OFFSET(%r10d, %eax, 2), %esi
- leal MIRROR_CHAR_ARRAY_DATA_OFFSET(%r11d, %ecx, 2), %edi
+ leal MIRROR_STRING_VALUE_OFFSET(%edi), %edi
+ leal MIRROR_STRING_VALUE_OFFSET(%esi), %esi
/* Calculate min length and count diff */
movl %r8d, %ecx
movl %r8d, %eax
@@ -1638,8 +1685,8 @@
* At this point we have:
* eax: value to return if first part of strings are equal
* ecx: minimum among the lengths of the two strings
- * esi: pointer to this string data
- * edi: pointer to comp string data
+ * esi: pointer to comp string data
+ * edi: pointer to this string data
*/
jecxz .Lkeep_length
repe cmpsw // find nonmatching chars in [%esi] and [%edi], up to length %ecx
@@ -1648,8 +1695,8 @@
ret
.balign 16
.Lnot_equal:
- movzwl -2(%esi), %eax // get last compared char from this string
- movzwl -2(%edi), %ecx // get last compared char from comp string
+ movzwl -2(%edi), %eax // get last compared char from this string
+ movzwl -2(%esi), %ecx // get last compared char from comp string
subl %ecx, %eax // return the difference
ret
END_FUNCTION art_quick_string_compareto
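
Both compareto stubs (x86 earlier in this diff, x86-64 here) benefit from the same layout change: the character data now lives inline in the String object, so a single lea replaces the old load-reference-then-index sequence. In C++ terms the address computation shrinks roughly as follows, where str is a hypothetical String pointer:

    // Old layout (sketch): chase the char[] reference, then index by the offset.
    //   uint16_t* data = &value_array->chars[str_offset];
    // New layout: characters start at a fixed offset inside the String itself.
    uint16_t* data = reinterpret_cast<uint16_t*>(
        reinterpret_cast<uint8_t*>(str) + MIRROR_STRING_VALUE_OFFSET);
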
diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h
index a2625e2..4991ad7 100644
--- a/runtime/art_field-inl.h
+++ b/runtime/art_field-inl.h
@@ -80,7 +80,7 @@
}
inline uint64_t ArtField::Get64(mirror::Object* object) {
- DCHECK(object != NULL) << PrettyField(this);
+ DCHECK(object != nullptr) << PrettyField(this);
DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
if (UNLIKELY(IsVolatile())) {
return object->GetField64Volatile(GetOffset());
@@ -90,7 +90,7 @@
template<bool kTransactionActive>
inline void ArtField::Set64(mirror::Object* object, uint64_t new_value) {
- DCHECK(object != NULL) << PrettyField(this);
+ DCHECK(object != nullptr) << PrettyField(this);
DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
if (UNLIKELY(IsVolatile())) {
object->SetField64Volatile<kTransactionActive>(GetOffset(), new_value);
@@ -100,7 +100,7 @@
}
inline mirror::Object* ArtField::GetObj(mirror::Object* object) {
- DCHECK(object != NULL) << PrettyField(this);
+ DCHECK(object != nullptr) << PrettyField(this);
DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
if (UNLIKELY(IsVolatile())) {
return object->GetFieldObjectVolatile<mirror::Object>(GetOffset());
@@ -110,7 +110,7 @@
template<bool kTransactionActive>
inline void ArtField::SetObj(mirror::Object* object, mirror::Object* new_value) {
- DCHECK(object != NULL) << PrettyField(this);
+ DCHECK(object != nullptr) << PrettyField(this);
DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
if (UNLIKELY(IsVolatile())) {
object->SetFieldObjectVolatile<kTransactionActive>(GetOffset(), new_value);
diff --git a/runtime/art_field.cc b/runtime/art_field.cc
index 2aed440..47d5a76 100644
--- a/runtime/art_field.cc
+++ b/runtime/art_field.cc
@@ -63,6 +63,17 @@
FindInstanceFieldWithOffset(klass->GetSuperClass(), field_offset) : nullptr;
}
+ArtField* ArtField::FindStaticFieldWithOffset(mirror::Class* klass, uint32_t field_offset) {
+ DCHECK(klass != nullptr);
+ auto* static_fields = klass->GetSFields();
+ for (size_t i = 0, count = klass->NumStaticFields(); i < count; ++i) {
+ if (static_fields[i].GetOffset().Uint32Value() == field_offset) {
+ return &static_fields[i];
+ }
+ }
+ return nullptr;
+}
+
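
A minimal sketch of how the new static-field lookup pairs with the existing instance-field one; the wrapper below is illustrative, not part of this change. Note that, unlike FindInstanceFieldWithOffset, the new helper searches only the given class and does not recurse into superclasses.

    // Hypothetical helper: map a raw field offset back to its ArtField,
    // trying instance fields (with superclass search) before statics.
    ArtField* FindFieldForOffset(mirror::Class* klass, uint32_t offset)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      ArtField* field = ArtField::FindInstanceFieldWithOffset(klass, offset);
      if (field == nullptr) {
        field = ArtField::FindStaticFieldWithOffset(klass, offset);
      }
      return field;  // null if no field in klass sits at this offset
    }
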
mirror::Class* ArtField::ProxyFindSystemClass(const char* descriptor) {
DCHECK(GetDeclaringClass()->IsProxyClass());
return Runtime::Current()->GetClassLinker()->FindSystemClass(Thread::Current(), descriptor);
diff --git a/runtime/art_field.h b/runtime/art_field.h
index 16c46f0..9d3dbd9 100644
--- a/runtime/art_field.h
+++ b/runtime/art_field.h
@@ -158,9 +158,12 @@
return (GetAccessFlags() & kAccVolatile) != 0;
}
- // Returns an instance field with this offset in the given class or nullptr if not found.
+ // Returns an instance field with this offset in the given class or null if not found.
static ArtField* FindInstanceFieldWithOffset(mirror::Class* klass, uint32_t field_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Returns a static field with this offset in the given class or null if not found.
+ static ArtField* FindStaticFieldWithOffset(mirror::Class* klass, uint32_t field_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const char* GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 8057dd1..3e677a4 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -89,7 +89,7 @@
art::Thread::ThinLockIdOffset<__SIZEOF_POINTER__>().Int32Value())
// Offset of field Thread::tlsPtr_.card_table.
-#define THREAD_CARD_TABLE_OFFSET 120
+#define THREAD_CARD_TABLE_OFFSET 128
ADD_TEST_EQ(THREAD_CARD_TABLE_OFFSET,
art::Thread::CardTableOffset<__SIZEOF_POINTER__>().Int32Value())
@@ -108,7 +108,7 @@
ADD_TEST_EQ(THREAD_SELF_OFFSET,
art::Thread::SelfOffset<__SIZEOF_POINTER__>().Int32Value())
-#define THREAD_LOCAL_POS_OFFSET (THREAD_CARD_TABLE_OFFSET + 126 * __SIZEOF_POINTER__)
+#define THREAD_LOCAL_POS_OFFSET (THREAD_CARD_TABLE_OFFSET + 146 * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_LOCAL_POS_OFFSET,
art::Thread::ThreadLocalPosOffset<__SIZEOF_POINTER__>().Int32Value())
#define THREAD_LOCAL_END_OFFSET (THREAD_LOCAL_POS_OFFSET + __SIZEOF_POINTER__)
@@ -124,7 +124,7 @@
#define MIRROR_OBJECT_LOCK_WORD_OFFSET 4
ADD_TEST_EQ(MIRROR_OBJECT_LOCK_WORD_OFFSET, art::mirror::Object::MonitorOffset().Int32Value())
-#if defined(USE_BAKER_OR_BROOKS_READ_BARRIER)
+#if defined(USE_BROOKS_READ_BARRIER)
#define MIRROR_OBJECT_HEADER_SIZE 16
#else
#define MIRROR_OBJECT_HEADER_SIZE 8
@@ -170,14 +170,11 @@
sizeof(art::mirror::HeapReference<art::mirror::Object>))
// Offsets within java.lang.String.
-#define MIRROR_STRING_VALUE_OFFSET MIRROR_OBJECT_HEADER_SIZE
-ADD_TEST_EQ(MIRROR_STRING_VALUE_OFFSET, art::mirror::String::ValueOffset().Int32Value())
-
-#define MIRROR_STRING_COUNT_OFFSET (4 + MIRROR_OBJECT_HEADER_SIZE)
+#define MIRROR_STRING_COUNT_OFFSET MIRROR_OBJECT_HEADER_SIZE
ADD_TEST_EQ(MIRROR_STRING_COUNT_OFFSET, art::mirror::String::CountOffset().Int32Value())
-#define MIRROR_STRING_OFFSET_OFFSET (12 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_STRING_OFFSET_OFFSET, art::mirror::String::OffsetOffset().Int32Value())
+#define MIRROR_STRING_VALUE_OFFSET (8 + MIRROR_OBJECT_HEADER_SIZE)
+ADD_TEST_EQ(MIRROR_STRING_VALUE_OFFSET, art::mirror::String::ValueOffset().Int32Value())
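
The reshuffled constants describe java.lang.String after its char[]/offset indirection was removed: the count moves to the front of the payload and the UTF-16 data begins inline 8 bytes in. The 4 bytes between them are presumably the cached hash code, which this hunk does not show. A sketch of the implied layout:

    // Sketch of the layout implied by the offsets above; not a copy of
    // mirror::String. Fields follow MIRROR_OBJECT_HEADER_SIZE bytes of header.
    struct StringPayload {
      int32_t count;      // +0: MIRROR_STRING_COUNT_OFFSET
      uint32_t hash;      // +4: assumed cached hashCode (not shown in this diff)
      uint16_t value[];   // +8: MIRROR_STRING_VALUE_OFFSET, inline UTF-16 data
    };
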
// Offsets within java.lang.reflect.ArtMethod.
#define MIRROR_ART_METHOD_DEX_CACHE_METHODS_OFFSET (4 + MIRROR_OBJECT_HEADER_SIZE)
diff --git a/runtime/barrier.cc b/runtime/barrier.cc
index 66ee870..f80a65f 100644
--- a/runtime/barrier.cc
+++ b/runtime/barrier.cc
@@ -86,7 +86,7 @@
}
Barrier::~Barrier() {
- CHECK(!count_) << "Attempted to destroy barrier with non zero count";
+ CHECK_EQ(count_, 0) << "Attempted to destroy barrier with non-zero count";
}
} // namespace art
diff --git a/runtime/base/bit_vector.cc b/runtime/base/bit_vector.cc
index 65cb028..39ce0d2 100644
--- a/runtime/base/bit_vector.cc
+++ b/runtime/base/bit_vector.cc
@@ -24,11 +24,6 @@
namespace art {
-// The number of words necessary to encode bits.
-static constexpr uint32_t BitsToWords(uint32_t bits) {
- return RoundUp(bits, 32) / 32;
-}
-
// TODO: replace excessive argument defaulting when we are at gcc 4.7
// or later on host with delegating constructor support. Specifically,
// starts_bits and storage_size/storage are mutually exclusive.
diff --git a/runtime/base/bit_vector.h b/runtime/base/bit_vector.h
index be4d363..6e4367a 100644
--- a/runtime/base/bit_vector.h
+++ b/runtime/base/bit_vector.h
@@ -20,6 +20,8 @@
#include <stdint.h>
#include <iterator>
+#include "utils.h"
+
namespace art {
class Allocator;
@@ -116,6 +118,11 @@
virtual ~BitVector();
+ // The number of words necessary to encode bits.
+ static constexpr uint32_t BitsToWords(uint32_t bits) {
+ return RoundUp(bits, kWordBits) / kWordBits;
+ }
+
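
Moving BitsToWords into the header also makes it usable in constant expressions. Assuming kWordBits is the 32-bit word size BitVector uses elsewhere, it behaves like this:

    // 100 bits round up to 128 = 4 * 32, so four storage words are needed.
    static_assert(BitVector::BitsToWords(100) == 4u, "4 words for 100 bits");
    static_assert(BitVector::BitsToWords(0) == 0u, "no storage for no bits");
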
// Mark the specified bit as "set".
void SetBit(uint32_t idx) {
/*
diff --git a/runtime/base/casts.h b/runtime/base/casts.h
index c7e39a2..f884649 100644
--- a/runtime/base/casts.h
+++ b/runtime/base/casts.h
@@ -18,9 +18,11 @@
#define ART_RUNTIME_BASE_CASTS_H_
#include <assert.h>
+#include <limits>
#include <string.h>
#include <type_traits>
+#include "base/logging.h"
#include "base/macros.h"
namespace art {
@@ -83,6 +85,23 @@
return dest;
}
+// A version of static_cast that DCHECKs that the value can be precisely represented
+// when converting to Dest.
+template <typename Dest, typename Source>
+inline Dest dchecked_integral_cast(const Source source) {
+ DCHECK(
+ // Check that the value is within the lower limit of Dest.
+ (static_cast<intmax_t>(std::numeric_limits<Dest>::min()) <=
+ static_cast<intmax_t>(std::numeric_limits<Source>::min()) ||
+ source >= static_cast<Source>(std::numeric_limits<Dest>::min())) &&
+ // Check that the value is within the upper limit of Dest.
+ (static_cast<uintmax_t>(std::numeric_limits<Dest>::max()) >=
+ static_cast<uintmax_t>(std::numeric_limits<Source>::max()) ||
+ source <= static_cast<Source>(std::numeric_limits<Dest>::max())));
+
+ return static_cast<Dest>(source);
+}
+
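
Usage reads like static_cast, but a narrowing conversion that would not preserve the value trips the DCHECK in debug builds; release builds compile down to a plain cast. For example:

    void Example() {
      int64_t small = 300;
      int32_t ok = dchecked_integral_cast<int32_t>(small);  // fits; DCHECK passes
      int8_t bad = dchecked_integral_cast<int8_t>(small);   // 300 > 127; DCHECK fires
      UNUSED(ok, bad);
    }
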
} // namespace art
#endif // ART_RUNTIME_BASE_CASTS_H_
diff --git a/runtime/base/hex_dump.cc b/runtime/base/hex_dump.cc
index 5423ff0..bce6b53 100644
--- a/runtime/base/hex_dump.cc
+++ b/runtime/base/hex_dump.cc
@@ -27,7 +27,7 @@
return;
}
- if (address_ == NULL) {
+ if (address_ == nullptr) {
os << "00000000:";
return;
}
diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc
index 0764b87..859de4b 100644
--- a/runtime/base/logging.cc
+++ b/runtime/base/logging.cc
@@ -91,7 +91,7 @@
gProgramInvocationShortName.reset(new std::string((last_slash != nullptr) ? last_slash + 1
: argv[0]));
} else {
- // TODO: fall back to /proc/self/cmdline when argv is NULL on Linux.
+ // TODO: fall back to /proc/self/cmdline when argv is null on Linux.
gCmdLine.reset(new std::string("<unset>"));
}
const char* tags = getenv("ANDROID_LOG_TAGS");
@@ -289,17 +289,17 @@
CHECK_EQ(strlen(log_characters), INTERNAL_FATAL + 1U);
const char* program_name = ProgramInvocationShortName();
- write(STDERR_FILENO, program_name, strlen(program_name));
- write(STDERR_FILENO, " ", 1);
- write(STDERR_FILENO, &log_characters[log_severity], 1);
- write(STDERR_FILENO, " ", 1);
+ TEMP_FAILURE_RETRY(write(STDERR_FILENO, program_name, strlen(program_name)));
+ TEMP_FAILURE_RETRY(write(STDERR_FILENO, " ", 1));
+ TEMP_FAILURE_RETRY(write(STDERR_FILENO, &log_characters[log_severity], 1));
+ TEMP_FAILURE_RETRY(write(STDERR_FILENO, " ", 1));
// TODO: pid and tid.
- write(STDERR_FILENO, file, strlen(file));
+ TEMP_FAILURE_RETRY(write(STDERR_FILENO, file, strlen(file)));
// TODO: line.
UNUSED(line);
- write(STDERR_FILENO, "] ", 2);
- write(STDERR_FILENO, message, strlen(message));
- write(STDERR_FILENO, "\n", 1);
+ TEMP_FAILURE_RETRY(write(STDERR_FILENO, "] ", 2));
+ TEMP_FAILURE_RETRY(write(STDERR_FILENO, message, strlen(message)));
+ TEMP_FAILURE_RETRY(write(STDERR_FILENO, "\n", 1));
#endif
}
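
Wrapping the write() calls matters here because this path runs during aborts, when signals are likely in flight; an EINTR partway through would otherwise silently drop part of the crash line. TEMP_FAILURE_RETRY is the usual bionic/glibc idiom and expands to roughly the following sketch, not the exact library definition:

    // Retry exp while it fails with EINTR; evaluates to the final result.
    #define TEMP_FAILURE_RETRY(exp)            \
      ({                                       \
        __typeof__(exp) _rc;                   \
        do {                                   \
          _rc = (exp);                         \
        } while (_rc == -1 && errno == EINTR); \
        _rc;                                   \
      })
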
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index 014f4ab..35b50d1 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -39,6 +39,7 @@
struct LogVerbosity {
bool class_linker; // Enabled with "-verbose:class".
bool compiler;
+ bool deopt;
bool gc;
bool heap;
bool jdwp;
@@ -72,7 +73,7 @@
// This can be used to reveal or conceal logs with specific tags.
extern void InitLogging(char* argv[]);
-// Returns the command line used to invoke the current tool or nullptr if InitLogging hasn't been
+// Returns the command line used to invoke the current tool or null if InitLogging hasn't been
// performed.
extern const char* GetCmdLine();
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index cb69817..a727992 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -39,13 +39,14 @@
namespace art {
#if ART_USE_FUTEXES
-static inline int futex(volatile int *uaddr, int op, int val, const struct timespec *timeout, volatile int *uaddr2, int val3) {
+static inline int futex(volatile int *uaddr, int op, int val, const struct timespec *timeout,
+ volatile int *uaddr2, int val3) {
return syscall(SYS_futex, uaddr, op, val, timeout, uaddr2, val3);
}
#endif // ART_USE_FUTEXES
static inline uint64_t SafeGetTid(const Thread* self) {
- if (self != NULL) {
+ if (self != nullptr) {
return static_cast<uint64_t>(self->GetTid());
} else {
return static_cast<uint64_t>(GetTid());
@@ -77,7 +78,7 @@
}
inline void BaseMutex::RegisterAsLocked(Thread* self) {
- if (UNLIKELY(self == NULL)) {
+ if (UNLIKELY(self == nullptr)) {
CheckUnattachedThread(level_);
return;
}
@@ -86,7 +87,7 @@
bool bad_mutexes_held = false;
for (int i = level_; i >= 0; --i) {
BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
- if (UNLIKELY(held_mutex != NULL)) {
+ if (UNLIKELY(held_mutex != nullptr)) {
LOG(ERROR) << "Lock level violation: holding \"" << held_mutex->name_ << "\" "
<< "(level " << LockLevel(i) << " - " << i
<< ") while locking \"" << name_ << "\" "
@@ -109,7 +110,7 @@
}
inline void BaseMutex::RegisterAsUnlocked(Thread* self) {
- if (UNLIKELY(self == NULL)) {
+ if (UNLIKELY(self == nullptr)) {
CheckUnattachedThread(level_);
return;
}
@@ -117,12 +118,12 @@
if (kDebugLocking && gAborting == 0) { // Avoid recursive aborts.
CHECK(self->GetHeldMutex(level_) == this) << "Unlocking on unacquired mutex: " << name_;
}
- self->SetHeldMutex(level_, NULL);
+ self->SetHeldMutex(level_, nullptr);
}
}
inline void ReaderWriterMutex::SharedLock(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
#if ART_USE_FUTEXES
bool done = false;
do {
@@ -143,7 +144,7 @@
}
inline void ReaderWriterMutex::SharedUnlock(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
DCHECK(exclusive_owner_ == 0U || exclusive_owner_ == -1U);
AssertSharedHeld(self);
RegisterAsUnlocked(self);
@@ -161,7 +162,7 @@
if (num_pending_writers_.LoadRelaxed() > 0 ||
num_pending_readers_.LoadRelaxed() > 0) {
// Wake any exclusive waiters as there are now no readers.
- futex(state_.Address(), FUTEX_WAKE, -1, NULL, NULL, 0);
+ futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
}
}
} else {
@@ -174,11 +175,11 @@
}
inline bool Mutex::IsExclusiveHeld(const Thread* self) const {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
if (kDebugLocking) {
// Sanity debug check that if we think it is locked we have it in our held mutexes.
- if (result && self != NULL && level_ != kMonitorLock && !gAborting) {
+ if (result && self != nullptr && level_ != kMonitorLock && !gAborting) {
CHECK_EQ(self->GetHeldMutex(level_), this);
}
}
@@ -190,11 +191,11 @@
}
inline bool ReaderWriterMutex::IsExclusiveHeld(const Thread* self) const {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
if (kDebugLocking) {
// Sanity check that if the pthread thinks we own the lock, the Thread agrees.
- if (self != NULL && result) {
+ if (self != nullptr && result) {
CHECK_EQ(self->GetHeldMutex(level_), this);
}
}
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 13dcb8c..99c7246 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -67,7 +67,7 @@
Atomic<const BaseMutex*> all_mutexes_guard;
// All created mutexes guarded by all_mutexes_guard_.
std::set<BaseMutex*>* all_mutexes;
- AllMutexData() : all_mutexes(NULL) {}
+ AllMutexData() : all_mutexes(nullptr) {}
};
static struct AllMutexData gAllMutexData[kAllMutexDataSize];
@@ -114,7 +114,7 @@
class ScopedContentionRecorder FINAL : public ValueObject {
public:
ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
- : mutex_(kLogLockContentions ? mutex : NULL),
+ : mutex_(kLogLockContentions ? mutex : nullptr),
blocked_tid_(kLogLockContentions ? blocked_tid : 0),
owner_tid_(kLogLockContentions ? owner_tid : 0),
start_nano_time_(kLogLockContentions ? NanoTime() : 0) {
@@ -144,7 +144,7 @@
if (kLogLockContentions) {
ScopedAllMutexesLock mu(this);
std::set<BaseMutex*>** all_mutexes_ptr = &gAllMutexData->all_mutexes;
- if (*all_mutexes_ptr == NULL) {
+ if (*all_mutexes_ptr == nullptr) {
// We leak the global set of all mutexes to avoid ordering issues in global variable
// construction/destruction.
*all_mutexes_ptr = new std::set<BaseMutex*>();
@@ -165,7 +165,7 @@
os << "Mutex logging:\n";
ScopedAllMutexesLock mu(reinterpret_cast<const BaseMutex*>(-1));
std::set<BaseMutex*>* all_mutexes = gAllMutexData->all_mutexes;
- if (all_mutexes == NULL) {
+ if (all_mutexes == nullptr) {
// No mutexes have been created yet at startup.
return;
}
@@ -190,7 +190,7 @@
}
void BaseMutex::CheckSafeToWait(Thread* self) {
- if (self == NULL) {
+ if (self == nullptr) {
CheckUnattachedThread(level_);
return;
}
@@ -202,7 +202,7 @@
if (i != level_) {
BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
// We expect waits to happen while holding the thread list suspend thread lock.
- if (held_mutex != NULL) {
+ if (held_mutex != nullptr) {
LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
<< "(level " << LockLevel(i) << ") while performing wait on "
<< "\"" << name_ << "\" (level " << level_ << ")";
@@ -354,7 +354,7 @@
}
void Mutex::ExclusiveLock(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
if (kDebugLocking && !recursive_) {
AssertNotHeld(self);
}
@@ -370,7 +370,7 @@
// Failed to acquire, hang up.
ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
num_contenders_++;
- if (futex(state_.Address(), FUTEX_WAIT, 1, NULL, NULL, 0) != 0) {
+ if (futex(state_.Address(), FUTEX_WAIT, 1, nullptr, nullptr, 0) != 0) {
// EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
// We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
if ((errno != EAGAIN) && (errno != EINTR)) {
@@ -397,7 +397,7 @@
}
bool Mutex::ExclusiveTryLock(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
if (kDebugLocking && !recursive_) {
AssertNotHeld(self);
}
@@ -474,7 +474,7 @@
if (LIKELY(done)) { // Spurious fail?
// Wake a contender.
if (UNLIKELY(num_contenders_.LoadRelaxed() > 0)) {
- futex(state_.Address(), FUTEX_WAKE, 1, NULL, NULL, 0);
+ futex(state_.Address(), FUTEX_WAKE, 1, nullptr, nullptr, 0);
}
}
} else {
@@ -537,14 +537,14 @@
// TODO: should we just not log at all if shutting down? this could be the logging mutex!
MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
Runtime* runtime = Runtime::Current();
- bool shutting_down = runtime == NULL || runtime->IsShuttingDownLocked();
+ bool shutting_down = runtime == nullptr || runtime->IsShuttingDownLocked();
PLOG(shutting_down ? WARNING : FATAL) << "pthread_rwlock_destroy failed for " << name_;
}
#endif
}
void ReaderWriterMutex::ExclusiveLock(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
AssertNotExclusiveHeld(self);
#if ART_USE_FUTEXES
bool done = false;
@@ -557,7 +557,7 @@
// Failed to acquire, hang up.
ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
++num_pending_writers_;
- if (futex(state_.Address(), FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
+ if (futex(state_.Address(), FUTEX_WAIT, cur_state, nullptr, nullptr, 0) != 0) {
// EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
// We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
if ((errno != EAGAIN) && (errno != EINTR)) {
@@ -578,7 +578,7 @@
}
void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
AssertExclusiveHeld(self);
RegisterAsUnlocked(self);
DCHECK_NE(exclusive_owner_, 0U);
@@ -598,7 +598,7 @@
// Wake any waiters.
if (UNLIKELY(num_pending_readers_.LoadRelaxed() > 0 ||
num_pending_writers_.LoadRelaxed() > 0)) {
- futex(state_.Address(), FUTEX_WAKE, -1, NULL, NULL, 0);
+ futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
}
}
} else {
@@ -613,7 +613,7 @@
#if HAVE_TIMED_RWLOCK
bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
#if ART_USE_FUTEXES
bool done = false;
timespec end_abs_ts;
@@ -633,7 +633,7 @@
}
ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
++num_pending_writers_;
- if (futex(state_.Address(), FUTEX_WAIT, cur_state, &rel_ts, NULL, 0) != 0) {
+ if (futex(state_.Address(), FUTEX_WAIT, cur_state, &rel_ts, nullptr, 0) != 0) {
if (errno == ETIMEDOUT) {
--num_pending_writers_;
return false; // Timed out.
@@ -671,7 +671,7 @@
// Owner holds it exclusively, hang up.
ScopedContentionRecorder scr(this, GetExclusiveOwnerTid(), SafeGetTid(self));
++num_pending_readers_;
- if (futex(state_.Address(), FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
+ if (futex(state_.Address(), FUTEX_WAIT, cur_state, nullptr, nullptr, 0) != 0) {
if (errno != EAGAIN) {
PLOG(FATAL) << "futex wait failed for " << name_;
}
@@ -681,7 +681,7 @@
#endif
bool ReaderWriterMutex::SharedTryLock(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
#if ART_USE_FUTEXES
bool done = false;
do {
@@ -710,9 +710,9 @@
}
bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
bool result;
- if (UNLIKELY(self == NULL)) { // Handle unattached threads.
+ if (UNLIKELY(self == nullptr)) { // Handle unattached threads.
result = IsExclusiveHeld(self); // TODO: a better best effort here.
} else {
result = (self->GetHeldMutex(level_) == this);
@@ -770,14 +770,14 @@
errno = rc;
MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
Runtime* runtime = Runtime::Current();
- bool shutting_down = (runtime == NULL) || runtime->IsShuttingDownLocked();
+ bool shutting_down = (runtime == nullptr) || runtime->IsShuttingDownLocked();
PLOG(shutting_down ? WARNING : FATAL) << "pthread_cond_destroy failed for " << name_;
}
#endif
}
void ConditionVariable::Broadcast(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
// TODO: enable below, there's a race in thread creation that causes false failures currently.
// guard_.AssertExclusiveHeld(self);
DCHECK_EQ(guard_.GetExclusiveOwnerTid(), SafeGetTid(self));
@@ -805,14 +805,14 @@
}
void ConditionVariable::Signal(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
guard_.AssertExclusiveHeld(self);
#if ART_USE_FUTEXES
if (num_waiters_ > 0) {
sequence_++; // Indicate a signal occurred.
// Futex wake 1 waiter who will then come in and contend on the mutex. It'd be nice to requeue them
// to avoid this, however, requeueing can only move all waiters.
- int num_woken = futex(sequence_.Address(), FUTEX_WAKE, 1, NULL, NULL, 0);
+ int num_woken = futex(sequence_.Address(), FUTEX_WAKE, 1, nullptr, nullptr, 0);
// Check something was woken or else we changed sequence_ before they had a chance to wait.
CHECK((num_woken == 0) || (num_woken == 1));
}
@@ -827,7 +827,7 @@
}
void ConditionVariable::WaitHoldingLocks(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
guard_.AssertExclusiveHeld(self);
unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
@@ -837,7 +837,7 @@
guard_.recursion_count_ = 1;
int32_t cur_sequence = sequence_.LoadRelaxed();
guard_.ExclusiveUnlock(self);
- if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, NULL, NULL, 0) != 0) {
+ if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, nullptr, nullptr, 0) != 0) {
// Futex failed, check it is an expected error.
// EAGAIN == EWOULDBLK, so we let the caller try again.
// EINTR implies a signal was sent to this thread.
@@ -862,7 +862,7 @@
}
bool ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
bool timed_out = false;
guard_.AssertExclusiveHeld(self);
guard_.CheckSafeToWait(self);
@@ -876,7 +876,7 @@
guard_.recursion_count_ = 1;
int32_t cur_sequence = sequence_.LoadRelaxed();
guard_.ExclusiveUnlock(self);
- if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, NULL, 0) != 0) {
+ if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, nullptr, 0) != 0) {
if (errno == ETIMEDOUT) {
// Timed out we're done.
timed_out = true;
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 6e4b96c..f2be85e 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -344,8 +344,8 @@
// Assert the current thread has shared access to the ReaderWriterMutex.
void AssertSharedHeld(const Thread* self) {
if (kDebugLocking && (gAborting == 0)) {
- // TODO: we can only assert this well when self != NULL.
- CHECK(IsSharedHeld(self) || self == NULL) << *this;
+ // TODO: we can only assert this well when self != null.
+ CHECK(IsSharedHeld(self) || self == nullptr) << *this;
}
}
void AssertReaderHeld(const Thread* self) { AssertSharedHeld(self); }
diff --git a/runtime/base/mutex_test.cc b/runtime/base/mutex_test.cc
index 289d3ef..3750c81 100644
--- a/runtime/base/mutex_test.cc
+++ b/runtime/base/mutex_test.cc
@@ -106,7 +106,7 @@
state->mu.Lock(Thread::Current());
state->cv.Signal(Thread::Current());
state->mu.Unlock(Thread::Current());
- return NULL;
+ return nullptr;
}
Mutex mu;
@@ -120,14 +120,15 @@
state.mu.Lock(Thread::Current());
pthread_t pthread;
- int pthread_create_result = pthread_create(&pthread, NULL, RecursiveLockWait::Callback, &state);
+ int pthread_create_result = pthread_create(&pthread, nullptr, RecursiveLockWait::Callback,
+ &state);
ASSERT_EQ(0, pthread_create_result);
state.cv.Wait(Thread::Current());
state.mu.Unlock(Thread::Current());
state.mu.Unlock(Thread::Current());
- EXPECT_EQ(pthread_join(pthread, NULL), 0);
+ EXPECT_EQ(pthread_join(pthread, nullptr), 0);
}
// This ensures we don't hang when waiting on a recursively locked mutex,
diff --git a/runtime/base/scoped_flock.cc b/runtime/base/scoped_flock.cc
index 0e93eee..71e0590 100644
--- a/runtime/base/scoped_flock.cc
+++ b/runtime/base/scoped_flock.cc
@@ -31,7 +31,7 @@
UNUSED(file_->FlushCloseOrErase()); // Ignore result.
}
file_.reset(OS::OpenFileWithFlags(filename, O_CREAT | O_RDWR));
- if (file_.get() == NULL) {
+ if (file_.get() == nullptr) {
*error_msg = StringPrintf("Failed to open file '%s': %s", filename, strerror(errno));
return false;
}
@@ -71,14 +71,15 @@
}
if (0 != TEMP_FAILURE_RETRY(flock(file_->Fd(), LOCK_EX))) {
file_.reset();
- *error_msg = StringPrintf("Failed to lock file '%s': %s", file->GetPath().c_str(), strerror(errno));
+ *error_msg = StringPrintf(
+ "Failed to lock file '%s': %s", file->GetPath().c_str(), strerror(errno));
return false;
}
return true;
}
File* ScopedFlock::GetFile() {
- CHECK(file_.get() != NULL);
+ CHECK(file_.get() != nullptr);
return file_.get();
}
@@ -89,7 +90,7 @@
ScopedFlock::ScopedFlock() { }
ScopedFlock::~ScopedFlock() {
- if (file_.get() != NULL) {
+ if (file_.get() != nullptr) {
int flock_result = TEMP_FAILURE_RETRY(flock(file_->Fd(), LOCK_UN));
CHECK_EQ(0, flock_result);
if (file_->FlushCloseOrErase() != 0) {
diff --git a/runtime/base/stl_util.h b/runtime/base/stl_util.h
index 3c5565c..901f25f 100644
--- a/runtime/base/stl_util.h
+++ b/runtime/base/stl_util.h
@@ -54,28 +54,30 @@
// hash_set, or any other STL container which defines sensible begin(), end(),
// and clear() methods.
//
-// If container is NULL, this function is a no-op.
+// If container is null, this function is a no-op.
//
// As an alternative to calling STLDeleteElements() directly, consider
// using a container of std::unique_ptr, which ensures that your container's
// elements are deleted when the container goes out of scope.
template <class T>
void STLDeleteElements(T *container) {
- if (!container) return;
- STLDeleteContainerPointers(container->begin(), container->end());
- container->clear();
+ if (container != nullptr) {
+ STLDeleteContainerPointers(container->begin(), container->end());
+ container->clear();
+ }
}
// Given an STL container consisting of (key, value) pairs, STLDeleteValues
// deletes all the "value" components and clears the container. Does nothing
-// in the case it's given a NULL pointer.
+// in the case it's given a null pointer.
template <class T>
void STLDeleteValues(T *v) {
- if (!v) return;
- for (typename T::iterator i = v->begin(); i != v->end(); ++i) {
- delete i->second;
+ if (v != nullptr) {
+ for (typename T::iterator i = v->begin(); i != v->end(); ++i) {
+ delete i->second;
+ }
+ v->clear();
}
- v->clear();
}
template <class T>
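
For reference, the two helpers in use; Foo and Bar are placeholder types and the snippet assumes <vector> and <map> are included:

    struct Foo {};
    struct Bar {};

    std::vector<Foo*> foos = {new Foo, new Foo};
    STLDeleteElements(&foos);   // deletes both Foo objects, then clears the vector

    std::map<int, Bar*> bars = {{1, new Bar}, {2, new Bar}};
    STLDeleteValues(&bars);     // deletes both Bar values, then clears the map
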
diff --git a/runtime/base/unix_file/fd_file.cc b/runtime/base/unix_file/fd_file.cc
index f272d88..07cadc4 100644
--- a/runtime/base/unix_file/fd_file.cc
+++ b/runtime/base/unix_file/fd_file.cc
@@ -107,7 +107,7 @@
}
int FdFile::Close() {
- int result = TEMP_FAILURE_RETRY(close(fd_));
+ int result = close(fd_);
// Test here, so the file is closed and not leaked.
if (kCheckSafeUsage) {
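
Dropping TEMP_FAILURE_RETRY around close() is deliberate, not an oversight: on Linux the descriptor is released even when close() fails with EINTR, so retrying can close an unrelated file that another thread has meanwhile opened under the same fd number. A sketch of the hazard the old code risked:

    // Hazard sketch: why close() must not be retried on EINTR (Linux).
    int rc = close(fd);   // fails with EINTR, but fd is already released
    // ...another thread opens a file; the kernel may hand out the same fd...
    close(fd);            // a retry would now close the other thread's file
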
diff --git a/runtime/base/variant_map.h b/runtime/base/variant_map.h
index 8655a9e..1d7596a 100644
--- a/runtime/base/variant_map.h
+++ b/runtime/base/variant_map.h
@@ -31,7 +31,7 @@
//
// struct VariantMap {
// template <typename TValue>
-// TValue* Get(Key<T> key); // nullptr if the value was never set, otherwise the value.
+// TValue* Get(Key<T> key); // null if the value was never set, otherwise the value.
//
// template <typename TValue>
// void Set(Key<T> key, TValue value);
diff --git a/runtime/base/variant_map_test.cc b/runtime/base/variant_map_test.cc
index f306a48..ccb22eb 100644
--- a/runtime/base/variant_map_test.cc
+++ b/runtime/base/variant_map_test.cc
@@ -18,7 +18,7 @@
#include "gtest/gtest.h"
#define EXPECT_NULL(expected) EXPECT_EQ(reinterpret_cast<const void*>(expected), \
- reinterpret_cast<void*>(NULL));
+ static_cast<void*>(nullptr));
namespace art {
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index c6940d3..30084d2 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -183,7 +183,7 @@
}
/*
- * Verify that the pointer value is non-NULL.
+ * Verify that the pointer value is non-null.
*/
bool CheckNonNull(const void* ptr) {
if (UNLIKELY(ptr == nullptr)) {
@@ -612,7 +612,7 @@
};
/*
- * Verify that "jobj" is a valid non-NULL object reference, and points to
+ * Verify that "jobj" is a valid non-null object reference, and points to
* an instance of expectedClass.
*
* Because we're looking at an object on the GC heap, we have to switch
@@ -941,7 +941,7 @@
}
}
/*
- * Verify that "array" is non-NULL and points to an Array object.
+ * Verify that "array" is non-null and points to an Array object.
*
* Since we're dealing with objects, switch to "running" mode.
*/
@@ -1277,7 +1277,7 @@
* Verify the guard area and, if "modOkay" is false, that the data itself
* has not been altered.
*
- * The caller has already checked that "dataBuf" is non-NULL.
+ * The caller has already checked that "dataBuf" is non-null.
*/
static bool Check(const char* function_name, const void* embedded_buf, bool mod_okay) {
const GuardedCopy* copy = FromEmbedded(embedded_buf);
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index 5d9cd35..d87a563 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -29,7 +29,7 @@
class CheckReferenceMapVisitor : public StackVisitor {
public:
explicit CheckReferenceMapVisitor(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, nullptr) {}
+ : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* m = GetMethod();
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 87d1c4c..1428749 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -60,7 +60,7 @@
mirror::ArtMethod* referrer) {
mirror::Class* declaring_class = referrer->GetDeclaringClass();
mirror::String* resolved_string = declaring_class->GetDexCacheStrings()->Get(string_idx);
- if (UNLIKELY(resolved_string == NULL)) {
+ if (UNLIKELY(resolved_string == nullptr)) {
StackHandleScope<1> hs(Thread::Current());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
const DexFile& dex_file = *dex_cache->GetDexFile();
@@ -92,7 +92,7 @@
mirror::Class* declaring_class = referrer->GetDeclaringClass();
mirror::DexCache* dex_cache_ptr = declaring_class->GetDexCache();
mirror::Class* resolved_type = dex_cache_ptr->GetResolvedType(type_idx);
- if (UNLIKELY(resolved_type == NULL)) {
+ if (UNLIKELY(resolved_type == nullptr)) {
StackHandleScope<2> hs(Thread::Current());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(dex_cache_ptr));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
@@ -146,7 +146,7 @@
bool is_static) {
mirror::Class* declaring_class = referrer->GetDeclaringClass();
ArtField* resolved_field = GetResolvedField(field_idx, declaring_class);
- if (UNLIKELY(resolved_field == NULL)) {
+ if (UNLIKELY(resolved_field == nullptr)) {
StackHandleScope<2> hs(Thread::Current());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
@@ -196,7 +196,7 @@
DCHECK(!class_roots_.IsNull());
mirror::ObjectArray<mirror::Class>* class_roots = class_roots_.Read();
mirror::Class* klass = class_roots->Get(class_root);
- DCHECK(klass != NULL);
+ DCHECK(klass != nullptr);
return klass;
}
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 4e59217..292f830 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -31,6 +31,7 @@
#include "base/scoped_flock.h"
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
+#include "base/value_object.h"
#include "class_linker-inl.h"
#include "compiler_callbacks.h"
#include "debugger.h"
@@ -59,6 +60,7 @@
#include "mirror/dex_cache-inl.h"
#include "mirror/field.h"
#include "mirror/iftable-inl.h"
+#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/proxy.h"
@@ -91,8 +93,28 @@
va_end(args);
}
-static void ThrowEarlierClassFailure(mirror::Class* c)
+static bool HasInitWithString(Thread* self, ClassLinker* class_linker, const char* descriptor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* method = self->GetCurrentMethod(nullptr);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(method != nullptr ?
+ method->GetDeclaringClass()->GetClassLoader()
+ : nullptr));
+ mirror::Class* exception_class = class_linker->FindClass(self, descriptor, class_loader);
+
+ if (exception_class == nullptr) {
+ // No exception class, hence no <init>-with-string.
+ CHECK(self->IsExceptionPending());
+ self->ClearException();
+ return false;
+ }
+
+ mirror::ArtMethod* exception_init_method =
+ exception_class->FindDeclaredDirectMethod("<init>", "(Ljava/lang/String;)V");
+ return exception_init_method != nullptr;
+}
+
+void ClassLinker::ThrowEarlierClassFailure(mirror::Class* c) {
// The class failed to initialize on a previous attempt, so we want to throw
// a NoClassDefFoundError (v2 2.17.5). The exception to this rule is if we
// failed in verification, in which case v2 5.4.1 says we need to re-throw
@@ -109,11 +131,17 @@
mirror::Throwable* pre_allocated = runtime->GetPreAllocatedNoClassDefFoundError();
self->SetException(pre_allocated);
} else {
- if (c->GetVerifyErrorClass() != NULL) {
+ if (c->GetVerifyErrorClass() != nullptr) {
// TODO: change the verifier to store an _instance_, with a useful detail message?
+ // It's possible the exception doesn't have a <init>(String).
std::string temp;
- self->ThrowNewException(c->GetVerifyErrorClass()->GetDescriptor(&temp),
- PrettyDescriptor(c).c_str());
+ const char* descriptor = c->GetVerifyErrorClass()->GetDescriptor(&temp);
+
+ if (HasInitWithString(self, this, descriptor)) {
+ self->ThrowNewException(descriptor, PrettyDescriptor(c).c_str());
+ } else {
+ self->ThrowNewException(descriptor, nullptr);
+ }
} else {
self->ThrowNewException("Ljava/lang/NoClassDefFoundError;",
PrettyDescriptor(c).c_str());
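
For readers skimming the hunk above: the new path only attaches a detail message when the recorded verify-error class actually declares <init>(Ljava/lang/String;)V. A self-contained sketch of that branch (the types and ThrowNew stub below are illustrative stand-ins, not ART APIs):

#include <cstdio>

// Illustrative stand-ins for mirror::Class and Thread::ThrowNewException.
struct ExceptionClass {
  const char* descriptor;
  bool has_string_init;  // Does the class declare <init>(Ljava/lang/String;)V?
};

static void ThrowNew(const ExceptionClass& cls, const char* msg) {
  std::printf("throw %s: %s\n", cls.descriptor, msg != nullptr ? msg : "(no message)");
}

// Mirrors the branch in ThrowEarlierClassFailure: keep the detail message
// only when the recorded error class can accept one.
static void RethrowRecordedFailure(const ExceptionClass& cls, const char* detail) {
  if (cls.has_string_init) {
    ThrowNew(cls, detail);
  } else {
    ThrowNew(cls, nullptr);
  }
}
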
@@ -258,8 +286,8 @@
CHECK(!init_done_);
// java_lang_Class comes first, it's needed for AllocClass
- Thread* self = Thread::Current();
- gc::Heap* heap = Runtime::Current()->GetHeap();
+ Thread* const self = Thread::Current();
+ gc::Heap* const heap = Runtime::Current()->GetHeap();
// The GC can't handle an object with a null class since we can't get the size of this object.
heap->IncrementDisableMovingGC(self);
StackHandleScope<64> hs(self); // 64 is picked arbitrarily.
@@ -317,8 +345,8 @@
Handle<mirror::Class> java_lang_String(hs.NewHandle(
AllocClass(self, java_lang_Class.Get(), mirror::String::ClassSize())));
mirror::String::SetClass(java_lang_String.Get());
- java_lang_String->SetObjectSize(mirror::String::InstanceSize());
mirror::Class::SetStatus(java_lang_String, mirror::Class::kStatusResolved, self);
+ java_lang_String->SetStringClass();
// Setup java.lang.ref.Reference.
Handle<mirror::Class> java_lang_ref_Reference(hs.NewHandle(
@@ -436,20 +464,18 @@
// Object, String and DexCache need to be rerun through FindSystemClass to finish init
mirror::Class::SetStatus(java_lang_Object, mirror::Class::kStatusNotReady, self);
- mirror::Class* Object_class = FindSystemClass(self, "Ljava/lang/Object;");
- CHECK_EQ(java_lang_Object.Get(), Object_class);
+ CHECK_EQ(java_lang_Object.Get(), FindSystemClass(self, "Ljava/lang/Object;"));
CHECK_EQ(java_lang_Object->GetObjectSize(), mirror::Object::InstanceSize());
mirror::Class::SetStatus(java_lang_String, mirror::Class::kStatusNotReady, self);
mirror::Class* String_class = FindSystemClass(self, "Ljava/lang/String;");
- std::ostringstream os1, os2;
- java_lang_String->DumpClass(os1, mirror::Class::kDumpClassFullDetail);
- String_class->DumpClass(os2, mirror::Class::kDumpClassFullDetail);
- CHECK_EQ(java_lang_String.Get(), String_class) << os1.str() << "\n\n" << os2.str();
- CHECK_EQ(java_lang_String->GetObjectSize(), mirror::String::InstanceSize());
+ if (java_lang_String.Get() != String_class) {
+ std::ostringstream os1, os2;
+ java_lang_String->DumpClass(os1, mirror::Class::kDumpClassFullDetail);
+ String_class->DumpClass(os2, mirror::Class::kDumpClassFullDetail);
+ LOG(FATAL) << os1.str() << "\n\n" << os2.str();
+ }
mirror::Class::SetStatus(java_lang_DexCache, mirror::Class::kStatusNotReady, self);
- mirror::Class* DexCache_class = FindSystemClass(self, "Ljava/lang/DexCache;");
- CHECK_EQ(java_lang_String.Get(), String_class);
- CHECK_EQ(java_lang_DexCache.Get(), DexCache_class);
+ CHECK_EQ(java_lang_DexCache.Get(), FindSystemClass(self, "Ljava/lang/DexCache;"));
CHECK_EQ(java_lang_DexCache->GetObjectSize(), mirror::DexCache::InstanceSize());
// Setup the primitive array type classes - can't be done until Object has a vtable.
@@ -459,17 +485,14 @@
SetClassRoot(kByteArrayClass, FindSystemClass(self, "[B"));
mirror::ByteArray::SetArrayClass(GetClassRoot(kByteArrayClass));
- mirror::Class* found_char_array_class = FindSystemClass(self, "[C");
- CHECK_EQ(char_array_class.Get(), found_char_array_class);
+ CHECK_EQ(char_array_class.Get(), FindSystemClass(self, "[C"));
SetClassRoot(kShortArrayClass, FindSystemClass(self, "[S"));
mirror::ShortArray::SetArrayClass(GetClassRoot(kShortArrayClass));
- mirror::Class* found_int_array_class = FindSystemClass(self, "[I");
- CHECK_EQ(int_array_class.Get(), found_int_array_class);
+ CHECK_EQ(int_array_class.Get(), FindSystemClass(self, "[I"));
- mirror::Class* found_long_array_class = FindSystemClass(self, "[J");
- CHECK_EQ(long_array_class.Get(), found_long_array_class);
+ CHECK_EQ(long_array_class.Get(), FindSystemClass(self, "[J"));
SetClassRoot(kFloatArrayClass, FindSystemClass(self, "[F"));
mirror::FloatArray::SetArrayClass(GetClassRoot(kFloatArrayClass));
@@ -477,97 +500,101 @@
SetClassRoot(kDoubleArrayClass, FindSystemClass(self, "[D"));
mirror::DoubleArray::SetArrayClass(GetClassRoot(kDoubleArrayClass));
- mirror::Class* found_class_array_class = FindSystemClass(self, "[Ljava/lang/Class;");
- CHECK_EQ(class_array_class.Get(), found_class_array_class);
+ CHECK_EQ(class_array_class.Get(), FindSystemClass(self, "[Ljava/lang/Class;"));
- mirror::Class* found_object_array_class = FindSystemClass(self, "[Ljava/lang/Object;");
- CHECK_EQ(object_array_class.Get(), found_object_array_class);
+ CHECK_EQ(object_array_class.Get(), FindSystemClass(self, "[Ljava/lang/Object;"));
// Setup the single, global copy of "iftable".
- mirror::Class* java_lang_Cloneable = FindSystemClass(self, "Ljava/lang/Cloneable;");
- CHECK(java_lang_Cloneable != nullptr);
- mirror::Class* java_io_Serializable = FindSystemClass(self, "Ljava/io/Serializable;");
- CHECK(java_io_Serializable != nullptr);
+ auto java_lang_Cloneable = hs.NewHandle(FindSystemClass(self, "Ljava/lang/Cloneable;"));
+ CHECK(java_lang_Cloneable.Get() != nullptr);
+ auto java_io_Serializable = hs.NewHandle(FindSystemClass(self, "Ljava/io/Serializable;"));
+ CHECK(java_io_Serializable.Get() != nullptr);
// We assume that Cloneable/Serializable don't have superinterfaces -- normally we'd have to
// crawl up and explicitly list all of the supers as well.
- {
- mirror::IfTable* array_iftable = array_iftable_.Read();
- array_iftable->SetInterface(0, java_lang_Cloneable);
- array_iftable->SetInterface(1, java_io_Serializable);
- }
+ array_iftable_.Read()->SetInterface(0, java_lang_Cloneable.Get());
+ array_iftable_.Read()->SetInterface(1, java_io_Serializable.Get());
- // Sanity check Class[] and Object[]'s interfaces.
- CHECK_EQ(java_lang_Cloneable, mirror::Class::GetDirectInterface(self, class_array_class, 0));
- CHECK_EQ(java_io_Serializable, mirror::Class::GetDirectInterface(self, class_array_class, 1));
- CHECK_EQ(java_lang_Cloneable, mirror::Class::GetDirectInterface(self, object_array_class, 0));
- CHECK_EQ(java_io_Serializable, mirror::Class::GetDirectInterface(self, object_array_class, 1));
+ // Sanity check Class[] and Object[]'s interfaces. GetDirectInterface may cause thread
+ // suspension.
+ CHECK_EQ(java_lang_Cloneable.Get(),
+ mirror::Class::GetDirectInterface(self, class_array_class, 0));
+ CHECK_EQ(java_io_Serializable.Get(),
+ mirror::Class::GetDirectInterface(self, class_array_class, 1));
+ CHECK_EQ(java_lang_Cloneable.Get(),
+ mirror::Class::GetDirectInterface(self, object_array_class, 0));
+ CHECK_EQ(java_io_Serializable.Get(),
+ mirror::Class::GetDirectInterface(self, object_array_class, 1));
// Run Class, ArtField, and ArtMethod through FindSystemClass. This initializes their
// dex_cache_ fields and register them in class_table_.
- mirror::Class* Class_class = FindSystemClass(self, "Ljava/lang/Class;");
- CHECK_EQ(java_lang_Class.Get(), Class_class);
+ CHECK_EQ(java_lang_Class.Get(), FindSystemClass(self, "Ljava/lang/Class;"));
mirror::Class::SetStatus(java_lang_reflect_ArtMethod, mirror::Class::kStatusNotReady, self);
- mirror::Class* Art_method_class = FindSystemClass(self, "Ljava/lang/reflect/ArtMethod;");
- CHECK_EQ(java_lang_reflect_ArtMethod.Get(), Art_method_class);
-
- mirror::Class* String_array_class =
- FindSystemClass(self, GetClassRootDescriptor(kJavaLangStringArrayClass));
- CHECK_EQ(object_array_string.Get(), String_array_class);
-
- mirror::Class* Art_method_array_class =
- FindSystemClass(self, GetClassRootDescriptor(kJavaLangReflectArtMethodArrayClass));
- CHECK_EQ(object_array_art_method.Get(), Art_method_array_class);
+ CHECK_EQ(java_lang_reflect_ArtMethod.Get(),
+ FindSystemClass(self, "Ljava/lang/reflect/ArtMethod;"));
+ CHECK_EQ(object_array_string.Get(),
+ FindSystemClass(self, GetClassRootDescriptor(kJavaLangStringArrayClass)));
+ CHECK_EQ(object_array_art_method.Get(),
+ FindSystemClass(self, GetClassRootDescriptor(kJavaLangReflectArtMethodArrayClass)));
// End of special init trickery, subsequent classes may be loaded via FindSystemClass.
// Create java.lang.reflect.Proxy root.
- mirror::Class* java_lang_reflect_Proxy = FindSystemClass(self, "Ljava/lang/reflect/Proxy;");
- SetClassRoot(kJavaLangReflectProxy, java_lang_reflect_Proxy);
+ SetClassRoot(kJavaLangReflectProxy, FindSystemClass(self, "Ljava/lang/reflect/Proxy;"));
// Create java.lang.reflect.Field.class root.
- mirror::Class* java_lang_reflect_Field = FindSystemClass(self, "Ljava/lang/reflect/Field;");
- CHECK(java_lang_reflect_Field != nullptr);
- SetClassRoot(kJavaLangReflectField, java_lang_reflect_Field);
- mirror::Field::SetClass(java_lang_reflect_Field);
+ auto* class_root = FindSystemClass(self, "Ljava/lang/reflect/Field;");
+ CHECK(class_root != nullptr);
+ SetClassRoot(kJavaLangReflectField, class_root);
+ mirror::Field::SetClass(class_root);
// Create java.lang.reflect.Field array root.
- mirror::Class* java_lang_reflect_Field_array =
- FindSystemClass(self, "[Ljava/lang/reflect/Field;");
- CHECK(java_lang_reflect_Field_array != nullptr);
- SetClassRoot(kJavaLangReflectFieldArrayClass, java_lang_reflect_Field_array);
- mirror::Field::SetArrayClass(java_lang_reflect_Field_array);
+ class_root = FindSystemClass(self, "[Ljava/lang/reflect/Field;");
+ CHECK(class_root != nullptr);
+ SetClassRoot(kJavaLangReflectFieldArrayClass, class_root);
+ mirror::Field::SetArrayClass(class_root);
+
+ // Create java.lang.reflect.Constructor.class root and array root.
+ class_root = FindSystemClass(self, "Ljava/lang/reflect/Constructor;");
+ CHECK(class_root != nullptr);
+ SetClassRoot(kJavaLangReflectConstructor, class_root);
+ mirror::Constructor::SetClass(class_root);
+ class_root = FindSystemClass(self, "[Ljava/lang/reflect/Constructor;");
+ CHECK(class_root != nullptr);
+ SetClassRoot(kJavaLangReflectConstructorArrayClass, class_root);
+ mirror::Constructor::SetArrayClass(class_root);
+
+ // Create java.lang.reflect.Method.class root and array root.
+ class_root = FindSystemClass(self, "Ljava/lang/reflect/Method;");
+ CHECK(class_root != nullptr);
+ SetClassRoot(kJavaLangReflectMethod, class_root);
+ mirror::Method::SetClass(class_root);
+ class_root = FindSystemClass(self, "[Ljava/lang/reflect/Method;");
+ CHECK(class_root != nullptr);
+ SetClassRoot(kJavaLangReflectMethodArrayClass, class_root);
+ mirror::Method::SetArrayClass(class_root);
// java.lang.ref classes need to be specially flagged, but otherwise are normal classes
// finish initializing Reference class
mirror::Class::SetStatus(java_lang_ref_Reference, mirror::Class::kStatusNotReady, self);
- mirror::Class* Reference_class = FindSystemClass(self, "Ljava/lang/ref/Reference;");
- CHECK_EQ(java_lang_ref_Reference.Get(), Reference_class);
+ CHECK_EQ(java_lang_ref_Reference.Get(), FindSystemClass(self, "Ljava/lang/ref/Reference;"));
CHECK_EQ(java_lang_ref_Reference->GetObjectSize(), mirror::Reference::InstanceSize());
CHECK_EQ(java_lang_ref_Reference->GetClassSize(), mirror::Reference::ClassSize());
- mirror::Class* java_lang_ref_FinalizerReference =
- FindSystemClass(self, "Ljava/lang/ref/FinalizerReference;");
- java_lang_ref_FinalizerReference->SetAccessFlags(
- java_lang_ref_FinalizerReference->GetAccessFlags() |
- kAccClassIsReference | kAccClassIsFinalizerReference);
- mirror::Class* java_lang_ref_PhantomReference =
- FindSystemClass(self, "Ljava/lang/ref/PhantomReference;");
- java_lang_ref_PhantomReference->SetAccessFlags(
- java_lang_ref_PhantomReference->GetAccessFlags() |
- kAccClassIsReference | kAccClassIsPhantomReference);
- mirror::Class* java_lang_ref_SoftReference =
- FindSystemClass(self, "Ljava/lang/ref/SoftReference;");
- java_lang_ref_SoftReference->SetAccessFlags(
- java_lang_ref_SoftReference->GetAccessFlags() | kAccClassIsReference);
- mirror::Class* java_lang_ref_WeakReference =
- FindSystemClass(self, "Ljava/lang/ref/WeakReference;");
- java_lang_ref_WeakReference->SetAccessFlags(
- java_lang_ref_WeakReference->GetAccessFlags() |
- kAccClassIsReference | kAccClassIsWeakReference);
+ class_root = FindSystemClass(self, "Ljava/lang/ref/FinalizerReference;");
+ class_root->SetAccessFlags(class_root->GetAccessFlags() |
+ kAccClassIsReference | kAccClassIsFinalizerReference);
+ class_root = FindSystemClass(self, "Ljava/lang/ref/PhantomReference;");
+ class_root->SetAccessFlags(class_root->GetAccessFlags() | kAccClassIsReference |
+ kAccClassIsPhantomReference);
+ class_root = FindSystemClass(self, "Ljava/lang/ref/SoftReference;");
+ class_root->SetAccessFlags(class_root->GetAccessFlags() | kAccClassIsReference);
+ class_root = FindSystemClass(self, "Ljava/lang/ref/WeakReference;");
+ class_root->SetAccessFlags(class_root->GetAccessFlags() | kAccClassIsReference |
+ kAccClassIsWeakReference);
// Setup the ClassLoader, verifying the object_size_.
- mirror::Class* java_lang_ClassLoader = FindSystemClass(self, "Ljava/lang/ClassLoader;");
- CHECK_EQ(java_lang_ClassLoader->GetObjectSize(), mirror::ClassLoader::InstanceSize());
- SetClassRoot(kJavaLangClassLoader, java_lang_ClassLoader);
+ class_root = FindSystemClass(self, "Ljava/lang/ClassLoader;");
+ CHECK_EQ(class_root->GetObjectSize(), mirror::ClassLoader::InstanceSize());
+ SetClassRoot(kJavaLangClassLoader, class_root);
// Set up java.lang.Throwable, java.lang.ClassNotFoundException, and
// java.lang.StackTraceElement as a convenience.
@@ -685,6 +712,208 @@
return *oat_file;
}
+class DexFileAndClassPair : ValueObject {
+ public:
+ DexFileAndClassPair(const DexFile* dex_file, size_t current_class_index, bool from_loaded_oat)
+ : cached_descriptor_(GetClassDescriptor(dex_file, current_class_index)),
+ dex_file_(dex_file),
+ current_class_index_(current_class_index),
+ from_loaded_oat_(from_loaded_oat) {}
+
+ DexFileAndClassPair(const DexFileAndClassPair&) = default;
+
+ DexFileAndClassPair& operator=(const DexFileAndClassPair& rhs) {
+ cached_descriptor_ = rhs.cached_descriptor_;
+ dex_file_ = rhs.dex_file_;
+ current_class_index_ = rhs.current_class_index_;
+ from_loaded_oat_ = rhs.from_loaded_oat_;
+ return *this;
+ }
+
+ const char* GetCachedDescriptor() const {
+ return cached_descriptor_;
+ }
+
+ bool operator<(const DexFileAndClassPair& rhs) const {
+ const char* lhsDescriptor = cached_descriptor_;
+ const char* rhsDescriptor = rhs.cached_descriptor_;
+ int cmp = strcmp(lhsDescriptor, rhsDescriptor);
+ if (cmp != 0) {
+ // Note that the order must be reversed: classes within a dex file are sorted
+ // lexicographically by descriptor, and we want to visit them in that order, so
+ // the (max-)priority-queue must be turned into a min-queue.
+ return cmp > 0;
+ }
+ return dex_file_ < rhs.dex_file_;
+ }
+
+ bool DexFileHasMoreClasses() const {
+ return current_class_index_ + 1 < dex_file_->NumClassDefs();
+ }
+
+ DexFileAndClassPair GetNext() const {
+ return DexFileAndClassPair(dex_file_, current_class_index_ + 1, from_loaded_oat_);
+ }
+
+ size_t GetCurrentClassIndex() const {
+ return current_class_index_;
+ }
+
+ bool FromLoadedOat() const {
+ return from_loaded_oat_;
+ }
+
+ const DexFile* GetDexFile() const {
+ return dex_file_;
+ }
+
+ void DeleteDexFile() {
+ delete dex_file_;
+ dex_file_ = nullptr;
+ }
+
+ private:
+ static const char* GetClassDescriptor(const DexFile* dex_file, size_t index) {
+ const DexFile::ClassDef& class_def = dex_file->GetClassDef(static_cast<uint16_t>(index));
+ return dex_file->StringByTypeIdx(class_def.class_idx_);
+ }
+
+ const char* cached_descriptor_;
+ const DexFile* dex_file_;
+ size_t current_class_index_;
+ bool from_loaded_oat_; // We only need to report mismatches between what we load now
+ // and what was loaded before. Any old duplicates must have been
+ // OK, and any new "internal" duplicates are as well (they must
+ // be from multidex, which resolves correctly).
+};
+
+static void AddDexFilesFromOat(const OatFile* oat_file, bool already_loaded,
+ std::priority_queue<DexFileAndClassPair>* heap) {
+ const std::vector<const OatDexFile*>& oat_dex_files = oat_file->GetOatDexFiles();
+ for (const OatDexFile* oat_dex_file : oat_dex_files) {
+ std::string error;
+ std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(&error);
+ if (dex_file.get() == nullptr) {
+ LOG(WARNING) << "Could not create dex file from oat file: " << error;
+ } else {
+ if (dex_file->NumClassDefs() > 0U) {
+ heap->emplace(dex_file.release(), 0U, already_loaded);
+ }
+ }
+ }
+}
+
+static void AddNext(DexFileAndClassPair* original,
+ std::priority_queue<DexFileAndClassPair>* heap) {
+ if (original->DexFileHasMoreClasses()) {
+ heap->push(original->GetNext());
+ } else {
+ // Need to delete the dex file.
+ original->DeleteDexFile();
+ }
+}
+
+static void FreeDexFilesInHeap(std::priority_queue<DexFileAndClassPair>* heap) {
+ while (!heap->empty()) {
+ delete heap->top().GetDexFile();
+ heap->pop();
+ }
+}
+
+const OatFile* ClassLinker::GetBootOatFile() {
+ // To grab the boot oat, look at the dex files in the boot classpath. Any of those is fine, as
+ // they were all compiled into the same oat file. So grab the first one, which is guaranteed to
+ // exist if the boot class-path isn't empty.
+ if (boot_class_path_.empty()) {
+ return nullptr;
+ }
+ const DexFile* boot_dex_file = boot_class_path_[0];
+ // Is it from an oat file?
+ if (boot_dex_file->GetOatDexFile() != nullptr) {
+ return boot_dex_file->GetOatDexFile()->GetOatFile();
+ }
+ return nullptr;
+}
+
+const OatFile* ClassLinker::GetPrimaryOatFile() {
+ ReaderMutexLock mu(Thread::Current(), dex_lock_);
+ const OatFile* boot_oat_file = GetBootOatFile();
+ if (boot_oat_file != nullptr) {
+ for (const OatFile* oat_file : oat_files_) {
+ if (oat_file != boot_oat_file) {
+ return oat_file;
+ }
+ }
+ }
+ return nullptr;
+}
+
+// Check for class-def collisions in dex files.
+//
+// This works by maintaining a heap with one class from each dex file, sorted by the class
+// descriptor. A dex-file/class pair is repeatedly removed from the heap and compared against
+// the next top element. If the descriptors match, it is checked whether the two elements agree
+// on whether their dex file came from an already-loaded oat file or from the new oat file. Any
+// disagreement indicates a collision.
+bool ClassLinker::HasCollisions(const OatFile* oat_file, std::string* error_msg) {
+ // Dex files are registered late - once a class is actually being loaded. We have to compare
+ // against the open oat files. Take the dex_lock_ that protects oat_files_ accesses.
+ ReaderMutexLock mu(Thread::Current(), dex_lock_);
+
+ std::priority_queue<DexFileAndClassPair> queue;
+
+ // Add dex files from already loaded oat files, but skip boot.
+ {
+ const OatFile* boot_oat = GetBootOatFile();
+ for (const OatFile* loaded_oat_file : oat_files_) {
+ if (loaded_oat_file == boot_oat) {
+ continue;
+ }
+ AddDexFilesFromOat(loaded_oat_file, true, &queue);
+ }
+ }
+
+ if (queue.empty()) {
+ // No other oat files, return early.
+ return false;
+ }
+
+ // Add dex files from the oat file to check.
+ AddDexFilesFromOat(oat_file, false, &queue);
+
+ // Now drain the queue.
+ while (!queue.empty()) {
+ DexFileAndClassPair compare_pop = queue.top();
+ queue.pop();
+
+ // Compare against the following elements.
+ while (!queue.empty()) {
+ DexFileAndClassPair top = queue.top();
+
+ if (strcmp(compare_pop.GetCachedDescriptor(), top.GetCachedDescriptor()) == 0) {
+ // Same descriptor. Check whether the match crosses the old/new oat-file boundary.
+ if (compare_pop.FromLoadedOat() != top.FromLoadedOat()) {
+ *error_msg =
+ StringPrintf("Found duplicated class when checking oat files: '%s' in %s and %s",
+ compare_pop.GetCachedDescriptor(),
+ compare_pop.GetDexFile()->GetLocation().c_str(),
+ top.GetDexFile()->GetLocation().c_str());
+ FreeDexFilesInHeap(&queue);
+ return true;
+ }
+ // Pop it.
+ queue.pop();
+ AddNext(&top, &queue);
+ } else {
+ // Something else. Done here.
+ break;
+ }
+ }
+ AddNext(&compare_pop, &queue);
+ }
+
+ return false;
+}
+
std::vector<std::unique_ptr<const DexFile>> ClassLinker::OpenDexFilesFromOat(
const char* dex_location, const char* oat_location,
std::vector<std::string>* error_msgs) {
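
The collision check added above is essentially a k-way merge: one cursor per dex file, advanced in lexicographic descriptor order, with equal descriptors flagged when they cross the old/new oat-file boundary. A self-contained sketch of the same technique over plain string lists (invented names, non-owning pointers instead of DexFile ownership; an illustration, not ART code):

#include <queue>
#include <string>
#include <vector>

// One cursor into a lexicographically sorted list of class descriptors,
// tagged with whether the list belongs to an already-loaded oat file.
struct Cursor {
  const std::vector<std::string>* classes;
  size_t index;
  bool from_loaded_oat;

  const std::string& Descriptor() const { return (*classes)[index]; }

  // Reversed comparison: std::priority_queue is a max-queue, so inverting
  // operator< surfaces the lexicographically smallest descriptor first.
  bool operator<(const Cursor& rhs) const {
    return Descriptor() > rhs.Descriptor();
  }
};

// Returns true if any descriptor appears in both an old and a new list.
bool HasCrossOatDuplicates(const std::vector<std::vector<std::string>>& old_lists,
                           const std::vector<std::vector<std::string>>& new_lists) {
  std::priority_queue<Cursor> queue;
  for (const auto& list : old_lists) {
    if (!list.empty()) {
      queue.push(Cursor{&list, 0, /*from_loaded_oat=*/true});
    }
  }
  if (queue.empty()) {
    return false;  // Nothing loaded before: no cross-oat collision possible.
  }
  for (const auto& list : new_lists) {
    if (!list.empty()) {
      queue.push(Cursor{&list, 0, /*from_loaded_oat=*/false});
    }
  }
  while (!queue.empty()) {
    Cursor current = queue.top();
    queue.pop();
    // Compare against every following cursor carrying the same descriptor.
    while (!queue.empty() && queue.top().Descriptor() == current.Descriptor()) {
      Cursor same = queue.top();
      if (same.from_loaded_oat != current.from_loaded_oat) {
        return true;  // Same class in an old and a new oat file: collision.
      }
      queue.pop();
      if (same.index + 1 < same.classes->size()) {
        queue.push(Cursor{same.classes, same.index + 1, same.from_loaded_oat});
      }
    }
    if (current.index + 1 < current.classes->size()) {
      queue.push(Cursor{current.classes, current.index + 1, current.from_loaded_oat});
    }
  }
  return false;
}
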
@@ -730,8 +959,32 @@
// Get the oat file on disk.
std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
if (oat_file.get() != nullptr) {
- source_oat_file = oat_file.release();
- RegisterOatFile(source_oat_file);
+ // Take the file only if it has no collisions, or we must take it because of preopting.
+ bool accept_oat_file = !HasCollisions(oat_file.get(), &error_msg);
+ if (!accept_oat_file) {
+ // Failed the collision check. Print warning.
+ if (Runtime::Current()->IsDexFileFallbackEnabled()) {
+ LOG(WARNING) << "Found duplicate classes, falling back to interpreter mode for "
+ << dex_location;
+ } else {
+ LOG(WARNING) << "Found duplicate classes, dex-file-fallback disabled, will be failing to "
+ " load classes for " << dex_location;
+ }
+ LOG(WARNING) << error_msg;
+
+ // However, if the app was part of /system and preopted, there is no original dex file
+ // available. In that case grudgingly accept the oat file.
+ if (!DexFile::MaybeDex(dex_location)) {
+ accept_oat_file = true;
+ LOG(WARNING) << "Dex location " << dex_location << " does not seem to include dex file. "
+ << "Allow oat file use. This is potentially dangerous.";
+ }
+ }
+
+ if (accept_oat_file) {
+ source_oat_file = oat_file.release();
+ RegisterOatFile(source_oat_file);
+ }
}
}
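
The acceptance logic above reduces to a small predicate, sketched here (name and parameters invented for illustration): reject a colliding oat file unless no original dex file exists to fall back to, in which case it is grudgingly accepted.

// Sketch of the policy: clean oat files are always usable; colliding ones
// are only accepted when there is no original dex file to fall back to
// (e.g. preopted /system apps).
static bool ShouldAcceptOatFile(bool has_collisions, bool original_dex_available) {
  if (!has_collisions) {
    return true;
  }
  return !original_dex_available;
}
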
@@ -752,8 +1005,7 @@
if (Runtime::Current()->IsDexFileFallbackEnabled()) {
if (!DexFile::Open(dex_location, dex_location, &error_msg, &dex_files)) {
LOG(WARNING) << error_msg;
- error_msgs->push_back("Failed to open dex files from "
- + std::string(dex_location));
+ error_msgs->push_back("Failed to open dex files from " + std::string(dex_location));
}
} else {
error_msgs->push_back("Fallback mode disabled, skipping dex files.");
@@ -911,6 +1163,10 @@
// String class root was set above
mirror::Field::SetClass(GetClassRoot(kJavaLangReflectField));
mirror::Field::SetArrayClass(GetClassRoot(kJavaLangReflectFieldArrayClass));
+ mirror::Constructor::SetClass(GetClassRoot(kJavaLangReflectConstructor));
+ mirror::Constructor::SetArrayClass(GetClassRoot(kJavaLangReflectConstructorArrayClass));
+ mirror::Method::SetClass(GetClassRoot(kJavaLangReflectMethod));
+ mirror::Method::SetArrayClass(GetClassRoot(kJavaLangReflectMethodArrayClass));
mirror::Reference::SetClass(GetClassRoot(kJavaLangRefReference));
mirror::BooleanArray::SetArrayClass(GetClassRoot(kBooleanArrayClass));
mirror::ByteArray::SetArrayClass(GetClassRoot(kByteArrayClass));
@@ -1096,22 +1352,26 @@
}
ClassLinker::~ClassLinker() {
- mirror::Class::ResetClass();
- mirror::String::ResetClass();
- mirror::Reference::ResetClass();
mirror::ArtMethod::ResetClass();
+ mirror::Class::ResetClass();
+ mirror::Constructor::ResetClass();
mirror::Field::ResetClass();
- mirror::Field::ResetArrayClass();
+ mirror::Method::ResetClass();
+ mirror::Reference::ResetClass();
+ mirror::StackTraceElement::ResetClass();
+ mirror::String::ResetClass();
+ mirror::Throwable::ResetClass();
mirror::BooleanArray::ResetArrayClass();
mirror::ByteArray::ResetArrayClass();
mirror::CharArray::ResetArrayClass();
+ mirror::Constructor::ResetArrayClass();
mirror::DoubleArray::ResetArrayClass();
+ mirror::Field::ResetArrayClass();
mirror::FloatArray::ResetArrayClass();
+ mirror::Method::ResetArrayClass();
mirror::IntArray::ResetArrayClass();
mirror::LongArray::ResetArrayClass();
mirror::ShortArray::ResetArrayClass();
- mirror::Throwable::ResetClass();
- mirror::StackTraceElement::ResetClass();
STLDeleteElements(&oat_files_);
}
@@ -1508,6 +1768,13 @@
SetupClass(dex_file, dex_class_def, klass, class_loader.Get());
+ // Mark the string class by setting its access flag.
+ if (UNLIKELY(!init_done_)) {
+ if (strcmp(descriptor, "Ljava/lang/String;") == 0) {
+ klass->SetStringClass();
+ }
+ }
+
ObjectLock<mirror::Class> lock(self, klass);
klass->SetClinitThreadId(self->GetTid());
@@ -2262,7 +2529,7 @@
// the right context. It does NOT become the class loader for the
// array class; that always comes from the base element class.
//
-// Returns nullptr with an exception raised on failure.
+// Returns null with an exception raised on failure.
mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descriptor, size_t hash,
Handle<mirror::ClassLoader> class_loader) {
// Identify the underlying component type
@@ -2947,7 +3214,7 @@
jobjectArray interfaces, jobject loader,
jobjectArray methods, jobjectArray throws) {
Thread* self = soa.Self();
- StackHandleScope<8> hs(self);
+ StackHandleScope<9> hs(self);
MutableHandle<mirror::Class> klass(hs.NewHandle(
AllocClass(self, GetClassRoot(kJavaLangClass), sizeof(mirror::Class))));
if (klass.Get() == nullptr) {
@@ -3001,8 +3268,10 @@
}
// Create virtual method using specified prototypes.
- size_t num_virtual_methods =
- soa.Decode<mirror::ObjectArray<mirror::ArtMethod>*>(methods)->GetLength();
+ auto h_methods = hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Method>*>(methods));
+ DCHECK_EQ(h_methods->GetClass(), mirror::Method::ArrayClass())
+ << PrettyClass(h_methods->GetClass());
+ const size_t num_virtual_methods = h_methods->GetLength();
{
mirror::ObjectArray<mirror::ArtMethod>* virtuals = AllocArtMethodArray(self,
num_virtual_methods);
@@ -3014,9 +3283,7 @@
}
for (size_t i = 0; i < num_virtual_methods; ++i) {
StackHandleScope<1> hs2(self);
- mirror::ObjectArray<mirror::ArtMethod>* decoded_methods =
- soa.Decode<mirror::ObjectArray<mirror::ArtMethod>*>(methods);
- Handle<mirror::ArtMethod> prototype(hs2.NewHandle(decoded_methods->Get(i)));
+ Handle<mirror::ArtMethod> prototype(hs2.NewHandle(h_methods->Get(i)->GetArtMethod()));
mirror::ArtMethod* clone = CreateProxyMethod(self, klass, prototype);
if (UNLIKELY(clone == nullptr)) {
CHECK(self->IsExceptionPending()); // OOME.
@@ -3066,9 +3333,7 @@
CheckProxyConstructor(klass->GetDirectMethod(0));
for (size_t i = 0; i < num_virtual_methods; ++i) {
StackHandleScope<2> hs2(self);
- mirror::ObjectArray<mirror::ArtMethod>* decoded_methods =
- soa.Decode<mirror::ObjectArray<mirror::ArtMethod>*>(methods);
- Handle<mirror::ArtMethod> prototype(hs2.NewHandle(decoded_methods->Get(i)));
+ Handle<mirror::ArtMethod> prototype(hs2.NewHandle(h_methods->Get(i)->GetArtMethod()));
Handle<mirror::ArtMethod> virtual_method(hs2.NewHandle(klass->GetVirtualMethod(i)));
CheckProxyMethod(virtual_method, prototype);
}
@@ -3104,23 +3369,22 @@
mirror::ArtMethod* proxy_method) {
DCHECK(proxy_class->IsProxyClass());
DCHECK(proxy_method->IsProxyMethod());
- // Locate the dex cache of the original interface/Object
- mirror::DexCache* dex_cache = nullptr;
{
ReaderMutexLock mu(Thread::Current(), dex_lock_);
- for (size_t i = 0; i != dex_caches_.size(); ++i) {
- mirror::DexCache* a_dex_cache = GetDexCache(i);
- if (proxy_method->HasSameDexCacheResolvedTypes(a_dex_cache->GetResolvedTypes())) {
- dex_cache = a_dex_cache;
- break;
+ // Locate the dex cache of the original interface/Object
+ for (const GcRoot<mirror::DexCache>& root : dex_caches_) {
+ auto* dex_cache = root.Read();
+ if (proxy_method->HasSameDexCacheResolvedTypes(dex_cache->GetResolvedTypes())) {
+ mirror::ArtMethod* resolved_method = dex_cache->GetResolvedMethod(
+ proxy_method->GetDexMethodIndex());
+ CHECK(resolved_method != nullptr);
+ return resolved_method;
}
}
}
- CHECK(dex_cache != nullptr);
- uint32_t method_idx = proxy_method->GetDexMethodIndex();
- mirror::ArtMethod* resolved_method = dex_cache->GetResolvedMethod(method_idx);
- CHECK(resolved_method != nullptr);
- return resolved_method;
+ LOG(FATAL) << "Didn't find dex cache for " << PrettyClass(proxy_class) << " "
+ << PrettyMethod(proxy_method);
+ UNREACHABLE();
}
@@ -3163,8 +3427,11 @@
Handle<mirror::ArtMethod> prototype) {
// Ensure prototype is in dex cache so that we can use the dex cache to look up the overridden
// prototype method
- prototype->GetDeclaringClass()->GetDexCache()->SetResolvedMethod(prototype->GetDexMethodIndex(),
- prototype.Get());
+ auto* dex_cache = prototype->GetDeclaringClass()->GetDexCache();
+ // Avoid dirtying the dex cache unless we need to.
+ if (dex_cache->GetResolvedMethod(prototype->GetDexMethodIndex()) != prototype.Get()) {
+ dex_cache->SetResolvedMethod(prototype->GetDexMethodIndex(), prototype.Get());
+ }
// We steal everything from the prototype (such as DexCache, invoke stub, etc.) then specialize
// as necessary
mirror::ArtMethod* method = down_cast<mirror::ArtMethod*>(prototype->Clone(self));
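
The "avoid dirtying" guard above is a general copy-on-write trick: skip stores that would not change anything, so clean image pages stay shared. A generic sketch of the same idea (invented name; any write-heavy cache benefits similarly):

template <typename T>
void StoreIfChanged(T* slot, const T& value) {
  if (*slot != value) {
    *slot = value;  // Only write, and thus dirty the backing page, when needed.
  }
}
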
@@ -3198,6 +3465,7 @@
// interface prototype. The exceptions to this are Constructors and the Class of the Proxy itself.
CHECK(prototype->HasSameDexCacheResolvedMethods(method.Get()));
CHECK(prototype->HasSameDexCacheResolvedTypes(method.Get()));
+ CHECK_EQ(prototype->GetDeclaringClass()->GetDexCache(), method->GetDexCache());
CHECK_EQ(prototype->GetDexMethodIndex(), method->GetDexMethodIndex());
CHECK_STREQ(method->GetName(), prototype->GetName());
@@ -5210,11 +5478,15 @@
"Ljava/lang/DexCache;",
"Ljava/lang/ref/Reference;",
"Ljava/lang/reflect/ArtMethod;",
+ "Ljava/lang/reflect/Constructor;",
"Ljava/lang/reflect/Field;",
+ "Ljava/lang/reflect/Method;",
"Ljava/lang/reflect/Proxy;",
"[Ljava/lang/String;",
"[Ljava/lang/reflect/ArtMethod;",
+ "[Ljava/lang/reflect/Constructor;",
"[Ljava/lang/reflect/Field;",
+ "[Ljava/lang/reflect/Method;",
"Ljava/lang/ClassLoader;",
"Ljava/lang/Throwable;",
"Ljava/lang/ClassNotFoundException;",
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 68624b0..95c8aa0 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -71,11 +71,15 @@
kJavaLangDexCache,
kJavaLangRefReference,
kJavaLangReflectArtMethod,
+ kJavaLangReflectConstructor,
kJavaLangReflectField,
+ kJavaLangReflectMethod,
kJavaLangReflectProxy,
kJavaLangStringArrayClass,
kJavaLangReflectArtMethodArrayClass,
+ kJavaLangReflectConstructorArrayClass,
kJavaLangReflectFieldArrayClass,
+ kJavaLangReflectMethodArrayClass,
kJavaLangClassLoader,
kJavaLangThrowable,
kJavaLangClassNotFoundException,
@@ -148,7 +152,7 @@
const DexFile& dex_file, const DexFile::ClassDef& dex_class_def)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Finds a class by its descriptor, returning NULL if it isn't wasn't loaded
+ // Finds a class by its descriptor, returning null if it wasn't loaded
// by the given 'class_loader'.
mirror::Class* LookupClass(Thread* self, const char* descriptor, size_t hash,
mirror::ClassLoader* class_loader)
@@ -291,6 +295,10 @@
return boot_class_path_;
}
+ // Returns the first non-image oat file in the class path.
+ const OatFile* GetPrimaryOatFile()
+ LOCKS_EXCLUDED(dex_lock_);
+
void VisitClasses(ClassVisitor* visitor, void* arg)
LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -428,7 +436,7 @@
void SetEntryPointsToInterpreter(mirror::ArtMethod* method) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Attempts to insert a class into a class table. Returns NULL if
+ // Attempts to insert a class into a class table. Returns null if
// the class was inserted, otherwise returns an existing class with
// the same descriptor and ClassLoader.
mirror::Class* InsertClass(const char* descriptor, mirror::Class* klass, size_t hash)
@@ -440,7 +448,7 @@
mirror::ObjectArray<mirror::Class>* GetClassRoots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ObjectArray<mirror::Class>* class_roots = class_roots_.Read();
- DCHECK(class_roots != NULL);
+ DCHECK(class_roots != nullptr);
return class_roots;
}
@@ -611,6 +619,9 @@
const OatFile* FindOpenedOatFileFromOatLocation(const std::string& oat_location)
LOCKS_EXCLUDED(dex_lock_);
+ // Returns the boot image oat file.
+ const OatFile* GetBootOatFile() SHARED_LOCKS_REQUIRED(dex_lock_);
+
mirror::ArtMethod* CreateProxyConstructor(Thread* self, Handle<mirror::Class> klass,
mirror::Class* proxy_class)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -653,6 +664,15 @@
// Return the quick generic JNI stub for testing.
const void* GetRuntimeQuickGenericJniStub() const;
+ // Throw the class initialization failure recorded when first trying to initialize the given
+ // class.
+ // Note: Currently we only store the descriptor, so we cannot throw the exact throwable, only
+ // a recreation with a custom string.
+ void ThrowEarlierClassFailure(mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Check for duplicate class definitions of the given oat file against all open oat files.
+ bool HasCollisions(const OatFile* oat_file, std::string* error_msg) LOCKS_EXCLUDED(dex_lock_);
+
std::vector<const DexFile*> boot_class_path_;
std::vector<std::unique_ptr<const DexFile>> opened_dex_files_;
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index a31a785..d155941 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -25,6 +25,7 @@
#include "dex_file.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "gc/heap.h"
+#include "mirror/abstract_method.h"
#include "mirror/accessible_object.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
@@ -393,8 +394,9 @@
bool error = false;
- // Art method have a different size due to the padding field.
- if (!klass->IsArtMethodClass() && !klass->IsClassClass() && !is_static) {
+ // Methods and classes have a different size due to padding fields. Strings are variable length.
+ if (!klass->IsArtMethodClass() && !klass->IsClassClass() && !klass->IsStringClass() &&
+ !is_static) {
// Currently only required for AccessibleObject because of the padding fields. The class linker
// says AccessibleObject is 9 bytes but sizeof(AccessibleObject) is 12 bytes due to padding.
// The RoundUp is to get around this case.
@@ -463,6 +465,10 @@
return !error;
};
+ void addOffset(size_t offset, const char* name) {
+ offsets.push_back(CheckOffset(offset, name));
+ }
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(CheckOffsets);
};
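
The offset bookkeeping these test fixtures build on can be reproduced standalone with offsetof; a minimal sketch, using an invented struct and expected layout (not the ART test harness):

#include <cstddef>
#include <cstdio>
#include <vector>

// An invented native layout, standing in for a mirror:: class.
struct Example {
  int id;
  long value;
};

struct CheckedOffset {
  size_t offset;    // Offset computed from the C++ struct.
  size_t expected;  // Offset the corresponding managed layout expects.
  const char* name;
};

// Verifies each field sits exactly where the expected layout says.
static bool CheckOffsets(const std::vector<CheckedOffset>& offsets) {
  bool ok = true;
  for (const CheckedOffset& o : offsets) {
    if (o.offset != o.expected) {
      std::printf("field %s: got offset %zu, expected %zu\n", o.name, o.offset, o.expected);
      ok = false;
    }
  }
  return ok;
}

// Usage (expected values depend on the target ABI):
// CheckOffsets({{offsetof(Example, id), 0, "id"},
//               {offsetof(Example, value), 8, "value"}});
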
@@ -472,142 +478,160 @@
struct ObjectOffsets : public CheckOffsets<mirror::Object> {
ObjectOffsets() : CheckOffsets<mirror::Object>(false, "Ljava/lang/Object;") {
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Object, klass_), "shadow$_klass_"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Object, monitor_), "shadow$_monitor_"));
-#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Object, x_rb_ptr_), "shadow$_x_rb_ptr_"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Object, x_xpadding_), "shadow$_x_xpadding_"));
+ addOffset(OFFSETOF_MEMBER(mirror::Object, klass_), "shadow$_klass_");
+ addOffset(OFFSETOF_MEMBER(mirror::Object, monitor_), "shadow$_monitor_");
+#ifdef USE_BROOKS_READ_BARRIER
+ addOffset(OFFSETOF_MEMBER(mirror::Object, x_rb_ptr_), "shadow$_x_rb_ptr_");
+ addOffset(OFFSETOF_MEMBER(mirror::Object, x_xpadding_), "shadow$_x_xpadding_");
#endif
};
};
struct ArtMethodOffsets : public CheckOffsets<mirror::ArtMethod> {
ArtMethodOffsets() : CheckOffsets<mirror::ArtMethod>(false, "Ljava/lang/reflect/ArtMethod;") {
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, access_flags_), "accessFlags"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, declaring_class_), "declaringClass"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_cache_resolved_methods_), "dexCacheResolvedMethods"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_cache_resolved_types_), "dexCacheResolvedTypes"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_code_item_offset_), "dexCodeItemOffset"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_method_index_), "dexMethodIndex"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, method_index_), "methodIndex"));
+ addOffset(OFFSETOF_MEMBER(mirror::ArtMethod, access_flags_), "accessFlags");
+ addOffset(OFFSETOF_MEMBER(mirror::ArtMethod, declaring_class_), "declaringClass");
+ addOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_cache_resolved_methods_),
+ "dexCacheResolvedMethods");
+ addOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_cache_resolved_types_),
+ "dexCacheResolvedTypes");
+ addOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_code_item_offset_), "dexCodeItemOffset");
+ addOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_method_index_), "dexMethodIndex");
+ addOffset(OFFSETOF_MEMBER(mirror::ArtMethod, method_index_), "methodIndex");
};
};
struct ClassOffsets : public CheckOffsets<mirror::Class> {
ClassOffsets() : CheckOffsets<mirror::Class>(false, "Ljava/lang/Class;") {
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, access_flags_), "accessFlags"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, class_loader_), "classLoader"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, class_size_), "classSize"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, clinit_thread_id_), "clinitThreadId"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, component_type_), "componentType"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, dex_cache_), "dexCache"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, dex_cache_strings_), "dexCacheStrings"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, dex_class_def_idx_), "dexClassDefIndex"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, dex_type_idx_), "dexTypeIndex"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, direct_methods_), "directMethods"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, ifields_), "iFields"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, iftable_), "ifTable"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, name_), "name"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, num_instance_fields_), "numInstanceFields"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, num_reference_instance_fields_), "numReferenceInstanceFields"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, num_reference_static_fields_), "numReferenceStaticFields"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, num_static_fields_), "numStaticFields"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, object_size_), "objectSize"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, primitive_type_), "primitiveType"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, reference_instance_offsets_), "referenceInstanceOffsets"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, sfields_), "sFields"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, status_), "status"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, super_class_), "superClass"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, verify_error_class_), "verifyErrorClass"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, virtual_methods_), "virtualMethods"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Class, vtable_), "vtable"));
+ addOffset(OFFSETOF_MEMBER(mirror::Class, access_flags_), "accessFlags");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, class_loader_), "classLoader");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, class_size_), "classSize");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, clinit_thread_id_), "clinitThreadId");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, component_type_), "componentType");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, dex_cache_), "dexCache");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, dex_cache_strings_), "dexCacheStrings");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, dex_class_def_idx_), "dexClassDefIndex");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, dex_type_idx_), "dexTypeIndex");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, direct_methods_), "directMethods");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, ifields_), "iFields");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, iftable_), "ifTable");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, name_), "name");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, num_instance_fields_), "numInstanceFields");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, num_reference_instance_fields_),
+ "numReferenceInstanceFields");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, num_reference_static_fields_),
+ "numReferenceStaticFields");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, num_static_fields_), "numStaticFields");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, object_size_), "objectSize");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, primitive_type_), "primitiveType");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, reference_instance_offsets_),
+ "referenceInstanceOffsets");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, sfields_), "sFields");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, status_), "status");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, super_class_), "superClass");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, verify_error_class_), "verifyErrorClass");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, virtual_methods_), "virtualMethods");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, vtable_), "vtable");
};
};
struct StringOffsets : public CheckOffsets<mirror::String> {
StringOffsets() : CheckOffsets<mirror::String>(false, "Ljava/lang/String;") {
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::String, count_), "count"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::String, hash_code_), "hashCode"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::String, offset_), "offset"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::String, array_), "value"));
+ addOffset(OFFSETOF_MEMBER(mirror::String, count_), "count");
+ addOffset(OFFSETOF_MEMBER(mirror::String, hash_code_), "hashCode");
};
};
struct ThrowableOffsets : public CheckOffsets<mirror::Throwable> {
ThrowableOffsets() : CheckOffsets<mirror::Throwable>(false, "Ljava/lang/Throwable;") {
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Throwable, cause_), "cause"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Throwable, detail_message_), "detailMessage"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Throwable, stack_state_), "stackState"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Throwable, stack_trace_), "stackTrace"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Throwable, suppressed_exceptions_), "suppressedExceptions"));
+ addOffset(OFFSETOF_MEMBER(mirror::Throwable, cause_), "cause");
+ addOffset(OFFSETOF_MEMBER(mirror::Throwable, detail_message_), "detailMessage");
+ addOffset(OFFSETOF_MEMBER(mirror::Throwable, stack_state_), "stackState");
+ addOffset(OFFSETOF_MEMBER(mirror::Throwable, stack_trace_), "stackTrace");
+ addOffset(OFFSETOF_MEMBER(mirror::Throwable, suppressed_exceptions_), "suppressedExceptions");
};
};
struct StackTraceElementOffsets : public CheckOffsets<mirror::StackTraceElement> {
- StackTraceElementOffsets() : CheckOffsets<mirror::StackTraceElement>(false, "Ljava/lang/StackTraceElement;") {
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::StackTraceElement, declaring_class_), "declaringClass"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::StackTraceElement, file_name_), "fileName"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::StackTraceElement, line_number_), "lineNumber"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::StackTraceElement, method_name_), "methodName"));
+ StackTraceElementOffsets() : CheckOffsets<mirror::StackTraceElement>(
+ false, "Ljava/lang/StackTraceElement;") {
+ addOffset(OFFSETOF_MEMBER(mirror::StackTraceElement, declaring_class_), "declaringClass");
+ addOffset(OFFSETOF_MEMBER(mirror::StackTraceElement, file_name_), "fileName");
+ addOffset(OFFSETOF_MEMBER(mirror::StackTraceElement, line_number_), "lineNumber");
+ addOffset(OFFSETOF_MEMBER(mirror::StackTraceElement, method_name_), "methodName");
};
};
struct ClassLoaderOffsets : public CheckOffsets<mirror::ClassLoader> {
ClassLoaderOffsets() : CheckOffsets<mirror::ClassLoader>(false, "Ljava/lang/ClassLoader;") {
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ClassLoader, packages_), "packages"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ClassLoader, parent_), "parent"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ClassLoader, proxyCache_), "proxyCache"));
+ addOffset(OFFSETOF_MEMBER(mirror::ClassLoader, packages_), "packages");
+ addOffset(OFFSETOF_MEMBER(mirror::ClassLoader, parent_), "parent");
+ addOffset(OFFSETOF_MEMBER(mirror::ClassLoader, proxyCache_), "proxyCache");
};
};
struct ProxyOffsets : public CheckOffsets<mirror::Proxy> {
ProxyOffsets() : CheckOffsets<mirror::Proxy>(false, "Ljava/lang/reflect/Proxy;") {
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Proxy, h_), "h"));
+ addOffset(OFFSETOF_MEMBER(mirror::Proxy, h_), "h");
};
};
struct DexCacheOffsets : public CheckOffsets<mirror::DexCache> {
DexCacheOffsets() : CheckOffsets<mirror::DexCache>(false, "Ljava/lang/DexCache;") {
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, dex_), "dex"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, dex_file_), "dexFile"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, location_), "location"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, resolved_fields_), "resolvedFields"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, resolved_methods_), "resolvedMethods"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, resolved_types_), "resolvedTypes"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, strings_), "strings"));
+ addOffset(OFFSETOF_MEMBER(mirror::DexCache, dex_), "dex");
+ addOffset(OFFSETOF_MEMBER(mirror::DexCache, dex_file_), "dexFile");
+ addOffset(OFFSETOF_MEMBER(mirror::DexCache, location_), "location");
+ addOffset(OFFSETOF_MEMBER(mirror::DexCache, resolved_fields_), "resolvedFields");
+ addOffset(OFFSETOF_MEMBER(mirror::DexCache, resolved_methods_), "resolvedMethods");
+ addOffset(OFFSETOF_MEMBER(mirror::DexCache, resolved_types_), "resolvedTypes");
+ addOffset(OFFSETOF_MEMBER(mirror::DexCache, strings_), "strings");
};
};
struct ReferenceOffsets : public CheckOffsets<mirror::Reference> {
ReferenceOffsets() : CheckOffsets<mirror::Reference>(false, "Ljava/lang/ref/Reference;") {
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Reference, pending_next_), "pendingNext"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Reference, queue_), "queue"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Reference, queue_next_), "queueNext"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Reference, referent_), "referent"));
+ addOffset(OFFSETOF_MEMBER(mirror::Reference, pending_next_), "pendingNext");
+ addOffset(OFFSETOF_MEMBER(mirror::Reference, queue_), "queue");
+ addOffset(OFFSETOF_MEMBER(mirror::Reference, queue_next_), "queueNext");
+ addOffset(OFFSETOF_MEMBER(mirror::Reference, referent_), "referent");
};
};
struct FinalizerReferenceOffsets : public CheckOffsets<mirror::FinalizerReference> {
- FinalizerReferenceOffsets() : CheckOffsets<mirror::FinalizerReference>(false, "Ljava/lang/ref/FinalizerReference;") {
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::FinalizerReference, next_), "next"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::FinalizerReference, prev_), "prev"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::FinalizerReference, zombie_), "zombie"));
+ FinalizerReferenceOffsets() : CheckOffsets<mirror::FinalizerReference>(
+ false, "Ljava/lang/ref/FinalizerReference;") {
+ addOffset(OFFSETOF_MEMBER(mirror::FinalizerReference, next_), "next");
+ addOffset(OFFSETOF_MEMBER(mirror::FinalizerReference, prev_), "prev");
+ addOffset(OFFSETOF_MEMBER(mirror::FinalizerReference, zombie_), "zombie");
};
};
struct AccessibleObjectOffsets : public CheckOffsets<mirror::AccessibleObject> {
- AccessibleObjectOffsets() : CheckOffsets<mirror::AccessibleObject>(false, "Ljava/lang/reflect/AccessibleObject;") {
- offsets.push_back(CheckOffset(mirror::AccessibleObject::FlagOffset().Uint32Value(), "flag"));
+ AccessibleObjectOffsets() : CheckOffsets<mirror::AccessibleObject>(
+ false, "Ljava/lang/reflect/AccessibleObject;") {
+ addOffset(mirror::AccessibleObject::FlagOffset().Uint32Value(), "flag");
};
};
struct FieldOffsets : public CheckOffsets<mirror::Field> {
FieldOffsets() : CheckOffsets<mirror::Field>(false, "Ljava/lang/reflect/Field;") {
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Field, access_flags_), "accessFlags"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Field, declaring_class_), "declaringClass"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Field, dex_field_index_), "dexFieldIndex"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Field, offset_), "offset"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::Field, type_), "type"));
+ addOffset(OFFSETOF_MEMBER(mirror::Field, access_flags_), "accessFlags");
+ addOffset(OFFSETOF_MEMBER(mirror::Field, declaring_class_), "declaringClass");
+ addOffset(OFFSETOF_MEMBER(mirror::Field, dex_field_index_), "dexFieldIndex");
+ addOffset(OFFSETOF_MEMBER(mirror::Field, offset_), "offset");
+ addOffset(OFFSETOF_MEMBER(mirror::Field, type_), "type");
+ };
+};
+
+struct AbstractMethodOffsets : public CheckOffsets<mirror::AbstractMethod> {
+ AbstractMethodOffsets() : CheckOffsets<mirror::AbstractMethod>(
+ false, "Ljava/lang/reflect/AbstractMethod;") {
+ addOffset(OFFSETOF_MEMBER(mirror::AbstractMethod, access_flags_), "accessFlags");
+ addOffset(OFFSETOF_MEMBER(mirror::AbstractMethod, art_method_), "artMethod");
+ addOffset(OFFSETOF_MEMBER(mirror::AbstractMethod, declaring_class_), "declaringClass");
+ addOffset(OFFSETOF_MEMBER(mirror::AbstractMethod, declaring_class_of_overridden_method_),
+ "declaringClassOfOverriddenMethod");
+ addOffset(OFFSETOF_MEMBER(mirror::AbstractMethod, dex_method_index_), "dexMethodIndex");
};
};
@@ -629,6 +653,7 @@
EXPECT_TRUE(FinalizerReferenceOffsets().Check());
EXPECT_TRUE(AccessibleObjectOffsets().Check());
EXPECT_TRUE(FieldOffsets().Check());
+ EXPECT_TRUE(AbstractMethodOffsets().Check());
}
TEST_F(ClassLinkerTest, FindClassNonexistent) {
@@ -710,14 +735,14 @@
EXPECT_FALSE(JavaLangObject->IsSynthetic());
EXPECT_EQ(2U, JavaLangObject->NumDirectMethods());
EXPECT_EQ(11U, JavaLangObject->NumVirtualMethods());
- if (!kUseBakerOrBrooksReadBarrier) {
+ if (!kUseBrooksReadBarrier) {
EXPECT_EQ(2U, JavaLangObject->NumInstanceFields());
} else {
EXPECT_EQ(4U, JavaLangObject->NumInstanceFields());
}
EXPECT_STREQ(JavaLangObject->GetInstanceField(0)->GetName(), "shadow$_klass_");
EXPECT_STREQ(JavaLangObject->GetInstanceField(1)->GetName(), "shadow$_monitor_");
- if (kUseBakerOrBrooksReadBarrier) {
+ if (kUseBrooksReadBarrier) {
EXPECT_STREQ(JavaLangObject->GetInstanceField(2)->GetName(), "shadow$_x_rb_ptr_");
EXPECT_STREQ(JavaLangObject->GetInstanceField(3)->GetName(), "shadow$_x_xpadding_");
}
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index e17b885..de3a29b 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -81,7 +81,7 @@
}
ScratchFile::ScratchFile(File* file) {
- CHECK(file != NULL);
+ CHECK(file != nullptr);
filename_ = file->GetPath();
file_.reset(file);
}
@@ -327,7 +327,7 @@
// Initialize maps for unstarted runtime. This needs to be here, as running clinits needs this
// set up.
if (!unstarted_initialized_) {
- interpreter::UnstartedRuntimeInitialize();
+ interpreter::UnstartedRuntime::Initialize();
unstarted_initialized_ = true;
}
@@ -559,7 +559,7 @@
std::string location;
if (IsHost()) {
const char* host_dir = getenv("ANDROID_HOST_OUT");
- CHECK(host_dir != NULL);
+ CHECK(host_dir != nullptr);
location = StringPrintf("%s/framework/core.%s", host_dir, suffix);
} else {
location = StringPrintf("/data/art-test/core.%s", suffix);
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 9917378..34fdd8d 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -182,7 +182,7 @@
}
#define TEST_DISABLED_FOR_MIPS() \
- if (kRuntimeISA == kMips || kRuntimeISA == kMips64) { \
+ if (kRuntimeISA == kMips) { \
printf("WARNING: TEST DISABLED FOR MIPS\n"); \
return; \
}
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 407746f..b401066 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -35,7 +35,7 @@
static void AddReferrerLocation(std::ostream& os, mirror::Class* referrer)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (referrer != NULL) {
+ if (referrer != nullptr) {
std::string location(referrer->GetLocation());
if (!location.empty()) {
os << " (declaration of '" << PrettyDescriptor(referrer)
@@ -45,10 +45,10 @@
}
static void ThrowException(const char* exception_descriptor,
- mirror::Class* referrer, const char* fmt, va_list* args = NULL)
+ mirror::Class* referrer, const char* fmt, va_list* args = nullptr)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::ostringstream msg;
- if (args != NULL) {
+ if (args != nullptr) {
std::string vmsg;
StringAppendV(&vmsg, fmt, *args);
msg << vmsg;
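
One pattern worth calling out here: the optional va_list* argument (defaulting to nullptr) lets a single helper serve both fixed-string and printf-style callers. A self-contained sketch of that idiom (FormatMessage/FormatMessageV are invented names; ART's StringAppendV plays the formatting role):

#include <cstdarg>
#include <cstdio>
#include <string>

// Formats fmt with args when provided; treats fmt as a literal message
// when args is nullptr, matching the default-argument pattern above.
static std::string FormatMessage(const char* fmt, va_list* args) {
  if (args == nullptr) {
    return fmt;
  }
  char buf[1024];
  std::vsnprintf(buf, sizeof(buf), fmt, *args);
  return buf;
}

// Variadic front end, mirroring the ThrowIOException-style callers below.
static std::string FormatMessageV(const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  std::string result = FormatMessage(fmt, &args);
  va_end(args);
  return result;
}
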
@@ -61,10 +61,10 @@
}
static void ThrowWrappedException(const char* exception_descriptor,
- mirror::Class* referrer, const char* fmt, va_list* args = NULL)
+ mirror::Class* referrer, const char* fmt, va_list* args = nullptr)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::ostringstream msg;
- if (args != NULL) {
+ if (args != nullptr) {
std::string vmsg;
StringAppendV(&vmsg, fmt, *args);
msg << vmsg;
@@ -79,7 +79,7 @@
// AbstractMethodError
void ThrowAbstractMethodError(mirror::ArtMethod* method) {
- ThrowException("Ljava/lang/AbstractMethodError;", NULL,
+ ThrowException("Ljava/lang/AbstractMethodError;", nullptr,
StringPrintf("abstract method \"%s\"",
PrettyMethod(method).c_str()).c_str());
}
@@ -87,20 +87,20 @@
// ArithmeticException
void ThrowArithmeticExceptionDivideByZero() {
- ThrowException("Ljava/lang/ArithmeticException;", NULL, "divide by zero");
+ ThrowException("Ljava/lang/ArithmeticException;", nullptr, "divide by zero");
}
// ArrayIndexOutOfBoundsException
void ThrowArrayIndexOutOfBoundsException(int index, int length) {
- ThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", NULL,
+ ThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", nullptr,
StringPrintf("length=%d; index=%d", length, index).c_str());
}
// ArrayStoreException
void ThrowArrayStoreException(mirror::Class* element_class, mirror::Class* array_class) {
- ThrowException("Ljava/lang/ArrayStoreException;", NULL,
+ ThrowException("Ljava/lang/ArrayStoreException;", nullptr,
StringPrintf("%s cannot be stored in an array of type %s",
PrettyDescriptor(element_class).c_str(),
PrettyDescriptor(array_class).c_str()).c_str());
@@ -109,14 +109,14 @@
// ClassCastException
void ThrowClassCastException(mirror::Class* dest_type, mirror::Class* src_type) {
- ThrowException("Ljava/lang/ClassCastException;", NULL,
+ ThrowException("Ljava/lang/ClassCastException;", nullptr,
StringPrintf("%s cannot be cast to %s",
PrettyDescriptor(src_type).c_str(),
PrettyDescriptor(dest_type).c_str()).c_str());
}
void ThrowClassCastException(const char* msg) {
- ThrowException("Ljava/lang/ClassCastException;", NULL, msg);
+ ThrowException("Ljava/lang/ClassCastException;", nullptr, msg);
}
// ClassCircularityError
@@ -174,7 +174,7 @@
msg << "Final field '" << PrettyField(accessed, false) << "' cannot be written to by method '"
<< PrettyMethod(referrer) << "'";
ThrowException("Ljava/lang/IllegalAccessError;",
- referrer != NULL ? referrer->GetClass() : NULL,
+ referrer != nullptr ? referrer->GetClass() : nullptr,
msg.str().c_str());
}
@@ -188,13 +188,13 @@
// IllegalAccessException
void ThrowIllegalAccessException(const char* msg) {
- ThrowException("Ljava/lang/IllegalAccessException;", NULL, msg);
+ ThrowException("Ljava/lang/IllegalAccessException;", nullptr, msg);
}
// IllegalArgumentException
void ThrowIllegalArgumentException(const char* msg) {
- ThrowException("Ljava/lang/IllegalArgumentException;", NULL, msg);
+ ThrowException("Ljava/lang/IllegalArgumentException;", nullptr, msg);
}
@@ -207,7 +207,7 @@
msg << "The method '" << PrettyMethod(method) << "' was expected to be of type "
<< expected_type << " but instead was found to be of type " << found_type;
ThrowException("Ljava/lang/IncompatibleClassChangeError;",
- referrer != NULL ? referrer->GetClass() : NULL,
+ referrer != nullptr ? referrer->GetClass() : nullptr,
msg.str().c_str());
}
@@ -216,14 +216,14 @@
mirror::ArtMethod* referrer) {
  // Referrer is calling interface_method on this_object; however, the interface_method isn't
// implemented by this_object.
- CHECK(this_object != NULL);
+ CHECK(this_object != nullptr);
std::ostringstream msg;
msg << "Class '" << PrettyDescriptor(this_object->GetClass())
<< "' does not implement interface '"
<< PrettyDescriptor(interface_method->GetDeclaringClass())
<< "' in call to '" << PrettyMethod(interface_method) << "'";
ThrowException("Ljava/lang/IncompatibleClassChangeError;",
- referrer != NULL ? referrer->GetClass() : NULL,
+ referrer != nullptr ? referrer->GetClass() : nullptr,
msg.str().c_str());
}
@@ -249,14 +249,14 @@
void ThrowIOException(const char* fmt, ...) {
va_list args;
va_start(args, fmt);
- ThrowException("Ljava/io/IOException;", NULL, fmt, &args);
+ ThrowException("Ljava/io/IOException;", nullptr, fmt, &args);
va_end(args);
}
void ThrowWrappedIOException(const char* fmt, ...) {
va_list args;
va_start(args, fmt);
- ThrowWrappedException("Ljava/io/IOException;", NULL, fmt, &args);
+ ThrowWrappedException("Ljava/io/IOException;", nullptr, fmt, &args);
va_end(args);
}
@@ -272,19 +272,18 @@
// NegativeArraySizeException
void ThrowNegativeArraySizeException(int size) {
- ThrowException("Ljava/lang/NegativeArraySizeException;", NULL,
+ ThrowException("Ljava/lang/NegativeArraySizeException;", nullptr,
StringPrintf("%d", size).c_str());
}
void ThrowNegativeArraySizeException(const char* msg) {
- ThrowException("Ljava/lang/NegativeArraySizeException;", NULL, msg);
+ ThrowException("Ljava/lang/NegativeArraySizeException;", nullptr, msg);
}
// NoSuchFieldError
void ThrowNoSuchFieldError(const StringPiece& scope, mirror::Class* c,
- const StringPiece& type, const StringPiece& name)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const StringPiece& type, const StringPiece& name) {
std::ostringstream msg;
std::string temp;
msg << "No " << scope << "field " << name << " of type " << type
@@ -292,6 +291,13 @@
ThrowException("Ljava/lang/NoSuchFieldError;", c, msg.str().c_str());
}
+void ThrowNoSuchFieldException(mirror::Class* c, const StringPiece& name) {
+ std::ostringstream msg;
+ std::string temp;
+ msg << "No field " << name << " in class " << c->GetDescriptor(&temp);
+ ThrowException("Ljava/lang/NoSuchFieldException;", c, msg.str().c_str());
+}
+
// NoSuchMethodError
void ThrowNoSuchMethodError(InvokeType type, mirror::Class* c, const StringPiece& name,
@@ -319,7 +325,7 @@
std::ostringstream msg;
msg << "Attempt to " << (is_read ? "read from" : "write to")
<< " field '" << PrettyField(field, true) << "' on a null object reference";
- ThrowException("Ljava/lang/NullPointerException;", NULL, msg.str().c_str());
+ ThrowException("Ljava/lang/NullPointerException;", nullptr, msg.str().c_str());
}
static void ThrowNullPointerExceptionForMethodAccessImpl(uint32_t method_idx,
@@ -329,7 +335,7 @@
std::ostringstream msg;
msg << "Attempt to invoke " << type << " method '"
<< PrettyMethod(method_idx, dex_file, true) << "' on a null object reference";
- ThrowException("Ljava/lang/NullPointerException;", NULL, msg.str().c_str());
+ ThrowException("Ljava/lang/NullPointerException;", nullptr, msg.str().c_str());
}
void ThrowNullPointerExceptionForMethodAccess(uint32_t method_idx,
@@ -379,7 +385,7 @@
// method is invoked at this location.
mirror::ArtMethod* invoked_method =
verifier::MethodVerifier::FindInvokedMethodAtDexPc(method, throw_dex_pc);
- if (invoked_method != NULL) {
+ if (invoked_method != nullptr) {
// NPE with precise message.
ThrowNullPointerExceptionForMethodAccess(invoked_method, kVirtual);
} else {
@@ -411,7 +417,7 @@
// field is accessed at this location.
ArtField* field =
verifier::MethodVerifier::FindAccessedFieldAtDexPc(method, throw_dex_pc);
- if (field != NULL) {
+ if (field != nullptr) {
// NPE with precise message.
ThrowNullPointerExceptionForFieldAccess(field, true /* read */);
} else {
@@ -443,7 +449,7 @@
// field is accessed at this location.
ArtField* field =
verifier::MethodVerifier::FindAccessedFieldAtDexPc(method, throw_dex_pc);
- if (field != NULL) {
+ if (field != nullptr) {
// NPE with precise message.
ThrowNullPointerExceptionForFieldAccess(field, false /* write */);
} else {
@@ -459,7 +465,7 @@
case Instruction::AGET_BYTE:
case Instruction::AGET_CHAR:
case Instruction::AGET_SHORT:
- ThrowException("Ljava/lang/NullPointerException;", NULL,
+ ThrowException("Ljava/lang/NullPointerException;", nullptr,
"Attempt to read from null array");
break;
case Instruction::APUT:
@@ -469,11 +475,11 @@
case Instruction::APUT_BYTE:
case Instruction::APUT_CHAR:
case Instruction::APUT_SHORT:
- ThrowException("Ljava/lang/NullPointerException;", NULL,
+ ThrowException("Ljava/lang/NullPointerException;", nullptr,
"Attempt to write to null array");
break;
case Instruction::ARRAY_LENGTH:
- ThrowException("Ljava/lang/NullPointerException;", NULL,
+ ThrowException("Ljava/lang/NullPointerException;", nullptr,
"Attempt to get length of null array");
break;
default: {
@@ -481,7 +487,7 @@
// message/logging is so we can improve any cases we've missed in the future.
const DexFile* dex_file =
method->GetDeclaringClass()->GetDexCache()->GetDexFile();
- ThrowException("Ljava/lang/NullPointerException;", NULL,
+ ThrowException("Ljava/lang/NullPointerException;", nullptr,
StringPrintf("Null pointer exception during instruction '%s'",
instr->DumpString(dex_file).c_str()).c_str());
break;
@@ -490,7 +496,7 @@
}
void ThrowNullPointerException(const char* msg) {
- ThrowException("Ljava/lang/NullPointerException;", NULL, msg);
+ ThrowException("Ljava/lang/NullPointerException;", nullptr, msg);
}
// RuntimeException
@@ -498,7 +504,7 @@
void ThrowRuntimeException(const char* fmt, ...) {
va_list args;
va_start(args, fmt);
- ThrowException("Ljava/lang/RuntimeException;", NULL, fmt, &args);
+ ThrowException("Ljava/lang/RuntimeException;", nullptr, fmt, &args);
va_end(args);
}
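
All of the variadic throwers above share one idiom: capture the caller's arguments with va_start, forward a va_list* to the static helper (whose args parameter defaults to nullptr for the non-variadic callers), then va_end in the same frame. A self-contained sketch of the same shape, with hypothetical names:

#include <cstdarg>
#include <cstdio>
#include <string>

// Illustrative only: mirrors the va_start/forward/va_end shape used by
// ThrowIOException and ThrowRuntimeException above.
static void ThrowFmt(const char* descriptor, const char* fmt, va_list* args = nullptr) {
  std::string msg;
  if (args != nullptr) {
    char buf[256];
    vsnprintf(buf, sizeof(buf), fmt, *args);  // format using the forwarded va_list
    msg = buf;
  } else {
    msg = fmt;  // non-variadic callers pass a plain message
  }
  fprintf(stderr, "throw %s: %s\n", descriptor, msg.c_str());
}

void ThrowMyError(const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);  // must be paired with va_end in the same frame
  ThrowFmt("Lmy/Error;", fmt, &args);
  va_end(args);
}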
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index df95cf9..49890e2 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -149,6 +149,9 @@
const StringPiece& type, const StringPiece& name)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+void ThrowNoSuchFieldException(mirror::Class* c, const StringPiece& name)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// NoSuchMethodError
void ThrowNoSuchMethodError(InvokeType type, mirror::Class* c, const StringPiece& name,
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index c074b54..0eb7f2b 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -57,6 +57,9 @@
namespace art {
+// The key identifying the debugger to update instrumentation.
+static constexpr const char* kDbgInstrumentationKey = "Debugger";
+
static const size_t kMaxAllocRecordStackDepth = 16; // Max 255.
static const size_t kDefaultNumAllocRecords = 64*1024; // Must be a power of 2. 2BE can hold 64k-1.
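
kDbgInstrumentationKey is threaded through the deoptimization calls in the hunks further below (DeoptimizeEverything, UndeoptimizeEverything, DisableDeoptimization), making the debugger one named client of the instrumentation. A minimal sketch, assuming a key-set interface for illustration (the real Instrumentation API is richer):

#include <set>
#include <string>

// Assumed interface, for illustration only: a string key lets several clients
// request full deoptimization independently and release it symmetrically.
struct KeyedDeopt {
  std::set<std::string> clients;
  void DeoptimizeEverything(const char* key) { clients.insert(key); }
  void UndeoptimizeEverything(const char* key) { clients.erase(key); }
  bool Required() const { return !clients.empty(); }
};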
@@ -174,7 +177,8 @@
jobject type_; // This is a weak global.
size_t byte_count_;
uint16_t thin_lock_id_;
- AllocRecordStackTraceElement stack_[kMaxAllocRecordStackDepth]; // Unused entries have nullptr method.
+ // Unused entries have null method.
+ AllocRecordStackTraceElement stack_[kMaxAllocRecordStackDepth];
};
class Breakpoint {
@@ -231,13 +235,29 @@
virtual ~DebugInstrumentationListener() {}
void MethodEntered(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
- uint32_t dex_pc ATTRIBUTE_UNUSED)
+ uint32_t dex_pc)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (method->IsNative()) {
      // TODO: posting location events is a suspension point but native method entry stubs aren't.
return;
}
- Dbg::UpdateDebugger(thread, this_object, method, 0, Dbg::kMethodEntry, nullptr);
+ if (IsListeningToDexPcMoved()) {
+      // We also listen to the kDexPcMoved instrumentation event, so we know DexPcMoved is going
+      // to be called right after us. To avoid sending JDWP events twice for this location, we
+      // report the event in DexPcMoved instead. However, we must remember this is a method entry,
+      // so we still send the METHOD_ENTRY event, and we can group it with other events for this
+      // location like BREAKPOINT or SINGLE_STEP (or even METHOD_EXIT if this is a RETURN
+      // instruction).
+ thread->SetDebugMethodEntry();
+ } else if (IsListeningToMethodExit() && IsReturn(method, dex_pc)) {
+      // We also listen to the kMethodExited instrumentation event and the current instruction is
+      // a RETURN, so we know MethodExited is going to be called right after us. To avoid sending
+      // JDWP events twice for this location, we report the event(s) in MethodExited. However, we
+      // must remember this is a method entry, so we still send the METHOD_ENTRY event, and we can
+      // group it with other events for this location like BREAKPOINT or SINGLE_STEP.
+ thread->SetDebugMethodEntry();
+ } else {
+ Dbg::UpdateDebugger(thread, this_object, method, 0, Dbg::kMethodEntry, nullptr);
+ }
}
void MethodExited(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
@@ -247,14 +267,20 @@
      // TODO: posting location events is a suspension point but native method entry stubs aren't.
return;
}
- Dbg::UpdateDebugger(thread, this_object, method, dex_pc, Dbg::kMethodExit, &return_value);
+ uint32_t events = Dbg::kMethodExit;
+ if (thread->IsDebugMethodEntry()) {
+ // It is also the method entry.
+ DCHECK(IsReturn(method, dex_pc));
+ events |= Dbg::kMethodEntry;
+ thread->ClearDebugMethodEntry();
+ }
+ Dbg::UpdateDebugger(thread, this_object, method, dex_pc, events, &return_value);
}
- void MethodUnwind(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
- uint32_t dex_pc)
+ void MethodUnwind(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method, uint32_t dex_pc)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // We're not registered to listen to this kind of event, so complain.
- UNUSED(thread, this_object, method, dex_pc);
LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method)
<< " " << dex_pc;
}
@@ -262,13 +288,27 @@
void DexPcMoved(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
uint32_t new_dex_pc)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Dbg::UpdateDebugger(thread, this_object, method, new_dex_pc, 0, nullptr);
+ if (IsListeningToMethodExit() && IsReturn(method, new_dex_pc)) {
+      // We also listen to the kMethodExited instrumentation event and the current instruction is
+      // a RETURN, so we know MethodExited is going to be called right after us. Like in
+      // MethodEntered, we delegate event reporting to MethodExited.
+      // Moreover, if this RETURN instruction is the only one in the method, we can send multiple
+      // JDWP events in the same packet: METHOD_ENTRY, METHOD_EXIT, BREAKPOINT and/or SINGLE_STEP.
+ // Therefore, we must not clear the debug method entry flag here.
+ } else {
+ uint32_t events = 0;
+ if (thread->IsDebugMethodEntry()) {
+ // It is also the method entry.
+ events = Dbg::kMethodEntry;
+ thread->ClearDebugMethodEntry();
+ }
+ Dbg::UpdateDebugger(thread, this_object, method, new_dex_pc, events, nullptr);
+ }
}
- void FieldRead(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
- uint32_t dex_pc, ArtField* field)
+ void FieldRead(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object,
+ mirror::ArtMethod* method, uint32_t dex_pc, ArtField* field)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- UNUSED(thread);
Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field);
}
@@ -292,6 +332,26 @@
}
private:
+ static bool IsReturn(mirror::ArtMethod* method, uint32_t dex_pc)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const DexFile::CodeItem* code_item = method->GetCodeItem();
+ const Instruction* instruction = Instruction::At(&code_item->insns_[dex_pc]);
+ return instruction->IsReturn();
+ }
+
+ static bool IsListeningToDexPcMoved() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return IsListeningTo(instrumentation::Instrumentation::kDexPcMoved);
+ }
+
+ static bool IsListeningToMethodExit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return IsListeningTo(instrumentation::Instrumentation::kMethodExited);
+ }
+
+ static bool IsListeningTo(instrumentation::Instrumentation::InstrumentationEvent event)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return (Dbg::GetInstrumentationEvents() & event) != 0;
+ }
+
DISALLOW_COPY_AND_ASSIGN(DebugInstrumentationListener);
} gDebugInstrumentationListener;
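
The listener above coalesces JDWP events so that a single location yields a single packet. A condensed, illustrative restatement of which handler ends up reporting the METHOD_ENTRY event:

// Condensed restatement of the dispatch in MethodEntered above (illustrative,
// not the real API): returns who actually reports the METHOD_ENTRY event.
enum class Reporter { kMethodEntered, kDexPcMoved, kMethodExited };

Reporter WhoReportsMethodEntry(bool listening_to_dex_pc_moved,
                               bool listening_to_method_exit,
                               bool entry_instruction_is_return) {
  if (listening_to_dex_pc_moved) {
    return Reporter::kDexPcMoved;    // entry flag set, reported on next DexPcMoved
  }
  if (listening_to_method_exit && entry_instruction_is_return) {
    return Reporter::kMethodExited;  // entry and exit grouped in one packet
  }
  return Reporter::kMethodEntered;   // no coalescing possible: report immediately
}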
@@ -351,7 +411,7 @@
}
void SingleStepControl::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
- visitor->VisitRootIfNonNull(reinterpret_cast<mirror::Object**>(&method_), root_info);
+ method_.VisitRootIfNonNull(visitor, root_info);
}
void SingleStepControl::AddDexPc(uint32_t dex_pc) {
@@ -676,7 +736,7 @@
instrumentation_events_ = 0;
}
if (RequiresDeoptimization()) {
- runtime->GetInstrumentation()->DisableDeoptimization();
+ runtime->GetInstrumentation()->DisableDeoptimization(kDbgInstrumentationKey);
}
gDebuggerActive = false;
}
@@ -714,7 +774,7 @@
mirror::Object* o = gRegistry->Get<mirror::Object*>(class_id, &error);
if (o == nullptr) {
if (error == JDWP::ERR_NONE) {
- return "NULL";
+ return "null";
} else {
return StringPrintf("invalid object %p", reinterpret_cast<void*>(class_id));
}
@@ -727,7 +787,7 @@
std::string Dbg::GetClassName(mirror::Class* klass) {
if (klass == nullptr) {
- return "NULL";
+ return "null";
}
std::string temp;
return DescriptorToName(klass->GetDescriptor(&temp));
@@ -830,8 +890,10 @@
std::vector<JDWP::ObjectId>* monitor_vector,
std::vector<uint32_t>* stack_depth_vector)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, context), current_stack_depth(0),
- monitors(monitor_vector), stack_depths(stack_depth_vector) {}
+ : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ current_stack_depth(0),
+ monitors(monitor_vector),
+ stack_depths(stack_depth_vector) {}
// TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
// annotalysis.
@@ -1409,7 +1471,7 @@
std::string Dbg::GetMethodName(JDWP::MethodId method_id) {
mirror::ArtMethod* m = FromMethodId(method_id);
if (m == nullptr) {
- return "NULL";
+ return "null";
}
return m->GetName();
}
@@ -1417,7 +1479,7 @@
std::string Dbg::GetFieldName(JDWP::FieldId field_id) {
ArtField* f = FromFieldId(field_id);
if (f == nullptr) {
- return "NULL";
+ return "null";
}
return f->GetName();
}
@@ -1721,7 +1783,7 @@
if (receiver_class == nullptr && o != nullptr) {
receiver_class = o->GetClass();
}
- // TODO: should we give up now if receiver_class is nullptr?
+ // TODO: should we give up now if receiver_class is null?
if (receiver_class != nullptr && !f->GetDeclaringClass()->IsAssignableFrom(receiver_class)) {
LOG(INFO) << "ERR_INVALID_FIELDID: " << PrettyField(f) << " " << PrettyClass(receiver_class);
return JDWP::ERR_INVALID_FIELDID;
@@ -2081,6 +2143,7 @@
case kWaitingForDebuggerToAttach:
case kWaitingForDeoptimization:
case kWaitingForGcToComplete:
+ case kWaitingForGetObjectsAllocated:
case kWaitingForJniOnLoad:
case kWaitingForMethodTracingStart:
case kWaitingForSignalCatcherOutput:
@@ -2176,7 +2239,7 @@
}
mirror::Object* peer = t->GetPeer();
if (peer == nullptr) {
- // peer might be NULL if the thread is still starting up. We can't tell the debugger about
+ // peer might be null if the thread is still starting up. We can't tell the debugger about
// this thread yet.
// TODO: if we identified threads to the debugger by their Thread*
// rather than their peer's mirror::Object*, we could fix this.
@@ -2192,7 +2255,8 @@
static int GetStackDepth(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
struct CountStackDepthVisitor : public StackVisitor {
explicit CountStackDepthVisitor(Thread* thread_in)
- : StackVisitor(thread_in, nullptr), depth(0) {}
+ : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ depth(0) {}
// TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
// annotalysis.
@@ -2232,8 +2296,11 @@
GetFrameVisitor(Thread* thread, size_t start_frame_in, size_t frame_count_in,
JDWP::ExpandBuf* buf_in)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, nullptr), depth_(0),
- start_frame_(start_frame_in), frame_count_(frame_count_in), buf_(buf_in) {
+ : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ depth_(0),
+ start_frame_(start_frame_in),
+ frame_count_(frame_count_in),
+ buf_(buf_in) {
expandBufAdd4BE(buf_, frame_count_);
}
@@ -2350,7 +2417,9 @@
struct GetThisVisitor : public StackVisitor {
GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id_in)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, context), this_object(nullptr), frame_id(frame_id_in) {}
+ : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ this_object(nullptr),
+ frame_id(frame_id_in) {}
// TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
// annotalysis.
@@ -2390,7 +2459,9 @@
public:
FindFrameVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, context), frame_id_(frame_id), error_(JDWP::ERR_INVALID_FRAMEID) {}
+ : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ frame_id_(frame_id),
+ error_(JDWP::ERR_INVALID_FRAMEID) {}
// TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
// annotalysis.
@@ -2774,7 +2845,7 @@
public:
CatchLocationFinder(Thread* self, const Handle<mirror::Throwable>& exception, Context* context)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(self, context),
+ : StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
self_(self),
exception_(exception),
handle_scope_(self),
@@ -2858,10 +2929,11 @@
if (!IsDebuggerActive()) {
return;
}
- StackHandleScope<1> handle_scope(Thread::Current());
+ Thread* const self = Thread::Current();
+ StackHandleScope<1> handle_scope(self);
Handle<mirror::Throwable> h_exception(handle_scope.NewHandle(exception_object));
std::unique_ptr<Context> context(Context::Create());
- CatchLocationFinder clf(Thread::Current(), h_exception, context.get());
+ CatchLocationFinder clf(self, h_exception, context.get());
clf.WalkStack(/* include_transitions */ false);
JDWP::EventLocation exception_throw_location;
SetEventLocation(&exception_throw_location, clf.GetThrowMethod(), clf.GetThrowDexPc());
@@ -2997,12 +3069,12 @@
break;
case DeoptimizationRequest::kFullDeoptimization:
VLOG(jdwp) << "Deoptimize the world ...";
- instrumentation->DeoptimizeEverything();
+ instrumentation->DeoptimizeEverything(kDbgInstrumentationKey);
VLOG(jdwp) << "Deoptimize the world DONE";
break;
case DeoptimizationRequest::kFullUndeoptimization:
VLOG(jdwp) << "Undeoptimize the world ...";
- instrumentation->UndeoptimizeEverything();
+ instrumentation->UndeoptimizeEverything(kDbgInstrumentationKey);
VLOG(jdwp) << "Undeoptimize the world DONE";
break;
case DeoptimizationRequest::kSelectiveDeoptimization:
@@ -3390,7 +3462,7 @@
}
bool Dbg::IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, mirror::ArtMethod* m) {
- // The upcall can be nullptr and in that case we don't need to do anything.
+ // The upcall can be null and in that case we don't need to do anything.
if (m == nullptr) {
return false;
}
@@ -3427,7 +3499,7 @@
}
bool Dbg::IsForcedInterpreterNeededForUpcallImpl(Thread* thread, mirror::ArtMethod* m) {
- // The upcall can be nullptr and in that case we don't need to do anything.
+ // The upcall can be null and in that case we don't need to do anything.
if (m == nullptr) {
return false;
}
@@ -3522,8 +3594,10 @@
// is for step-out.
struct SingleStepStackVisitor : public StackVisitor {
explicit SingleStepStackVisitor(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, nullptr), stack_depth(0), method(nullptr), line_number(-1) {
- }
+ : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ stack_depth(0),
+ method(nullptr),
+ line_number(-1) {}
// TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
// annotalysis.
@@ -3900,7 +3974,8 @@
CHECK_EQ(sizeof(jvalue), sizeof(uint64_t));
- JValue result = InvokeWithJValues(soa, pReq->receiver.Read(), soa.EncodeMethod(m.Get()),
+ ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(pReq->receiver.Read()));
+ JValue result = InvokeWithJValues(soa, ref.get(), soa.EncodeMethod(m.Get()),
reinterpret_cast<jvalue*>(pReq->arg_values));
pReq->result_tag = BasicTagFromDescriptor(m.Get()->GetShorty());
@@ -3908,7 +3983,7 @@
Handle<mirror::Object> object_result = hs.NewHandle(is_object_result ? result.GetL() : nullptr);
Handle<mirror::Throwable> exception = hs.NewHandle(soa.Self()->GetException());
soa.Self()->ClearException();
- pReq->exception = gRegistry->Add(exception.Get());
+ pReq->exception = gRegistry->Add(exception);
if (pReq->exception != 0) {
VLOG(jdwp) << " JDWP invocation returning with exception=" << exception.Get()
<< " " << exception->Dump();
@@ -4077,7 +4152,7 @@
StackHandleScope<1> hs(soa.Self());
Handle<mirror::String> name(hs.NewHandle(t->GetThreadName(soa)));
size_t char_count = (name.Get() != nullptr) ? name->GetLength() : 0;
- const jchar* chars = (name.Get() != nullptr) ? name->GetCharArray()->GetData() : nullptr;
+ const jchar* chars = (name.Get() != nullptr) ? name->GetValue() : nullptr;
std::vector<uint8_t> bytes;
JDWP::Append4BE(bytes, t->GetThreadId());
@@ -4636,7 +4711,9 @@
struct AllocRecordStackVisitor : public StackVisitor {
AllocRecordStackVisitor(Thread* thread, AllocRecord* record_in)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, nullptr), record(record_in), depth(0) {}
+ : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ record(record_in),
+ depth(0) {}
// TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
// annotalysis.
@@ -4773,7 +4850,7 @@
for (const std::string& str : table_) {
const char* s = str.c_str();
size_t s_len = CountModifiedUtf8Chars(s);
- std::unique_ptr<uint16_t> s_utf16(new uint16_t[s_len]);
+ std::unique_ptr<uint16_t[]> s_utf16(new uint16_t[s_len]);
ConvertModifiedUtf8ToUtf16(s_utf16.get(), s);
JDWP::AppendUtf16BE(bytes, s_utf16.get(), s_len);
}
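
The one-character fix above is a real correctness fix: the non-array std::unique_ptr<uint16_t> would call plain delete on memory allocated with new[], which is undefined behavior, while the array form calls delete[]. A minimal illustration:

#include <memory>

void Correct(size_t n) {
  // Array specialization: the deleter is delete[], matching the new[] below,
  // and operator[] is available.
  std::unique_ptr<uint16_t[]> buf(new uint16_t[n]);
  buf[0] = 42;
}
// By contrast, std::unique_ptr<uint16_t> p(new uint16_t[n]); would run plain
// delete on new[] memory, which is undefined behavior.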
diff --git a/runtime/debugger.h b/runtime/debugger.h
index c287121..811d345 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -68,7 +68,7 @@
GcRoot<mirror::Class> klass;
GcRoot<mirror::ArtMethod> method;
const uint32_t arg_count;
- uint64_t* const arg_values; // will be NULL if arg_count_ == 0
+ uint64_t* const arg_values; // will be null if arg_count_ == 0
const uint32_t options;
/* result */
@@ -109,8 +109,8 @@
return stack_depth_;
}
- mirror::ArtMethod* GetMethod() const {
- return method_;
+ mirror::ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return method_.Read();
}
const std::set<uint32_t>& GetDexPcs() const {
@@ -138,7 +138,7 @@
// set of DEX pcs associated to the source line number where the suspension occurred.
// This is used to support SD_INTO and SD_OVER single-step depths so we detect when a single-step
// causes the execution of an instruction in a different method or at a different line number.
- mirror::ArtMethod* method_;
+ GcRoot<mirror::ArtMethod> method_;
std::set<uint32_t> dex_pcs_;
DISALLOW_COPY_AND_ASSIGN(SingleStepControl);
@@ -714,6 +714,10 @@
static JDWP::JdwpState* GetJdwpState();
+ static uint32_t GetInstrumentationEvents() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return instrumentation_events_;
+ }
+
private:
static JDWP::JdwpError GetLocalValue(const StackVisitor& visitor,
ScopedObjectAccessUnchecked& soa, int slot,
diff --git a/runtime/dex_file-inl.h b/runtime/dex_file-inl.h
index c68fdca..760006a 100644
--- a/runtime/dex_file-inl.h
+++ b/runtime/dex_file-inl.h
@@ -32,7 +32,7 @@
inline const char* DexFile::GetStringDataAndUtf16Length(const StringId& string_id,
uint32_t* utf16_length) const {
- DCHECK(utf16_length != NULL) << GetLocation();
+ DCHECK(utf16_length != nullptr) << GetLocation();
const uint8_t* ptr = begin_ + string_id.string_data_off_;
*utf16_length = DecodeUnsignedLeb128(&ptr);
return reinterpret_cast<const char*>(ptr);
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 03a47a3..dfe5a04 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -57,7 +57,7 @@
const uint8_t DexFile::kDexMagicVersion[] = { '0', '3', '5', '\0' };
static int OpenAndReadMagic(const char* filename, uint32_t* magic, std::string* error_msg) {
- CHECK(magic != NULL);
+ CHECK(magic != nullptr);
ScopedFd fd(open(filename, O_RDONLY, 0));
if (fd.get() == -1) {
*error_msg = StringPrintf("Unable to open '%s' : %s", filename, strerror(errno));
@@ -77,7 +77,7 @@
}
bool DexFile::GetChecksum(const char* filename, uint32_t* checksum, std::string* error_msg) {
- CHECK(checksum != NULL);
+ CHECK(checksum != nullptr);
uint32_t magic;
// Strip ":...", which is the location
@@ -98,14 +98,15 @@
return false;
}
if (IsZipMagic(magic)) {
- std::unique_ptr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(fd.release(), filename, error_msg));
- if (zip_archive.get() == NULL) {
+ std::unique_ptr<ZipArchive> zip_archive(
+ ZipArchive::OpenFromFd(fd.release(), filename, error_msg));
+ if (zip_archive.get() == nullptr) {
*error_msg = StringPrintf("Failed to open zip archive '%s' (error msg: %s)", file_part,
error_msg->c_str());
return false;
}
std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(zip_entry_name, error_msg));
- if (zip_entry.get() == NULL) {
+ if (zip_entry.get() == nullptr) {
*error_msg = StringPrintf("Zip archive '%s' doesn't contain %s (error msg: %s)", file_part,
zip_entry_name, error_msg->c_str());
return false;
@@ -114,8 +115,9 @@
return true;
}
if (IsDexMagic(magic)) {
- std::unique_ptr<const DexFile> dex_file(DexFile::OpenFile(fd.release(), filename, false, error_msg));
- if (dex_file.get() == NULL) {
+ std::unique_ptr<const DexFile> dex_file(
+ DexFile::OpenFile(fd.release(), filename, false, error_msg));
+ if (dex_file.get() == nullptr) {
return false;
}
*checksum = dex_file->GetHeader().checksum_;
@@ -127,7 +129,7 @@
bool DexFile::Open(const char* filename, const char* location, std::string* error_msg,
std::vector<std::unique_ptr<const DexFile>>* dex_files) {
- DCHECK(dex_files != nullptr) << "DexFile::Open: out-param is NULL";
+ DCHECK(dex_files != nullptr) << "DexFile::Open: out-param is nullptr";
uint32_t magic;
ScopedFd fd(OpenAndReadMagic(filename, &magic, error_msg));
if (fd.get() == -1) {
@@ -151,8 +153,33 @@
return false;
}
+static bool ContainsClassesDex(int fd, const char* filename) {
+ std::string error_msg;
+ std::unique_ptr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(fd, filename, &error_msg));
+ if (zip_archive.get() == nullptr) {
+ return false;
+ }
+ std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(DexFile::kClassesDex, &error_msg));
+ return (zip_entry.get() != nullptr);
+}
+
+bool DexFile::MaybeDex(const char* filename) {
+ uint32_t magic;
+ std::string error_msg;
+ ScopedFd fd(OpenAndReadMagic(filename, &magic, &error_msg));
+ if (fd.get() == -1) {
+ return false;
+ }
+ if (IsZipMagic(magic)) {
+ return ContainsClassesDex(fd.release(), filename);
+ } else if (IsDexMagic(magic)) {
+ return true;
+ }
+ return false;
+}
+
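
Per the documentation added in dex_file.h further below, MaybeDex is a cheap pre-check: a false result means Open cannot succeed, while true is no guarantee. A hedged usage sketch (the caller is hypothetical):

#include "dex_file.h"  // assumed include for this sketch

// Hypothetical caller: filter out files that cannot possibly be a dex file
// or a zip containing classes.dex before doing any expensive work.
bool WorthOpening(const char* filename) {
  if (!art::DexFile::MaybeDex(filename)) {
    return false;  // wrong magic: Open() would certainly fail
  }
  return true;  // plausible; Open() may still fail on verification etc.
}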
int DexFile::GetPermissions() const {
- if (mem_map_.get() == NULL) {
+ if (mem_map_.get() == nullptr) {
return 0;
} else {
return mem_map_->GetProtect();
@@ -165,7 +192,7 @@
bool DexFile::EnableWrite() const {
CHECK(IsReadOnly());
- if (mem_map_.get() == NULL) {
+ if (mem_map_.get() == nullptr) {
return false;
} else {
return mem_map_->Protect(PROT_READ | PROT_WRITE);
@@ -174,7 +201,7 @@
bool DexFile::DisableWrite() const {
CHECK(!IsReadOnly());
- if (mem_map_.get() == NULL) {
+ if (mem_map_.get() == nullptr) {
return false;
} else {
return mem_map_->Protect(PROT_READ);
@@ -233,7 +260,7 @@
bool DexFile::OpenZip(int fd, const std::string& location, std::string* error_msg,
std::vector<std::unique_ptr<const DexFile>>* dex_files) {
- DCHECK(dex_files != nullptr) << "DexFile::OpenZip: out-param is NULL";
+ DCHECK(dex_files != nullptr) << "DexFile::OpenZip: out-param is nullptr";
std::unique_ptr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(fd, location.c_str(), error_msg));
if (zip_archive.get() == nullptr) {
DCHECK(!error_msg->empty());
@@ -260,12 +287,12 @@
ZipOpenErrorCode* error_code) {
CHECK(!location.empty());
std::unique_ptr<ZipEntry> zip_entry(zip_archive.Find(entry_name, error_msg));
- if (zip_entry.get() == NULL) {
+ if (zip_entry.get() == nullptr) {
*error_code = ZipOpenErrorCode::kEntryNotFound;
return nullptr;
}
std::unique_ptr<MemMap> map(zip_entry->ExtractToMemMap(location.c_str(), entry_name, error_msg));
- if (map.get() == NULL) {
+ if (map.get() == nullptr) {
*error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", entry_name, location.c_str(),
error_msg->c_str());
*error_code = ZipOpenErrorCode::kExtractToMemoryError;
@@ -294,10 +321,16 @@
return dex_file;
}
+// Technically we do not have a limitation with respect to the number of dex files that can be in a
+// multidex APK. However, it's bad practice, as each dex file requires its own tables for symbols
+// (types, classes, methods, ...) and dex caches. So warn the user that we open a zip with what
+// seems an excessive number.
+static constexpr size_t kWarnOnManyDexFilesThreshold = 100;
+
bool DexFile::OpenFromZip(const ZipArchive& zip_archive, const std::string& location,
std::string* error_msg,
std::vector<std::unique_ptr<const DexFile>>* dex_files) {
- DCHECK(dex_files != nullptr) << "DexFile::OpenFromZip: out-param is NULL";
+ DCHECK(dex_files != nullptr) << "DexFile::OpenFromZip: out-param is nullptr";
ZipOpenErrorCode error_code;
std::unique_ptr<const DexFile> dex_file(Open(zip_archive, kClassesDex, location, error_msg,
&error_code));
@@ -308,14 +341,13 @@
dex_files->push_back(std::move(dex_file));
// Now try some more.
- size_t i = 2;
// We could try to avoid std::string allocations by working on a char array directly. As we
// do not expect a lot of iterations, this seems too involved and brittle.
- while (i < 100) {
- std::string name = StringPrintf("classes%zu.dex", i);
- std::string fake_location = location + kMultiDexSeparator + name;
+ for (size_t i = 1; ; ++i) {
+ std::string name = GetMultiDexClassesDexName(i);
+ std::string fake_location = GetMultiDexLocation(i, location.c_str());
std::unique_ptr<const DexFile> next_dex_file(Open(zip_archive, name.c_str(), fake_location,
error_msg, &error_code));
if (next_dex_file.get() == nullptr) {
@@ -327,7 +359,16 @@
dex_files->push_back(std::move(next_dex_file));
}
- i++;
+ if (i == kWarnOnManyDexFilesThreshold) {
+ LOG(WARNING) << location << " has in excess of " << kWarnOnManyDexFilesThreshold
+ << " dex files. Please consider coalescing and shrinking the number to "
+                      "avoid runtime overhead.";
+ }
+
+ if (i == std::numeric_limits<size_t>::max()) {
+ LOG(ERROR) << "Overflow in number of dex files!";
+ break;
+ }
}
return true;
@@ -371,7 +412,7 @@
find_class_def_misses_(0),
class_def_index_(nullptr),
oat_dex_file_(oat_dex_file) {
- CHECK(begin_ != NULL) << GetLocation();
+ CHECK(begin_ != nullptr) << GetLocation();
CHECK_GT(size_, 0U) << GetLocation();
}
@@ -487,7 +528,7 @@
return &class_def;
}
}
- return NULL;
+ return nullptr;
}
const DexFile::FieldId* DexFile::FindFieldId(const DexFile::TypeId& declaring_klass,
@@ -522,7 +563,7 @@
}
}
}
- return NULL;
+ return nullptr;
}
const DexFile::MethodId* DexFile::FindMethodId(const DexFile::TypeId& declaring_klass,
@@ -557,7 +598,7 @@
}
}
}
- return NULL;
+ return nullptr;
}
const DexFile::StringId* DexFile::FindStringId(const char* string) const {
@@ -576,7 +617,7 @@
return &str_id;
}
}
- return NULL;
+ return nullptr;
}
const DexFile::StringId* DexFile::FindStringId(const uint16_t* string, size_t length) const {
@@ -595,7 +636,7 @@
return &str_id;
}
}
- return NULL;
+ return nullptr;
}
const DexFile::TypeId* DexFile::FindTypeId(uint32_t string_idx) const {
@@ -612,7 +653,7 @@
return &type_id;
}
}
- return NULL;
+ return nullptr;
}
const DexFile::ProtoId* DexFile::FindProtoId(uint16_t return_type_idx,
@@ -648,7 +689,7 @@
return &proto;
}
}
- return NULL;
+ return nullptr;
}
// Given a signature place the type ids into the given vector
@@ -687,11 +728,11 @@
// TODO: avoid creating a std::string just to get a 0-terminated char array
std::string descriptor(signature.data() + start_offset, offset - start_offset);
const DexFile::StringId* string_id = FindStringId(descriptor.c_str());
- if (string_id == NULL) {
+ if (string_id == nullptr) {
return false;
}
const DexFile::TypeId* type_id = FindTypeId(GetIndexForStringId(*string_id));
- if (type_id == NULL) {
+ if (type_id == nullptr) {
return false;
}
uint16_t type_idx = GetIndexForTypeId(*type_id);
@@ -713,7 +754,7 @@
return Signature::NoSignature();
}
const ProtoId* proto_id = FindProtoId(return_type_idx, param_type_indices);
- if (proto_id == NULL) {
+ if (proto_id == nullptr) {
return Signature::NoSignature();
}
return Signature(this, *proto_id);
@@ -727,12 +768,12 @@
}
const CodeItem* code_item = GetCodeItem(method->GetCodeItemOffset());
- DCHECK(code_item != NULL) << PrettyMethod(method) << " " << GetLocation();
+ DCHECK(code_item != nullptr) << PrettyMethod(method) << " " << GetLocation();
// A method with no line number info should return -1
LineNumFromPcContext context(rel_pc, -1);
DecodeDebugInfo(code_item, method->IsStatic(), method->GetDexMethodIndex(), LineNumForPcCb,
- NULL, &context);
+ nullptr, &context);
return context.line_num_;
}
@@ -771,19 +812,20 @@
void DexFile::DecodeDebugInfo0(const CodeItem* code_item, bool is_static, uint32_t method_idx,
DexDebugNewPositionCb position_cb, DexDebugNewLocalCb local_cb,
- void* context, const uint8_t* stream, LocalInfo* local_in_reg) const {
+ void* context, const uint8_t* stream, LocalInfo* local_in_reg)
+ const {
uint32_t line = DecodeUnsignedLeb128(&stream);
uint32_t parameters_size = DecodeUnsignedLeb128(&stream);
uint16_t arg_reg = code_item->registers_size_ - code_item->ins_size_;
uint32_t address = 0;
- bool need_locals = (local_cb != NULL);
+ bool need_locals = (local_cb != nullptr);
if (!is_static) {
if (need_locals) {
const char* descriptor = GetMethodDeclaringClassDescriptor(GetMethodId(method_idx));
local_in_reg[arg_reg].name_ = "this";
local_in_reg[arg_reg].descriptor_ = descriptor;
- local_in_reg[arg_reg].signature_ = NULL;
+ local_in_reg[arg_reg].signature_ = nullptr;
local_in_reg[arg_reg].start_address_ = 0;
local_in_reg[arg_reg].is_live_ = true;
}
@@ -803,7 +845,7 @@
const char* name = StringDataByIdx(id);
local_in_reg[arg_reg].name_ = name;
local_in_reg[arg_reg].descriptor_ = descriptor;
- local_in_reg[arg_reg].signature_ = NULL;
+ local_in_reg[arg_reg].signature_ = nullptr;
local_in_reg[arg_reg].start_address_ = address;
local_in_reg[arg_reg].is_live_ = true;
}
@@ -895,7 +937,7 @@
}
if (need_locals) {
- if (local_in_reg[reg].name_ == NULL || local_in_reg[reg].descriptor_ == NULL) {
+ if (local_in_reg[reg].name_ == nullptr || local_in_reg[reg].descriptor_ == nullptr) {
LOG(ERROR) << "invalid stream - no name or descriptor in " << GetLocation();
return;
}
@@ -920,7 +962,7 @@
address += adjopcode / DBG_LINE_RANGE;
line += DBG_LINE_BASE + (adjopcode % DBG_LINE_RANGE);
- if (position_cb != NULL) {
+ if (position_cb != nullptr) {
if (position_cb(context, address, line)) {
// early exit
return;
@@ -937,14 +979,16 @@
void* context) const {
DCHECK(code_item != nullptr);
const uint8_t* stream = GetDebugInfoStream(code_item);
- std::unique_ptr<LocalInfo[]> local_in_reg(local_cb != NULL ?
+ std::unique_ptr<LocalInfo[]> local_in_reg(local_cb != nullptr ?
new LocalInfo[code_item->registers_size_] :
- NULL);
- if (stream != NULL) {
- DecodeDebugInfo0(code_item, is_static, method_idx, position_cb, local_cb, context, stream, &local_in_reg[0]);
+ nullptr);
+ if (stream != nullptr) {
+ DecodeDebugInfo0(code_item, is_static, method_idx, position_cb, local_cb, context, stream,
+ &local_in_reg[0]);
}
for (int reg = 0; reg < code_item->registers_size_; reg++) {
- InvokeLocalCbIfLive(context, reg, code_item->insns_size_in_code_units_, &local_in_reg[0], local_cb);
+ InvokeLocalCbIfLive(context, reg, code_item->insns_size_in_code_units_, &local_in_reg[0],
+ local_cb);
}
}
@@ -968,11 +1012,19 @@
return strrchr(location, kMultiDexSeparator) != nullptr;
}
-std::string DexFile::GetMultiDexClassesDexName(size_t number, const char* dex_location) {
- if (number == 0) {
+std::string DexFile::GetMultiDexClassesDexName(size_t index) {
+ if (index == 0) {
+ return "classes.dex";
+ } else {
+ return StringPrintf("classes%zu.dex", index + 1);
+ }
+}
+
+std::string DexFile::GetMultiDexLocation(size_t index, const char* dex_location) {
+ if (index == 0) {
return dex_location;
} else {
- return StringPrintf("%s" kMultiDexSeparatorString "classes%zu.dex", dex_location, number + 1);
+ return StringPrintf("%s" kMultiDexSeparatorString "classes%zu.dex", dex_location, index + 1);
}
}
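
The split above separates the entry name inside the zip from the synthetic dex location. Worked examples, mirroring the expectations in the dex_file_test.cc hunks below (assert is used instead of the gtest macros so the sketch stands alone; requires dex_file.h):

#include <cassert>
#include <string>

void MultiDexNamingExamples() {
  assert(art::DexFile::GetMultiDexClassesDexName(0) == "classes.dex");
  assert(art::DexFile::GetMultiDexClassesDexName(1) == "classes2.dex");  // index + 1
  assert(art::DexFile::GetMultiDexLocation(0, "/a/b.jar") == "/a/b.jar");
  assert(art::DexFile::GetMultiDexLocation(1, "/a/b.jar") == "/a/b.jar:classes2.dex");
}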
@@ -1051,7 +1103,7 @@
// Decodes the header section from the class data bytes.
void ClassDataItemIterator::ReadClassDataHeader() {
- CHECK(ptr_pos_ != NULL);
+ CHECK(ptr_pos_ != nullptr);
header_.static_fields_size_ = DecodeUnsignedLeb128(&ptr_pos_);
header_.instance_fields_size_ = DecodeUnsignedLeb128(&ptr_pos_);
header_.direct_methods_size_ = DecodeUnsignedLeb128(&ptr_pos_);
@@ -1129,17 +1181,16 @@
return val;
}
-EncodedStaticFieldValueIterator::EncodedStaticFieldValueIterator(const DexFile& dex_file,
- Handle<mirror::DexCache>* dex_cache,
- Handle<mirror::ClassLoader>* class_loader,
- ClassLinker* linker,
- const DexFile::ClassDef& class_def)
+EncodedStaticFieldValueIterator::EncodedStaticFieldValueIterator(
+ const DexFile& dex_file, Handle<mirror::DexCache>* dex_cache,
+ Handle<mirror::ClassLoader>* class_loader, ClassLinker* linker,
+ const DexFile::ClassDef& class_def)
: dex_file_(dex_file), dex_cache_(dex_cache), class_loader_(class_loader), linker_(linker),
array_size_(), pos_(-1), type_(kByte) {
DCHECK(dex_cache != nullptr);
DCHECK(class_loader != nullptr);
ptr_ = dex_file.GetEncodedStaticFieldValuesArray(class_def);
- if (ptr_ == NULL) {
+ if (ptr_ == nullptr) {
array_size_ = 0;
} else {
array_size_ = DecodeUnsignedLeb128(&ptr_);
@@ -1199,7 +1250,7 @@
UNIMPLEMENTED(FATAL) << ": type " << type_;
UNREACHABLE();
case kNull:
- jval_.l = NULL;
+ jval_.l = nullptr;
width = 0;
break;
default:
@@ -1212,7 +1263,8 @@
template<bool kTransactionActive>
void EncodedStaticFieldValueIterator::ReadValueToField(ArtField* field) const {
switch (type_) {
- case kBoolean: field->SetBoolean<kTransactionActive>(field->GetDeclaringClass(), jval_.z); break;
+ case kBoolean: field->SetBoolean<kTransactionActive>(field->GetDeclaringClass(), jval_.z);
+ break;
case kByte: field->SetByte<kTransactionActive>(field->GetDeclaringClass(), jval_.b); break;
case kShort: field->SetShort<kTransactionActive>(field->GetDeclaringClass(), jval_.s); break;
case kChar: field->SetChar<kTransactionActive>(field->GetDeclaringClass(), jval_.c); break;
@@ -1220,7 +1272,7 @@
case kLong: field->SetLong<kTransactionActive>(field->GetDeclaringClass(), jval_.j); break;
case kFloat: field->SetFloat<kTransactionActive>(field->GetDeclaringClass(), jval_.f); break;
case kDouble: field->SetDouble<kTransactionActive>(field->GetDeclaringClass(), jval_.d); break;
- case kNull: field->SetObject<kTransactionActive>(field->GetDeclaringClass(), NULL); break;
+ case kNull: field->SetObject<kTransactionActive>(field->GetDeclaringClass(), nullptr); break;
case kString: {
mirror::String* resolved = linker_->ResolveString(dex_file_, jval_.i, *dex_cache_);
field->SetObject<kTransactionActive>(field->GetDeclaringClass(), resolved);
@@ -1275,7 +1327,7 @@
Init(DexFile::GetCatchHandlerData(code_item, offset));
} else {
// Not found, initialize as empty
- current_data_ = NULL;
+ current_data_ = nullptr;
remaining_count_ = -1;
catch_all_ = false;
DCHECK(!HasNext());
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 5bdd9b6..84eaa4a 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -388,13 +388,17 @@
static bool Open(const char* filename, const char* location, std::string* error_msg,
std::vector<std::unique_ptr<const DexFile>>* dex_files);
+ // Checks whether the given file has the dex magic, or is a zip file with a classes.dex entry.
+ // If this function returns false, Open will not succeed. The inverse is not true, however.
+ static bool MaybeDex(const char* filename);
+
// Opens .dex file, backed by existing memory
static std::unique_ptr<const DexFile> Open(const uint8_t* base, size_t size,
const std::string& location,
uint32_t location_checksum,
const OatDexFile* oat_dex_file,
std::string* error_msg) {
- return OpenMemory(base, size, location, location_checksum, NULL, oat_dex_file, error_msg);
+ return OpenMemory(base, size, location, location_checksum, nullptr, oat_dex_file, error_msg);
}
// Open all classesXXX.dex files from a zip archive.
@@ -448,7 +452,7 @@
}
const Header& GetHeader() const {
- DCHECK(header_ != NULL) << GetLocation();
+ DCHECK(header_ != nullptr) << GetLocation();
return *header_;
}
@@ -463,7 +467,7 @@
// Returns the number of string identifiers in the .dex file.
size_t NumStringIds() const {
- DCHECK(header_ != NULL) << GetLocation();
+ DCHECK(header_ != nullptr) << GetLocation();
return header_->string_ids_size_;
}
@@ -495,7 +499,7 @@
const char* StringDataAndUtf16LengthByIdx(uint32_t idx, uint32_t* utf16_length) const {
if (idx == kDexNoIndex) {
*utf16_length = 0;
- return NULL;
+ return nullptr;
}
const StringId& string_id = GetStringId(idx);
return GetStringDataAndUtf16Length(string_id, utf16_length);
@@ -514,7 +518,7 @@
// Returns the number of type identifiers in the .dex file.
uint32_t NumTypeIds() const {
- DCHECK(header_ != NULL) << GetLocation();
+ DCHECK(header_ != nullptr) << GetLocation();
return header_->type_ids_size_;
}
@@ -553,7 +557,7 @@
// Returns the number of field identifiers in the .dex file.
size_t NumFieldIds() const {
- DCHECK(header_ != NULL) << GetLocation();
+ DCHECK(header_ != nullptr) << GetLocation();
return header_->field_ids_size_;
}
@@ -593,7 +597,7 @@
// Returns the number of method identifiers in the .dex file.
size_t NumMethodIds() const {
- DCHECK(header_ != NULL) << GetLocation();
+ DCHECK(header_ != nullptr) << GetLocation();
return header_->method_ids_size_;
}
@@ -643,7 +647,7 @@
}
// Returns the number of class definitions in the .dex file.
uint32_t NumClassDefs() const {
- DCHECK(header_ != NULL) << GetLocation();
+ DCHECK(header_ != nullptr) << GetLocation();
return header_->class_defs_size_;
}
@@ -673,7 +677,7 @@
const TypeList* GetInterfacesList(const ClassDef& class_def) const {
if (class_def.interfaces_off_ == 0) {
- return NULL;
+ return nullptr;
} else {
const uint8_t* addr = begin_ + class_def.interfaces_off_;
return reinterpret_cast<const TypeList*>(addr);
@@ -683,7 +687,7 @@
// Returns a pointer to the raw memory mapped class_data_item
const uint8_t* GetClassData(const ClassDef& class_def) const {
if (class_def.class_data_off_ == 0) {
- return NULL;
+ return nullptr;
} else {
return begin_ + class_def.class_data_off_;
}
@@ -692,7 +696,7 @@
//
const CodeItem* GetCodeItem(const uint32_t code_off) const {
if (code_off == 0) {
- return NULL; // native or abstract method
+ return nullptr; // native or abstract method
} else {
const uint8_t* addr = begin_ + code_off;
return reinterpret_cast<const CodeItem*>(addr);
@@ -705,7 +709,7 @@
// Returns the number of prototype identifiers in the .dex file.
size_t NumProtoIds() const {
- DCHECK(header_ != NULL) << GetLocation();
+ DCHECK(header_ != nullptr) << GetLocation();
return header_->proto_ids_size_;
}
@@ -745,7 +749,7 @@
const TypeList* GetProtoParameters(const ProtoId& proto_id) const {
if (proto_id.parameters_off_ == 0) {
- return NULL;
+ return nullptr;
} else {
const uint8_t* addr = begin_ + proto_id.parameters_off_;
return reinterpret_cast<const TypeList*>(addr);
@@ -778,7 +782,7 @@
// Get the pointer to the start of the debugging data
const uint8_t* GetDebugInfoStream(const CodeItem* code_item) const {
if (code_item->debug_info_off_ == 0) {
- return NULL;
+ return nullptr;
} else {
return begin_ + code_item->debug_info_off_;
}
@@ -818,7 +822,8 @@
struct LocalInfo {
LocalInfo()
- : name_(NULL), descriptor_(NULL), signature_(NULL), start_address_(0), is_live_(false) {}
+ : name_(nullptr), descriptor_(nullptr), signature_(nullptr), start_address_(0),
+ is_live_(false) {}
const char* name_; // E.g., list
const char* descriptor_; // E.g., Ljava/util/LinkedList;
@@ -841,10 +846,10 @@
void InvokeLocalCbIfLive(void* context, int reg, uint32_t end_address,
LocalInfo* local_in_reg, DexDebugNewLocalCb local_cb) const {
- if (local_cb != NULL && local_in_reg[reg].is_live_) {
+ if (local_cb != nullptr && local_in_reg[reg].is_live_) {
local_cb(context, reg, local_in_reg[reg].start_address_, end_address,
local_in_reg[reg].name_, local_in_reg[reg].descriptor_,
- local_in_reg[reg].signature_ != NULL ? local_in_reg[reg].signature_ : "");
+ local_in_reg[reg].signature_ != nullptr ? local_in_reg[reg].signature_ : "");
}
}
@@ -865,7 +870,7 @@
const char* GetSourceFile(const ClassDef& class_def) const {
if (class_def.source_file_idx_ == 0xffffffff) {
- return NULL;
+ return nullptr;
} else {
return StringDataByIdx(class_def.source_file_idx_);
}
@@ -887,7 +892,13 @@
return size_;
}
- static std::string GetMultiDexClassesDexName(size_t number, const char* dex_location);
+ // Return the name of the index-th classes.dex in a multidex zip file. This is classes.dex for
+  // index == 0, and classes{index + 1}.dex otherwise.
+ static std::string GetMultiDexClassesDexName(size_t index);
+
+ // Return the (possibly synthetic) dex location for a multidex entry. This is dex_location for
+  // index == 0, and dex_location + multi-dex-separator + GetMultiDexClassesDexName(index)
+  // otherwise.
+ static std::string GetMultiDexLocation(size_t index, const char* dex_location);
// Returns the canonical form of the given dex location.
//
@@ -926,7 +937,7 @@
kVerifyError
};
- // Opens .dex file from the entry_name in a zip archive. error_code is undefined when non-nullptr
+  // Opens .dex file from the entry_name in a zip archive. error_code is undefined on a non-null
// return.
static std::unique_ptr<const DexFile> Open(const ZipArchive& zip_archive, const char* entry_name,
const std::string& location, std::string* error_msg,
@@ -1055,7 +1066,7 @@
DexFileParameterIterator(const DexFile& dex_file, const DexFile::ProtoId& proto_id)
: dex_file_(dex_file), size_(0), pos_(0) {
type_list_ = dex_file_.GetProtoParameters(proto_id);
- if (type_list_ != NULL) {
+ if (type_list_ != nullptr) {
size_ = type_list_->Size();
}
}
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 09ef3ee..90b35a3 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -33,7 +33,7 @@
TEST_F(DexFileTest, Open) {
ScopedObjectAccess soa(Thread::Current());
std::unique_ptr<const DexFile> dex(OpenTestDexFile("Nested"));
- ASSERT_TRUE(dex.get() != NULL);
+ ASSERT_TRUE(dex.get() != nullptr);
}
static const uint8_t kBase64Map[256] = {
@@ -136,14 +136,14 @@
static std::unique_ptr<const DexFile> OpenDexFileBase64(const char* base64,
const char* location) {
// decode base64
- CHECK(base64 != NULL);
+ CHECK(base64 != nullptr);
size_t length;
std::unique_ptr<uint8_t[]> dex_bytes(DecodeBase64(base64, &length));
- CHECK(dex_bytes.get() != NULL);
+ CHECK(dex_bytes.get() != nullptr);
// write to provided file
std::unique_ptr<File> file(OS::CreateEmptyFile(location));
- CHECK(file.get() != NULL);
+ CHECK(file.get() != nullptr);
if (!file->WriteFully(dex_bytes.get(), length)) {
PLOG(FATAL) << "Failed to write base64 as dex file";
}
@@ -168,7 +168,7 @@
TEST_F(DexFileTest, Header) {
ScratchFile tmp;
std::unique_ptr<const DexFile> raw(OpenDexFileBase64(kRawDex, tmp.GetFilename().c_str()));
- ASSERT_TRUE(raw.get() != NULL);
+ ASSERT_TRUE(raw.get() != nullptr);
const DexFile::Header& header = raw->GetHeader();
// TODO: header.magic_
@@ -234,7 +234,7 @@
ASSERT_STREQ("LGetMethodSignature;", raw->GetClassDescriptor(class_def));
const uint8_t* class_data = raw->GetClassData(class_def);
- ASSERT_TRUE(class_data != NULL);
+ ASSERT_TRUE(class_data != nullptr);
ClassDataItemIterator it(*raw, class_data);
EXPECT_EQ(1u, it.NumDirectMethods());
@@ -281,8 +281,8 @@
EXPECT_EQ(1U, raw->NumClassDefs());
const char* strings[] = { "LGetMethodSignature;", "Ljava/lang/Float;", "Ljava/lang/Object;",
- "D", "I", "J", NULL };
- for (size_t i = 0; strings[i] != NULL; i++) {
+ "D", "I", "J", nullptr };
+ for (size_t i = 0; strings[i] != nullptr; i++) {
const char* str = strings[i];
const DexFile::StringId* str_id = raw->FindStringId(str);
const char* dex_str = raw->GetStringData(*str_id);
@@ -294,10 +294,10 @@
for (size_t i = 0; i < java_lang_dex_file_->NumTypeIds(); i++) {
const char* type_str = java_lang_dex_file_->StringByTypeIdx(i);
const DexFile::StringId* type_str_id = java_lang_dex_file_->FindStringId(type_str);
- ASSERT_TRUE(type_str_id != NULL);
+ ASSERT_TRUE(type_str_id != nullptr);
uint32_t type_str_idx = java_lang_dex_file_->GetIndexForStringId(*type_str_id);
const DexFile::TypeId* type_id = java_lang_dex_file_->FindTypeId(type_str_idx);
- ASSERT_TRUE(type_id != NULL);
+ ASSERT_TRUE(type_id != nullptr);
EXPECT_EQ(java_lang_dex_file_->GetIndexForTypeId(*type_id), i);
}
}
@@ -307,14 +307,14 @@
const DexFile::ProtoId& to_find = java_lang_dex_file_->GetProtoId(i);
const DexFile::TypeList* to_find_tl = java_lang_dex_file_->GetProtoParameters(to_find);
std::vector<uint16_t> to_find_types;
- if (to_find_tl != NULL) {
+ if (to_find_tl != nullptr) {
for (size_t j = 0; j < to_find_tl->Size(); j++) {
to_find_types.push_back(to_find_tl->GetTypeItem(j).type_idx_);
}
}
const DexFile::ProtoId* found =
java_lang_dex_file_->FindProtoId(to_find.return_type_idx_, to_find_types);
- ASSERT_TRUE(found != NULL);
+ ASSERT_TRUE(found != nullptr);
EXPECT_EQ(java_lang_dex_file_->GetIndexForProtoId(*found), i);
}
}
@@ -326,7 +326,7 @@
const DexFile::StringId& name = java_lang_dex_file_->GetStringId(to_find.name_idx_);
const DexFile::ProtoId& signature = java_lang_dex_file_->GetProtoId(to_find.proto_idx_);
const DexFile::MethodId* found = java_lang_dex_file_->FindMethodId(klass, name, signature);
- ASSERT_TRUE(found != NULL) << "Didn't find method " << i << ": "
+ ASSERT_TRUE(found != nullptr) << "Didn't find method " << i << ": "
<< java_lang_dex_file_->StringByTypeIdx(to_find.class_idx_) << "."
<< java_lang_dex_file_->GetStringData(name)
<< java_lang_dex_file_->GetMethodSignature(to_find);
@@ -341,7 +341,7 @@
const DexFile::StringId& name = java_lang_dex_file_->GetStringId(to_find.name_idx_);
const DexFile::TypeId& type = java_lang_dex_file_->GetTypeId(to_find.type_idx_);
const DexFile::FieldId* found = java_lang_dex_file_->FindFieldId(klass, name, type);
- ASSERT_TRUE(found != NULL) << "Didn't find field " << i << ": "
+ ASSERT_TRUE(found != nullptr) << "Didn't find field " << i << ": "
<< java_lang_dex_file_->StringByTypeIdx(to_find.type_idx_) << " "
<< java_lang_dex_file_->StringByTypeIdx(to_find.class_idx_) << "."
<< java_lang_dex_file_->GetStringData(name);
@@ -350,11 +350,20 @@
}
TEST_F(DexFileTest, GetMultiDexClassesDexName) {
+ ASSERT_EQ("classes.dex", DexFile::GetMultiDexClassesDexName(0));
+ ASSERT_EQ("classes2.dex", DexFile::GetMultiDexClassesDexName(1));
+ ASSERT_EQ("classes3.dex", DexFile::GetMultiDexClassesDexName(2));
+ ASSERT_EQ("classes100.dex", DexFile::GetMultiDexClassesDexName(99));
+}
+
+TEST_F(DexFileTest, GetMultiDexLocation) {
std::string dex_location_str = "/system/app/framework.jar";
const char* dex_location = dex_location_str.c_str();
- ASSERT_EQ("/system/app/framework.jar", DexFile::GetMultiDexClassesDexName(0, dex_location));
- ASSERT_EQ("/system/app/framework.jar:classes2.dex", DexFile::GetMultiDexClassesDexName(1, dex_location));
- ASSERT_EQ("/system/app/framework.jar:classes101.dex", DexFile::GetMultiDexClassesDexName(100, dex_location));
+ ASSERT_EQ("/system/app/framework.jar", DexFile::GetMultiDexLocation(0, dex_location));
+ ASSERT_EQ("/system/app/framework.jar:classes2.dex",
+ DexFile::GetMultiDexLocation(1, dex_location));
+ ASSERT_EQ("/system/app/framework.jar:classes101.dex",
+ DexFile::GetMultiDexLocation(100, dex_location));
}
TEST_F(DexFileTest, GetDexCanonicalLocation) {
@@ -363,7 +372,7 @@
std::string dex_location(dex_location_real.get());
ASSERT_EQ(dex_location, DexFile::GetDexCanonicalLocation(dex_location.c_str()));
- std::string multidex_location = DexFile::GetMultiDexClassesDexName(1, dex_location.c_str());
+ std::string multidex_location = DexFile::GetMultiDexLocation(1, dex_location.c_str());
ASSERT_EQ(multidex_location, DexFile::GetDexCanonicalLocation(multidex_location.c_str()));
std::string dex_location_sym = dex_location + "symlink";
@@ -371,7 +380,7 @@
ASSERT_EQ(dex_location, DexFile::GetDexCanonicalLocation(dex_location_sym.c_str()));
- std::string multidex_location_sym = DexFile::GetMultiDexClassesDexName(1, dex_location_sym.c_str());
+ std::string multidex_location_sym = DexFile::GetMultiDexLocation(1, dex_location_sym.c_str());
ASSERT_EQ(multidex_location, DexFile::GetDexCanonicalLocation(multidex_location_sym.c_str()));
ASSERT_EQ(0, unlink(dex_location_sym.c_str()));
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index a3f3de8..a66c38e 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -944,7 +944,7 @@
uint32_t type_idx = DecodeUnsignedLeb128(&ptr_);
if (type_idx != 0) {
type_idx--;
- if (!CheckIndex(type_idx, header_->string_ids_size_, "DBG_START_LOCAL type_idx")) {
+ if (!CheckIndex(type_idx, header_->type_ids_size_, "DBG_START_LOCAL type_idx")) {
return false;
}
}
@@ -975,7 +975,7 @@
uint32_t type_idx = DecodeUnsignedLeb128(&ptr_);
if (type_idx != 0) {
type_idx--;
- if (!CheckIndex(type_idx, header_->string_ids_size_, "DBG_START_LOCAL_EXTENDED type_idx")) {
+ if (!CheckIndex(type_idx, header_->type_ids_size_, "DBG_START_LOCAL_EXTENDED type_idx")) {
return false;
}
}
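
The two hunks above fix a real bounds check: DBG_START_LOCAL and DBG_START_LOCAL_EXTENDED encode type_idx as (real index + 1), with 0 meaning "no type information", and the decoded index refers to the type_ids table, so it must be compared against type_ids_size_, not string_ids_size_. The decode-and-check shape, rewritten as a plain function for clarity (CheckIndex and header_ are the verifier's own members in the real code):

#include <cstdint>

bool CheckDbgStartLocalType(uint32_t encoded, uint32_t type_ids_size) {
  if (encoded != 0) {
    uint32_t type_idx = encoded - 1;  // 0 is reserved for "no type info"
    if (type_idx >= type_ids_size) {  // bound by type_ids, not string_ids
      return false;
    }
  }
  return true;
}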
@@ -1473,7 +1473,7 @@
}
// Check ordering between items.
- if (previous_item_ != NULL) {
+ if (previous_item_ != nullptr) {
const DexFile::StringId* prev_item = reinterpret_cast<const DexFile::StringId*>(previous_item_);
const char* prev_str = dex_file_->GetStringData(*prev_item);
const char* str = dex_file_->GetStringData(*item);
@@ -1499,7 +1499,7 @@
}
// Check ordering between items.
- if (previous_item_ != NULL) {
+ if (previous_item_ != nullptr) {
const DexFile::TypeId* prev_item = reinterpret_cast<const DexFile::TypeId*>(previous_item_);
if (UNLIKELY(prev_item->descriptor_idx_ >= item->descriptor_idx_)) {
ErrorStringPrintf("Out-of-order type_ids: %x then %x",
@@ -1548,7 +1548,7 @@
}
// Check ordering between items. This relies on type_ids being in order.
- if (previous_item_ != NULL) {
+ if (previous_item_ != nullptr) {
const DexFile::ProtoId* prev = reinterpret_cast<const DexFile::ProtoId*>(previous_item_);
if (UNLIKELY(prev->return_type_idx_ > item->return_type_idx_)) {
ErrorStringPrintf("Out-of-order proto_id return types");
@@ -1610,7 +1610,7 @@
}
// Check ordering between items. This relies on the other sections being in order.
- if (previous_item_ != NULL) {
+ if (previous_item_ != nullptr) {
const DexFile::FieldId* prev_item = reinterpret_cast<const DexFile::FieldId*>(previous_item_);
if (UNLIKELY(prev_item->class_idx_ > item->class_idx_)) {
ErrorStringPrintf("Out-of-order field_ids");
@@ -1657,7 +1657,7 @@
}
// Check ordering between items. This relies on the other sections being in order.
- if (previous_item_ != NULL) {
+ if (previous_item_ != nullptr) {
const DexFile::MethodId* prev_item = reinterpret_cast<const DexFile::MethodId*>(previous_item_);
if (UNLIKELY(prev_item->class_idx_ > item->class_idx_)) {
ErrorStringPrintf("Out-of-order method_ids");
@@ -1728,7 +1728,7 @@
}
const DexFile::TypeList* interfaces = dex_file_->GetInterfacesList(*item);
- if (interfaces != NULL) {
+ if (interfaces != nullptr) {
uint32_t size = interfaces->Size();
// Ensure that all interfaces refer to classes (not arrays or primitives).
@@ -1952,7 +1952,7 @@
}
// Iterate through the items in the section.
- previous_item_ = NULL;
+ previous_item_ = nullptr;
for (uint32_t i = 0; i < count; i++) {
uint32_t new_offset = (offset + alignment_mask) & ~alignment_mask;
ptr_ = begin_ + new_offset;
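
Resetting previous_item_ to nullptr at the start of each section is what lets the per-type checks above (string_ids, type_ids, proto_ids, field_ids, method_ids) verify sort order with a single comparison against the previous entry. A generic sketch of that pattern, under the assumption that each item type supplies a strict ordering:

// Sketch: the per-section ordering check the verifier applies.
template <typename Item, typename Less>
bool CheckSectionOrderedSketch(const Item* items, uint32_t count, Less less) {
  const Item* previous = nullptr;  // Reset at the start of every section.
  for (uint32_t i = 0; i < count; ++i) {
    const Item* item = &items[i];
    if (previous != nullptr && !less(*previous, *item)) {
      return false;  // Out-of-order (or duplicate) entries are rejected.
    }
    previous = item;
  }
  return true;
}
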
diff --git a/runtime/dex_file_verifier.h b/runtime/dex_file_verifier.h
index 18bf2e7..877dfc2 100644
--- a/runtime/dex_file_verifier.h
+++ b/runtime/dex_file_verifier.h
@@ -36,7 +36,7 @@
private:
DexFileVerifier(const DexFile* dex_file, const uint8_t* begin, size_t size, const char* location)
: dex_file_(dex_file), begin_(begin), size_(size), location_(location),
- header_(&dex_file->GetHeader()), ptr_(NULL), previous_item_(NULL) {
+ header_(&dex_file->GetHeader()), ptr_(nullptr), previous_item_(nullptr) {
}
bool Verify();
@@ -99,12 +99,12 @@
bool CheckInterSection();
// Load a string by (type) index. Checks whether the index is in bounds, printing the error if
- // not. If there is an error, nullptr is returned.
+ // not. If there is an error, null is returned.
const char* CheckLoadStringByIdx(uint32_t idx, const char* error_fmt);
const char* CheckLoadStringByTypeIdx(uint32_t type_idx, const char* error_fmt);
// Load a field/method Id by index. Checks whether the index is in bounds, printing the error if
- // not. If there is an error, nullptr is returned.
+ // not. If there is an error, null is returned.
const DexFile::FieldId* CheckLoadFieldId(uint32_t idx, const char* error_fmt);
const DexFile::MethodId* CheckLoadMethodId(uint32_t idx, const char* error_fmt);
diff --git a/runtime/dex_file_verifier_test.cc b/runtime/dex_file_verifier_test.cc
index 00ca8a9..9f1ffec 100644
--- a/runtime/dex_file_verifier_test.cc
+++ b/runtime/dex_file_verifier_test.cc
@@ -105,14 +105,14 @@
const char* location,
std::string* error_msg) {
// decode base64
- CHECK(base64 != NULL);
+ CHECK(base64 != nullptr);
size_t length;
std::unique_ptr<uint8_t[]> dex_bytes(DecodeBase64(base64, &length));
- CHECK(dex_bytes.get() != NULL);
+ CHECK(dex_bytes.get() != nullptr);
// write to provided file
std::unique_ptr<File> file(OS::CreateEmptyFile(location));
- CHECK(file.get() != NULL);
+ CHECK(file.get() != nullptr);
if (!file->WriteFully(dex_bytes.get(), length)) {
PLOG(FATAL) << "Failed to write base64 as dex file";
}
@@ -178,7 +178,7 @@
// write to provided file
std::unique_ptr<File> file(OS::CreateEmptyFile(location));
- CHECK(file.get() != NULL);
+ CHECK(file.get() != nullptr);
if (!file->WriteFully(bytes, length)) {
PLOG(FATAL) << "Failed to write base64 as dex file";
}
@@ -200,12 +200,12 @@
return dex_file;
}
-static bool ModifyAndLoad(const char* location, size_t offset, uint8_t new_val,
- std::string* error_msg) {
+static bool ModifyAndLoad(const char* dex_file_content, const char* location, size_t offset,
+ uint8_t new_val, std::string* error_msg) {
// Decode base64.
size_t length;
- std::unique_ptr<uint8_t[]> dex_bytes(DecodeBase64(kGoodTestDex, &length));
- CHECK(dex_bytes.get() != NULL);
+ std::unique_ptr<uint8_t[]> dex_bytes(DecodeBase64(dex_file_content, &length));
+ CHECK(dex_bytes.get() != nullptr);
// Make modifications.
dex_bytes.get()[offset] = new_val;
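
With the new dex_file_content parameter, the helper can corrupt any base64-encoded test dex rather than only kGoodTestDex; the existing call sites simply pass kGoodTestDex through. A hypothetical extra call site, as it would appear inside a TEST_F body (offset and value mirror the DebugInfoTypeIdxTest added later in this diff):

// Hypothetical call site: corrupt one byte of an arbitrary base64 test dex
// and expect verification to fail.
ScratchFile tmp;
std::string error_msg;
bool rejected = !ModifyAndLoad(kDebugInfoTestDex, tmp.GetFilename().c_str(),
                               416, 0x14U, &error_msg);
ASSERT_TRUE(rejected);
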
@@ -221,7 +221,7 @@
// Class error.
ScratchFile tmp;
std::string error_msg;
- bool success = !ModifyAndLoad(tmp.GetFilename().c_str(), 220, 0xFFU, &error_msg);
+ bool success = !ModifyAndLoad(kGoodTestDex, tmp.GetFilename().c_str(), 220, 0xFFU, &error_msg);
ASSERT_TRUE(success);
ASSERT_NE(error_msg.find("inter_method_id_item class_idx"), std::string::npos) << error_msg;
}
@@ -230,7 +230,7 @@
// Proto error.
ScratchFile tmp;
std::string error_msg;
- bool success = !ModifyAndLoad(tmp.GetFilename().c_str(), 222, 0xFFU, &error_msg);
+ bool success = !ModifyAndLoad(kGoodTestDex, tmp.GetFilename().c_str(), 222, 0xFFU, &error_msg);
ASSERT_TRUE(success);
ASSERT_NE(error_msg.find("inter_method_id_item proto_idx"), std::string::npos) << error_msg;
}
@@ -239,10 +239,81 @@
// Name error.
ScratchFile tmp;
std::string error_msg;
- bool success = !ModifyAndLoad(tmp.GetFilename().c_str(), 224, 0xFFU, &error_msg);
+ bool success = !ModifyAndLoad(kGoodTestDex, tmp.GetFilename().c_str(), 224, 0xFFU, &error_msg);
ASSERT_TRUE(success);
ASSERT_NE(error_msg.find("inter_method_id_item name_idx"), std::string::npos) << error_msg;
}
}
+// Generated from:
+//
+// .class public LTest;
+// .super Ljava/lang/Object;
+// .source "Test.java"
+//
+// .method public constructor <init>()V
+// .registers 1
+//
+// .prologue
+// .line 1
+// invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+//
+// return-void
+// .end method
+//
+// .method public static main()V
+// .registers 2
+//
+// const-string v0, "a"
+// const-string v0, "b"
+// const-string v0, "c"
+// const-string v0, "d"
+// const-string v0, "e"
+// const-string v0, "f"
+// const-string v0, "g"
+// const-string v0, "h"
+// const-string v0, "i"
+// const-string v0, "j"
+// const-string v0, "k"
+//
+// .local v1, "local_var":Ljava/lang/String;
+// const-string v1, "test"
+// .end method
+
+static const char kDebugInfoTestDex[] =
+ "ZGV4CjAzNQCHRkHix2eIMQgvLD/0VGrlllZLo0Rb6VyUAgAAcAAAAHhWNBIAAAAAAAAAAAwCAAAU"
+ "AAAAcAAAAAQAAADAAAAAAQAAANAAAAAAAAAAAAAAAAMAAADcAAAAAQAAAPQAAACAAQAAFAEAABQB"
+ "AAAcAQAAJAEAADgBAABMAQAAVwEAAFoBAABdAQAAYAEAAGMBAABmAQAAaQEAAGwBAABvAQAAcgEA"
+ "AHUBAAB4AQAAewEAAIYBAACMAQAAAQAAAAIAAAADAAAABQAAAAUAAAADAAAAAAAAAAAAAAAAAAAA"
+ "AAAAABIAAAABAAAAAAAAAAAAAAABAAAAAQAAAAAAAAAEAAAAAAAAAPwBAAAAAAAABjxpbml0PgAG"
+ "TFRlc3Q7ABJMamF2YS9sYW5nL09iamVjdDsAEkxqYXZhL2xhbmcvU3RyaW5nOwAJVGVzdC5qYXZh"
+ "AAFWAAFhAAFiAAFjAAFkAAFlAAFmAAFnAAFoAAFpAAFqAAFrAAlsb2NhbF92YXIABG1haW4ABHRl"
+ "c3QAAAABAAcOAAAAARYDARIDAAAAAQABAAEAAACUAQAABAAAAHAQAgAAAA4AAgAAAAAAAACZAQAA"
+ "GAAAABoABgAaAAcAGgAIABoACQAaAAoAGgALABoADAAaAA0AGgAOABoADwAaABAAGgETAAAAAgAA"
+ "gYAEpAMBCbwDAAALAAAAAAAAAAEAAAAAAAAAAQAAABQAAABwAAAAAgAAAAQAAADAAAAAAwAAAAEA"
+ "AADQAAAABQAAAAMAAADcAAAABgAAAAEAAAD0AAAAAiAAABQAAAAUAQAAAyAAAAIAAACUAQAAASAA"
+ "AAIAAACkAQAAACAAAAEAAAD8AQAAABAAAAEAAAAMAgAA";
+
+TEST_F(DexFileVerifierTest, DebugInfoTypeIdxTest) {
+ {
+ // The input dex file should be good before modification.
+ ScratchFile tmp;
+ std::string error_msg;
+ std::unique_ptr<const DexFile> raw(OpenDexFileBase64(kDebugInfoTestDex,
+ tmp.GetFilename().c_str(),
+ &error_msg));
+ ASSERT_TRUE(raw.get() != nullptr) << error_msg;
+ }
+
+ {
+ // Modify the debug information entry.
+ ScratchFile tmp;
+ std::string error_msg;
+ bool success = !ModifyAndLoad(kDebugInfoTestDex, tmp.GetFilename().c_str(), 416, 0x14U,
+ &error_msg);
+ ASSERT_TRUE(success);
+ ASSERT_NE(error_msg.find("DBG_START_LOCAL type_idx"), std::string::npos) << error_msg;
+ }
+}
+
} // namespace art
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index d3b9eb4..c64c21e 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -183,7 +183,7 @@
// Reads an instruction out of the stream at the specified address.
static const Instruction* At(const uint16_t* code) {
- DCHECK(code != NULL);
+ DCHECK(code != nullptr);
return reinterpret_cast<const Instruction*>(code);
}
diff --git a/runtime/dex_method_iterator.h b/runtime/dex_method_iterator.h
index 14e316f..7fae277 100644
--- a/runtime/dex_method_iterator.h
+++ b/runtime/dex_method_iterator.h
@@ -30,8 +30,8 @@
found_next_(false),
dex_file_index_(0),
class_def_index_(0),
- class_def_(NULL),
- class_data_(NULL),
+ class_def_(nullptr),
+ class_data_(nullptr),
direct_method_(false) {
CHECK_NE(0U, dex_files_.size());
}
@@ -51,20 +51,20 @@
dex_file_index_++;
continue;
}
- if (class_def_ == NULL) {
+ if (class_def_ == nullptr) {
class_def_ = &GetDexFileInternal().GetClassDef(class_def_index_);
}
- if (class_data_ == NULL) {
+ if (class_data_ == nullptr) {
class_data_ = GetDexFileInternal().GetClassData(*class_def_);
- if (class_data_ == NULL) {
+ if (class_data_ == nullptr) {
// empty class, such as a marker interface
// End of this class, advance and retry.
- class_def_ = NULL;
+ class_def_ = nullptr;
class_def_index_++;
continue;
}
}
- if (it_.get() == NULL) {
+ if (it_.get() == nullptr) {
it_.reset(new ClassDataItemIterator(GetDexFileInternal(), class_data_));
// Skip fields
while (GetIterator().HasNextStaticField()) {
@@ -88,16 +88,16 @@
}
// End of this class, advance and retry.
DCHECK(!GetIterator().HasNext());
- it_.reset(NULL);
- class_data_ = NULL;
- class_def_ = NULL;
+ it_.reset(nullptr);
+ class_data_ = nullptr;
+ class_def_ = nullptr;
class_def_index_++;
}
}
void Next() {
found_next_ = false;
- if (it_.get() != NULL) {
+ if (it_.get() != nullptr) {
// Advance to next method if we currently are looking at a class.
GetIterator().Next();
}
@@ -115,20 +115,20 @@
InvokeType GetInvokeType() {
CHECK(HasNext());
- CHECK(class_def_ != NULL);
+ CHECK(class_def_ != nullptr);
return GetIterator().GetMethodInvokeType(*class_def_);
}
private:
ClassDataItemIterator& GetIterator() const {
- CHECK(it_.get() != NULL);
+ CHECK(it_.get() != nullptr);
return *it_.get();
}
const DexFile& GetDexFileInternal() const {
CHECK_LT(dex_file_index_, dex_files_.size());
const DexFile* dex_file = dex_files_[dex_file_index_];
- CHECK(dex_file != NULL);
+ CHECK(dex_file != nullptr);
return *dex_file;
}
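
The iterator above walks every method of every class across a list of dex files, lazily materializing a ClassDataItemIterator per class and skipping field entries. Typical use is a flat HasNext/Next loop; a sketch, assuming the usual GetDexFile()/GetMemberIndex() accessors that this hunk does not show:

// Sketch: visiting every method across a set of dex files.
void VisitAllMethodsSketch(const std::vector<const DexFile*>& dex_files) {
  DexMethodIterator it(dex_files);  // CHECK-fails on an empty vector.
  while (it.HasNext()) {
    const DexFile& dex_file = it.GetDexFile();
    uint32_t method_idx = it.GetMemberIndex();
    (void)dex_file;    // ... inspect the method here ...
    (void)method_idx;
    it.Next();
  }
}
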
diff --git a/runtime/elf.h b/runtime/elf.h
index 60b5248..4514bb2 100644
--- a/runtime/elf.h
+++ b/runtime/elf.h
@@ -1852,6 +1852,38 @@
VER_NEED_CURRENT = 1
};
+struct ElfTypes32 {
+ typedef Elf32_Addr Addr;
+ typedef Elf32_Off Off;
+ typedef Elf32_Half Half;
+ typedef Elf32_Word Word;
+ typedef Elf32_Sword Sword;
+ typedef Elf32_Ehdr Ehdr;
+ typedef Elf32_Shdr Shdr;
+ typedef Elf32_Sym Sym;
+ typedef Elf32_Rel Rel;
+ typedef Elf32_Rela Rela;
+ typedef Elf32_Phdr Phdr;
+ typedef Elf32_Dyn Dyn;
+};
+
+struct ElfTypes64 {
+ typedef Elf64_Addr Addr;
+ typedef Elf64_Off Off;
+ typedef Elf64_Half Half;
+ typedef Elf64_Word Word;
+ typedef Elf64_Sword Sword;
+ typedef Elf64_Xword Xword;
+ typedef Elf64_Sxword Sxword;
+ typedef Elf64_Ehdr Ehdr;
+ typedef Elf64_Shdr Shdr;
+ typedef Elf64_Sym Sym;
+ typedef Elf64_Rel Rel;
+ typedef Elf64_Rela Rela;
+ typedef Elf64_Phdr Phdr;
+ typedef Elf64_Dyn Dyn;
+};
+
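
These two trait bundles enable the file-long cleanup below: instead of threading eleven typename parameters through every ElfFileImpl member definition, each definition takes a single ElfTypes parameter and pulls the concrete ELF types out of it. The shape of the pattern in isolation, as a minimal sketch:

// Sketch of the traits pattern the refactoring below relies on.
template <typename ElfTypes>
class ElfThingSketch {
 public:
  typedef typename ElfTypes::Addr Elf_Addr;  // One local alias per used type.
  typedef typename ElfTypes::Word Elf_Word;

  Elf_Addr ComputeEnd(Elf_Addr begin, Elf_Word size) const {
    return begin + size;
  }
};

// Instantiation picks the 32-bit or 64-bit layouts wholesale:
//   ElfThingSketch<ElfTypes32> thing32;
//   ElfThingSketch<ElfTypes64> thing64;
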
// BEGIN android-changed
#endif // ART_RUNTIME_ELF_H_
// END android-changed
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 37e391d..b1d933d 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -110,12 +110,10 @@
delete entry;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::ElfFileImpl(File* file, bool writable, bool program_header_only, uint8_t* requested_base)
+template <typename ElfTypes>
+ElfFileImpl<ElfTypes>::ElfFileImpl(File* file, bool writable,
+ bool program_header_only,
+ uint8_t* requested_base)
: file_(file),
writable_(writable),
program_header_only_(program_header_only),
@@ -138,20 +136,12 @@
CHECK(file != nullptr);
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>*
- ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::Open(File* file, bool writable, bool program_header_only,
- std::string* error_msg, uint8_t* requested_base) {
- std::unique_ptr<ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>>
- elf_file(new ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- (file, writable, program_header_only, requested_base));
+template <typename ElfTypes>
+ElfFileImpl<ElfTypes>* ElfFileImpl<ElfTypes>::Open(
+ File* file, bool writable, bool program_header_only,
+ std::string* error_msg, uint8_t* requested_base) {
+ std::unique_ptr<ElfFileImpl<ElfTypes>> elf_file(new ElfFileImpl<ElfTypes>
+ (file, writable, program_header_only, requested_base));
int prot;
int flags;
if (writable) {
@@ -167,32 +157,20 @@
return elf_file.release();
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>*
- ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::Open(File* file, int prot, int flags, std::string* error_msg) {
- std::unique_ptr<ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>>
- elf_file(new ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- (file, (prot & PROT_WRITE) == PROT_WRITE, /*program_header_only*/false,
- /*requested_base*/nullptr));
+template <typename ElfTypes>
+ElfFileImpl<ElfTypes>* ElfFileImpl<ElfTypes>::Open(
+ File* file, int prot, int flags, std::string* error_msg) {
+ std::unique_ptr<ElfFileImpl<ElfTypes>> elf_file(new ElfFileImpl<ElfTypes>
+ (file, (prot & PROT_WRITE) == PROT_WRITE, /*program_header_only*/false,
+ /*requested_base*/nullptr));
if (!elf_file->Setup(prot, flags, error_msg)) {
return nullptr;
}
return elf_file.release();
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::Setup(int prot, int flags, std::string* error_msg) {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::Setup(int prot, int flags, std::string* error_msg) {
int64_t temp_file_length = file_->GetLength();
if (temp_file_length < 0) {
errno = -temp_file_length;
@@ -349,12 +327,8 @@
return true;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::~ElfFileImpl() {
+template <typename ElfTypes>
+ElfFileImpl<ElfTypes>::~ElfFileImpl() {
STLDeleteElements(&segments_);
delete symtab_symbol_table_;
delete dynsym_symbol_table_;
@@ -364,13 +338,9 @@
}
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::CheckAndSet(Elf32_Off offset, const char* label,
- uint8_t** target, std::string* error_msg) {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::CheckAndSet(Elf32_Off offset, const char* label,
+ uint8_t** target, std::string* error_msg) {
if (Begin() + offset >= End()) {
*error_msg = StringPrintf("Offset %d is out of range for %s in ELF file: '%s'", offset, label,
file_->GetPath().c_str());
@@ -380,12 +350,9 @@
return true;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::CheckSectionsLinked(const uint8_t* source, const uint8_t* target) const {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::CheckSectionsLinked(const uint8_t* source,
+ const uint8_t* target) const {
// Only works in whole-program mode, as we need to iterate over the sections.
// Note that we normally can't search by type, as duplicates are allowed for most section types.
if (program_header_only_) {
@@ -416,12 +383,8 @@
return target_found && source_section != nullptr && source_section->sh_link == target_index;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::CheckSectionsExist(std::string* error_msg) const {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::CheckSectionsExist(std::string* error_msg) const {
if (!program_header_only_) {
// If in full mode, need section headers.
if (section_headers_start_ == nullptr) {
@@ -504,12 +467,8 @@
return true;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::SetMap(MemMap* map, std::string* error_msg) {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::SetMap(MemMap* map, std::string* error_msg) {
if (map == nullptr) {
// MemMap::Open should have already set an error.
DCHECK(!error_msg->empty());
@@ -643,64 +602,41 @@
return true;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Ehdr& ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetHeader() const {
+template <typename ElfTypes>
+typename ElfTypes::Ehdr& ElfFileImpl<ElfTypes>::GetHeader() const {
CHECK(header_ != nullptr); // Header has been checked in SetMap. This is a sanity check.
return *header_;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-uint8_t* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetProgramHeadersStart() const {
+template <typename ElfTypes>
+uint8_t* ElfFileImpl<ElfTypes>::GetProgramHeadersStart() const {
CHECK(program_headers_start_ != nullptr); // Header has been set in Setup. This is a sanity
// check.
return program_headers_start_;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-uint8_t* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetSectionHeadersStart() const {
+template <typename ElfTypes>
+uint8_t* ElfFileImpl<ElfTypes>::GetSectionHeadersStart() const {
CHECK(!program_header_only_); // Only used in "full" mode.
CHECK(section_headers_start_ != nullptr); // Is checked in CheckSectionsExist. Sanity check.
return section_headers_start_;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Phdr& ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetDynamicProgramHeader() const {
+template <typename ElfTypes>
+typename ElfTypes::Phdr& ElfFileImpl<ElfTypes>::GetDynamicProgramHeader() const {
CHECK(dynamic_program_header_ != nullptr); // Is checked in CheckSectionsExist. Sanity check.
return *dynamic_program_header_;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Dyn* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetDynamicSectionStart() const {
+template <typename ElfTypes>
+typename ElfTypes::Dyn* ElfFileImpl<ElfTypes>::GetDynamicSectionStart() const {
CHECK(dynamic_section_start_ != nullptr); // Is checked in CheckSectionsExist. Sanity check.
return dynamic_section_start_;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Sym* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetSymbolSectionStart(Elf_Word section_type) const {
+template <typename ElfTypes>
+typename ElfTypes::Sym* ElfFileImpl<ElfTypes>::GetSymbolSectionStart(
+ Elf_Word section_type) const {
CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type;
switch (section_type) {
case SHT_SYMTAB: {
@@ -718,12 +654,9 @@
}
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-const char* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetStringSectionStart(Elf_Word section_type) const {
+template <typename ElfTypes>
+const char* ElfFileImpl<ElfTypes>::GetStringSectionStart(
+ Elf_Word section_type) const {
CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type;
switch (section_type) {
case SHT_SYMTAB: {
@@ -739,12 +672,9 @@
}
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-const char* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetString(Elf_Word section_type, Elf_Word i) const {
+template <typename ElfTypes>
+const char* ElfFileImpl<ElfTypes>::GetString(Elf_Word section_type,
+ Elf_Word i) const {
CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type;
if (i == 0) {
return nullptr;
@@ -759,39 +689,23 @@
// WARNING: The following methods do not check for an error condition (non-existent hash section).
// It is the caller's job to do this.
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Word* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetHashSectionStart() const {
+template <typename ElfTypes>
+typename ElfTypes::Word* ElfFileImpl<ElfTypes>::GetHashSectionStart() const {
return hash_section_start_;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Word ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetHashBucketNum() const {
+template <typename ElfTypes>
+typename ElfTypes::Word ElfFileImpl<ElfTypes>::GetHashBucketNum() const {
return GetHashSectionStart()[0];
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Word ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetHashChainNum() const {
+template <typename ElfTypes>
+typename ElfTypes::Word ElfFileImpl<ElfTypes>::GetHashChainNum() const {
return GetHashSectionStart()[1];
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Word ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetHashBucket(size_t i, bool* ok) const {
+template <typename ElfTypes>
+typename ElfTypes::Word ElfFileImpl<ElfTypes>::GetHashBucket(size_t i, bool* ok) const {
if (i >= GetHashBucketNum()) {
*ok = false;
return 0;
@@ -801,12 +715,8 @@
return GetHashSectionStart()[2 + i];
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Word ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetHashChain(size_t i, bool* ok) const {
+template <typename ElfTypes>
+typename ElfTypes::Word ElfFileImpl<ElfTypes>::GetHashChain(size_t i, bool* ok) const {
if (i >= GetHashChainNum()) {
*ok = false;
return 0;
@@ -816,21 +726,13 @@
return GetHashSectionStart()[2 + GetHashBucketNum() + i];
}
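
The accessors above hard-code the classic SysV ELF .hash layout: word 0 is nbucket, word 1 is nchain, followed by nbucket bucket entries and then nchain chain entries. A lookup over that layout looks roughly like this sketch (the name-comparison step is left to a caller-supplied predicate):

// Sketch: SysV .hash lookup over the layout the accessors above assume.
// words[0] = nbucket, words[1] = nchain, then buckets, then chains.
template <typename Matches>
uint32_t HashLookupSketch(const uint32_t* words, uint32_t name_hash,
                          Matches matches) {
  uint32_t nbucket = words[0];
  uint32_t nchain = words[1];
  const uint32_t* buckets = words + 2;
  const uint32_t* chains = words + 2 + nbucket;
  for (uint32_t i = buckets[name_hash % nbucket]; i != 0 && i < nchain;
       i = chains[i]) {
    if (matches(i)) {  // Caller compares symbol i's name.
      return i;
    }
  }
  return 0;  // STN_UNDEF: not found.
}
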
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Word ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetProgramHeaderNum() const {
+template <typename ElfTypes>
+typename ElfTypes::Word ElfFileImpl<ElfTypes>::GetProgramHeaderNum() const {
return GetHeader().e_phnum;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Phdr* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetProgramHeader(Elf_Word i) const {
+template <typename ElfTypes>
+typename ElfTypes::Phdr* ElfFileImpl<ElfTypes>::GetProgramHeader(Elf_Word i) const {
CHECK_LT(i, GetProgramHeaderNum()) << file_->GetPath(); // Sanity check for caller.
uint8_t* program_header = GetProgramHeadersStart() + (i * GetHeader().e_phentsize);
if (program_header >= End()) {
@@ -839,12 +741,8 @@
return reinterpret_cast<Elf_Phdr*>(program_header);
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Phdr* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::FindProgamHeaderByType(Elf_Word type) const {
+template <typename ElfTypes>
+typename ElfTypes::Phdr* ElfFileImpl<ElfTypes>::FindProgamHeaderByType(Elf_Word type) const {
for (Elf_Word i = 0; i < GetProgramHeaderNum(); i++) {
Elf_Phdr* program_header = GetProgramHeader(i);
if (program_header->p_type == type) {
@@ -854,21 +752,13 @@
return nullptr;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Word ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetSectionHeaderNum() const {
+template <typename ElfTypes>
+typename ElfTypes::Word ElfFileImpl<ElfTypes>::GetSectionHeaderNum() const {
return GetHeader().e_shnum;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Shdr* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetSectionHeader(Elf_Word i) const {
+template <typename ElfTypes>
+typename ElfTypes::Shdr* ElfFileImpl<ElfTypes>::GetSectionHeader(Elf_Word i) const {
// Can only access arbitrary sections when we have the whole file, not just program header.
// Even if we Load(), it doesn't bring in all the sections.
CHECK(!program_header_only_) << file_->GetPath();
@@ -882,12 +772,8 @@
return reinterpret_cast<Elf_Shdr*>(section_header);
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Shdr* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::FindSectionByType(Elf_Word type) const {
+template <typename ElfTypes>
+typename ElfTypes::Shdr* ElfFileImpl<ElfTypes>::FindSectionByType(Elf_Word type) const {
// Can only access arbitrary sections when we have the whole file, not just program header.
// We could change this to switch on known types if they were detected during loading.
CHECK(!program_header_only_) << file_->GetPath();
@@ -914,21 +800,14 @@
return h;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Shdr* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetSectionNameStringSection() const {
+template <typename ElfTypes>
+typename ElfTypes::Shdr* ElfFileImpl<ElfTypes>::GetSectionNameStringSection() const {
return GetSectionHeader(GetHeader().e_shstrndx);
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-const uint8_t* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::FindDynamicSymbolAddress(const std::string& symbol_name) const {
+template <typename ElfTypes>
+const uint8_t* ElfFileImpl<ElfTypes>::FindDynamicSymbolAddress(
+ const std::string& symbol_name) const {
// Check that we have a hash section.
if (GetHashSectionStart() == nullptr) {
return nullptr; // Failure condition.
@@ -944,12 +823,9 @@
}
// WARNING: Only called from FindDynamicSymbolAddress. Elides check for hash section.
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-const Elf_Sym* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::FindDynamicSymbol(const std::string& symbol_name) const {
+template <typename ElfTypes>
+const typename ElfTypes::Sym* ElfFileImpl<ElfTypes>::FindDynamicSymbol(
+ const std::string& symbol_name) const {
if (GetHashBucketNum() == 0) {
// No dynamic symbols at all.
return nullptr;
@@ -978,34 +854,21 @@
return nullptr;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::IsSymbolSectionType(Elf_Word section_type) {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::IsSymbolSectionType(Elf_Word section_type) {
return ((section_type == SHT_SYMTAB) || (section_type == SHT_DYNSYM));
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Word ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetSymbolNum(Elf_Shdr& section_header) const {
+template <typename ElfTypes>
+typename ElfTypes::Word ElfFileImpl<ElfTypes>::GetSymbolNum(Elf_Shdr& section_header) const {
CHECK(IsSymbolSectionType(section_header.sh_type))
<< file_->GetPath() << " " << section_header.sh_type;
CHECK_NE(0U, section_header.sh_entsize) << file_->GetPath();
return section_header.sh_size / section_header.sh_entsize;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Sym* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetSymbol(Elf_Word section_type,
- Elf_Word i) const {
+template <typename ElfTypes>
+typename ElfTypes::Sym* ElfFileImpl<ElfTypes>::GetSymbol(Elf_Word section_type, Elf_Word i) const {
Elf_Sym* sym_start = GetSymbolSectionStart(section_type);
if (sym_start == nullptr) {
return nullptr;
@@ -1013,14 +876,9 @@
return sym_start + i;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-typename ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::SymbolTable** ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetSymbolTable(Elf_Word section_type) {
+template <typename ElfTypes>
+typename ElfFileImpl<ElfTypes>::SymbolTable**
+ElfFileImpl<ElfTypes>::GetSymbolTable(Elf_Word section_type) {
CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type;
switch (section_type) {
case SHT_SYMTAB: {
@@ -1036,14 +894,9 @@
}
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Sym* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::FindSymbolByName(Elf_Word section_type,
- const std::string& symbol_name,
- bool build_map) {
+template <typename ElfTypes>
+typename ElfTypes::Sym* ElfFileImpl<ElfTypes>::FindSymbolByName(
+ Elf_Word section_type, const std::string& symbol_name, bool build_map) {
CHECK(!program_header_only_) << file_->GetPath();
CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type;
@@ -1122,14 +975,9 @@
return nullptr;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Addr ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::FindSymbolAddress(Elf_Word section_type,
- const std::string& symbol_name,
- bool build_map) {
+template <typename ElfTypes>
+typename ElfTypes::Addr ElfFileImpl<ElfTypes>::FindSymbolAddress(
+ Elf_Word section_type, const std::string& symbol_name, bool build_map) {
Elf_Sym* symbol = FindSymbolByName(section_type, symbol_name, build_map);
if (symbol == nullptr) {
return 0;
@@ -1137,12 +985,9 @@
return symbol->st_value;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-const char* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetString(Elf_Shdr& string_section, Elf_Word i) const {
+template <typename ElfTypes>
+const char* ElfFileImpl<ElfTypes>::GetString(Elf_Shdr& string_section,
+ Elf_Word i) const {
CHECK(!program_header_only_) << file_->GetPath();
// TODO: remove this static_cast from enum when using -std=gnu++0x
if (static_cast<Elf_Word>(SHT_STRTAB) != string_section.sh_type) {
@@ -1162,126 +1007,82 @@
return reinterpret_cast<const char*>(string);
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Word ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetDynamicNum() const {
+template <typename ElfTypes>
+typename ElfTypes::Word ElfFileImpl<ElfTypes>::GetDynamicNum() const {
return GetDynamicProgramHeader().p_filesz / sizeof(Elf_Dyn);
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Dyn& ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetDynamic(Elf_Word i) const {
+template <typename ElfTypes>
+typename ElfTypes::Dyn& ElfFileImpl<ElfTypes>::GetDynamic(Elf_Word i) const {
CHECK_LT(i, GetDynamicNum()) << file_->GetPath();
return *(GetDynamicSectionStart() + i);
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Dyn* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::FindDynamicByType(Elf_Sword type) const {
+template <typename ElfTypes>
+typename ElfTypes::Dyn* ElfFileImpl<ElfTypes>::FindDynamicByType(Elf_Sword type) const {
for (Elf_Word i = 0; i < GetDynamicNum(); i++) {
Elf_Dyn* dyn = &GetDynamic(i);
if (dyn->d_tag == type) {
return dyn;
}
}
- return NULL;
+ return nullptr;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Word ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::FindDynamicValueByType(Elf_Sword type) const {
+template <typename ElfTypes>
+typename ElfTypes::Word ElfFileImpl<ElfTypes>::FindDynamicValueByType(Elf_Sword type) const {
Elf_Dyn* dyn = FindDynamicByType(type);
- if (dyn == NULL) {
+ if (dyn == nullptr) {
return 0;
} else {
return dyn->d_un.d_val;
}
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Rel* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetRelSectionStart(Elf_Shdr& section_header) const {
+template <typename ElfTypes>
+typename ElfTypes::Rel* ElfFileImpl<ElfTypes>::GetRelSectionStart(Elf_Shdr& section_header) const {
CHECK(SHT_REL == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type;
return reinterpret_cast<Elf_Rel*>(Begin() + section_header.sh_offset);
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Word ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetRelNum(Elf_Shdr& section_header) const {
+template <typename ElfTypes>
+typename ElfTypes::Word ElfFileImpl<ElfTypes>::GetRelNum(Elf_Shdr& section_header) const {
CHECK(SHT_REL == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type;
CHECK_NE(0U, section_header.sh_entsize) << file_->GetPath();
return section_header.sh_size / section_header.sh_entsize;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Rel& ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetRel(Elf_Shdr& section_header, Elf_Word i) const {
+template <typename ElfTypes>
+typename ElfTypes::Rel& ElfFileImpl<ElfTypes>::GetRel(Elf_Shdr& section_header, Elf_Word i) const {
CHECK(SHT_REL == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type;
CHECK_LT(i, GetRelNum(section_header)) << file_->GetPath();
return *(GetRelSectionStart(section_header) + i);
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Rela* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetRelaSectionStart(Elf_Shdr& section_header) const {
+template <typename ElfTypes>
+typename ElfTypes::Rela* ElfFileImpl<ElfTypes>::GetRelaSectionStart(Elf_Shdr& section_header) const {
CHECK(SHT_RELA == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type;
return reinterpret_cast<Elf_Rela*>(Begin() + section_header.sh_offset);
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Word ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetRelaNum(Elf_Shdr& section_header) const {
+template <typename ElfTypes>
+typename ElfTypes::Word ElfFileImpl<ElfTypes>::GetRelaNum(Elf_Shdr& section_header) const {
CHECK(SHT_RELA == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type;
return section_header.sh_size / section_header.sh_entsize;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Rela& ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetRela(Elf_Shdr& section_header, Elf_Word i) const {
+template <typename ElfTypes>
+typename ElfTypes::Rela& ElfFileImpl<ElfTypes>::GetRela(Elf_Shdr& section_header, Elf_Word i) const {
CHECK(SHT_RELA == section_header.sh_type) << file_->GetPath() << " " << section_header.sh_type;
CHECK_LT(i, GetRelaNum(section_header)) << file_->GetPath();
return *(GetRelaSectionStart(section_header) + i);
}
// Based on bionic's phdr_table_get_load_size.
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-size_t ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GetLoadedSize() const {
- Elf_Addr min_vaddr = 0xFFFFFFFFu;
- Elf_Addr max_vaddr = 0x00000000u;
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::GetLoadedSize(size_t* size, std::string* error_msg) const {
+ Elf_Addr min_vaddr = static_cast<Elf_Addr>(-1);
+ Elf_Addr max_vaddr = 0u;
for (Elf_Word i = 0; i < GetProgramHeaderNum(); i++) {
Elf_Phdr* program_header = GetProgramHeader(i);
if (program_header->p_type != PT_LOAD) {
@@ -1292,6 +1093,15 @@
min_vaddr = begin_vaddr;
}
Elf_Addr end_vaddr = program_header->p_vaddr + program_header->p_memsz;
+ if (UNLIKELY(begin_vaddr > end_vaddr)) {
+ std::ostringstream oss;
+ oss << "Program header #" << i << " has overflow in p_vaddr+p_memsz: 0x" << std::hex
+ << program_header->p_vaddr << "+0x" << program_header->p_memsz << "=0x" << end_vaddr
+ << " in ELF file \"" << file_->GetPath() << "\"";
+ *error_msg = oss.str();
+ *size = static_cast<size_t>(-1);
+ return false;
+ }
if (end_vaddr > max_vaddr) {
max_vaddr = end_vaddr;
}
@@ -1299,16 +1109,22 @@
min_vaddr = RoundDown(min_vaddr, kPageSize);
max_vaddr = RoundUp(max_vaddr, kPageSize);
CHECK_LT(min_vaddr, max_vaddr) << file_->GetPath();
- size_t loaded_size = max_vaddr - min_vaddr;
- return loaded_size;
+ Elf_Addr loaded_size = max_vaddr - min_vaddr;
+ // Check that the loaded_size fits in size_t.
+ if (UNLIKELY(loaded_size > std::numeric_limits<size_t>::max())) {
+ std::ostringstream oss;
+ oss << "Loaded size is 0x" << std::hex << loaded_size << " but maximum size_t is 0x"
+ << std::numeric_limits<size_t>::max() << " for ELF file \"" << file_->GetPath() << "\"";
+ *error_msg = oss.str();
+ *size = static_cast<size_t>(-1);
+ return false;
+ }
+ *size = loaded_size;
+ return true;
}
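
The rewritten GetLoadedSize turns two previously silent failure modes into reported errors: a PT_LOAD segment whose p_vaddr + p_memsz wraps around, and (for a 32-bit size_t reading ELF64) a span wider than size_t. A worked example of the first guard, assuming a 32-bit Elf_Addr:

// Worked example of the overflow guard with 32-bit Elf_Addr:
//   p_vaddr = 0xFFFFF000, p_memsz = 0x2000
//   end_vaddr wraps to 0x1000, so begin_vaddr > end_vaddr and the
//   error path is taken instead of computing a bogus loaded size.
uint32_t p_vaddr = 0xFFFFF000u;
uint32_t p_memsz = 0x2000u;
uint32_t end_vaddr = p_vaddr + p_memsz;  // Wraps to 0x1000.
bool overflowed = p_vaddr > end_vaddr;   // true -> GetLoadedSize fails.
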
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::Load(bool executable, std::string* error_msg) {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::Load(bool executable, std::string* error_msg) {
CHECK(program_header_only_) << file_->GetPath();
if (executable) {
@@ -1367,9 +1183,14 @@
}
std::string reservation_name("ElfFile reservation for ");
reservation_name += file_->GetPath();
+ size_t loaded_size;
+ if (!GetLoadedSize(&loaded_size, error_msg)) {
+ DCHECK(!error_msg->empty());
+ return false;
+ }
std::unique_ptr<MemMap> reserve(MemMap::MapAnonymous(reservation_name.c_str(),
reserve_base_override,
- GetLoadedSize(), PROT_NONE, false, false,
+ loaded_size, PROT_NONE, false, false,
error_msg));
if (reserve.get() == nullptr) {
*error_msg = StringPrintf("Failed to allocate %s: %s",
@@ -1543,12 +1364,8 @@
return true;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::ValidPointer(const uint8_t* start) const {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::ValidPointer(const uint8_t* start) const {
for (size_t i = 0; i < segments_.size(); ++i) {
const MemMap* segment = segments_[i];
if (segment->Begin() <= start && start < segment->End()) {
@@ -1559,12 +1376,9 @@
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-Elf_Shdr* ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::FindSectionByName(const std::string& name) const {
+template <typename ElfTypes>
+typename ElfTypes::Shdr* ElfFileImpl<ElfTypes>::FindSectionByName(
+ const std::string& name) const {
CHECK(!program_header_only_);
Elf_Shdr* shstrtab_sec = GetSectionNameStringSection();
if (shstrtab_sec == nullptr) {
@@ -1586,42 +1400,33 @@
return nullptr;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::FixupDebugSections(typename std::make_signed<Elf_Off>::type base_address_delta) {
- const Elf_Shdr* debug_info = FindSectionByName(".debug_info");
- const Elf_Shdr* debug_abbrev = FindSectionByName(".debug_abbrev");
- const Elf_Shdr* debug_str = FindSectionByName(".debug_str");
- const Elf_Shdr* strtab_sec = FindSectionByName(".strtab");
- const Elf_Shdr* symtab_sec = FindSectionByName(".symtab");
-
- if (debug_info == nullptr || debug_abbrev == nullptr ||
- debug_str == nullptr || strtab_sec == nullptr || symtab_sec == nullptr) {
- // Release version of ART does not generate debug info.
- return true;
- }
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::FixupDebugSections(typename std::make_signed<Elf_Off>::type base_address_delta) {
if (base_address_delta == 0) {
return true;
}
- if (!ApplyOatPatchesTo(".debug_info", base_address_delta)) {
- return false;
+ if (FindSectionByName(".debug_frame") != nullptr) {
+ if (!ApplyOatPatchesTo(".debug_frame", base_address_delta)) {
+ return false;
+ }
}
- if (!ApplyOatPatchesTo(".debug_line", base_address_delta)) {
- return false;
+ if (FindSectionByName(".debug_info") != nullptr) {
+ if (!ApplyOatPatchesTo(".debug_info", base_address_delta)) {
+ return false;
+ }
+ }
+ if (FindSectionByName(".debug_line") != nullptr) {
+ if (!ApplyOatPatchesTo(".debug_line", base_address_delta)) {
+ return false;
+ }
}
return true;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::ApplyOatPatchesTo(const char* target_section_name,
- typename std::make_signed<Elf_Off>::type delta) {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::ApplyOatPatchesTo(
+ const char* target_section_name,
+ typename std::make_signed<Elf_Off>::type delta) {
auto patches_section = FindSectionByName(".oat_patches");
if (patches_section == nullptr) {
LOG(ERROR) << ".oat_patches section not found.";
@@ -1648,15 +1453,12 @@
}
// Apply .oat_patches to given section.
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::ApplyOatPatches(const uint8_t* patches, const uint8_t* patches_end,
- const char* target_section_name,
- typename std::make_signed<Elf_Off>::type delta,
- uint8_t* to_patch, const uint8_t* to_patch_end) {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::ApplyOatPatches(
+ const uint8_t* patches, const uint8_t* patches_end,
+ const char* target_section_name,
+ typename std::make_signed<Elf_Off>::type delta,
+ uint8_t* to_patch, const uint8_t* to_patch_end) {
// Read null-terminated section name.
const char* section_name;
while ((section_name = reinterpret_cast<const char*>(patches))[0] != '\0') {
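
The loop above scans .oat_patches for the entry whose null-terminated section name matches target_section_name. Once found, the patch data is applied in place; a hedged sketch of that application step, assuming ULEB128 delta-coded offsets (the encoding is an assumption here, since this hunk only shows the name scan; DecodeUnsignedLeb128 appears earlier in this diff):

// Hedged sketch: add `delta` at each patch location. ULEB128 delta-coding
// of offsets is an assumption not shown in this hunk.
void ApplyDeltasSketch(const uint8_t* patches, const uint8_t* patches_end,
                       int64_t delta, uint8_t* to_patch) {
  while (patches < patches_end) {
    to_patch += DecodeUnsignedLeb128(&patches);  // Offsets are delta-coded.
    uint32_t* target = reinterpret_cast<uint32_t*>(to_patch);
    *target = static_cast<uint32_t>(*target + delta);  // Relocate in place.
  }
}
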
@@ -1681,12 +1483,8 @@
return false;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-void ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::GdbJITSupport() {
+template <typename ElfTypes>
+void ElfFileImpl<ElfTypes>::GdbJITSupport() {
// We only get here if we only are mapping the program header.
DCHECK(program_header_only_);
@@ -1694,15 +1492,12 @@
std::string error_msg;
// Make it MAP_PRIVATE so we can just give it to gdb if all the necessary
// sections are there.
- std::unique_ptr<ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>>
- all_ptr(Open(const_cast<File*>(file_), PROT_READ | PROT_WRITE,
- MAP_PRIVATE, &error_msg));
+ std::unique_ptr<ElfFileImpl<ElfTypes>> all_ptr(
+ Open(const_cast<File*>(file_), PROT_READ | PROT_WRITE, MAP_PRIVATE, &error_msg));
if (all_ptr.get() == nullptr) {
return;
}
- ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>& all = *all_ptr;
+ ElfFileImpl<ElfTypes>& all = *all_ptr;
// We need the eh_frame for gdb but debug info might be present without it.
const Elf_Shdr* eh_frame = all.FindSectionByName(".eh_frame");
@@ -1732,12 +1527,8 @@
gdb_file_mapping_.reset(all_ptr.release());
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::Strip(std::string* error_msg) {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::Strip(std::string* error_msg) {
// ELF files produced by MCLinker look roughly like this
//
// +------------+
@@ -1798,7 +1589,7 @@
CHECK_NE(0U, section_headers.size());
CHECK_EQ(section_headers.size(), section_headers_original_indexes.size());
- // section 0 is the NULL section, sections start at offset of first section
+ // section 0 is the null section, sections start at offset of first section
CHECK(GetSectionHeader(1) != nullptr);
Elf_Off offset = GetSectionHeader(1)->sh_offset;
for (size_t i = 1; i < section_headers.size(); i++) {
@@ -1840,12 +1631,8 @@
static const bool DEBUG_FIXUP = false;
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::Fixup(Elf_Addr base_address) {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::Fixup(Elf_Addr base_address) {
if (!FixupDynamic(base_address)) {
LOG(WARNING) << "Failed to fixup .dynamic in " << file_->GetPath();
return false;
@@ -1878,12 +1665,8 @@
return true;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::FixupDynamic(Elf_Addr base_address) {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::FixupDynamic(Elf_Addr base_address) {
for (Elf_Word i = 0; i < GetDynamicNum(); i++) {
Elf_Dyn& elf_dyn = GetDynamic(i);
Elf_Word d_tag = elf_dyn.d_tag;
@@ -1902,12 +1685,8 @@
return true;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::FixupSectionHeaders(Elf_Addr base_address) {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::FixupSectionHeaders(Elf_Addr base_address) {
for (Elf_Word i = 0; i < GetSectionHeaderNum(); i++) {
Elf_Shdr* sh = GetSectionHeader(i);
CHECK(sh != nullptr);
@@ -1926,12 +1705,8 @@
return true;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::FixupProgramHeaders(Elf_Addr base_address) {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::FixupProgramHeaders(Elf_Addr base_address) {
// TODO: ELFObjectFile doesn't give access to Elf_Phdr, so we do that ourselves for now.
for (Elf_Word i = 0; i < GetProgramHeaderNum(); i++) {
Elf_Phdr* ph = GetProgramHeader(i);
@@ -1953,12 +1728,8 @@
return true;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::FixupSymbols(Elf_Addr base_address, bool dynamic) {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::FixupSymbols(Elf_Addr base_address, bool dynamic) {
Elf_Word section_type = dynamic ? SHT_DYNSYM : SHT_SYMTAB;
// TODO: Unfortunately, ELFObjectFile has protected symbol access, so use ElfFile
Elf_Shdr* symbol_section = FindSectionByType(section_type);
@@ -1983,12 +1754,8 @@
return true;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
-bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel, Elf_Rela, Elf_Dyn, Elf_Off>
- ::FixupRelocations(Elf_Addr base_address) {
+template <typename ElfTypes>
+bool ElfFileImpl<ElfTypes>::FixupRelocations(Elf_Addr base_address) {
for (Elf_Word i = 0; i < GetSectionHeaderNum(); i++) {
Elf_Shdr* sh = GetSectionHeader(i);
CHECK(sh != nullptr);
@@ -2020,10 +1787,8 @@
}
// Explicit instantiations
-template class ElfFileImpl<Elf32_Ehdr, Elf32_Phdr, Elf32_Shdr, Elf32_Word,
- Elf32_Sword, Elf32_Addr, Elf32_Sym, Elf32_Rel, Elf32_Rela, Elf32_Dyn, Elf32_Off>;
-template class ElfFileImpl<Elf64_Ehdr, Elf64_Phdr, Elf64_Shdr, Elf64_Word,
- Elf64_Sword, Elf64_Addr, Elf64_Sym, Elf64_Rel, Elf64_Rela, Elf64_Dyn, Elf64_Off>;
+template class ElfFileImpl<ElfTypes32>;
+template class ElfFileImpl<ElfTypes64>;
ElfFile::ElfFile(ElfFileImpl32* elf32) : elf32_(elf32), elf64_(nullptr) {
}
@@ -2172,8 +1937,8 @@
DELEGATE_TO_IMPL(FindSymbolAddress, section_type, symbol_name, build_map);
}
-size_t ElfFile::GetLoadedSize() const {
- DELEGATE_TO_IMPL(GetLoadedSize);
+bool ElfFile::GetLoadedSize(size_t* size, std::string* error_msg) const {
+ DELEGATE_TO_IMPL(GetLoadedSize, size, error_msg);
}
bool ElfFile::Strip(File* file, std::string* error_msg) {
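
The GetLoadedSize() change above trades a bare size_t return for a bool plus out-parameters, so a failure can carry an error message instead of an ambiguous zero. A minimal sketch of the resulting calling pattern (the caller shown is hypothetical; only the signature comes from the patch):

// Sketch of a hypothetical caller using the new bool + out-parameter
// contract of ElfFile::GetLoadedSize().
size_t loaded_size = 0;
std::string error_msg;
if (!elf_file->GetLoadedSize(&loaded_size, &error_msg)) {
  LOG(ERROR) << "Could not determine loaded size: " << error_msg;
  return false;
}
// loaded_size is meaningful only when GetLoadedSize() returned true.
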
diff --git a/runtime/elf_file.h b/runtime/elf_file.h
index 286c2a6..48cb4b8 100644
--- a/runtime/elf_file.h
+++ b/runtime/elf_file.h
@@ -26,16 +26,12 @@
#include "os.h"
namespace art {
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
+template <typename ElfTypes>
class ElfFileImpl;
// Explicitly instantiated in elf_file.cc
-typedef ElfFileImpl<Elf32_Ehdr, Elf32_Phdr, Elf32_Shdr, Elf32_Word, Elf32_Sword,
- Elf32_Addr, Elf32_Sym, Elf32_Rel, Elf32_Rela, Elf32_Dyn, Elf32_Off> ElfFileImpl32;
-typedef ElfFileImpl<Elf64_Ehdr, Elf64_Phdr, Elf64_Shdr, Elf64_Word, Elf64_Sword,
- Elf64_Addr, Elf64_Sym, Elf64_Rel, Elf64_Rela, Elf64_Dyn, Elf64_Off> ElfFileImpl64;
+typedef ElfFileImpl<ElfTypes32> ElfFileImpl32;
+typedef ElfFileImpl<ElfTypes64> ElfFileImpl64;
// Used for compile time and runtime for ElfFile access. Because of
// the need for use at runtime, cannot directly use LLVM classes such as
@@ -70,7 +66,7 @@
const std::string& symbol_name,
bool build_map);
- size_t GetLoadedSize() const;
+ bool GetLoadedSize(size_t* size, std::string* error_msg) const;
// Strip an ELF file of unneeded debugging information.
// Returns true on success, false on failure.
diff --git a/runtime/elf_file_impl.h b/runtime/elf_file_impl.h
index 383dc41..3ad096f 100644
--- a/runtime/elf_file_impl.h
+++ b/runtime/elf_file_impl.h
@@ -32,11 +32,22 @@
struct JITCodeEntry;
}
-template <typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr, typename Elf_Word,
- typename Elf_Sword, typename Elf_Addr, typename Elf_Sym, typename Elf_Rel,
- typename Elf_Rela, typename Elf_Dyn, typename Elf_Off>
+template <typename ElfTypes>
class ElfFileImpl {
public:
+ using Elf_Addr = typename ElfTypes::Addr;
+ using Elf_Off = typename ElfTypes::Off;
+ using Elf_Half = typename ElfTypes::Half;
+ using Elf_Word = typename ElfTypes::Word;
+ using Elf_Sword = typename ElfTypes::Sword;
+ using Elf_Ehdr = typename ElfTypes::Ehdr;
+ using Elf_Shdr = typename ElfTypes::Shdr;
+ using Elf_Sym = typename ElfTypes::Sym;
+ using Elf_Rel = typename ElfTypes::Rel;
+ using Elf_Rela = typename ElfTypes::Rela;
+ using Elf_Phdr = typename ElfTypes::Phdr;
+ using Elf_Dyn = typename ElfTypes::Dyn;
+
static ElfFileImpl* Open(File* file, bool writable, bool program_header_only,
std::string* error_msg, uint8_t* requested_base = nullptr);
static ElfFileImpl* Open(File* file, int mmap_prot, int mmap_flags, std::string* error_msg);
@@ -83,8 +94,7 @@
const std::string& symbol_name,
bool build_map);
- // Lookup a string given string section and offset. Returns nullptr for
- // special 0 offset.
+ // Lookup a string given string section and offset. Returns null for special 0 offset.
const char* GetString(Elf_Shdr&, Elf_Word) const;
Elf_Word GetDynamicNum() const;
@@ -96,8 +106,8 @@
Elf_Word GetRelaNum(Elf_Shdr&) const;
Elf_Rela& GetRela(Elf_Shdr&, Elf_Word) const;
- // Returns the expected size when the file is loaded at runtime
- size_t GetLoadedSize() const;
+ // Retrieves the expected size when the file is loaded at runtime. Returns true if successful.
+ bool GetLoadedSize(size_t* size, std::string* error_msg) const;
// Load segments into memory based on PT_LOAD program headers.
// executable is true at run time, false at compile time.
@@ -156,7 +166,7 @@
// Check whether the offset is in range, and set *target to Begin() + offset if OK.
bool CheckAndSet(Elf32_Off offset, const char* label, uint8_t** target, std::string* error_msg);
- // Find symbol in specified table, returning nullptr if it is not found.
+ // Find symbol in specified table, returning null if it is not found.
//
// If build_map is true, builds a map to speed repeated access. The
// map does not include untyped symbol values (aka STT_NOTYPE)
@@ -173,7 +183,7 @@
Elf_Dyn* FindDynamicByType(Elf_Sword type) const;
Elf_Word FindDynamicValueByType(Elf_Sword type) const;
- // Lookup a string by section type. Returns nullptr for special 0 offset.
+ // Lookup a string by section type. Returns null for special 0 offset.
const char* GetString(Elf_Word section_type, Elf_Word) const;
const File* const file_;
@@ -209,9 +219,7 @@
// Support for GDB JIT
uint8_t* jit_elf_image_;
JITCodeEntry* jit_gdb_entry_;
- std::unique_ptr<ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
- Elf_Sword, Elf_Addr, Elf_Sym, Elf_Rel,
- Elf_Rela, Elf_Dyn, Elf_Off>> gdb_file_mapping_;
+ std::unique_ptr<ElfFileImpl<ElfTypes>> gdb_file_mapping_;
void GdbJITSupport();
// Override the 'base' p_vaddr in the first LOAD segment with this value (if non-null).
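
The elf_file refactoring throughout this patch collapses the old eleven-parameter template into a single ElfTypes traits argument, with the using-declarations above re-exporting the member typedefs so method bodies compile unchanged. A sketch of the traits bundle this assumes (the actual ElfTypes32/ElfTypes64 definitions live outside this hunk):

// Sketch of the assumed traits-bundle pattern; the member names match the
// using-declarations in ElfFileImpl, the struct itself is illustrative.
struct ElfTypes32 {
  using Addr  = Elf32_Addr;
  using Off   = Elf32_Off;
  using Half  = Elf32_Half;
  using Word  = Elf32_Word;
  using Sword = Elf32_Sword;
  using Ehdr  = Elf32_Ehdr;
  using Shdr  = Elf32_Shdr;
  using Sym   = Elf32_Sym;
  using Rel   = Elf32_Rel;
  using Rela  = Elf32_Rela;
  using Phdr  = Elf32_Phdr;
  using Dyn   = Elf32_Dyn;
};
// ElfTypes64 mirrors this with the Elf64_* types; ElfFileImpl<ElfTypes32>
// and ElfFileImpl<ElfTypes64> then replace the old 11-parameter spellings.
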
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index cbfba12..625e695 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -22,6 +22,8 @@
#include "class_linker-inl.h"
#include "common_throws.h"
#include "dex_file.h"
+#include "entrypoints/quick/callee_save_frame.h"
+#include "handle_scope-inl.h"
#include "indirect_reference_table.h"
#include "invoke_type.h"
#include "jni_internal.h"
@@ -30,21 +32,50 @@
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/throwable.h"
-#include "handle_scope-inl.h"
+#include "nth_caller_visitor.h"
+#include "runtime.h"
#include "thread.h"
namespace art {
+inline mirror::ArtMethod* GetCalleeSaveMethodCaller(StackReference<mirror::ArtMethod>* sp,
+ Runtime::CalleeSaveType type,
+ bool do_caller_check = false)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK_EQ(sp->AsMirrorPtr(), Runtime::Current()->GetCalleeSaveMethod(type));
+
+ const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, type);
+ auto* caller_sp = reinterpret_cast<StackReference<mirror::ArtMethod>*>(
+ reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
+ auto* caller = caller_sp->AsMirrorPtr();
+
+ if (kIsDebugBuild && do_caller_check) {
+ // Note that do_caller_check is optional, as this method can be called by
+ // stubs and by tests without a proper call stack.
+ NthCallerVisitor visitor(Thread::Current(), 1, true);
+ visitor.WalkStack();
+ CHECK_EQ(caller, visitor.caller);
+ }
+
+ return caller;
+}
+
+inline mirror::ArtMethod* GetCalleeSaveMethodCaller(Thread* self, Runtime::CalleeSaveType type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetCalleeSaveMethodCaller(
+ self->GetManagedStack()->GetTopQuickFrame(), type, true /* do_caller_check */);
+}
+
template <const bool kAccessCheck>
ALWAYS_INLINE
inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
mirror::ArtMethod* method,
Thread* self, bool* slow_path) {
mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx);
- if (UNLIKELY(klass == NULL)) {
+ if (UNLIKELY(klass == nullptr)) {
klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, method);
*slow_path = true;
- if (klass == NULL) {
+ if (klass == nullptr) {
DCHECK(self->IsExceptionPending());
return nullptr; // Failure
} else {
@@ -526,19 +557,19 @@
mirror::Object* this_object,
mirror::ArtMethod* referrer,
bool access_check, InvokeType type) {
- if (UNLIKELY(this_object == NULL && type != kStatic)) {
- return NULL;
+ if (UNLIKELY(this_object == nullptr && type != kStatic)) {
+ return nullptr;
}
mirror::ArtMethod* resolved_method =
referrer->GetDeclaringClass()->GetDexCache()->GetResolvedMethod(method_idx);
- if (UNLIKELY(resolved_method == NULL)) {
- return NULL;
+ if (UNLIKELY(resolved_method == nullptr)) {
+ return nullptr;
}
if (access_check) {
// Check for incompatible class change errors and access.
bool icce = resolved_method->CheckIncompatibleClassChange(type);
if (UNLIKELY(icce)) {
- return NULL;
+ return nullptr;
}
mirror::Class* methods_class = resolved_method->GetDeclaringClass();
mirror::Class* referring_class = referrer->GetDeclaringClass();
@@ -546,7 +577,7 @@
!referring_class->CanAccessMember(methods_class,
resolved_method->GetAccessFlags()))) {
// Potential illegal access, may need to refine the method's class.
- return NULL;
+ return nullptr;
}
}
if (type == kInterface) { // Most common form of slow path dispatch.
@@ -606,7 +637,7 @@
inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) {
// Save any pending exception over monitor exit call.
- mirror::Throwable* saved_exception = NULL;
+ mirror::Throwable* saved_exception = nullptr;
if (UNLIKELY(self->IsExceptionPending())) {
saved_exception = self->GetException();
self->ClearException();
@@ -620,7 +651,7 @@
<< self->GetException()->Dump();
}
// Restore pending exception.
- if (saved_exception != NULL) {
+ if (saved_exception != nullptr) {
self->SetException(saved_exception);
}
}
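
GetCalleeSaveMethodCaller() above recovers the caller by stepping over the callee-save frame: the caller's StackReference<ArtMethod> sits exactly one frame size beyond sp. A sketch of the arithmetic for the kRefsOnly frame (variable names are illustrative; the patch's function is authoritative):

// Sketch of the frame walk performed by GetCalleeSaveMethodCaller();
// GetCalleeSaveFrameSize() supplies the ISA-specific frame size.
const size_t frame_size = GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kRefsOnly);
auto* caller_slot = reinterpret_cast<StackReference<mirror::ArtMethod>*>(
    reinterpret_cast<uintptr_t>(sp) + frame_size);
mirror::ArtMethod* caller = caller_slot->AsMirrorPtr();
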
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 1d8df68..ce56739 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -23,6 +23,7 @@
#include "gc/accounting/card_table-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
+#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "reflection.h"
@@ -43,9 +44,9 @@
return nullptr; // Failure
}
mirror::Class* klass = referrer->GetDexCacheResolvedType<false>(type_idx);
- if (UNLIKELY(klass == NULL)) { // Not in dex cache so try to resolve
+ if (UNLIKELY(klass == nullptr)) { // Not in dex cache so try to resolve
klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, referrer);
- if (klass == NULL) { // Error
+ if (klass == nullptr) { // Error
DCHECK(self->IsExceptionPending());
return nullptr; // Failure
}
@@ -230,13 +231,13 @@
// Build argument array possibly triggering GC.
soa.Self()->AssertThreadSuspensionIsAllowable();
- jobjectArray args_jobj = NULL;
+ jobjectArray args_jobj = nullptr;
const JValue zero;
int32_t target_sdk_version = Runtime::Current()->GetTargetSdkVersion();
// Do not create empty arrays unless needed to maintain Dalvik bug compatibility.
if (args.size() > 0 || (target_sdk_version > 0 && target_sdk_version <= 21)) {
- args_jobj = soa.Env()->NewObjectArray(args.size(), WellKnownClasses::java_lang_Object, NULL);
- if (args_jobj == NULL) {
+ args_jobj = soa.Env()->NewObjectArray(args.size(), WellKnownClasses::java_lang_Object, nullptr);
+ if (args_jobj == nullptr) {
CHECK(soa.Self()->IsExceptionPending());
return zero;
}
@@ -248,7 +249,7 @@
JValue jv;
jv.SetJ(args.at(i).j);
mirror::Object* val = BoxPrimitive(Primitive::GetType(shorty[i + 1]), jv);
- if (val == NULL) {
+ if (val == nullptr) {
CHECK(soa.Self()->IsExceptionPending());
return zero;
}
@@ -257,7 +258,7 @@
}
}
- // Call Proxy.invoke(Proxy proxy, ArtMethod method, Object[] args).
+ // Call Proxy.invoke(Proxy proxy, Method method, Object[] args).
jvalue invocation_args[3];
invocation_args[0].l = rcvr_jobj;
invocation_args[1].l = interface_method_jobj;
@@ -269,15 +270,14 @@
// Unbox result and handle error conditions.
if (LIKELY(!soa.Self()->IsExceptionPending())) {
- if (shorty[0] == 'V' || (shorty[0] == 'L' && result == NULL)) {
+ if (shorty[0] == 'V' || (shorty[0] == 'L' && result == nullptr)) {
// Do nothing.
return zero;
} else {
StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ArtMethod> h_interface_method(
- hs.NewHandle(soa.Decode<mirror::ArtMethod*>(interface_method_jobj)));
+ auto h_interface_method(hs.NewHandle(soa.Decode<mirror::Method*>(interface_method_jobj)));
// This can cause thread suspension.
- mirror::Class* result_type = h_interface_method->GetReturnType();
+ mirror::Class* result_type = h_interface_method->GetArtMethod()->GetReturnType();
mirror::Object* result_ref = soa.Decode<mirror::Object*>(result);
JValue result_unboxed;
if (!UnboxPrimitiveForResult(result_ref, result_type, &result_unboxed)) {
@@ -293,10 +293,9 @@
if (exception->IsCheckedException()) {
mirror::Object* rcvr = soa.Decode<mirror::Object*>(rcvr_jobj);
mirror::Class* proxy_class = rcvr->GetClass();
- mirror::ArtMethod* interface_method =
- soa.Decode<mirror::ArtMethod*>(interface_method_jobj);
+ mirror::Method* interface_method = soa.Decode<mirror::Method*>(interface_method_jobj);
mirror::ArtMethod* proxy_method =
- rcvr->GetClass()->FindVirtualMethodForInterface(interface_method);
+ rcvr->GetClass()->FindVirtualMethodForInterface(interface_method->GetArtMethod());
int throws_index = -1;
size_t num_virt_methods = proxy_class->NumVirtualMethods();
for (size_t i = 0; i < num_virt_methods; i++) {
@@ -316,7 +315,7 @@
}
if (!declares_exception) {
soa.Self()->ThrowNewWrappedException("Ljava/lang/reflect/UndeclaredThrowableException;",
- NULL);
+ nullptr);
}
}
return zero;
diff --git a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
index 28e19d4..d4844c2 100644
--- a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
+++ b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
@@ -47,7 +47,7 @@
method = shadow_frame->GetMethod();
}
}
- uint16_t arg_offset = (code_item == NULL) ? 0 : code_item->registers_size_ - code_item->ins_size_;
+ uint16_t arg_offset = (code_item == nullptr) ? 0 : code_item->registers_size_ - code_item->ins_size_;
method->Invoke(self, shadow_frame->GetVRegArgs(arg_offset),
(shadow_frame->NumberOfVRegs() - arg_offset) * sizeof(uint32_t),
result, method->GetShorty());
diff --git a/runtime/entrypoints/jni/jni_entrypoints.cc b/runtime/entrypoints/jni/jni_entrypoints.cc
index 2752407..a68eeeb 100644
--- a/runtime/entrypoints/jni/jni_entrypoints.cc
+++ b/runtime/entrypoints/jni/jni_entrypoints.cc
@@ -34,15 +34,15 @@
Locks::mutator_lock_->AssertNotHeld(self); // We come here as Native.
ScopedObjectAccess soa(self);
- mirror::ArtMethod* method = self->GetCurrentMethod(NULL);
- DCHECK(method != NULL);
+ mirror::ArtMethod* method = self->GetCurrentMethod(nullptr);
+ DCHECK(method != nullptr);
- // Lookup symbol address for method, on failure we'll return NULL with an exception set,
+ // Lookup symbol address for method; on failure we'll return null with an exception set,
// otherwise we return the address of the method we found.
void* native_code = soa.Vm()->FindCodeForNativeMethod(method);
- if (native_code == NULL) {
+ if (native_code == nullptr) {
DCHECK(self->IsExceptionPending());
- return NULL;
+ return nullptr;
} else {
// Register so that future calls don't come here
method->RegisterNative(native_code, false);
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index c049e3d..fa129af 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -153,6 +153,32 @@
} else { \
return CheckAndAllocArrayFromCodeInstrumented(type_idx, component_count, method, self, true, allocator_type); \
} \
+} \
+extern "C" mirror::String* artAllocStringFromBytesFromCode##suffix##suffix2( \
+ mirror::ByteArray* byte_array, int32_t high, int32_t offset, int32_t byte_count, \
+ Thread* self) \
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ ScopedQuickEntrypointChecks sqec(self); \
+ StackHandleScope<1> hs(self); \
+ Handle<mirror::ByteArray> handle_array(hs.NewHandle(byte_array)); \
+ return mirror::String::AllocFromByteArray<instrumented_bool>(self, byte_count, handle_array, \
+ offset, high, allocator_type); \
+} \
+extern "C" mirror::String* artAllocStringFromCharsFromCode##suffix##suffix2( \
+ int32_t offset, int32_t char_count, mirror::CharArray* char_array, Thread* self) \
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ StackHandleScope<1> hs(self); \
+ Handle<mirror::CharArray> handle_array(hs.NewHandle(char_array)); \
+ return mirror::String::AllocFromCharArray<instrumented_bool>(self, char_count, handle_array, \
+ offset, allocator_type); \
+} \
+extern "C" mirror::String* artAllocStringFromStringFromCode##suffix##suffix2( \
+ mirror::String* string, Thread* self) \
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ StackHandleScope<1> hs(self); \
+ Handle<mirror::String> handle_string(hs.NewHandle(string)); \
+ return mirror::String::AllocFromString<instrumented_bool>(self, handle_string->GetLength(), \
+ handle_string, 0, allocator_type); \
}
#define GENERATE_ENTRYPOINTS_FOR_ALLOCATOR(suffix, allocator_type) \
@@ -176,6 +202,9 @@
extern "C" void* art_quick_alloc_object_with_access_check##suffix(uint32_t type_idx, mirror::ArtMethod* ref); \
extern "C" void* art_quick_check_and_alloc_array##suffix(uint32_t, int32_t, mirror::ArtMethod* ref); \
extern "C" void* art_quick_check_and_alloc_array_with_access_check##suffix(uint32_t, int32_t, mirror::ArtMethod* ref); \
+extern "C" void* art_quick_alloc_string_from_bytes##suffix(void*, int32_t, int32_t, int32_t); \
+extern "C" void* art_quick_alloc_string_from_chars##suffix(int32_t, int32_t, void*); \
+extern "C" void* art_quick_alloc_string_from_string##suffix(void*); \
extern "C" void* art_quick_alloc_array##suffix##_instrumented(uint32_t, int32_t, mirror::ArtMethod* ref); \
extern "C" void* art_quick_alloc_array_resolved##suffix##_instrumented(mirror::Class* klass, int32_t, mirror::ArtMethod* ref); \
extern "C" void* art_quick_alloc_array_with_access_check##suffix##_instrumented(uint32_t, int32_t, mirror::ArtMethod* ref); \
@@ -185,6 +214,9 @@
extern "C" void* art_quick_alloc_object_with_access_check##suffix##_instrumented(uint32_t type_idx, mirror::ArtMethod* ref); \
extern "C" void* art_quick_check_and_alloc_array##suffix##_instrumented(uint32_t, int32_t, mirror::ArtMethod* ref); \
extern "C" void* art_quick_check_and_alloc_array_with_access_check##suffix##_instrumented(uint32_t, int32_t, mirror::ArtMethod* ref); \
+extern "C" void* art_quick_alloc_string_from_bytes##suffix##_instrumented(void*, int32_t, int32_t, int32_t); \
+extern "C" void* art_quick_alloc_string_from_chars##suffix##_instrumented(int32_t, int32_t, void*); \
+extern "C" void* art_quick_alloc_string_from_string##suffix##_instrumented(void*); \
void SetQuickAllocEntryPoints##suffix(QuickEntryPoints* qpoints, bool instrumented) { \
if (instrumented) { \
qpoints->pAllocArray = art_quick_alloc_array##suffix##_instrumented; \
@@ -196,6 +228,9 @@
qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check##suffix##_instrumented; \
qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array##suffix##_instrumented; \
qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check##suffix##_instrumented; \
+ qpoints->pAllocStringFromBytes = art_quick_alloc_string_from_bytes##suffix##_instrumented; \
+ qpoints->pAllocStringFromChars = art_quick_alloc_string_from_chars##suffix##_instrumented; \
+ qpoints->pAllocStringFromString = art_quick_alloc_string_from_string##suffix##_instrumented; \
} else { \
qpoints->pAllocArray = art_quick_alloc_array##suffix; \
qpoints->pAllocArrayResolved = art_quick_alloc_array_resolved##suffix; \
@@ -206,6 +241,9 @@
qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check##suffix; \
qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array##suffix; \
qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check##suffix; \
+ qpoints->pAllocStringFromBytes = art_quick_alloc_string_from_bytes##suffix; \
+ qpoints->pAllocStringFromChars = art_quick_alloc_string_from_chars##suffix; \
+ qpoints->pAllocStringFromString = art_quick_alloc_string_from_string##suffix; \
} \
}
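
Each allocator that invokes this macro gets its own copy of the three string entrypoints. As a rough illustration, expanding the byte-array case for the DlMalloc allocator with instrumented_bool = false would produce approximately the following (the suffix and allocator constant are assumptions for illustration; the macro above is authoritative):

// Approximate expansion sketch; names here are illustrative assumptions.
extern "C" mirror::String* artAllocStringFromBytesFromCodeDlMalloc(
    mirror::ByteArray* byte_array, int32_t high, int32_t offset,
    int32_t byte_count, Thread* self)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  ScopedQuickEntrypointChecks sqec(self);
  StackHandleScope<1> hs(self);
  Handle<mirror::ByteArray> handle_array(hs.NewHandle(byte_array));
  return mirror::String::AllocFromByteArray<false>(
      self, byte_count, handle_array, offset, high,
      gc::kAllocatorTypeDlMalloc);
}
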
diff --git a/runtime/entrypoints/quick/quick_cast_entrypoints.cc b/runtime/entrypoints/quick/quick_cast_entrypoints.cc
index a6ab69b..37de380 100644
--- a/runtime/entrypoints/quick/quick_cast_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_cast_entrypoints.cc
@@ -22,8 +22,8 @@
// Assignable test for code, won't throw. Null and equality tests already performed
extern "C" uint32_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(klass != NULL);
- DCHECK(ref_class != NULL);
+ DCHECK(klass != nullptr);
+ DCHECK(ref_class != nullptr);
return klass->IsAssignableFrom(ref_class) ? 1 : 0;
}
diff --git a/runtime/entrypoints/quick/quick_default_externs.h b/runtime/entrypoints/quick/quick_default_externs.h
index b7e8d50..1fd8a949a 100644
--- a/runtime/entrypoints/quick/quick_default_externs.h
+++ b/runtime/entrypoints/quick/quick_default_externs.h
@@ -34,10 +34,10 @@
extern "C" void art_quick_check_cast(const art::mirror::Class*, const art::mirror::Class*);
// DexCache entrypoints.
-extern "C" void* art_quick_initialize_static_storage(uint32_t, art::mirror::ArtMethod*);
-extern "C" void* art_quick_initialize_type(uint32_t, art::mirror::ArtMethod*);
-extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, art::mirror::ArtMethod*);
-extern "C" void* art_quick_resolve_string(uint32_t, art::mirror::ArtMethod*);
+extern "C" void* art_quick_initialize_static_storage(uint32_t);
+extern "C" void* art_quick_initialize_type(uint32_t);
+extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t);
+extern "C" void* art_quick_resolve_string(uint32_t);
// Field entrypoints.
extern "C" int art_quick_set8_instance(uint32_t, void*, int8_t);
diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
index 6a8aaf2..1b1ef66 100644
--- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "base/logging.h"
#include "callee_save_frame.h"
#include "dex_file-inl.h"
#include "interpreter/interpreter.h"
@@ -29,6 +30,12 @@
extern "C" NO_RETURN void artDeoptimize(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
+
+ if (VLOG_IS_ON(deopt)) {
+ LOG(INFO) << "Deopting:";
+ self->Dump(LOG(INFO));
+ }
+
self->SetException(Thread::GetDeoptimizationException());
self->QuickDeliverException();
}
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index 348495d..9148878 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -25,42 +25,38 @@
namespace art {
-extern "C" mirror::Class* artInitializeStaticStorageFromCode(uint32_t type_idx,
- mirror::ArtMethod* referrer,
- Thread* self)
+extern "C" mirror::Class* artInitializeStaticStorageFromCode(uint32_t type_idx, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Called to ensure static storage base is initialized for direct static field reads and writes.
// A class may be accessing another class' fields when it doesn't have access, as access has been
// given by inheritance.
ScopedQuickEntrypointChecks sqec(self);
- return ResolveVerifyAndClinit(type_idx, referrer, self, true, false);
+ auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kRefsOnly);
+ return ResolveVerifyAndClinit(type_idx, caller, self, true, false);
}
-extern "C" mirror::Class* artInitializeTypeFromCode(uint32_t type_idx,
- mirror::ArtMethod* referrer,
- Thread* self)
+extern "C" mirror::Class* artInitializeTypeFromCode(uint32_t type_idx, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Called when method->dex_cache_resolved_types_[] misses.
ScopedQuickEntrypointChecks sqec(self);
- return ResolveVerifyAndClinit(type_idx, referrer, self, false, false);
+ auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kRefsOnly);
+ return ResolveVerifyAndClinit(type_idx, caller, self, false, false);
}
-extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx,
- mirror::ArtMethod* referrer,
- Thread* self)
+extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Called when caller isn't guaranteed to have access to a type and the dex cache may be
// unpopulated.
ScopedQuickEntrypointChecks sqec(self);
- return ResolveVerifyAndClinit(type_idx, referrer, self, false, true);
+ auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kRefsOnly);
+ return ResolveVerifyAndClinit(type_idx, caller, self, false, true);
}
-extern "C" mirror::String* artResolveStringFromCode(int32_t string_idx,
- mirror::ArtMethod* referrer,
- Thread* self)
+extern "C" mirror::String* artResolveStringFromCode(int32_t string_idx, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- return ResolveStringFromCode(referrer, string_idx);
+ auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kRefsOnly);
+ return ResolveStringFromCode(caller, string_idx);
}
} // namespace art
diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h
index db8c0e3..b72ce34 100644
--- a/runtime/entrypoints/quick/quick_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_entrypoints.h
@@ -32,6 +32,8 @@
class ArtMethod;
class Class;
class Object;
+template<class MirrorType>
+class CompressedReference;
} // namespace mirror
class Thread;
@@ -65,6 +67,10 @@
jobject locked, Thread* self)
NO_THREAD_SAFETY_ANALYSIS HOT_ATTR;
+extern void ReadBarrierJni(mirror::CompressedReference<mirror::Object>* handle_on_stack,
+ Thread* self)
+ NO_THREAD_SAFETY_ANALYSIS HOT_ATTR;
+
} // namespace art
#endif // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_H_
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index eaf874e..0aca58f 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -29,14 +29,17 @@
V(AllocObjectWithAccessCheck, void*, uint32_t, mirror::ArtMethod*) \
V(CheckAndAllocArray, void*, uint32_t, int32_t, mirror::ArtMethod*) \
V(CheckAndAllocArrayWithAccessCheck, void*, uint32_t, int32_t, mirror::ArtMethod*) \
+ V(AllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t) \
+ V(AllocStringFromChars, void*, int32_t, int32_t, void*) \
+ V(AllocStringFromString, void*, void*) \
\
V(InstanceofNonTrivial, uint32_t, const mirror::Class*, const mirror::Class*) \
V(CheckCast, void, const mirror::Class*, const mirror::Class*) \
\
- V(InitializeStaticStorage, void*, uint32_t, mirror::ArtMethod*) \
- V(InitializeTypeAndVerifyAccess, void*, uint32_t, mirror::ArtMethod*) \
- V(InitializeType, void*, uint32_t, mirror::ArtMethod*) \
- V(ResolveString, void*, uint32_t, mirror::ArtMethod*) \
+ V(InitializeStaticStorage, void*, uint32_t) \
+ V(InitializeTypeAndVerifyAccess, void*, uint32_t) \
+ V(InitializeType, void*, uint32_t) \
+ V(ResolveString, void*, uint32_t) \
\
V(Set8Instance, int, uint32_t, void*, int8_t) \
V(Set8Static, int, uint32_t, int8_t) \
@@ -123,8 +126,26 @@
V(Deoptimize, void, void) \
\
V(A64Load, int64_t, volatile const int64_t *) \
- V(A64Store, void, volatile int64_t *, int64_t)
-
+ V(A64Store, void, volatile int64_t *, int64_t) \
+\
+ V(NewEmptyString, void) \
+ V(NewStringFromBytes_B, void) \
+ V(NewStringFromBytes_BI, void) \
+ V(NewStringFromBytes_BII, void) \
+ V(NewStringFromBytes_BIII, void) \
+ V(NewStringFromBytes_BIIString, void) \
+ V(NewStringFromBytes_BString, void) \
+ V(NewStringFromBytes_BIICharset, void) \
+ V(NewStringFromBytes_BCharset, void) \
+ V(NewStringFromChars_C, void) \
+ V(NewStringFromChars_CII, void) \
+ V(NewStringFromChars_IIC, void) \
+ V(NewStringFromCodePoints, void) \
+ V(NewStringFromString, void) \
+ V(NewStringFromStringBuffer, void) \
+ V(NewStringFromStringBuilder, void) \
+\
+ V(ReadBarrierJni, void, mirror::CompressedReference<mirror::Object>*, Thread*)
#endif // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_LIST_H_
#undef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_LIST_H_ // #define is only for lint.
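
The list above is an X-macro: every V(Name, ReturnType, Args...) row is expanded by whatever definition of V is in force at the include site, so one table drives the entrypoint struct, its offsets, and assembly constants. A minimal consumption sketch (the struct shown is a simplification; the real expansion uses typed function pointers):

// Sketch of the X-macro consumption pattern; pNAME matches the real
// field-naming convention, the void* fields are a simplification.
struct QuickEntryPointsSketch {
#define SKETCH_ENTRY(name, rettype, ...) void* p##name;
  QUICK_ENTRYPOINT_LIST(SKETCH_ENTRY)
#undef SKETCH_ENTRY
};
// Appending a row such as V(ReadBarrierJni, ...) therefore grows the struct
// by exactly one pointer, which entrypoints_order_test.cc verifies below.
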
diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
index 54dbd8c..eb1b105 100644
--- a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
@@ -41,7 +41,7 @@
bool interpreter_entry = (result == GetQuickToInterpreterBridge());
instrumentation->PushInstrumentationStackFrame(self, method->IsStatic() ? nullptr : this_object,
method, lr, interpreter_entry);
- CHECK(result != NULL) << PrettyMethod(method);
+ CHECK(result != nullptr) << PrettyMethod(method);
return result;
}
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index e478d2a..51817a2 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -22,6 +22,13 @@
namespace art {
+extern void ReadBarrierJni(mirror::CompressedReference<mirror::Object>* handle_on_stack,
+ Thread* self ATTRIBUTE_UNUSED) {
+ // Call the read barrier and update the handle.
+ mirror::Object* to_ref = ReadBarrier::BarrierForRoot(handle_on_stack);
+ handle_on_stack->Assign(to_ref);
+}
+
// Called on entry to JNI, transition out of Runnable and release share of mutator_lock_.
extern uint32_t JniMethodStart(Thread* self) {
JNIEnvExt* env = self->GetJniEnv();
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index 9644b98..f22edc1 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -34,10 +34,10 @@
extern "C" NO_RETURN void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
/*
- * exception may be NULL, in which case this routine should
+ * exception may be null, in which case this routine should
* throw NPE. NOTE: this is a convenience for generated code,
* which previously did the null check inline and constructed
- * and threw a NPE if NULL. This routine responsible for setting
+ * and threw an NPE if null. This routine is responsible for setting
* exception_ in thread and delivering the exception.
*/
ScopedQuickEntrypointChecks sqec(self);
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 2e813c8..345b0ad 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -26,6 +26,7 @@
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
+#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
@@ -293,8 +294,13 @@
static mirror::ArtMethod* GetCallingMethod(StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(sp->AsMirrorPtr()->IsCalleeSaveMethod());
- uint8_t* previous_sp = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
- return reinterpret_cast<StackReference<mirror::ArtMethod>*>(previous_sp)->AsMirrorPtr();
+ return GetCalleeSaveMethodCaller(sp, Runtime::kRefsAndArgs);
+ }
+
+ static uint32_t GetCallingDexPc(StackReference<mirror::ArtMethod>* sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(sp->AsMirrorPtr()->IsCalleeSaveMethod());
+ return GetCallingMethod(sp)->ToDexPc(QuickArgumentVisitor::GetCallingPc(sp));
}
// For the given quick ref and args quick frame, return the caller's PC.
@@ -760,11 +766,12 @@
mirror::ArtMethod* interface_method = proxy_method->FindOverriddenMethod();
DCHECK(interface_method != nullptr) << PrettyMethod(proxy_method);
DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
- jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_method);
+ self->EndAssertNoThreadSuspension(old_cause);
+ jobject interface_method_jobj = soa.AddLocalReference<jobject>(
+ mirror::Method::CreateFromArtMethod(soa.Self(), interface_method));
// All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code
// that performs allocations.
- self->EndAssertNoThreadSuspension(old_cause);
JValue result = InvokeProxyInvocationHandler(soa, shorty, rcvr_jobj, interface_method_jobj, args);
// Restore references which might have moved.
local_ref_visitor.FixupReferences();
@@ -825,12 +832,13 @@
// Compute details about the called method (avoid GCs)
ClassLinker* linker = Runtime::Current()->GetClassLinker();
- mirror::ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
InvokeType invoke_type;
MethodReference called_method(nullptr, 0);
const bool called_method_known_on_entry = !called->IsRuntimeMethod();
+ mirror::ArtMethod* caller = nullptr;
if (!called_method_known_on_entry) {
- uint32_t dex_pc = caller->ToDexPc(QuickArgumentVisitor::GetCallingPc(sp));
+ caller = QuickArgumentVisitor::GetCallingMethod(sp);
+ uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
const DexFile::CodeItem* code;
called_method.dex_file = caller->GetDexFile();
code = caller->GetCodeItem();
@@ -1944,16 +1952,13 @@
// to hold the mutator lock (see SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) annotations).
template<InvokeType type, bool access_check>
-static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
- mirror::ArtMethod* caller_method,
- Thread* self, StackReference<mirror::ArtMethod>* sp);
-
-template<InvokeType type, bool access_check>
-static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
- mirror::ArtMethod* caller_method,
- Thread* self, StackReference<mirror::ArtMethod>* sp) {
+static TwoWordReturn artInvokeCommon(uint32_t method_idx,
+ mirror::Object* this_object,
+ Thread* self,
+ StackReference<mirror::ArtMethod>* sp) {
ScopedQuickEntrypointChecks sqec(self);
DCHECK_EQ(sp->AsMirrorPtr(), Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs));
+ mirror::ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
mirror::ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method, access_check,
type);
if (UNLIKELY(method == nullptr)) {
@@ -1992,7 +1997,6 @@
template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \
TwoWordReturn artInvokeCommon<type, access_check>(uint32_t method_idx, \
mirror::Object* this_object, \
- mirror::ArtMethod* caller_method, \
Thread* self, \
StackReference<mirror::ArtMethod>* sp) \
@@ -2010,58 +2014,58 @@
// See comments in runtime_support_asm.S
extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck(
- uint32_t method_idx, mirror::Object* this_object,
- mirror::ArtMethod* caller_method, Thread* self,
+ uint32_t method_idx,
+ mirror::Object* this_object,
+ Thread* self,
StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return artInvokeCommon<kInterface, true>(method_idx, this_object,
- caller_method, self, sp);
+ return artInvokeCommon<kInterface, true>(method_idx, this_object, self, sp);
}
extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck(
- uint32_t method_idx, mirror::Object* this_object,
- mirror::ArtMethod* caller_method, Thread* self,
+ uint32_t method_idx,
+ mirror::Object* this_object,
+ Thread* self,
StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return artInvokeCommon<kDirect, true>(method_idx, this_object, caller_method,
- self, sp);
+ return artInvokeCommon<kDirect, true>(method_idx, this_object, self, sp);
}
extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck(
- uint32_t method_idx, mirror::Object* this_object,
- mirror::ArtMethod* caller_method, Thread* self,
+ uint32_t method_idx,
+ mirror::Object* this_object,
+ Thread* self,
StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return artInvokeCommon<kStatic, true>(method_idx, this_object, caller_method,
- self, sp);
+ return artInvokeCommon<kStatic, true>(method_idx, this_object, self, sp);
}
extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck(
- uint32_t method_idx, mirror::Object* this_object,
- mirror::ArtMethod* caller_method, Thread* self,
+ uint32_t method_idx,
+ mirror::Object* this_object,
+ Thread* self,
StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return artInvokeCommon<kSuper, true>(method_idx, this_object, caller_method,
- self, sp);
+ return artInvokeCommon<kSuper, true>(method_idx, this_object, self, sp);
}
extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck(
- uint32_t method_idx, mirror::Object* this_object,
- mirror::ArtMethod* caller_method, Thread* self,
+ uint32_t method_idx,
+ mirror::Object* this_object,
+ Thread* self,
StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return artInvokeCommon<kVirtual, true>(method_idx, this_object, caller_method,
- self, sp);
+ return artInvokeCommon<kVirtual, true>(method_idx, this_object, self, sp);
}
// Determine target of interface dispatch. This object is known non-null.
extern "C" TwoWordReturn artInvokeInterfaceTrampoline(mirror::ArtMethod* interface_method,
mirror::Object* this_object,
- mirror::ArtMethod* caller_method,
Thread* self,
StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
+ mirror::ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
mirror::ArtMethod* method;
if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
method = this_object->GetClass()->FindVirtualMethodForInterface(interface_method);
@@ -2073,12 +2077,7 @@
} else {
DCHECK(interface_method == Runtime::Current()->GetResolutionMethod());
- // Find the caller PC.
- constexpr size_t pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, Runtime::kRefsAndArgs);
- uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(sp) + pc_offset);
-
- // Map the caller PC to a dex PC.
- uint32_t dex_pc = caller_method->ToDexPc(caller_pc);
+ uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
const DexFile::CodeItem* code = caller_method->GetCodeItem();
CHECK_LT(dex_pc, code->insns_size_in_code_units_);
const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index 0664fa0..482f656 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -167,7 +167,13 @@
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCheckAndAllocArray, pCheckAndAllocArrayWithAccessCheck,
sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCheckAndAllocArrayWithAccessCheck,
- pInstanceofNonTrivial, sizeof(void*));
+ pAllocStringFromBytes, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocStringFromBytes, pAllocStringFromChars,
+ sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocStringFromChars, pAllocStringFromString,
+ sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocStringFromString, pInstanceofNonTrivial,
+ sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInstanceofNonTrivial, pCheckCast, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCheckCast, pInitializeStaticStorage, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInitializeStaticStorage, pInitializeTypeAndVerifyAccess,
@@ -269,7 +275,40 @@
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pDeoptimize, pA64Load, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pA64Load, pA64Store, sizeof(void*));
- CHECKED(OFFSETOF_MEMBER(QuickEntryPoints, pA64Store)
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pA64Store, pNewEmptyString, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pNewEmptyString, pNewStringFromBytes_B, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pNewStringFromBytes_B, pNewStringFromBytes_BI,
+ sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pNewStringFromBytes_BI, pNewStringFromBytes_BII,
+ sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pNewStringFromBytes_BII, pNewStringFromBytes_BIII,
+ sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pNewStringFromBytes_BIII, pNewStringFromBytes_BIIString,
+ sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pNewStringFromBytes_BIIString,
+ pNewStringFromBytes_BString, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pNewStringFromBytes_BString,
+ pNewStringFromBytes_BIICharset, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pNewStringFromBytes_BIICharset,
+ pNewStringFromBytes_BCharset, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pNewStringFromBytes_BCharset,
+ pNewStringFromChars_C, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pNewStringFromChars_C, pNewStringFromChars_CII,
+ sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pNewStringFromChars_CII, pNewStringFromChars_IIC,
+ sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pNewStringFromChars_IIC, pNewStringFromCodePoints,
+ sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pNewStringFromCodePoints, pNewStringFromString,
+ sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pNewStringFromString, pNewStringFromStringBuffer,
+ sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pNewStringFromStringBuffer, pNewStringFromStringBuilder,
+ sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pNewStringFromStringBuilder, pReadBarrierJni,
+ sizeof(void*));
+
+ CHECKED(OFFSETOF_MEMBER(QuickEntryPoints, pReadBarrierJni)
+ sizeof(void*) == sizeof(QuickEntryPoints), QuickEntryPoints_all);
}
};
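
Each EXPECT_OFFSET_DIFFNP line pins two adjacent entrypoints to be exactly sizeof(void*) apart, fixing both field order and packing so the assembly-side offsets stay valid. Conceptually, each assertion reduces to something like the following sketch (the real macro is defined in the test's helpers):

// Sketch of what one EXPECT_OFFSET_DIFFNP(type, first, second, diff)
// assertion checks; OFFSETOF_MEMBER is the offset-of helper ART uses above.
EXPECT_EQ(OFFSETOF_MEMBER(QuickEntryPoints, pNewEmptyString) -
              OFFSETOF_MEMBER(QuickEntryPoints, pA64Store),
          sizeof(void*));
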
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 1770658..6808000 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -200,7 +200,7 @@
fake_stack.push_back(0);
fake_stack.push_back(0xEBAD6070); // return pc
- // Push Method* of NULL to terminate the trace
+ // Push Method* of null to terminate the trace
fake_stack.push_back(0);
// Push null values which will become null incoming arguments.
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 5224d64..399832a 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -238,9 +238,9 @@
std::string error_msg;
mem_map_.reset(MemMap::MapAnonymous(name_.c_str(), nullptr, capacity_ * sizeof(begin_[0]),
PROT_READ | PROT_WRITE, false, false, &error_msg));
- CHECK(mem_map_.get() != NULL) << "couldn't allocate mark stack.\n" << error_msg;
+ CHECK(mem_map_.get() != nullptr) << "couldn't allocate mark stack.\n" << error_msg;
uint8_t* addr = mem_map_->Begin();
- CHECK(addr != NULL);
+ CHECK(addr != nullptr);
debug_is_sorted_ = true;
begin_ = reinterpret_cast<StackReference<T>*>(addr);
Reset();
diff --git a/runtime/gc/accounting/bitmap.h b/runtime/gc/accounting/bitmap.h
index b294d49..eb00472 100644
--- a/runtime/gc/accounting/bitmap.h
+++ b/runtime/gc/accounting/bitmap.h
@@ -121,7 +121,7 @@
const size_t bitmap_size_;
private:
- DISALLOW_COPY_AND_ASSIGN(Bitmap);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Bitmap);
};
// One bit per kAlignment in range (start, end]
@@ -184,6 +184,8 @@
uintptr_t const cover_begin_;
uintptr_t const cover_end_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryRangeBitmap);
};
} // namespace accounting
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index 7879632..1a7b1a3 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -36,7 +36,7 @@
/*
* Maintain a card table from the write barrier. All writes of
- * non-NULL values to heap addresses should go through an entry in
+ * non-null values to heap addresses should go through an entry in
* WriteBarrier, and from there to here.
*
* The heap is divided into "cards" of GC_CARD_SIZE bytes, as
@@ -44,7 +44,7 @@
* data per card, to be used by the GC. The value of the byte will be
* one of GC_CARD_CLEAN or GC_CARD_DIRTY.
*
- * After any store of a non-NULL object pointer into a heap object,
+ * After any store of a non-null object pointer into a heap object,
* code is obliged to mark the card dirty. The setters in
* object.h [such as SetFieldObject] do this for you. The
* compiler also contains code to mark cards as dirty.
@@ -64,13 +64,13 @@
std::unique_ptr<MemMap> mem_map(
MemMap::MapAnonymous("card table", nullptr, capacity + 256, PROT_READ | PROT_WRITE,
false, false, &error_msg));
- CHECK(mem_map.get() != NULL) << "couldn't allocate card table: " << error_msg;
+ CHECK(mem_map.get() != nullptr) << "couldn't allocate card table: " << error_msg;
// All zeros is the correct initial value; all clean. Anonymous mmaps are initialized to zero,
// so we don't clear the card table; this avoids unnecessary pages being allocated.
static_assert(kCardClean == 0, "kCardClean must be 0");
uint8_t* cardtable_begin = mem_map->Begin();
- CHECK(cardtable_begin != NULL);
+ CHECK(cardtable_begin != nullptr);
// We allocated up to a byte's worth of extra space to allow biased_begin's byte value to equal
// kCardDirty; compute an offset value to make this the case.
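
The biasing mentioned in this comment is what lets the generated write barrier store the card-table base register's own low byte as the dirty value, avoiding a separate immediate load. A sketch of the addressing under that assumption (names and signature are illustrative):

// Sketch of biased card marking; biased_begin is displaced so that its
// low byte equals kCardDirty.
inline void MarkCard(uint8_t* biased_begin, uintptr_t addr, size_t card_shift) {
  uint8_t* card = biased_begin + (addr >> card_shift);
  *card = static_cast<uint8_t>(reinterpret_cast<uintptr_t>(biased_begin));
  // The stored byte equals kCardDirty by construction of the bias.
}
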
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index 896cce5..34e6aa3 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -43,7 +43,7 @@
template<size_t kAlignment> class SpaceBitmap;
// Maintain a card table from the write barrier. All writes of
-// non-NULL values to heap addresses should go through an entry in
+// non-null values to heap addresses should go through an entry in
// WriteBarrier, and from there to here.
class CardTable {
public:
@@ -146,6 +146,8 @@
// Card table doesn't begin at the beginning of the mem_map_; instead it is displaced by offset
// to allow the byte value of biased_begin_ to equal GC_CARD_DIRTY.
const size_t offset_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CardTable);
};
} // namespace accounting
diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h
index 245e074..1648aef 100644
--- a/runtime/gc/accounting/heap_bitmap.h
+++ b/runtime/gc/accounting/heap_bitmap.h
@@ -39,9 +39,11 @@
void Clear(const mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
template<typename LargeObjectSetVisitor>
bool Set(const mirror::Object* obj, const LargeObjectSetVisitor& visitor)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) ALWAYS_INLINE;
template<typename LargeObjectSetVisitor>
bool AtomicTestAndSet(const mirror::Object* obj, const LargeObjectSetVisitor& visitor)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) ALWAYS_INLINE;
ContinuousSpaceBitmap* GetContinuousSpaceBitmap(const mirror::Object* obj) const;
LargeObjectBitmap* GetLargeObjectBitmap(const mirror::Object* obj) const;
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index 11347a5..ae91200 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -55,7 +55,7 @@
inline bool SpaceBitmap<kAlignment>::Test(const mirror::Object* obj) const {
uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
DCHECK(HasAddress(obj)) << obj;
- DCHECK(bitmap_begin_ != NULL);
+ DCHECK(bitmap_begin_ != nullptr);
DCHECK_GE(addr, heap_begin_);
const uintptr_t offset = addr - heap_begin_;
return (bitmap_begin_[OffsetToIndex(offset)] & OffsetToMask(offset)) != 0;
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 2da8325..84dadea 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -104,8 +104,8 @@
template<size_t kAlignment>
void SpaceBitmap<kAlignment>::Walk(ObjectCallback* callback, void* arg) {
- CHECK(bitmap_begin_ != NULL);
- CHECK(callback != NULL);
+ CHECK(bitmap_begin_ != nullptr);
+ CHECK(callback != nullptr);
uintptr_t end = OffsetToIndex(HeapLimit() - heap_begin_ - 1);
uintptr_t* bitmap_begin = bitmap_begin_;
@@ -132,7 +132,7 @@
CHECK(mark_bitmap.bitmap_begin_ != nullptr);
CHECK_EQ(live_bitmap.heap_begin_, mark_bitmap.heap_begin_);
CHECK_EQ(live_bitmap.bitmap_size_, mark_bitmap.bitmap_size_);
- CHECK(callback != NULL);
+ CHECK(callback != nullptr);
CHECK_LE(sweep_begin, sweep_end);
CHECK_GE(sweep_begin, live_bitmap.heap_begin_);
@@ -186,7 +186,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Visit fields of parent classes first.
mirror::Class* super = klass->GetSuperClass();
- if (super != NULL) {
+ if (super != nullptr) {
WalkInstanceFields(visited, callback, obj, super, arg);
}
// Walk instance fields
@@ -233,7 +233,7 @@
int32_t length = obj_array->GetLength();
for (int32_t i = 0; i < length; i++) {
mirror::Object* value = obj_array->Get(i);
- if (value != NULL) {
+ if (value != nullptr) {
WalkFieldsInOrder(visited, callback, value, arg);
}
}
diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc
index 850325a..edb08ef 100644
--- a/runtime/gc/accounting/space_bitmap_test.cc
+++ b/runtime/gc/accounting/space_bitmap_test.cc
@@ -34,7 +34,7 @@
size_t heap_capacity = 16 * MB;
std::unique_ptr<ContinuousSpaceBitmap> space_bitmap(
ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
- EXPECT_TRUE(space_bitmap.get() != NULL);
+ EXPECT_TRUE(space_bitmap.get() != nullptr);
}
class BitmapVerify {
@@ -62,7 +62,7 @@
std::unique_ptr<ContinuousSpaceBitmap> space_bitmap(
ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
- EXPECT_TRUE(space_bitmap.get() != NULL);
+ EXPECT_TRUE(space_bitmap.get() != nullptr);
// Set all the odd bits in the first BitsPerIntPtrT * 3 to one.
for (size_t j = 0; j < kBitsPerIntPtrT * 3; ++j) {
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 515f124..49c7fda 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -115,7 +115,7 @@
void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type) {
lock_.AssertHeld(self);
DCHECK(page_map_type == kPageMapRun || page_map_type == kPageMapLargeObject);
- FreePageRun* res = NULL;
+ FreePageRun* res = nullptr;
const size_t req_byte_size = num_pages * kPageSize;
// Find the lowest address free page run that's large enough.
for (auto it = free_page_runs_.begin(); it != free_page_runs_.end(); ) {
@@ -157,8 +157,8 @@
}
// Failed to allocate pages. Grow the footprint, if possible.
- if (UNLIKELY(res == NULL && capacity_ > footprint_)) {
- FreePageRun* last_free_page_run = NULL;
+ if (UNLIKELY(res == nullptr && capacity_ > footprint_)) {
+ FreePageRun* last_free_page_run = nullptr;
size_t last_free_page_run_size;
auto it = free_page_runs_.rbegin();
if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
@@ -218,7 +218,7 @@
DCHECK(it != free_page_runs_.rend());
FreePageRun* fpr = *it;
if (kIsDebugBuild && last_free_page_run_size > 0) {
- DCHECK(last_free_page_run != NULL);
+ DCHECK(last_free_page_run != nullptr);
DCHECK_EQ(last_free_page_run, fpr);
}
size_t fpr_byte_size = fpr->ByteSize(this);
@@ -249,7 +249,7 @@
res = fpr;
}
}
- if (LIKELY(res != NULL)) {
+ if (LIKELY(res != nullptr)) {
// Update the page map.
size_t page_map_idx = ToPageMapIndex(res);
for (size_t i = 0; i < num_pages; i++) {
@@ -286,7 +286,7 @@
// Fail.
if (kTraceRosAlloc) {
- LOG(INFO) << "RosAlloc::AllocPages() : NULL";
+ LOG(INFO) << "RosAlloc::AllocPages() : nullptr";
}
return nullptr;
}
@@ -468,7 +468,7 @@
}
if (UNLIKELY(r == nullptr)) {
if (kTraceRosAlloc) {
- LOG(INFO) << "RosAlloc::AllocLargeObject() : NULL";
+ LOG(INFO) << "RosAlloc::AllocLargeObject() : nullptr";
}
return nullptr;
}
@@ -824,7 +824,7 @@
// already in the non-full run set (i.e., it was full) insert it
// into the non-full run set.
if (run != current_runs_[idx]) {
- auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : NULL;
+ auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : nullptr;
auto pos = non_full_runs->find(run);
if (pos == non_full_runs->end()) {
DCHECK(run_was_full);
@@ -1042,10 +1042,11 @@
inline uint32_t RosAlloc::Run::GetBitmapLastVectorMask(size_t num_slots, size_t num_vec) {
const size_t kBitsPerVec = 32;
- DCHECK_GE(num_slots * kBitsPerVec, num_vec);
+ DCHECK_GE(num_vec * kBitsPerVec, num_slots);
+ DCHECK_NE(num_vec, 0U);
size_t remain = num_vec * kBitsPerVec - num_slots;
- DCHECK_NE(remain, kBitsPerVec);
- return ((1U << remain) - 1) << (kBitsPerVec - remain);
+ DCHECK_LT(remain, kBitsPerVec);
+ return ((1U << remain) - 1) << ((kBitsPerVec - remain) & 0x1F);
}
inline bool RosAlloc::Run::IsAllFree() {
@@ -1275,7 +1276,7 @@
// Check if the run should be moved to non_full_runs_ or
// free_page_runs_.
auto* non_full_runs = &non_full_runs_[idx];
- auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : NULL;
+ auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : nullptr;
if (run->IsAllFree()) {
// It has just become completely free. Free the pages of the
// run.
@@ -1358,7 +1359,7 @@
stream << "RosAlloc PageMap: " << std::endl;
lock_.AssertHeld(Thread::Current());
size_t end = page_map_size_;
- FreePageRun* curr_fpr = NULL;
+ FreePageRun* curr_fpr = nullptr;
size_t curr_fpr_size = 0;
size_t remaining_curr_fpr_size = 0;
size_t num_running_empty_pages = 0;
@@ -1373,7 +1374,7 @@
// Encountered a fresh free page run.
DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
DCHECK(fpr->IsFree());
- DCHECK(curr_fpr == NULL);
+ DCHECK(curr_fpr == nullptr);
DCHECK_EQ(curr_fpr_size, static_cast<size_t>(0));
curr_fpr = fpr;
curr_fpr_size = fpr->ByteSize(this);
@@ -1384,7 +1385,7 @@
<< " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
if (remaining_curr_fpr_size == 0) {
// Reset at the end of the current free page run.
- curr_fpr = NULL;
+ curr_fpr = nullptr;
curr_fpr_size = 0;
}
stream << "curr_fpr=0x" << std::hex << reinterpret_cast<intptr_t>(curr_fpr) << std::endl;
@@ -1392,7 +1393,7 @@
} else {
// Still part of the current free page run.
DCHECK_NE(num_running_empty_pages, static_cast<size_t>(0));
- DCHECK(curr_fpr != NULL && curr_fpr_size > 0 && remaining_curr_fpr_size > 0);
+ DCHECK(curr_fpr != nullptr && curr_fpr_size > 0 && remaining_curr_fpr_size > 0);
DCHECK_EQ(remaining_curr_fpr_size % kPageSize, static_cast<size_t>(0));
DCHECK_GE(remaining_curr_fpr_size, static_cast<size_t>(kPageSize));
remaining_curr_fpr_size -= kPageSize;
@@ -1400,7 +1401,7 @@
<< " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
if (remaining_curr_fpr_size == 0) {
// Reset at the end of the current free page run.
- curr_fpr = NULL;
+ curr_fpr = nullptr;
curr_fpr_size = 0;
}
}
@@ -1546,7 +1547,7 @@
void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
void* arg) {
// Note: no need to use this to release pages as we already do so in FreePages().
- if (handler == NULL) {
+ if (handler == nullptr) {
return;
}
MutexLock mu(Thread::Current(), lock_);
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index eabb1c2..26f349a 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -718,6 +718,7 @@
// Leave References gray so that GetReferent() will trigger RB.
CHECK(to_ref->AsReference()->IsEnqueued()) << "Left unenqueued ref gray " << to_ref;
} else {
+#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
if (kUseBakerReadBarrier) {
if (region_space_->IsInToSpace(to_ref)) {
// If to-space, change from gray to white.
@@ -739,6 +740,9 @@
CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
}
}
+#else
+ DCHECK(!kUseBakerReadBarrier);
+#endif
}
if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
ConcurrentCopyingAssertToSpaceInvariantObjectVisitor visitor(this);
@@ -815,7 +819,7 @@
DCHECK(obj != nullptr);
DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj)) << obj;
DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << obj;
- obj->SetReadBarrierPointer(ReadBarrier::WhitePtr());
+ obj->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr());
DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
}
@@ -963,7 +967,8 @@
if (kUseBakerReadBarrier) {
DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << ref;
// Clear the black ptr.
- ref->SetReadBarrierPointer(ReadBarrier::WhitePtr());
+ ref->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr());
+ DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << ref;
}
size_t obj_size = ref->SizeOf();
size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
@@ -1330,10 +1335,6 @@
while (true) {
// Copy the object. TODO: copy only the lockword in the second iteration and on?
memcpy(to_ref, from_ref, obj_size);
- // Set the gray ptr.
- if (kUseBakerReadBarrier) {
- to_ref->SetReadBarrierPointer(ReadBarrier::GrayPtr());
- }
LockWord old_lock_word = to_ref->GetLockWord(false);
@@ -1378,6 +1379,11 @@
return to_ref;
}
+ // Set the gray ptr.
+ if (kUseBakerReadBarrier) {
+ to_ref->SetReadBarrierPointer(ReadBarrier::GrayPtr());
+ }
+
LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));
// Try to atomically write the fwd ptr.
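Two threading points in the surrounding hunks, reduced to a self-contained sketch (std::atomic stand-ins; names assumed, not the mirror::Object API): the white/black transitions now use a compare-and-set so a concurrent state change can never be silently overwritten, and the gray color is applied to the copy just before the CAS that publishes the forwarding address, plausibly so that any thread that acquires the forwarding pointer also observes a fully initialized, gray copy.

#include <atomic>
#include <cassert>

enum class RbState { White, Gray, Black };

struct Obj {
  std::atomic<RbState> rb_state{RbState::White};
};

// CAS discipline: succeeds only if the object is still in 'expected'.
bool AtomicSetRbState(Obj* o, RbState expected, RbState desired) {
  return o->rb_state.compare_exchange_strong(expected, desired);
}

// Publish-after-init: writes to *to_ref (including its color) happen
// before any reader that acquires the forwarding pointer.
bool Publish(std::atomic<Obj*>* fwd, Obj* to_ref) {
  to_ref->rb_state.store(RbState::Gray, std::memory_order_relaxed);
  Obj* expected = nullptr;
  return fwd->compare_exchange_strong(expected, to_ref,
                                      std::memory_order_release,
                                      std::memory_order_relaxed);
}

int main() {
  Obj o;
  o.rb_state = RbState::Black;
  assert(AtomicSetRbState(&o, RbState::Black, RbState::White));   // Black -> white.
  assert(!AtomicSetRbState(&o, RbState::Black, RbState::White));  // Already white.
  std::atomic<Obj*> fwd{nullptr};
  Obj copy;
  assert(Publish(&fwd, &copy) && copy.rb_state == RbState::Gray);
  return 0;
}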
@@ -1484,6 +1490,21 @@
}
DCHECK(from_ref != nullptr);
DCHECK(heap_->collector_type_ == kCollectorTypeCC);
+ if (kUseBakerReadBarrier && !is_active_) {
+ // In the lock word forward address state, the read barrier bits
+ // in the lock word are part of the stored forwarding address and
+ // invalid. This is usually OK, as the from-space copies of objects
+ // aren't accessed by mutators due to the to-space
+ // invariant. However, during the dex2oat image-writing relocation
+ // and the zygote compaction, objects can be in the forward
+ // address state (to store the forward/relocation addresses) and
+ // can still be accessed while the invalid read barrier bits
+ // are consulted. If those bits look gray but aren't really, the
+ // read barrier slow path can trigger when it shouldn't. To guard
+ // against this, return here if the CC collector isn't running.
+ return from_ref;
+ }
+ DCHECK(region_space_ != nullptr) << "Read barrier slow path taken when CC isn't running?";
space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
// It's already marked.
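A compressed illustration of the guard that comment motivates (fake types; the real check consults is_active_): when the collector is not running, the bits that would normally encode a color can be part of a forwarding address, so the slow path must not decode them.

#include <cstdint>

struct FakeObject {
  // While a relocator is in the forwarding-address state, this word may
  // hold an address whose low bits alias the read barrier "color" bits.
  uintptr_t lock_word = 0;
};

FakeObject* ReadBarrierSlowPath(FakeObject* from_ref, bool collector_active) {
  if (!collector_active) {
    // The word's contents are not color bits right now (e.g. during
    // image writing or zygote compaction), so do not interpret them.
    return from_ref;
  }
  // Real marking logic would decode lock_word here.
  return from_ref;
}

int main() {
  FakeObject o;
  return ReadBarrierSlowPath(&o, /*collector_active=*/false) == &o ? 0 : 1;
}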
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 93de035..60ea6b6 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -279,7 +279,7 @@
friend class FlipCallback;
friend class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor;
- DISALLOW_COPY_AND_ASSIGN(ConcurrentCopying);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
};
} // namespace collector
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index c5a8d5d..9b76d1a 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -190,6 +190,9 @@
int64_t total_freed_bytes_;
CumulativeLogger cumulative_timings_;
mutable Mutex pause_histogram_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(GarbageCollector);
};
} // namespace collector
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index 4337644..f59a2cd 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -251,7 +251,7 @@
friend class UpdateReferenceVisitor;
friend class UpdateRootVisitor;
- DISALLOW_COPY_AND_ASSIGN(MarkCompact);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MarkCompact);
};
} // namespace collector
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 14eb80b..5401b56 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -368,10 +368,12 @@
class MarkSweepMarkObjectSlowPath {
public:
- explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {
+ explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep, Object* holder = nullptr,
+ MemberOffset offset = MemberOffset(0))
+ : mark_sweep_(mark_sweep), holder_(holder), offset_(offset) {
}
- void operator()(const Object* obj) const ALWAYS_INLINE {
+ void operator()(const Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
if (kProfileLargeObjects) {
// TODO: Differentiate between marking and testing somehow.
++mark_sweep_->large_object_test_;
@@ -381,18 +383,63 @@
if (UNLIKELY(obj == nullptr || !IsAligned<kPageSize>(obj) ||
(kIsDebugBuild && large_object_space != nullptr &&
!large_object_space->Contains(obj)))) {
- LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
- LOG(ERROR) << "Attempting see if it's a bad root";
- mark_sweep_->VerifyRoots();
+ LOG(INTERNAL_FATAL) << "Tried to mark " << obj << " not contained by any spaces";
+ if (holder_ != nullptr) {
+ size_t holder_size = holder_->SizeOf();
+ ArtField* field = holder_->FindFieldByOffset(offset_);
+ LOG(INTERNAL_FATAL) << "Field info: "
+ << " holder=" << holder_
+ << " holder_size=" << holder_size
+ << " holder_type=" << PrettyTypeOf(holder_)
+ << " offset=" << offset_.Uint32Value()
+ << " field=" << (field != nullptr ? field->GetName() : "nullptr")
+ << " field_type="
+ << (field != nullptr ? field->GetTypeDescriptor() : "")
+ << " first_ref_field_offset="
+ << (holder_->IsClass()
+ ? holder_->AsClass()->GetFirstReferenceStaticFieldOffset()
+ : holder_->GetClass()->GetFirstReferenceInstanceFieldOffset())
+ << " num_of_ref_fields="
+ << (holder_->IsClass()
+ ? holder_->AsClass()->NumReferenceStaticFields()
+ : holder_->GetClass()->NumReferenceInstanceFields())
+ << "\n";
+ }
+ PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
+ MemMap::DumpMaps(LOG(INTERNAL_FATAL), true);
+ {
+ LOG(INTERNAL_FATAL) << "Attempting see if it's a bad root";
+ Thread* self = Thread::Current();
+ if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
+ mark_sweep_->VerifyRoots();
+ } else {
+ const bool heap_bitmap_exclusive_locked =
+ Locks::heap_bitmap_lock_->IsExclusiveHeld(self);
+ if (heap_bitmap_exclusive_locked) {
+ Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
+ }
+ Locks::mutator_lock_->SharedUnlock(self);
+ ThreadList* tl = Runtime::Current()->GetThreadList();
+ tl->SuspendAll(__FUNCTION__);
+ mark_sweep_->VerifyRoots();
+ tl->ResumeAll();
+ Locks::mutator_lock_->SharedLock(self);
+ if (heap_bitmap_exclusive_locked) {
+ Locks::heap_bitmap_lock_->ExclusiveLock(self);
+ }
+ }
+ }
LOG(FATAL) << "Can't mark invalid object";
}
}
private:
MarkSweep* const mark_sweep_;
+ mirror::Object* const holder_;
+ MemberOffset offset_;
};
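The diagnostic path above must call VerifyRoots(), which needs every thread suspended, from a context that may hold the mutator lock shared and possibly the heap bitmap lock exclusively. Its lock juggling, reduced to stub types (assumed API, not ART's Mutex/ThreadList):

// Stub lock and thread-list types, just enough to show the ordering.
struct StubLock {
  void SharedLock() {}
  void SharedUnlock() {}
  void ExclusiveLock() {}
  void ExclusiveUnlock() {}
};

struct StubThreadList {
  void SuspendAll(const char* /*cause*/) {}
  void ResumeAll() {}
};

// Drop the held locks innermost-first, stop the world, verify, then
// restore the locks in the original order so callers see no difference.
void VerifyRootsStopped(StubLock& mutator, StubLock& bitmap,
                        StubThreadList& tl, bool bitmap_held_exclusive) {
  if (bitmap_held_exclusive) {
    bitmap.ExclusiveUnlock();
  }
  mutator.SharedUnlock();
  tl.SuspendAll("verify roots");
  // ... verification that needs a quiescent heap goes here ...
  tl.ResumeAll();
  mutator.SharedLock();
  if (bitmap_held_exclusive) {
    bitmap.ExclusiveLock();
  }
}

int main() {
  StubLock mutator, bitmap;
  StubThreadList tl;
  VerifyRootsStopped(mutator, bitmap, tl, /*bitmap_held_exclusive=*/true);
  return 0;
}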
-inline void MarkSweep::MarkObjectNonNull(Object* obj) {
+inline void MarkSweep::MarkObjectNonNull(Object* obj, Object* holder, MemberOffset offset) {
DCHECK(obj != nullptr);
if (kUseBakerOrBrooksReadBarrier) {
// Verify all the objects have the correct pointer installed.
@@ -414,7 +461,7 @@
if (kCountMarkedObjects) {
++mark_slowpath_count_;
}
- MarkSweepMarkObjectSlowPath visitor(this);
+ MarkSweepMarkObjectSlowPath visitor(this, holder, offset);
// TODO: We already know that the object is not in the current_space_bitmap_ but MarkBitmap::Set
// will check again.
if (!mark_bitmap_->Set(obj, visitor)) {
@@ -454,9 +501,9 @@
}
// Used to mark objects when processing the mark stack. If an object is null, it is not marked.
-inline void MarkSweep::MarkObject(Object* obj) {
+inline void MarkSweep::MarkObject(Object* obj, Object* holder, MemberOffset offset) {
if (obj != nullptr) {
- MarkObjectNonNull(obj);
+ MarkObjectNonNull(obj, holder, offset);
} else if (kCountMarkedObjects) {
++mark_null_count_;
}
@@ -498,7 +545,7 @@
if (heap->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
space::LargeObjectSpace* large_object_space = heap->GetLargeObjectsSpace();
if (large_object_space != nullptr && !large_object_space->Contains(root)) {
- LOG(ERROR) << "Found invalid root: " << root << " " << info;
+ LOG(INTERNAL_FATAL) << "Found invalid root: " << root << " " << info;
}
}
}
@@ -578,7 +625,7 @@
mark_stack_pos_(mark_stack_size) {
// We may have to copy part of an existing mark stack when another mark stack overflows.
if (mark_stack_size != 0) {
- DCHECK(mark_stack != NULL);
+ DCHECK(mark_stack != nullptr);
// TODO: Check performance?
std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
}
@@ -850,7 +897,7 @@
public:
RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
accounting::ContinuousSpaceBitmap* bitmap, uintptr_t begin, uintptr_t end)
- : MarkStackTask<false>(thread_pool, mark_sweep, 0, NULL), bitmap_(bitmap), begin_(begin),
+ : MarkStackTask<false>(thread_pool, mark_sweep, 0, nullptr), bitmap_(bitmap), begin_(begin),
end_(end) {
}
@@ -1207,7 +1254,7 @@
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
}
- mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset));
+ mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset), obj, offset);
}
private:
@@ -1260,11 +1307,11 @@
static const size_t kFifoSize = 4;
BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
for (;;) {
- Object* obj = NULL;
+ Object* obj = nullptr;
if (kUseMarkStackPrefetch) {
while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
Object* mark_stack_obj = mark_stack_->PopBack();
- DCHECK(mark_stack_obj != NULL);
+ DCHECK(mark_stack_obj != nullptr);
__builtin_prefetch(mark_stack_obj);
prefetch_fifo.push_back(mark_stack_obj);
}
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index fad3403..d29d87a 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -199,7 +199,8 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Marks an object.
- void MarkObject(mirror::Object* obj)
+ void MarkObject(mirror::Object* obj, mirror::Object* holder = nullptr,
+ MemberOffset offset = MemberOffset(0))
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -222,7 +223,8 @@
static void VerifyImageRootVisitor(mirror::Object* root, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
- void MarkObjectNonNull(mirror::Object* obj)
+ void MarkObjectNonNull(mirror::Object* obj, mirror::Object* holder = nullptr,
+ MemberOffset offset = MemberOffset(0))
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -336,7 +338,7 @@
friend class VerifyRootMarkedVisitor;
friend class VerifyRootVisitor;
- DISALLOW_COPY_AND_ASSIGN(MarkSweep);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MarkSweep);
};
} // namespace collector
diff --git a/runtime/gc/collector/partial_mark_sweep.h b/runtime/gc/collector/partial_mark_sweep.h
index ac0d068..1a211cd 100644
--- a/runtime/gc/collector/partial_mark_sweep.h
+++ b/runtime/gc/collector/partial_mark_sweep.h
@@ -40,7 +40,7 @@
virtual void BindBitmaps() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
- DISALLOW_COPY_AND_ASSIGN(PartialMarkSweep);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PartialMarkSweep);
};
} // namespace collector
diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h
index 922a71c..7b19dc9 100644
--- a/runtime/gc/collector/semi_space-inl.h
+++ b/runtime/gc/collector/semi_space-inl.h
@@ -60,10 +60,6 @@
if (obj == nullptr) {
return;
}
- if (kUseBakerOrBrooksReadBarrier) {
- // Verify all the objects have the correct forward pointer installed.
- obj->AssertReadBarrierPointer();
- }
if (from_space_->HasAddress(obj)) {
mirror::Object* forward_address = GetForwardingAddressInFromSpace(obj);
// If the object has already been moved, return the new forward address.
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index dbf01d8..82d02e7 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -750,7 +750,7 @@
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
// All immune objects are assumed marked.
if (from_space_->HasAddress(obj)) {
- // Returns either the forwarding address or nullptr.
+ // Returns either the forwarding address or null.
return GetForwardingAddressInFromSpace(obj);
} else if (collect_from_space_only_ || immune_region_.ContainsObject(obj) ||
to_space_->HasAddress(obj)) {
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 61fbead..3c25f53 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -278,7 +278,7 @@
private:
friend class BitmapSetSlowPathVisitor;
- DISALLOW_COPY_AND_ASSIGN(SemiSpace);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SemiSpace);
};
} // namespace collector
diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h
index 4f9dabf..b9ef137 100644
--- a/runtime/gc/collector/sticky_mark_sweep.h
+++ b/runtime/gc/collector/sticky_mark_sweep.h
@@ -47,7 +47,7 @@
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
private:
- DISALLOW_COPY_AND_ASSIGN(StickyMarkSweep);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StickyMarkSweep);
};
} // namespace collector
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index b770096..fbf36e8 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -293,7 +293,7 @@
return nullptr;
}
// Try allocating a new thread local buffer; if the allocation fails, the space must be
- // full so return nullptr.
+ // full so return null.
if (!bump_pointer_space_->AllocNewTlab(self, new_tlab_size)) {
return nullptr;
}
@@ -371,11 +371,8 @@
}
inline Heap::AllocationTimer::AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr)
- : heap_(heap), allocated_obj_ptr_(allocated_obj_ptr) {
- if (kMeasureAllocationTime) {
- allocation_start_time_ = NanoTime() / kTimeAdjust;
- }
-}
+ : heap_(heap), allocated_obj_ptr_(allocated_obj_ptr),
+ allocation_start_time_(kMeasureAllocationTime ? NanoTime() / kTimeAdjust : 0u) { }
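This rewrite is what lets allocation_start_time_ become const in the heap.h hunk further down: a const member cannot be assigned in the constructor body, only initialized. A tiny sketch of the idiom (stub clock, hypothetical names):

#include <cstdint>

// Deterministic stand-in for the real clock.
static uint64_t NanoTime() { return 42u; }

struct Timer {
  // The conditional lives in the init list, preserving the old
  // "only measure when enabled" behavior while allowing const.
  explicit Timer(bool enabled) : start_time_(enabled ? NanoTime() : 0u) {}
  const uint64_t start_time_;
};

int main() {
  Timer t(/*enabled=*/true);
  return t.start_time_ == 42u ? 0 : 1;
}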
inline Heap::AllocationTimer::~AllocationTimer() {
if (kMeasureAllocationTime) {
@@ -419,7 +416,7 @@
inline void Heap::CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
mirror::Object** obj) {
if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
- RequestConcurrentGCAndSaveObject(self, obj);
+ RequestConcurrentGCAndSaveObject(self, false, obj);
}
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index beaf067..11a0e3c 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -412,7 +412,7 @@
}
// Allocate the card table.
card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
- CHECK(card_table_.get() != NULL) << "Failed to create card table";
+ CHECK(card_table_.get() != nullptr) << "Failed to create card table";
if (foreground_collector_type_ == kCollectorTypeCC && kUseTableLookupReadBarrier) {
rb_table_.reset(new accounting::ReadBarrierTable());
@@ -491,7 +491,7 @@
bool no_gap = MemMap::CheckNoGaps(GetImageSpace()->GetMemMap(),
non_moving_space_->GetMemMap());
if (!no_gap) {
- MemMap::DumpMaps(LOG(ERROR));
+ MemMap::DumpMaps(LOG(ERROR), true);
LOG(FATAL) << "There's a gap between the image space and the non-moving space";
}
}
@@ -1052,7 +1052,7 @@
if (!fail_ok) {
LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
}
- return NULL;
+ return nullptr;
}
space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
@@ -1065,12 +1065,12 @@
if (!fail_ok) {
LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
}
- return NULL;
+ return nullptr;
}
space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
space::Space* result = FindContinuousSpaceFromObject(obj, true);
- if (result != NULL) {
+ if (result != nullptr) {
return result;
}
return FindDiscontinuousSpaceFromObject(obj, fail_ok);
@@ -1082,7 +1082,7 @@
return space->AsImageSpace();
}
}
- return NULL;
+ return nullptr;
}
void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
@@ -1612,10 +1612,19 @@
}
size_t Heap::GetObjectsAllocated() const {
+ Thread* self = Thread::Current();
+ ScopedThreadStateChange tsc(self, kWaitingForGetObjectsAllocated);
+ auto* tl = Runtime::Current()->GetThreadList();
+ // Need SuspendAll here to prevent a lock violation if RosAlloc does its own SuspendAll during InspectAll.
+ tl->SuspendAll(__FUNCTION__);
size_t total = 0;
- for (space::AllocSpace* space : alloc_spaces_) {
- total += space->GetObjectsAllocated();
+ {
+ ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ for (space::AllocSpace* space : alloc_spaces_) {
+ total += space->GetObjectsAllocated();
+ }
}
+ tl->ResumeAll();
return total;
}
@@ -2204,7 +2213,7 @@
// Turn the current alloc space into a zygote space and obtain the new alloc space composed of
// the remaining available space.
// Remove the old space before creating the zygote space since creating the zygote space sets
- // the old alloc space's bitmaps to nullptr.
+ // the old alloc space's bitmaps to null.
RemoveSpace(old_alloc_space);
if (collector::SemiSpace::kUseRememberedSet) {
// Sanity bound check.
@@ -3325,20 +3334,24 @@
*object = soa.Decode<mirror::Object*>(arg.get());
}
-void Heap::RequestConcurrentGCAndSaveObject(Thread* self, mirror::Object** obj) {
+void Heap::RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, mirror::Object** obj) {
StackHandleScope<1> hs(self);
HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
- RequestConcurrentGC(self);
+ RequestConcurrentGC(self, force_full);
}
class Heap::ConcurrentGCTask : public HeapTask {
public:
- explicit ConcurrentGCTask(uint64_t target_time) : HeapTask(target_time) { }
+ explicit ConcurrentGCTask(uint64_t target_time, bool force_full)
+ : HeapTask(target_time), force_full_(force_full) { }
virtual void Run(Thread* self) OVERRIDE {
gc::Heap* heap = Runtime::Current()->GetHeap();
- heap->ConcurrentGC(self);
+ heap->ConcurrentGC(self, force_full_);
heap->ClearConcurrentGCRequest();
}
+
+ private:
+ const bool force_full_; // If true, force full (or partial) collection.
};
static bool CanAddHeapTask(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_) {
@@ -3351,24 +3364,30 @@
concurrent_gc_pending_.StoreRelaxed(false);
}
-void Heap::RequestConcurrentGC(Thread* self) {
+void Heap::RequestConcurrentGC(Thread* self, bool force_full) {
if (CanAddHeapTask(self) &&
concurrent_gc_pending_.CompareExchangeStrongSequentiallyConsistent(false, true)) {
- task_processor_->AddTask(self, new ConcurrentGCTask(NanoTime())); // Start straight away.
+ task_processor_->AddTask(self, new ConcurrentGCTask(NanoTime(), // Start straight away.
+ force_full));
}
}
-void Heap::ConcurrentGC(Thread* self) {
+void Heap::ConcurrentGC(Thread* self, bool force_full) {
if (!Runtime::Current()->IsShuttingDown(self)) {
// Wait for any GCs currently running to finish.
if (WaitForGcToComplete(kGcCauseBackground, self) == collector::kGcTypeNone) {
// If we can't run the GC type we wanted to run, find the next appropriate one and try that
// instead. E.g. can't do partial, so do full instead.
- if (CollectGarbageInternal(next_gc_type_, kGcCauseBackground, false) ==
+ collector::GcType next_gc_type = next_gc_type_;
+ // If forcing full and next gc type is sticky, override with a non-sticky type.
+ if (force_full && next_gc_type == collector::kGcTypeSticky) {
+ next_gc_type = HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
+ }
+ if (CollectGarbageInternal(next_gc_type, kGcCauseBackground, false) ==
collector::kGcTypeNone) {
for (collector::GcType gc_type : gc_plan_) {
// Attempt to run the collector, if we succeed, we are done.
- if (gc_type > next_gc_type_ &&
+ if (gc_type > next_gc_type &&
CollectGarbageInternal(gc_type, kGcCauseBackground, false) !=
collector::kGcTypeNone) {
break;
@@ -3553,7 +3572,7 @@
UpdateMaxNativeFootprint();
} else if (!IsGCRequestPending()) {
if (IsGcConcurrent()) {
- RequestConcurrentGC(self);
+ RequestConcurrentGC(self, true); // Request non-sticky type.
} else {
CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
}
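The type-override logic in the ConcurrentGC hunk above reads more clearly in isolation; a self-contained restatement (enum values mirror the names used above):

#include <cassert>

enum GcType { kGcTypeSticky, kGcTypePartial, kGcTypeFull };

// A forced background GC must not degrade to a sticky (young) pass.
GcType PickGcType(GcType next, bool force_full, bool has_zygote_space) {
  if (force_full && next == kGcTypeSticky) {
    return has_zygote_space ? kGcTypePartial : kGcTypeFull;
  }
  return next;
}

int main() {
  assert(PickGcType(kGcTypeSticky, true, false) == kGcTypeFull);
  assert(PickGcType(kGcTypeSticky, true, true) == kGcTypePartial);
  assert(PickGcType(kGcTypeSticky, false, false) == kGcTypeSticky);
  return 0;
}

This is also why the native-allocation site above now passes force_full = true: it wants at least a partial collection, not a sticky one.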
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 066b4c5..90249f9 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -288,7 +288,7 @@
// Does a concurrent GC, should only be called by the GC daemon thread
// through runtime.
- void ConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
+ void ConcurrentGC(Thread* self, bool force_full) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
// Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
// The boolean decides whether to use IsAssignableFrom or == when comparing classes.
@@ -396,7 +396,7 @@
void RecordFreeRevoke();
// Must be called if a field of an Object in the heap changes, and before any GC safe-point.
- // The call is not needed if NULL is stored in the field.
+ // The call is not needed if null is stored in the field.
ALWAYS_INLINE void WriteBarrierField(const mirror::Object* dst, MemberOffset /*offset*/,
const mirror::Object* /*new_value*/) {
card_table_->MarkCard(dst);
@@ -664,7 +664,7 @@
void RequestTrim(Thread* self) LOCKS_EXCLUDED(pending_task_lock_);
// Request asynchronous GC.
- void RequestConcurrentGC(Thread* self) LOCKS_EXCLUDED(pending_task_lock_);
+ void RequestConcurrentGC(Thread* self, bool force_full) LOCKS_EXCLUDED(pending_task_lock_);
// Whether or not we may use a garbage collector, used so that we only create collectors we need.
bool MayUseCollector(CollectorType type) const;
@@ -786,7 +786,7 @@
void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
LOCKS_EXCLUDED(pending_task_lock_);
- void RequestConcurrentGCAndSaveObject(Thread* self, mirror::Object** obj)
+ void RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, mirror::Object** obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsGCRequestPending() const;
@@ -991,7 +991,7 @@
// programs it is "cleared" making it the same as capacity.
size_t growth_limit_;
- // When the number of bytes allocated exceeds the footprint TryAllocate returns NULL indicating
+ // When the number of bytes allocated exceeds the footprint TryAllocate returns null indicating
// a GC should be triggered.
size_t max_allowed_footprint_;
@@ -1201,41 +1201,23 @@
friend class VerifyReferenceVisitor;
friend class VerifyObjectVisitor;
friend class ScopedHeapFill;
- friend class ScopedHeapLock;
friend class space::SpaceTest;
class AllocationTimer {
- private:
- Heap* heap_;
- mirror::Object** allocated_obj_ptr_;
- uint64_t allocation_start_time_;
public:
- AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr);
- ~AllocationTimer();
+ ALWAYS_INLINE AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr);
+ ALWAYS_INLINE ~AllocationTimer();
+ private:
+ Heap* const heap_;
+ mirror::Object** allocated_obj_ptr_;
+ const uint64_t allocation_start_time_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationTimer);
};
DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
};
-// ScopedHeapFill changes the bytes allocated counter to be equal to the growth limit. This
-// causes the next allocation to perform a GC and possibly an OOM. It can be used to ensure that a
-// GC happens in specific methods such as ThrowIllegalMonitorStateExceptionF in Monitor::Wait.
-class ScopedHeapFill {
- public:
- explicit ScopedHeapFill(Heap* heap)
- : heap_(heap),
- delta_(heap_->GetMaxMemory() - heap_->GetBytesAllocated()) {
- heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(delta_);
- }
- ~ScopedHeapFill() {
- heap_->num_bytes_allocated_.FetchAndSubSequentiallyConsistent(delta_);
- }
-
- private:
- Heap* const heap_;
- const int64_t delta_;
-};
-
} // namespace gc
} // namespace art
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
index c67fd98..a44319b 100644
--- a/runtime/gc/reference_processor.h
+++ b/runtime/gc/reference_processor.h
@@ -81,6 +81,9 @@
IsHeapReferenceMarkedCallback* is_marked_callback_;
MarkObjectCallback* mark_callback_;
void* arg_;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ProcessReferencesArgs);
};
bool SlowPathEnabled() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Called by ProcessReferences.
@@ -105,6 +108,8 @@
ReferenceQueue finalizer_reference_queue_;
ReferenceQueue phantom_reference_queue_;
ReferenceQueue cleared_references_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReferenceProcessor);
};
} // namespace gc
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 7be0704..4ba3983 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -30,7 +30,7 @@
}
void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref) {
- DCHECK(ref != NULL);
+ DCHECK(ref != nullptr);
MutexLock mu(self, *lock_);
if (!ref->IsEnqueued()) {
EnqueuePendingReference(ref);
@@ -43,7 +43,7 @@
}
void ReferenceQueue::EnqueuePendingReference(mirror::Reference* ref) {
- DCHECK(ref != NULL);
+ DCHECK(ref != nullptr);
if (IsEmpty()) {
// 1-element cyclic queue, i.e.: Reference ref = ..; ref.pendingNext = ref;
list_ = ref;
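For context, the pending list that comment describes is a cyclic singly linked list threaded through the references themselves. A self-contained toy version (the non-empty branch is a plausible completion, not shown in this hunk):

#include <cassert>
#include <cstddef>

struct Ref {
  Ref* pending_next = nullptr;
};

struct ToyQueue {
  Ref* list_ = nullptr;  // Tail of the cycle; tail->pending_next is the head.

  void Enqueue(Ref* ref) {
    if (list_ == nullptr) {
      ref->pending_next = ref;  // 1-element cyclic queue: ref points at itself.
      list_ = ref;
    } else {
      // Plausible non-empty case: splice in front of the head.
      Ref* head = list_->pending_next;
      ref->pending_next = head;
      list_->pending_next = ref;
    }
  }
};

int main() {
  Ref a, b;
  ToyQueue q;
  q.Enqueue(&a);
  assert(a.pending_next == &a);  // Self-cycle, as in the comment above.
  q.Enqueue(&b);
  assert(a.pending_next == &b && b.pending_next == &a);
  return 0;
}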
@@ -96,11 +96,11 @@
<< "ref=" << ref << " rb_ptr=" << ref->GetReadBarrierPointer();
if (heap->ConcurrentCopyingCollector()->RegionSpace()->IsInToSpace(ref)) {
// Moving objects.
- ref->SetReadBarrierPointer(ReadBarrier::WhitePtr());
+ ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(), ReadBarrier::WhitePtr());
CHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr());
} else {
// Non-moving objects.
- ref->SetReadBarrierPointer(ReadBarrier::BlackPtr());
+ ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(), ReadBarrier::BlackPtr());
CHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::BlackPtr());
}
}
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index f7d89d0..c45be85 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -106,7 +106,7 @@
// GC types.
mirror::Reference* list_;
- DISALLOW_COPY_AND_ASSIGN(ReferenceQueue);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ReferenceQueue);
};
} // namespace gc
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index c496a42..df43606 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -45,7 +45,7 @@
static BumpPointerSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);
static BumpPointerSpace* CreateFromMemMap(const std::string& name, MemMap* mem_map);
- // Allocate num_bytes, returns nullptr if the space is full.
+ // Allocate num_bytes, returns null if the space is full.
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
// Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
diff --git a/runtime/gc/space/dlmalloc_space-inl.h b/runtime/gc/space/dlmalloc_space-inl.h
index 9eace89..4fc4ada 100644
--- a/runtime/gc/space/dlmalloc_space-inl.h
+++ b/runtime/gc/space/dlmalloc_space-inl.h
@@ -35,7 +35,7 @@
obj = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated, usable_size,
bytes_tl_bulk_allocated);
}
- if (LIKELY(obj != NULL)) {
+ if (LIKELY(obj != nullptr)) {
// Zero freshly allocated memory, done while not holding the space's lock.
memset(obj, 0, num_bytes);
}
@@ -57,13 +57,13 @@
size_t* usable_size,
size_t* bytes_tl_bulk_allocated) {
mirror::Object* result = reinterpret_cast<mirror::Object*>(mspace_malloc(mspace_, num_bytes));
- if (LIKELY(result != NULL)) {
+ if (LIKELY(result != nullptr)) {
if (kDebugSpaces) {
CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result)
<< ") not in bounds of allocation space " << *this;
}
size_t allocation_size = AllocationSizeNonvirtual(result, usable_size);
- DCHECK(bytes_allocated != NULL);
+ DCHECK(bytes_allocated != nullptr);
*bytes_allocated = allocation_size;
*bytes_tl_bulk_allocated = allocation_size;
}
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 225861d..7b1a421 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -39,7 +39,7 @@
: MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
starting_size, initial_size),
mspace_(mspace) {
- CHECK(mspace != NULL);
+ CHECK(mspace != nullptr);
}
DlMallocSpace* DlMallocSpace::CreateFromMemMap(MemMap* mem_map, const std::string& name,
@@ -176,7 +176,7 @@
}
size_t DlMallocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
- DCHECK(ptrs != NULL);
+ DCHECK(ptrs != nullptr);
// Don't need the lock to calculate the size of the freed pointers.
size_t bytes_freed = 0;
@@ -232,7 +232,7 @@
void* arg) {
MutexLock mu(Thread::Current(), lock_);
mspace_inspect_all(mspace_, callback, arg);
- callback(NULL, NULL, 0, arg); // Indicate end of a space.
+ callback(nullptr, nullptr, 0, arg); // Indicate end of a space.
}
size_t DlMallocSpace::GetFootprint() {
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index e28e8d7..99f5d45 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -203,6 +203,9 @@
oat_file_option_string += ImageHeader::GetOatLocationFromImageLocation(image_filename);
arg_vector.push_back(oat_file_option_string);
+ // Note: we do not generate a fully debuggable boot image, so we do not pass the
+ // compiler flag --debuggable here.
+
Runtime::Current()->AddCurrentRuntimeFeaturesAsDex2OatArguments(&arg_vector);
CHECK_EQ(image_isa, kRuntimeISA)
<< "We should always be generating an image for the current isa.";
@@ -666,7 +669,7 @@
}
std::unique_ptr<File> file(OS::OpenFileForReading(image_filename));
- if (file.get() == NULL) {
+ if (file.get() == nullptr) {
*error_msg = StringPrintf("Failed to open '%s'", image_filename);
return nullptr;
}
@@ -695,7 +698,7 @@
std::unique_ptr<MemMap> map(MemMap::MapFileAtAddress(
image_header.GetImageBegin(), image_header.GetImageSize() + image_header.GetArtFieldsSize(),
PROT_READ | PROT_WRITE, MAP_PRIVATE, file->Fd(), 0, false, image_filename, error_msg));
- if (map.get() == NULL) {
+ if (map.get() == nullptr) {
DCHECK(!error_msg->empty());
return nullptr;
}
@@ -786,7 +789,7 @@
image_header.GetOatFileBegin(),
!Runtime::Current()->IsAotCompiler(),
nullptr, error_msg);
- if (oat_file == NULL) {
+ if (oat_file == nullptr) {
*error_msg = StringPrintf("Failed to open oat file '%s' referenced from image %s: %s",
oat_filename.c_str(), GetName(), error_msg->c_str());
return nullptr;
@@ -811,7 +814,7 @@
}
bool ImageSpace::ValidateOatFile(std::string* error_msg) const {
- CHECK(oat_file_.get() != NULL);
+ CHECK(oat_file_.get() != nullptr);
for (const OatFile::OatDexFile* oat_dex_file : oat_file_->GetOatDexFiles()) {
const std::string& dex_file_location = oat_dex_file->GetDexFileLocation();
uint32_t dex_file_location_checksum;
@@ -837,7 +840,7 @@
}
OatFile* ImageSpace::ReleaseOatFile() {
- CHECK(oat_file_.get() != NULL);
+ CHECK(oat_file_.get() != nullptr);
return oat_file_.release();
}
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index 9ae2af4..54dc7a6 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -52,7 +52,7 @@
InstructionSet image_isa);
// Reads the image header from the specified image location for the
- // instruction set image_isa. Returns nullptr on failure, with
+ // instruction set image_isa. Returns null on failure, with
// reason in error_msg.
static ImageHeader* ReadImageHeader(const char* image_location,
InstructionSet image_isa,
@@ -122,7 +122,7 @@
private:
// Tries to initialize an ImageSpace from the given image path,
- // returning NULL on error.
+ // returning null on error.
//
// If validate_oat_file is false (for /system), do not verify that
// image's OatFile is up-to-date relative to its DexFile
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 7353c83..4dfdaa5 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -124,9 +124,9 @@
std::string error_msg;
MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", nullptr, num_bytes,
PROT_READ | PROT_WRITE, true, false, &error_msg);
- if (UNLIKELY(mem_map == NULL)) {
+ if (UNLIKELY(mem_map == nullptr)) {
LOG(WARNING) << "Large object allocation failed: " << error_msg;
- return NULL;
+ return nullptr;
}
mirror::Object* const obj = reinterpret_cast<mirror::Object*>(mem_map->Begin());
if (kIsDebugBuild) {
@@ -206,7 +206,7 @@
for (auto it = mem_maps_.begin(); it != mem_maps_.end(); ++it) {
MemMap* mem_map = it->second;
callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
- callback(NULL, NULL, 0, arg);
+ callback(nullptr, nullptr, 0, arg);
}
}
@@ -316,7 +316,7 @@
std::string error_msg;
MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
PROT_READ | PROT_WRITE, true, false, &error_msg);
- CHECK(mem_map != NULL) << "Failed to allocate large object space mem map: " << error_msg;
+ CHECK(mem_map != nullptr) << "Failed to allocate large object space mem map: " << error_msg;
return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
}
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 9195b06..b014217 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -75,13 +75,13 @@
LOG(ERROR) << "Failed to create alloc space (" << name << ") where the initial size ("
<< PrettySize(*initial_size) << ") is larger than its capacity ("
<< PrettySize(*growth_limit) << ")";
- return NULL;
+ return nullptr;
}
if (*growth_limit > *capacity) {
LOG(ERROR) << "Failed to create alloc space (" << name << ") where the growth limit capacity ("
<< PrettySize(*growth_limit) << ") is larger than the capacity ("
<< PrettySize(*capacity) << ")";
- return NULL;
+ return nullptr;
}
// Page align growth limit and capacity which will be used to manage mmapped storage
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index bbf1bbb..5f3a1db 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -60,7 +60,7 @@
// Allocate num_bytes without allowing the underlying space to grow.
virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated) = 0;
- // Return the storage space required by obj. If usable_size isn't nullptr then it is set to the
+ // Return the storage space required by obj. If usable_size isn't null then it is set to the
// amount of the storage space that may be used by obj.
virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0;
virtual size_t Free(Thread* self, mirror::Object* ptr)
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index b88ce24..19109f0 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -40,7 +40,7 @@
// space to confirm the request was granted.
static RegionSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);
- // Allocate num_bytes, returns nullptr if the space is full.
+ // Allocate num_bytes, returns null if the space is full.
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
// Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
diff --git a/runtime/gc/space/rosalloc_space-inl.h b/runtime/gc/space/rosalloc_space-inl.h
index 9d582a3..25d4445 100644
--- a/runtime/gc/space/rosalloc_space-inl.h
+++ b/runtime/gc/space/rosalloc_space-inl.h
@@ -73,18 +73,18 @@
rosalloc_->Alloc<kThreadSafe>(self, num_bytes, &rosalloc_bytes_allocated,
&rosalloc_usable_size,
&rosalloc_bytes_tl_bulk_allocated));
- if (LIKELY(result != NULL)) {
+ if (LIKELY(result != nullptr)) {
if (kDebugSpaces) {
CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result)
<< ") not in bounds of allocation space " << *this;
}
- DCHECK(bytes_allocated != NULL);
+ DCHECK(bytes_allocated != nullptr);
*bytes_allocated = rosalloc_bytes_allocated;
DCHECK_EQ(rosalloc_usable_size, rosalloc_->UsableSize(result));
if (usable_size != nullptr) {
*usable_size = rosalloc_usable_size;
}
- DCHECK(bytes_tl_bulk_allocated != NULL);
+ DCHECK(bytes_tl_bulk_allocated != nullptr);
*bytes_tl_bulk_allocated = rosalloc_bytes_tl_bulk_allocated;
}
return result;
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index eb1d5f4..2c7d93e 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -64,9 +64,9 @@
allocator::RosAlloc* rosalloc = CreateRosAlloc(mem_map->Begin(), starting_size, initial_size,
capacity, low_memory_mode, running_on_valgrind);
- if (rosalloc == NULL) {
+ if (rosalloc == nullptr) {
LOG(ERROR) << "Failed to initialize rosalloc for alloc space (" << name << ")";
- return NULL;
+ return nullptr;
}
// Protect memory beyond the starting size. MoreCore will add r/w permissions when necessary
@@ -113,10 +113,10 @@
size_t starting_size = Heap::kDefaultStartingSize;
MemMap* mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity,
requested_begin);
- if (mem_map == NULL) {
+ if (mem_map == nullptr) {
LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
<< PrettySize(capacity);
- return NULL;
+ return nullptr;
}
RosAllocSpace* space = CreateFromMemMap(mem_map, name, starting_size, initial_size,
@@ -145,7 +145,7 @@
art::gc::allocator::RosAlloc::kPageReleaseModeAll :
art::gc::allocator::RosAlloc::kPageReleaseModeSizeAndEnd,
running_on_valgrind);
- if (rosalloc != NULL) {
+ if (rosalloc != nullptr) {
rosalloc->SetFootprintLimit(initial_size);
} else {
PLOG(ERROR) << "RosAlloc::Create failed";
@@ -170,7 +170,7 @@
rosalloc_->SetFootprintLimit(footprint);
}
// Note RosAlloc zeroes memory internally.
- // Return the new allocation or NULL.
+ // Return the new allocation or null.
CHECK(!kDebugSpaces || result == nullptr || Contains(result));
return result;
}
@@ -192,7 +192,7 @@
size_t RosAllocSpace::Free(Thread* self, mirror::Object* ptr) {
if (kDebugSpaces) {
- CHECK(ptr != NULL);
+ CHECK(ptr != nullptr);
CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
}
if (kRecentFreeCount > 0) {
@@ -309,7 +309,7 @@
MutexLock mu2(self, *Locks::thread_list_lock_);
rosalloc_->InspectAll(callback, arg);
if (do_null_callback_at_end) {
- callback(NULL, NULL, 0, arg); // Indicate end of a space.
+ callback(nullptr, nullptr, 0, arg); // Indicate end of a space.
}
}
tl->ResumeAll();
@@ -324,7 +324,7 @@
// from SignalCatcher::HandleSigQuit().
rosalloc_->InspectAll(callback, arg);
if (do_null_callback_at_end) {
- callback(NULL, NULL, 0, arg); // Indicate end of a space.
+ callback(nullptr, nullptr, 0, arg); // Indicate end of a space.
}
} else if (Locks::mutator_lock_->IsSharedHeld(self)) {
// The mutators are not suspended yet and we have a shared access
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index f2378d9..871ebac 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -187,7 +187,7 @@
private:
friend class art::gc::Heap;
- DISALLOW_COPY_AND_ASSIGN(Space);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Space);
};
std::ostream& operator<<(std::ostream& os, const Space& space);
@@ -337,7 +337,7 @@
uint8_t* limit_;
private:
- DISALLOW_COPY_AND_ASSIGN(ContinuousSpace);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ContinuousSpace);
};
// A space where objects may be allocated higgledy-piggledy throughout virtual memory. Currently
@@ -366,7 +366,7 @@
std::unique_ptr<accounting::LargeObjectBitmap> mark_bitmap_;
private:
- DISALLOW_COPY_AND_ASSIGN(DiscontinuousSpace);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(DiscontinuousSpace);
};
class MemMapSpace : public ContinuousSpace {
@@ -400,7 +400,7 @@
std::unique_ptr<MemMap> mem_map_;
private:
- DISALLOW_COPY_AND_ASSIGN(MemMapSpace);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MemMapSpace);
};
// Used by the heap compaction interface to enable copying from one type of alloc space to another.
@@ -453,7 +453,7 @@
private:
friend class gc::Heap;
- DISALLOW_COPY_AND_ASSIGN(ContinuousMemMapAllocSpace);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ContinuousMemMapAllocSpace);
};
} // namespace space
diff --git a/runtime/gc/task_processor.h b/runtime/gc/task_processor.h
index 67e3a54..5f48619 100644
--- a/runtime/gc/task_processor.h
+++ b/runtime/gc/task_processor.h
@@ -46,6 +46,7 @@
uint64_t target_run_time_;
friend class TaskProcessor;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(HeapTask);
};
// Used to process GC tasks (heap trim, heap transitions, concurrent GC).
@@ -78,6 +79,8 @@
std::unique_ptr<ConditionVariable> cond_ GUARDED_BY(lock_);
std::multiset<HeapTask*, CompareByTargetRunTime> tasks_ GUARDED_BY(lock_);
Thread* running_thread_ GUARDED_BY(lock_);
+
+ DISALLOW_COPY_AND_ASSIGN(TaskProcessor);
};
} // namespace gc
diff --git a/runtime/gc_map.h b/runtime/gc_map.h
index ffe54c4..b4ccdd6 100644
--- a/runtime/gc_map.h
+++ b/runtime/gc_map.h
@@ -28,7 +28,7 @@
class NativePcOffsetToReferenceMap {
public:
explicit NativePcOffsetToReferenceMap(const uint8_t* data) : data_(data) {
- CHECK(data_ != NULL);
+ CHECK(data_ != nullptr);
}
// The number of entries in the table.
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 23af25d..efead51 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -538,7 +538,7 @@
// STRING format:
// ID: ID for this string
- // U1*: UTF8 characters for string (NOT NULL terminated)
+ // U1*: UTF8 characters for string (NOT null-terminated)
// (the record format encodes the length)
__ AddU4(id);
__ AddUtf8String(string.c_str());
@@ -931,7 +931,7 @@
mirror::Class* c = obj->GetClass();
if (c == nullptr) {
- // This object will bother HprofReader, because it has a NULL
+ // This object will bother HprofReader, because it has a null
// class, so just don't dump it. It could be
// gDvm.unlinkedJavaLangClass or it could be an object just
// allocated which hasn't been initialized yet.
@@ -981,6 +981,10 @@
// ClassObjects have their static fields appended, so aren't all the same size.
// But they're at least this size.
__ AddU4(sizeof(mirror::Class)); // instance size
+ } else if (klass->IsStringClass()) {
+ // Strings are variable length with character data at the end like arrays.
+ // This outputs the size of an empty string.
+ __ AddU4(sizeof(mirror::String));
} else if (klass->IsArrayClass() || klass->IsPrimitive()) {
__ AddU4(0);
} else {
@@ -1036,13 +1040,22 @@
// Instance fields for this class (no superclass fields)
int iFieldCount = klass->IsObjectClass() ? 0 : klass->NumInstanceFields();
- __ AddU2((uint16_t)iFieldCount);
+ if (klass->IsStringClass()) {
+ __ AddU2((uint16_t)iFieldCount + 1);
+ } else {
+ __ AddU2((uint16_t)iFieldCount);
+ }
for (int i = 0; i < iFieldCount; ++i) {
ArtField* f = klass->GetInstanceField(i);
__ AddStringId(LookupStringId(f->GetName()));
HprofBasicType t = SignatureToBasicTypeAndSize(f->GetTypeDescriptor(), nullptr);
__ AddU1(t);
}
+ // Add native value character array for strings.
+ if (klass->IsStringClass()) {
+ __ AddStringId(LookupStringId("value"));
+ __ AddU1(hprof_basic_object);
+ }
}
void Hprof::DumpHeapArray(mirror::Array* obj, mirror::Class* klass) {
@@ -1057,7 +1070,7 @@
__ AddU4(length);
__ AddClassId(LookupClassId(klass));
- // Dump the elements, which are always objects or NULL.
+ // Dump the elements, which are always objects or null.
__ AddIdList(obj->AsObjectArray<mirror::Object>());
} else {
size_t size;
@@ -1099,6 +1112,7 @@
// Write the instance data; fields for this class, followed by super class fields,
// and so on. Don't write the klass or monitor fields of Object.class.
+ mirror::Class* orig_klass = klass;
while (!klass->IsObjectClass()) {
int ifieldCount = klass->NumInstanceFields();
for (int i = 0; i < ifieldCount; ++i) {
@@ -1133,8 +1147,31 @@
klass = klass->GetSuperClass();
}
- // Patch the instance field length.
- __ UpdateU4(size_patch_offset, output_->Length() - (size_patch_offset + 4));
+ // Output native value character array for strings.
+ if (orig_klass->IsStringClass()) {
+ mirror::String* s = obj->AsString();
+ mirror::Object* value;
+ if (s->GetLength() == 0) {
+ // If string is empty, use an object-aligned address within the string for the value.
+ value = reinterpret_cast<mirror::Object*>(reinterpret_cast<uintptr_t>(s) + kObjectAlignment);
+ } else {
+ value = reinterpret_cast<mirror::Object*>(s->GetValue());
+ }
+ __ AddObjectId(value);
+
+ // Patch the instance field length.
+ __ UpdateU4(size_patch_offset, output_->Length() - (size_patch_offset + 4));
+
+ __ AddU1(HPROF_PRIMITIVE_ARRAY_DUMP);
+ __ AddObjectId(value);
+ __ AddU4(StackTraceSerialNumber(obj));
+ __ AddU4(s->GetLength());
+ __ AddU1(hprof_basic_char);
+ __ AddU2List(s->GetValue(), s->GetLength());
+ } else {
+ // Patch the instance field length.
+ __ UpdateU4(size_patch_offset, output_->Length() - (size_patch_offset + 4));
+ }
}
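Net effect on the dump, sketched as a toy emitter (4-byte ids and the standard hprof constants assumed; helper names hypothetical): the instance record gains one synthetic object-typed "value" slot, and a separate primitive char-array record carries the characters under the same fabricated id.

#include <cstdint>
#include <vector>

constexpr uint8_t kPrimitiveArrayDumpTag = 0x23;  // HPROF_PRIMITIVE_ARRAY_DUMP
constexpr uint8_t kBasicChar = 5;                 // hprof_basic_char

void AddU4(std::vector<uint8_t>& out, uint32_t v) {
  for (int i = 3; i >= 0; --i) out.push_back(static_cast<uint8_t>(v >> (8 * i)));
}

// Emit the string-specific tail: the synthetic "value" field inside the
// instance dump, then the char-array record holding the actual data.
void EmitStringTail(std::vector<uint8_t>& out, uint32_t fake_value_id,
                    uint32_t trace_serial, const uint16_t* chars, uint32_t len) {
  AddU4(out, fake_value_id);              // Synthetic "value" instance field.
  out.push_back(kPrimitiveArrayDumpTag);  // Standalone array record follows.
  AddU4(out, fake_value_id);              // Same fabricated id as the field.
  AddU4(out, trace_serial);
  AddU4(out, len);
  out.push_back(kBasicChar);
  for (uint32_t i = 0; i < len; ++i) {    // UTF-16 code units, big-endian.
    out.push_back(static_cast<uint8_t>(chars[i] >> 8));
    out.push_back(static_cast<uint8_t>(chars[i] & 0xFF));
  }
}

The empty-string branch above fabricates an object-aligned in-object address precisely so this id stays unique and non-null even when there is no character data to point at.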
void Hprof::VisitRoot(mirror::Object* obj, const RootInfo& info) {
diff --git a/runtime/indirect_reference_table-inl.h b/runtime/indirect_reference_table-inl.h
index e571a0e..39d850f 100644
--- a/runtime/indirect_reference_table-inl.h
+++ b/runtime/indirect_reference_table-inl.h
@@ -31,7 +31,7 @@
// Returns "false" if something looks bad.
inline bool IndirectReferenceTable::GetChecked(IndirectRef iref) const {
if (UNLIKELY(iref == nullptr)) {
- LOG(WARNING) << "Attempt to look up NULL " << kind_;
+ LOG(WARNING) << "Attempt to look up nullptr " << kind_;
return false;
}
if (UNLIKELY(GetIndirectRefKind(iref) == kHandleScopeOrInvalid)) {
@@ -82,6 +82,15 @@
return obj;
}
+inline void IndirectReferenceTable::Update(IndirectRef iref, mirror::Object* obj) {
+ if (!GetChecked(iref)) {
+ LOG(WARNING) << "IndirectReferenceTable Update failed to find reference " << iref;
+ return;
+ }
+ uint32_t idx = ExtractIndex(iref);
+ table_[idx].SetReference(obj);
+}
+
} // namespace art
#endif // ART_RUNTIME_INDIRECT_REFERENCE_TABLE_INL_H_
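What Update() buys callers, reduced to a toy table (not the real IRT, which also encodes kind and serial bits in the reference): an already-published indirect reference can be repointed in place, e.g. after its referent moves.

#include <cassert>
#include <cstddef>
#include <vector>

struct ToyTable {
  std::vector<void*> slots_;

  size_t Add(void* obj) {
    slots_.push_back(obj);
    return slots_.size() - 1;  // The "indirect reference" is just an index here.
  }
  void* Get(size_t iref) const { return slots_[iref]; }
  void Update(size_t iref, void* obj) { slots_[iref] = obj; }  // Repoint in place.
};

int main() {
  int old_loc = 1, new_loc = 2;
  ToyTable table;
  size_t iref = table.Add(&old_loc);
  table.Update(iref, &new_loc);  // E.g. the referent was moved by the GC.
  assert(table.Get(iref) == &new_loc);
  return 0;
}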
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index d6f9682..0ef58ea 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -103,9 +103,9 @@
prevState.all = cookie;
size_t topIndex = segment_state_.parts.topIndex;
- CHECK(obj != NULL);
+ CHECK(obj != nullptr);
VerifyObject(obj);
- DCHECK(table_ != NULL);
+ DCHECK(table_ != nullptr);
DCHECK_GE(segment_state_.parts.numHoles, prevState.parts.numHoles);
if (topIndex == max_entries_) {
@@ -144,7 +144,7 @@
<< " holes=" << segment_state_.parts.numHoles;
}
- DCHECK(result != NULL);
+ DCHECK(result != nullptr);
return result;
}
@@ -172,13 +172,19 @@
int topIndex = segment_state_.parts.topIndex;
int bottomIndex = prevState.parts.topIndex;
- DCHECK(table_ != NULL);
+ DCHECK(table_ != nullptr);
DCHECK_GE(segment_state_.parts.numHoles, prevState.parts.numHoles);
- if (GetIndirectRefKind(iref) == kHandleScopeOrInvalid &&
- Thread::Current()->HandleScopeContains(reinterpret_cast<jobject>(iref))) {
- LOG(WARNING) << "Attempt to remove local handle scope entry from IRT, ignoring";
- return true;
+ if (GetIndirectRefKind(iref) == kHandleScopeOrInvalid) {
+ auto* self = Thread::Current();
+ if (self->HandleScopeContains(reinterpret_cast<jobject>(iref))) {
+ auto* env = self->GetJniEnv();
+ DCHECK(env != nullptr);
+ if (env->check_jni) {
+ LOG(WARNING) << "Attempt to remove local handle scope entry from IRT, ignoring";
+ }
+ return true;
+ }
}
const int idx = ExtractIndex(iref);
if (idx < bottomIndex) {
@@ -227,9 +233,8 @@
}
}
} else {
- // Not the top-most entry. This creates a hole. We NULL out the
- // entry to prevent somebody from deleting it twice and screwing up
- // the hole count.
+ // Not the top-most entry. This creates a hole. We null out the entry to prevent somebody
+ // from deleting it twice and screwing up the hole count.
if (table_[idx].GetReference()->IsNull()) {
LOG(INFO) << "--- WEIRD: removing null entry " << idx;
return false;
@@ -270,9 +275,7 @@
ReferenceTable::Table entries;
for (size_t i = 0; i < Capacity(); ++i) {
mirror::Object* obj = table_[i].GetReference()->Read<kWithoutReadBarrier>();
- if (UNLIKELY(obj == nullptr)) {
- // Remove NULLs.
- } else {
+ if (obj != nullptr) {
obj = table_[i].GetReference()->Read();
entries.push_back(GcRoot<mirror::Object>(obj));
}
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index 0072184..dea5dfd 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -213,6 +213,10 @@
uint32_t GetSerial() const {
return serial_;
}
+ void SetReference(mirror::Object* obj) {
+ DCHECK_LT(serial_, kIRTPrevCount);
+ references_[serial_] = GcRoot<mirror::Object>(obj);
+ }
private:
uint32_t serial_;
@@ -268,9 +272,9 @@
bool IsValid() const;
/*
- * Add a new entry. "obj" must be a valid non-NULL object reference.
+ * Add a new entry. "obj" must be a valid non-null object reference.
*
- * Returns NULL if the table is full (max entries reached, or alloc
+ * Returns null if the table is full (max entries reached, or alloc
* failed during expansion).
*/
IndirectRef Add(uint32_t cookie, mirror::Object* obj)
@@ -294,6 +298,13 @@
}
/*
+ * Update an existing entry.
+ *
+ * Updates an existing indirect reference to point to a new object.
+ */
+ void Update(IndirectRef iref, mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ /*
* Remove an existing entry.
*
* If the entry is not between the current top index and the bottom index
diff --git a/runtime/indirect_reference_table_test.cc b/runtime/indirect_reference_table_test.cc
index fe1b8f0..c20002b 100644
--- a/runtime/indirect_reference_table_test.cc
+++ b/runtime/indirect_reference_table_test.cc
@@ -216,7 +216,7 @@
ASSERT_EQ(0U, irt.Capacity()) << "temporal del not empty";
CheckDump(&irt, 0, 0);
- // nullptr isn't a valid iref.
+ // null isn't a valid iref.
ASSERT_TRUE(irt.Get(nullptr) == nullptr);
// Stale lookup.
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 51600f7..98e6200 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -16,13 +16,10 @@
#include "instrumentation.h"
-#include <sys/uio.h>
-
#include <sstream>
#include "arch/context.h"
#include "atomic.h"
-#include "base/unix_file/fd_file.h"
#include "class_linker.h"
#include "debugger.h"
#include "dex_file-inl.h"
@@ -39,16 +36,18 @@
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "nth_caller_visitor.h"
-#include "os.h"
-#include "scoped_thread_state_change.h"
#include "thread.h"
#include "thread_list.h"
namespace art {
-
namespace instrumentation {
-const bool kVerboseInstrumentation = false;
+constexpr bool kVerboseInstrumentation = false;
+
+// Instrumentation works on non-inlined frames by updating the return PCs
+// of compiled frames.
+static constexpr StackVisitor::StackWalkKind kInstrumentationStackWalk =
+ StackVisitor::StackWalkKind::kSkipInlinedFrames;
static bool InstallStubsClassVisitor(mirror::Class* klass, void* arg)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -64,7 +63,7 @@
have_method_entry_listeners_(false), have_method_exit_listeners_(false),
have_method_unwind_listeners_(false), have_dex_pc_listeners_(false),
have_field_read_listeners_(false), have_field_write_listeners_(false),
- have_exception_caught_listeners_(false),
+ have_exception_caught_listeners_(false), have_backward_branch_listeners_(false),
deoptimized_methods_lock_("deoptimized methods lock"),
deoptimization_enabled_(false),
interpreter_handler_table_(kMainHandlerTable),
@@ -166,25 +165,25 @@
// existing instrumentation frames.
static void InstrumentationInstallStack(Thread* thread, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- struct InstallStackVisitor : public StackVisitor {
+ struct InstallStackVisitor FINAL : public StackVisitor {
InstallStackVisitor(Thread* thread_in, Context* context, uintptr_t instrumentation_exit_pc)
- : StackVisitor(thread_in, context),
+ : StackVisitor(thread_in, context, kInstrumentationStackWalk),
instrumentation_stack_(thread_in->GetInstrumentationStack()),
instrumentation_exit_pc_(instrumentation_exit_pc),
reached_existing_instrumentation_frames_(false), instrumentation_stack_depth_(0),
last_return_pc_(0) {
}
- virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* m = GetMethod();
- if (m == NULL) {
+ if (m == nullptr) {
if (kVerboseInstrumentation) {
LOG(INFO) << " Skipping upcall. Frame " << GetFrameId();
}
last_return_pc_ = 0;
return true; // Ignore upcalls.
}
- if (GetCurrentQuickFrame() == NULL) {
+ if (GetCurrentQuickFrame() == nullptr) {
bool interpreter_frame = true;
InstrumentationStackFrame instrumentation_frame(GetThisObject(), m, 0, GetFrameId(),
interpreter_frame);
@@ -306,28 +305,29 @@
// Removes the instrumentation exit pc as the return PC for every quick frame.
static void InstrumentationRestoreStack(Thread* thread, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- struct RestoreStackVisitor : public StackVisitor {
+ struct RestoreStackVisitor FINAL : public StackVisitor {
RestoreStackVisitor(Thread* thread_in, uintptr_t instrumentation_exit_pc,
Instrumentation* instrumentation)
- : StackVisitor(thread_in, NULL), thread_(thread_in),
+ : StackVisitor(thread_in, nullptr, kInstrumentationStackWalk),
+ thread_(thread_in),
instrumentation_exit_pc_(instrumentation_exit_pc),
instrumentation_(instrumentation),
instrumentation_stack_(thread_in->GetInstrumentationStack()),
frames_removed_(0) {}
- virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (instrumentation_stack_->size() == 0) {
return false; // Stop.
}
mirror::ArtMethod* m = GetMethod();
- if (GetCurrentQuickFrame() == NULL) {
+ if (GetCurrentQuickFrame() == nullptr) {
if (kVerboseInstrumentation) {
LOG(INFO) << " Ignoring a shadow frame. Frame " << GetFrameId()
<< " Method=" << PrettyMethod(m);
}
return true; // Ignore shadow frames.
}
- if (m == NULL) {
+ if (m == nullptr) {
if (kVerboseInstrumentation) {
LOG(INFO) << " Skipping upcall. Frame " << GetFrameId();
}
@@ -390,25 +390,29 @@
}
}
+static bool HasEvent(Instrumentation::InstrumentationEvent expected, uint32_t events) {
+ return (events & expected) != 0;
+}
+
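
For reference, a standalone sketch of the bit test the new HasEvent() helper performs. The enumerator values below are illustrative stand-ins for the kMethodEntered/kMethodExited/... constants declared in instrumentation.h:

#include <cassert>
#include <cstdint>

enum Event : uint32_t { kEnter = 0x1, kExit = 0x2, kUnwind = 0x4 };

static bool HasEvent(Event expected, uint32_t events) {
  return (events & expected) != 0;  // true iff the event's bit is set in the mask
}

int main() {
  uint32_t mask = kEnter | kExit;    // a listener registered for two events
  assert(HasEvent(kEnter, mask));
  assert(HasEvent(kExit, mask));
  assert(!HasEvent(kUnwind, mask));  // not part of the mask
  return 0;
}
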
void Instrumentation::AddListener(InstrumentationListener* listener, uint32_t events) {
Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
- if ((events & kMethodEntered) != 0) {
+ if (HasEvent(kMethodEntered, events)) {
method_entry_listeners_.push_back(listener);
have_method_entry_listeners_ = true;
}
- if ((events & kMethodExited) != 0) {
+ if (HasEvent(kMethodExited, events)) {
method_exit_listeners_.push_back(listener);
have_method_exit_listeners_ = true;
}
- if ((events & kMethodUnwind) != 0) {
+ if (HasEvent(kMethodUnwind, events)) {
method_unwind_listeners_.push_back(listener);
have_method_unwind_listeners_ = true;
}
- if ((events & kBackwardBranch) != 0) {
+ if (HasEvent(kBackwardBranch, events)) {
backward_branch_listeners_.push_back(listener);
have_backward_branch_listeners_ = true;
}
- if ((events & kDexPcMoved) != 0) {
+ if (HasEvent(kDexPcMoved, events)) {
std::list<InstrumentationListener*>* modified;
if (have_dex_pc_listeners_) {
modified = new std::list<InstrumentationListener*>(*dex_pc_listeners_.get());
@@ -419,7 +423,7 @@
dex_pc_listeners_.reset(modified);
have_dex_pc_listeners_ = true;
}
- if ((events & kFieldRead) != 0) {
+ if (HasEvent(kFieldRead, events)) {
std::list<InstrumentationListener*>* modified;
if (have_field_read_listeners_) {
modified = new std::list<InstrumentationListener*>(*field_read_listeners_.get());
@@ -430,7 +434,7 @@
field_read_listeners_.reset(modified);
have_field_read_listeners_ = true;
}
- if ((events & kFieldWritten) != 0) {
+ if (HasEvent(kFieldWritten, events)) {
std::list<InstrumentationListener*>* modified;
if (have_field_write_listeners_) {
modified = new std::list<InstrumentationListener*>(*field_write_listeners_.get());
@@ -441,7 +445,7 @@
field_write_listeners_.reset(modified);
have_field_write_listeners_ = true;
}
- if ((events & kExceptionCaught) != 0) {
+ if (HasEvent(kExceptionCaught, events)) {
std::list<InstrumentationListener*>* modified;
if (have_exception_caught_listeners_) {
modified = new std::list<InstrumentationListener*>(*exception_caught_listeners_.get());
@@ -458,102 +462,104 @@
void Instrumentation::RemoveListener(InstrumentationListener* listener, uint32_t events) {
Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
- if ((events & kMethodEntered) != 0) {
- if (have_method_entry_listeners_) {
- method_entry_listeners_.remove(listener);
- have_method_entry_listeners_ = !method_entry_listeners_.empty();
- }
+ if (HasEvent(kMethodEntered, events) && have_method_entry_listeners_) {
+ method_entry_listeners_.remove(listener);
+ have_method_entry_listeners_ = !method_entry_listeners_.empty();
}
- if ((events & kMethodExited) != 0) {
- if (have_method_exit_listeners_) {
- method_exit_listeners_.remove(listener);
- have_method_exit_listeners_ = !method_exit_listeners_.empty();
- }
+ if (HasEvent(kMethodExited, events) && have_method_exit_listeners_) {
+ method_exit_listeners_.remove(listener);
+ have_method_exit_listeners_ = !method_exit_listeners_.empty();
}
- if ((events & kMethodUnwind) != 0) {
- if (have_method_unwind_listeners_) {
+ if (HasEvent(kMethodUnwind, events) && have_method_unwind_listeners_) {
method_unwind_listeners_.remove(listener);
have_method_unwind_listeners_ = !method_unwind_listeners_.empty();
- }
}
- if ((events & kDexPcMoved) != 0) {
+ if (HasEvent(kBackwardBranch, events) && have_backward_branch_listeners_) {
+ backward_branch_listeners_.remove(listener);
+ have_backward_branch_listeners_ = !backward_branch_listeners_.empty();
+ }
+ if (HasEvent(kDexPcMoved, events) && have_dex_pc_listeners_) {
+ std::list<InstrumentationListener*>* modified =
+ new std::list<InstrumentationListener*>(*dex_pc_listeners_.get());
+ modified->remove(listener);
+ have_dex_pc_listeners_ = !modified->empty();
if (have_dex_pc_listeners_) {
- std::list<InstrumentationListener*>* modified =
- new std::list<InstrumentationListener*>(*dex_pc_listeners_.get());
- modified->remove(listener);
- have_dex_pc_listeners_ = !modified->empty();
- if (have_dex_pc_listeners_) {
- dex_pc_listeners_.reset(modified);
- } else {
- dex_pc_listeners_.reset();
- delete modified;
- }
+ dex_pc_listeners_.reset(modified);
+ } else {
+ dex_pc_listeners_.reset();
+ delete modified;
}
}
- if ((events & kFieldRead) != 0) {
+ if (HasEvent(kFieldRead, events) && have_field_read_listeners_) {
+ std::list<InstrumentationListener*>* modified =
+ new std::list<InstrumentationListener*>(*field_read_listeners_.get());
+ modified->remove(listener);
+ have_field_read_listeners_ = !modified->empty();
if (have_field_read_listeners_) {
- std::list<InstrumentationListener*>* modified =
- new std::list<InstrumentationListener*>(*field_read_listeners_.get());
- modified->remove(listener);
- have_field_read_listeners_ = !modified->empty();
- if (have_field_read_listeners_) {
- field_read_listeners_.reset(modified);
- } else {
- field_read_listeners_.reset();
- delete modified;
- }
+ field_read_listeners_.reset(modified);
+ } else {
+ field_read_listeners_.reset();
+ delete modified;
}
}
- if ((events & kFieldWritten) != 0) {
+ if (HasEvent(kFieldWritten, events) && have_field_write_listeners_) {
+ std::list<InstrumentationListener*>* modified =
+ new std::list<InstrumentationListener*>(*field_write_listeners_.get());
+ modified->remove(listener);
+ have_field_write_listeners_ = !modified->empty();
if (have_field_write_listeners_) {
- std::list<InstrumentationListener*>* modified =
- new std::list<InstrumentationListener*>(*field_write_listeners_.get());
- modified->remove(listener);
- have_field_write_listeners_ = !modified->empty();
- if (have_field_write_listeners_) {
- field_write_listeners_.reset(modified);
- } else {
- field_write_listeners_.reset();
- delete modified;
- }
+ field_write_listeners_.reset(modified);
+ } else {
+ field_write_listeners_.reset();
+ delete modified;
}
}
- if ((events & kExceptionCaught) != 0) {
+ if (HasEvent(kExceptionCaught, events) && have_exception_caught_listeners_) {
+ std::list<InstrumentationListener*>* modified =
+ new std::list<InstrumentationListener*>(*exception_caught_listeners_.get());
+ modified->remove(listener);
+ have_exception_caught_listeners_ = !modified->empty();
if (have_exception_caught_listeners_) {
- std::list<InstrumentationListener*>* modified =
- new std::list<InstrumentationListener*>(*exception_caught_listeners_.get());
- modified->remove(listener);
- have_exception_caught_listeners_ = !modified->empty();
- if (have_exception_caught_listeners_) {
- exception_caught_listeners_.reset(modified);
- } else {
- exception_caught_listeners_.reset();
- delete modified;
- }
+ exception_caught_listeners_.reset(modified);
+ } else {
+ exception_caught_listeners_.reset();
+ delete modified;
}
}
UpdateInterpreterHandlerTable();
}
-void Instrumentation::ConfigureStubs(bool require_entry_exit_stubs, bool require_interpreter) {
- interpret_only_ = require_interpreter || forced_interpret_only_;
- // Compute what level of instrumentation is required and compare to current.
- int desired_level, current_level;
- if (require_interpreter) {
- desired_level = 2;
- } else if (require_entry_exit_stubs) {
- desired_level = 1;
- } else {
- desired_level = 0;
- }
+Instrumentation::InstrumentationLevel Instrumentation::GetCurrentInstrumentationLevel() const {
if (interpreter_stubs_installed_) {
- current_level = 2;
+ return InstrumentationLevel::kInstrumentWithInterpreter;
} else if (entry_exit_stubs_installed_) {
- current_level = 1;
+ return InstrumentationLevel::kInstrumentWithInstrumentationStubs;
} else {
- current_level = 0;
+ return InstrumentationLevel::kInstrumentNothing;
}
- if (desired_level == current_level) {
+}
+
+void Instrumentation::ConfigureStubs(const char* key, InstrumentationLevel desired_level) {
+ // Store the instrumentation level for this key or remove it.
+ if (desired_level == InstrumentationLevel::kInstrumentNothing) {
+ // The client no longer needs instrumentation.
+ requested_instrumentation_levels_.erase(key);
+ } else {
+ // The client needs instrumentation.
+ requested_instrumentation_levels_.Overwrite(key, desired_level);
+ }
+
+ // Look for the highest required instrumentation level.
+ InstrumentationLevel requested_level = InstrumentationLevel::kInstrumentNothing;
+ for (const auto& v : requested_instrumentation_levels_) {
+ requested_level = std::max(requested_level, v.second);
+ }
+
+ interpret_only_ = (requested_level == InstrumentationLevel::kInstrumentWithInterpreter) ||
+ forced_interpret_only_;
+
+ InstrumentationLevel current_level = GetCurrentInstrumentationLevel();
+ if (requested_level == current_level) {
// We're already set.
return;
}
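
For illustration, a minimal self-contained sketch (hypothetical names, not ART code) of the aggregation scheme ConfigureStubs() implements above: each client records its desired level under a unique key, and the effective level is the maximum over all recorded requests, so one client dropping its request only lowers the level once no other client still needs it:

#include <algorithm>
#include <cstring>
#include <map>

enum class Level { kNothing, kStubs, kInterpreter };  // ordered low to high

struct CStrLess {
  bool operator()(const char* a, const char* b) const {
    return std::strcmp(a, b) < 0;
  }
};

class LevelTable {
 public:
  // Records (or clears) one client's request and returns the effective level.
  Level Configure(const char* key, Level desired) {
    if (desired == Level::kNothing) {
      levels_.erase(key);      // the client no longer needs instrumentation
    } else {
      levels_[key] = desired;  // record or overwrite the client's request
    }
    Level requested = Level::kNothing;
    for (const auto& entry : levels_) {
      requested = std::max(requested, entry.second);  // highest level wins
    }
    return requested;
  }

 private:
  std::map<const char*, Level, CStrLess> levels_;  // stand-in for SafeMap
};

Note one simplification: the real InstrumentationLevelTable keys on const char* directly (clients pass string literals), whereas the sketch's comparator compares string contents.
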
@@ -561,12 +567,14 @@
Runtime* runtime = Runtime::Current();
Locks::mutator_lock_->AssertExclusiveHeld(self);
Locks::thread_list_lock_->AssertNotHeld(self);
- if (desired_level > 0) {
- if (require_interpreter) {
+ if (requested_level > InstrumentationLevel::kInstrumentNothing) {
+ if (requested_level == InstrumentationLevel::kInstrumentWithInterpreter) {
interpreter_stubs_installed_ = true;
- } else {
- CHECK(require_entry_exit_stubs);
entry_exit_stubs_installed_ = true;
+ } else {
+ CHECK_EQ(requested_level, InstrumentationLevel::kInstrumentWithInstrumentationStubs);
+ entry_exit_stubs_installed_ = true;
+ interpreter_stubs_installed_ = false;
}
runtime->GetClassLinker()->VisitClasses(InstallStubsClassVisitor, this);
instrumentation_stubs_installed_ = true;
@@ -590,8 +598,7 @@
}
}
-static void ResetQuickAllocEntryPointsForThread(Thread* thread, void* arg) {
- UNUSED(arg);
+static void ResetQuickAllocEntryPointsForThread(Thread* thread, void* arg ATTRIBUTE_UNUSED) {
thread->ResetQuickAllocEntryPointsForThread();
}
@@ -645,7 +652,7 @@
Runtime* runtime = Runtime::Current();
if (runtime->IsStarted()) {
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
- runtime->GetThreadList()->ForEach(ResetQuickAllocEntryPointsForThread, NULL);
+ runtime->GetThreadList()->ForEach(ResetQuickAllocEntryPointsForThread, nullptr);
}
}
@@ -804,11 +811,11 @@
deoptimization_enabled_ = true;
}
-void Instrumentation::DisableDeoptimization() {
+void Instrumentation::DisableDeoptimization(const char* key) {
CHECK_EQ(deoptimization_enabled_, true);
// If we deoptimized everything, undo it.
if (interpreter_stubs_installed_) {
- UndeoptimizeEverything();
+ UndeoptimizeEverything(key);
}
 // Undeoptimize selected methods.
while (true) {
@@ -828,25 +835,35 @@
 // Indicates whether instrumentation should notify listeners of method enter/exit events.
bool Instrumentation::ShouldNotifyMethodEnterExitEvents() const {
+ if (!HasMethodEntryListeners() && !HasMethodExitListeners()) {
+ return false;
+ }
return !deoptimization_enabled_ && !interpreter_stubs_installed_;
}
-void Instrumentation::DeoptimizeEverything() {
- CHECK(!interpreter_stubs_installed_);
- ConfigureStubs(false, true);
+void Instrumentation::DeoptimizeEverything(const char* key) {
+ CHECK(deoptimization_enabled_);
+ ConfigureStubs(key, InstrumentationLevel::kInstrumentWithInterpreter);
}
-void Instrumentation::UndeoptimizeEverything() {
+void Instrumentation::UndeoptimizeEverything(const char* key) {
CHECK(interpreter_stubs_installed_);
- ConfigureStubs(false, false);
+ CHECK(deoptimization_enabled_);
+ ConfigureStubs(key, InstrumentationLevel::kInstrumentNothing);
}
-void Instrumentation::EnableMethodTracing(bool require_interpreter) {
- ConfigureStubs(!require_interpreter, require_interpreter);
+void Instrumentation::EnableMethodTracing(const char* key, bool needs_interpreter) {
+ InstrumentationLevel level;
+ if (needs_interpreter) {
+ level = InstrumentationLevel::kInstrumentWithInterpreter;
+ } else {
+ level = InstrumentationLevel::kInstrumentWithInstrumentationStubs;
+ }
+ ConfigureStubs(key, level);
}
-void Instrumentation::DisableMethodTracing() {
- ConfigureStubs(false, false);
+void Instrumentation::DisableMethodTracing(const char* key) {
+ ConfigureStubs(key, InstrumentationLevel::kInstrumentNothing);
}
const void* Instrumentation::GetQuickCodeFor(mirror::ArtMethod* method, size_t pointer_size) const {
@@ -896,7 +913,7 @@
void Instrumentation::MethodUnwindEvent(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method,
uint32_t dex_pc) const {
- if (have_method_unwind_listeners_) {
+ if (HasMethodUnwindListeners()) {
for (InstrumentationListener* listener : method_unwind_listeners_) {
listener->MethodUnwind(thread, this_object, method, dex_pc);
}
@@ -906,11 +923,9 @@
void Instrumentation::DexPcMovedEventImpl(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method,
uint32_t dex_pc) const {
- if (HasDexPcListeners()) {
- std::shared_ptr<std::list<InstrumentationListener*>> original(dex_pc_listeners_);
- for (InstrumentationListener* listener : *original.get()) {
- listener->DexPcMoved(thread, this_object, method, dex_pc);
- }
+ std::shared_ptr<std::list<InstrumentationListener*>> original(dex_pc_listeners_);
+ for (InstrumentationListener* listener : *original.get()) {
+ listener->DexPcMoved(thread, this_object, method, dex_pc);
}
}
@@ -924,22 +939,18 @@
void Instrumentation::FieldReadEventImpl(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc,
ArtField* field) const {
- if (HasFieldReadListeners()) {
- std::shared_ptr<std::list<InstrumentationListener*>> original(field_read_listeners_);
- for (InstrumentationListener* listener : *original.get()) {
- listener->FieldRead(thread, this_object, method, dex_pc, field);
- }
+ std::shared_ptr<std::list<InstrumentationListener*>> original(field_read_listeners_);
+ for (InstrumentationListener* listener : *original.get()) {
+ listener->FieldRead(thread, this_object, method, dex_pc, field);
}
}
void Instrumentation::FieldWriteEventImpl(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc,
ArtField* field, const JValue& field_value) const {
- if (HasFieldWriteListeners()) {
- std::shared_ptr<std::list<InstrumentationListener*>> original(field_write_listeners_);
- for (InstrumentationListener* listener : *original.get()) {
- listener->FieldWritten(thread, this_object, method, dex_pc, field, field_value);
- }
+ std::shared_ptr<std::list<InstrumentationListener*>> original(field_write_listeners_);
+ for (InstrumentationListener* listener : *original.get()) {
+ listener->FieldWritten(thread, this_object, method, dex_pc, field, field_value);
}
}
@@ -959,7 +970,7 @@
static void CheckStackDepth(Thread* self, const InstrumentationStackFrame& instrumentation_frame,
int delta)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- size_t frame_id = StackVisitor::ComputeNumFrames(self) + delta;
+ size_t frame_id = StackVisitor::ComputeNumFrames(self, kInstrumentationStackWalk) + delta;
if (frame_id != instrumentation_frame.frame_id_) {
LOG(ERROR) << "Expected frame_id=" << frame_id << " but found "
<< instrumentation_frame.frame_id_;
@@ -972,7 +983,7 @@
mirror::ArtMethod* method,
uintptr_t lr, bool interpreter_entry) {
// We have a callee-save frame meaning this value is guaranteed to never be 0.
- size_t frame_id = StackVisitor::ComputeNumFrames(self);
+ size_t frame_id = StackVisitor::ComputeNumFrames(self, kInstrumentationStackWalk);
std::deque<instrumentation::InstrumentationStackFrame>* stack = self->GetInstrumentationStack();
if (kVerboseInstrumentation) {
LOG(INFO) << "Entering " << PrettyMethod(method) << " from PC " << reinterpret_cast<void*>(lr);
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 8b7fcca..7d70d21 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -22,11 +22,10 @@
#include <map>
#include "arch/instruction_set.h"
-#include "atomic.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "gc_root.h"
-#include "object_callbacks.h"
+#include "safe_map.h"
namespace art {
namespace mirror {
@@ -67,8 +66,6 @@
uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
// Call-back for when a method is exited.
- // TODO: its likely passing the return value would be useful, however, we may need to get and
- // parse the shorty to determine what kind of register holds the result.
virtual void MethodExited(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc,
const JValue& return_value)
@@ -119,6 +116,12 @@
kBackwardBranch = 0x80,
};
+ enum class InstrumentationLevel {
+ kInstrumentNothing, // execute without instrumentation
+ kInstrumentWithInstrumentationStubs, // execute with instrumentation entry/exit stubs
+ kInstrumentWithInterpreter // execute with interpreter
+ };
+
Instrumentation();
 // Add a listener to be notified of the masked together set of instrumentation events. This
@@ -138,7 +141,7 @@
void EnableDeoptimization()
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
LOCKS_EXCLUDED(deoptimized_methods_lock_);
- void DisableDeoptimization()
+ void DisableDeoptimization(const char* key)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
LOCKS_EXCLUDED(deoptimized_methods_lock_);
bool AreAllMethodsDeoptimized() const {
@@ -147,12 +150,12 @@
bool ShouldNotifyMethodEnterExitEvents() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Executes everything with interpreter.
- void DeoptimizeEverything()
+ void DeoptimizeEverything(const char* key)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_);
// Executes everything with compiled code (or interpreter if there is no code).
- void UndeoptimizeEverything()
+ void UndeoptimizeEverything(const char* key)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_);
@@ -170,18 +173,19 @@
LOCKS_EXCLUDED(Locks::thread_list_lock_, deoptimized_methods_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Indicates whether the method has been deoptimized so it is executed with the interpreter.
bool IsDeoptimized(mirror::ArtMethod* method)
LOCKS_EXCLUDED(deoptimized_methods_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Enable method tracing by installing instrumentation entry/exit stubs.
- void EnableMethodTracing(
- bool require_interpreter = kDeoptimizeForAccurateMethodEntryExitListeners)
+ // Enable method tracing by installing instrumentation entry/exit stubs or the interpreter.
+ void EnableMethodTracing(const char* key,
+ bool needs_interpreter = kDeoptimizeForAccurateMethodEntryExitListeners)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_);
- // Disable method tracing by uninstalling instrumentation entry/exit stubs.
- void DisableMethodTracing()
+ // Disable method tracing by uninstalling instrumentation entry/exit stubs or the interpreter.
+ void DisableMethodTracing(const char* key)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_);
@@ -236,6 +240,10 @@
return have_method_exit_listeners_;
}
+ bool HasMethodUnwindListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return have_method_unwind_listeners_;
+ }
+
bool HasDexPcListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return have_dex_pc_listeners_;
}
@@ -355,8 +363,14 @@
LOCKS_EXCLUDED(deoptimized_methods_lock_);
private:
+ InstrumentationLevel GetCurrentInstrumentationLevel() const;
+
// Does the job of installing or removing instrumentation code within methods.
- void ConfigureStubs(bool require_entry_exit_stubs, bool require_interpreter)
+ // In order to support multiple clients using instrumentation at the same time,
+ // the caller must pass a unique key (a string) identifying it so we remember which
+ // instrumentation level it needs. Therefore the current instrumentation level
+ // becomes the highest instrumentation level required by any client.
+ void ConfigureStubs(const char* key, InstrumentationLevel desired_instrumentation_level)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_,
deoptimized_methods_lock_);
@@ -452,6 +466,11 @@
// Do we have any backward branch listeners? Short-cut to avoid taking the instrumentation_lock_.
bool have_backward_branch_listeners_ GUARDED_BY(Locks::mutator_lock_);
+ // Contains the instrumentation level required by each client of the instrumentation, where
+ // each client is identified by a string key.
+ typedef SafeMap<const char*, InstrumentationLevel> InstrumentationLevelTable;
+ InstrumentationLevelTable requested_instrumentation_levels_ GUARDED_BY(Locks::mutator_lock_);
+
// The event listeners, written to with the mutator_lock_ exclusively held.
std::list<InstrumentationListener*> method_entry_listeners_ GUARDED_BY(Locks::mutator_lock_);
std::list<InstrumentationListener*> method_exit_listeners_ GUARDED_BY(Locks::mutator_lock_);
@@ -481,9 +500,12 @@
size_t quick_alloc_entry_points_instrumentation_counter_
GUARDED_BY(Locks::instrument_entrypoints_lock_);
+ friend class InstrumentationTest; // For GetCurrentInstrumentationLevel and ConfigureStubs.
+
DISALLOW_COPY_AND_ASSIGN(Instrumentation);
};
std::ostream& operator<<(std::ostream& os, const Instrumentation::InstrumentationEvent& rhs);
+std::ostream& operator<<(std::ostream& os, const Instrumentation::InstrumentationLevel& rhs);
// An element in the instrumentation side stack maintained in art::Thread.
struct InstrumentationStackFrame {
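
A hypothetical client of the keyed tracing API declared above ("sampling-tracer" is an arbitrary key, and both functions assume the instrumentation.h declarations). The locking ceremony is elided: as the declarations state, the caller must hold the mutator lock exclusively, which in practice means suspending all threads first (see the test helpers in the new test file below):

void StartTracing(art::instrumentation::Instrumentation* instr) {
  // The key identifies this client; its requested level is tracked
  // independently of other instrumentation users (e.g. the debugger).
  instr->EnableMethodTracing("sampling-tracer", /* needs_interpreter */ false);
}

void StopTracing(art::instrumentation::Instrumentation* instr) {
  // Removes only this client's request; instrumentation stays active if
  // another client still needs it.
  instr->DisableMethodTracing("sampling-tracer");
}
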
diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc
new file mode 100644
index 0000000..5afacb8
--- /dev/null
+++ b/runtime/instrumentation_test.cc
@@ -0,0 +1,791 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instrumentation.h"
+
+#include "common_runtime_test.h"
+#include "common_throws.h"
+#include "class_linker-inl.h"
+#include "dex_file.h"
+#include "handle_scope-inl.h"
+#include "jvalue.h"
+#include "runtime.h"
+#include "scoped_thread_state_change.h"
+#include "thread_list.h"
+#include "thread-inl.h"
+
+namespace art {
+namespace instrumentation {
+
+class TestInstrumentationListener FINAL : public instrumentation::InstrumentationListener {
+ public:
+ TestInstrumentationListener()
+ : received_method_enter_event(false), received_method_exit_event(false),
+ received_method_unwind_event(false), received_dex_pc_moved_event(false),
+ received_field_read_event(false), received_field_written_event(false),
+ received_exception_caught_event(false), received_backward_branch_event(false) {}
+
+ virtual ~TestInstrumentationListener() {}
+
+ void MethodEntered(Thread* thread ATTRIBUTE_UNUSED,
+ mirror::Object* this_object ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ uint32_t dex_pc ATTRIBUTE_UNUSED)
+ OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ received_method_enter_event = true;
+ }
+
+ void MethodExited(Thread* thread ATTRIBUTE_UNUSED,
+ mirror::Object* this_object ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ uint32_t dex_pc ATTRIBUTE_UNUSED,
+ const JValue& return_value ATTRIBUTE_UNUSED)
+ OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ received_method_exit_event = true;
+ }
+
+ void MethodUnwind(Thread* thread ATTRIBUTE_UNUSED,
+ mirror::Object* this_object ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ uint32_t dex_pc ATTRIBUTE_UNUSED)
+ OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ received_method_unwind_event = true;
+ }
+
+ void DexPcMoved(Thread* thread ATTRIBUTE_UNUSED,
+ mirror::Object* this_object ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ uint32_t new_dex_pc ATTRIBUTE_UNUSED)
+ OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ received_dex_pc_moved_event = true;
+ }
+
+ void FieldRead(Thread* thread ATTRIBUTE_UNUSED,
+ mirror::Object* this_object ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ uint32_t dex_pc ATTRIBUTE_UNUSED,
+ ArtField* field ATTRIBUTE_UNUSED)
+ OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ received_field_read_event = true;
+ }
+
+ void FieldWritten(Thread* thread ATTRIBUTE_UNUSED,
+ mirror::Object* this_object ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ uint32_t dex_pc ATTRIBUTE_UNUSED,
+ ArtField* field ATTRIBUTE_UNUSED,
+ const JValue& field_value ATTRIBUTE_UNUSED)
+ OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ received_field_written_event = true;
+ }
+
+ void ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED,
+ mirror::Throwable* exception_object ATTRIBUTE_UNUSED)
+ OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ received_exception_caught_event = true;
+ }
+
+ void BackwardBranch(Thread* thread ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ int32_t dex_pc_offset ATTRIBUTE_UNUSED)
+ OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ received_backward_branch_event = true;
+ }
+
+ void Reset() {
+ received_method_enter_event = false;
+ received_method_exit_event = false;
+ received_method_unwind_event = false;
+ received_dex_pc_moved_event = false;
+ received_field_read_event = false;
+ received_field_written_event = false;
+ received_exception_caught_event = false;
+ received_backward_branch_event = false;
+ }
+
+ bool received_method_enter_event;
+ bool received_method_exit_event;
+ bool received_method_unwind_event;
+ bool received_dex_pc_moved_event;
+ bool received_field_read_event;
+ bool received_field_written_event;
+ bool received_exception_caught_event;
+ bool received_backward_branch_event;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TestInstrumentationListener);
+};
+
+class InstrumentationTest : public CommonRuntimeTest {
+ public:
+ // Unique keys used to test Instrumentation::ConfigureStubs.
+ static constexpr const char* kClientOneKey = "TestClient1";
+ static constexpr const char* kClientTwoKey = "TestClient2";
+
+ void CheckConfigureStubs(const char* key, Instrumentation::InstrumentationLevel level) {
+ ScopedObjectAccess soa(Thread::Current());
+ instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
+ {
+ soa.Self()->TransitionFromRunnableToSuspended(kSuspended);
+ Runtime* runtime = Runtime::Current();
+ runtime->GetThreadList()->SuspendAll("Instrumentation::ConfigureStubs");
+ instr->ConfigureStubs(key, level);
+ runtime->GetThreadList()->ResumeAll();
+ soa.Self()->TransitionFromSuspendedToRunnable();
+ }
+ }
+
+ Instrumentation::InstrumentationLevel GetCurrentInstrumentationLevel() {
+ return Runtime::Current()->GetInstrumentation()->GetCurrentInstrumentationLevel();
+ }
+
+ size_t GetInstrumentationUserCount() {
+ ScopedObjectAccess soa(Thread::Current());
+ return Runtime::Current()->GetInstrumentation()->requested_instrumentation_levels_.size();
+ }
+
+ void TestEvent(uint32_t instrumentation_event) {
+ ScopedObjectAccess soa(Thread::Current());
+ instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
+ TestInstrumentationListener listener;
+ {
+ soa.Self()->TransitionFromRunnableToSuspended(kSuspended);
+ Runtime* runtime = Runtime::Current();
+ runtime->GetThreadList()->SuspendAll("Add instrumentation listener");
+ instr->AddListener(&listener, instrumentation_event);
+ runtime->GetThreadList()->ResumeAll();
+ soa.Self()->TransitionFromSuspendedToRunnable();
+ }
+
+ mirror::ArtMethod* const event_method = nullptr;
+ mirror::Object* const event_obj = nullptr;
+ const uint32_t event_dex_pc = 0;
+
+ // Check the listener is registered and is notified of the event.
+ EXPECT_TRUE(HasEventListener(instr, instrumentation_event));
+ EXPECT_FALSE(DidListenerReceiveEvent(listener, instrumentation_event));
+ ReportEvent(instr, instrumentation_event, soa.Self(), event_method, event_obj, event_dex_pc);
+ EXPECT_TRUE(DidListenerReceiveEvent(listener, instrumentation_event));
+
+ listener.Reset();
+ {
+ soa.Self()->TransitionFromRunnableToSuspended(kSuspended);
+ Runtime* runtime = Runtime::Current();
+ runtime->GetThreadList()->SuspendAll("Remove instrumentation listener");
+ instr->RemoveListener(&listener, instrumentation_event);
+ runtime->GetThreadList()->ResumeAll();
+ soa.Self()->TransitionFromSuspendedToRunnable();
+ }
+
+ // Check the listener is not registered and is not notified of the event.
+ EXPECT_FALSE(HasEventListener(instr, instrumentation_event));
+ EXPECT_FALSE(DidListenerReceiveEvent(listener, instrumentation_event));
+ ReportEvent(instr, instrumentation_event, soa.Self(), event_method, event_obj, event_dex_pc);
+ EXPECT_FALSE(DidListenerReceiveEvent(listener, instrumentation_event));
+ }
+
+ void DeoptimizeMethod(Thread* self, Handle<mirror::ArtMethod> method,
+ bool enable_deoptimization)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Runtime* runtime = Runtime::Current();
+ instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
+ self->TransitionFromRunnableToSuspended(kSuspended);
+ runtime->GetThreadList()->SuspendAll("Single method deoptimization");
+ if (enable_deoptimization) {
+ instrumentation->EnableDeoptimization();
+ }
+ instrumentation->Deoptimize(method.Get());
+ runtime->GetThreadList()->ResumeAll();
+ self->TransitionFromSuspendedToRunnable();
+ }
+
+ void UndeoptimizeMethod(Thread* self, Handle<mirror::ArtMethod> method,
+ const char* key, bool disable_deoptimization)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Runtime* runtime = Runtime::Current();
+ instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
+ self->TransitionFromRunnableToSuspended(kSuspended);
+ runtime->GetThreadList()->SuspendAll("Single method undeoptimization");
+ instrumentation->Undeoptimize(method.Get());
+ if (disable_deoptimization) {
+ instrumentation->DisableDeoptimization(key);
+ }
+ runtime->GetThreadList()->ResumeAll();
+ self->TransitionFromSuspendedToRunnable();
+ }
+
+ void DeoptimizeEverything(Thread* self, const char* key, bool enable_deoptimization)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Runtime* runtime = Runtime::Current();
+ instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
+ self->TransitionFromRunnableToSuspended(kSuspended);
+ runtime->GetThreadList()->SuspendAll("Full deoptimization");
+ if (enable_deoptimization) {
+ instrumentation->EnableDeoptimization();
+ }
+ instrumentation->DeoptimizeEverything(key);
+ runtime->GetThreadList()->ResumeAll();
+ self->TransitionFromSuspendedToRunnable();
+ }
+
+ void UndeoptimizeEverything(Thread* self, const char* key, bool disable_deoptimization)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Runtime* runtime = Runtime::Current();
+ instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
+ self->TransitionFromRunnableToSuspended(kSuspended);
+ runtime->GetThreadList()->SuspendAll("Full undeoptimization");
+ instrumentation->UndeoptimizeEverything(key);
+ if (disable_deoptimization) {
+ instrumentation->DisableDeoptimization(key);
+ }
+ runtime->GetThreadList()->ResumeAll();
+ self->TransitionFromSuspendedToRunnable();
+ }
+
+ void EnableMethodTracing(Thread* self, const char* key, bool needs_interpreter)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Runtime* runtime = Runtime::Current();
+ instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
+ self->TransitionFromRunnableToSuspended(kSuspended);
+ runtime->GetThreadList()->SuspendAll("EnableMethodTracing");
+ instrumentation->EnableMethodTracing(key, needs_interpreter);
+ runtime->GetThreadList()->ResumeAll();
+ self->TransitionFromSuspendedToRunnable();
+ }
+
+ void DisableMethodTracing(Thread* self, const char* key)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Runtime* runtime = Runtime::Current();
+ instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
+ self->TransitionFromRunnableToSuspended(kSuspended);
+ runtime->GetThreadList()->SuspendAll("EnableMethodTracing");
+ instrumentation->DisableMethodTracing(key);
+ runtime->GetThreadList()->ResumeAll();
+ self->TransitionFromSuspendedToRunnable();
+ }
+
+ private:
+ static bool HasEventListener(const instrumentation::Instrumentation* instr, uint32_t event_type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ switch (event_type) {
+ case instrumentation::Instrumentation::kMethodEntered:
+ return instr->HasMethodEntryListeners();
+ case instrumentation::Instrumentation::kMethodExited:
+ return instr->HasMethodExitListeners();
+ case instrumentation::Instrumentation::kMethodUnwind:
+ return instr->HasMethodUnwindListeners();
+ case instrumentation::Instrumentation::kDexPcMoved:
+ return instr->HasDexPcListeners();
+ case instrumentation::Instrumentation::kFieldRead:
+ return instr->HasFieldReadListeners();
+ case instrumentation::Instrumentation::kFieldWritten:
+ return instr->HasFieldWriteListeners();
+ case instrumentation::Instrumentation::kExceptionCaught:
+ return instr->HasExceptionCaughtListeners();
+ case instrumentation::Instrumentation::kBackwardBranch:
+ return instr->HasBackwardBranchListeners();
+ default:
+ LOG(FATAL) << "Unknown instrumentation event " << event_type;
+ UNREACHABLE();
+ }
+ }
+
+ static void ReportEvent(const instrumentation::Instrumentation* instr, uint32_t event_type,
+ Thread* self, mirror::ArtMethod* method, mirror::Object* obj,
+ uint32_t dex_pc)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ switch (event_type) {
+ case instrumentation::Instrumentation::kMethodEntered:
+ instr->MethodEnterEvent(self, obj, method, dex_pc);
+ break;
+ case instrumentation::Instrumentation::kMethodExited: {
+ JValue value;
+ instr->MethodExitEvent(self, obj, method, dex_pc, value);
+ break;
+ }
+ case instrumentation::Instrumentation::kMethodUnwind:
+ instr->MethodUnwindEvent(self, obj, method, dex_pc);
+ break;
+ case instrumentation::Instrumentation::kDexPcMoved:
+ instr->DexPcMovedEvent(self, obj, method, dex_pc);
+ break;
+ case instrumentation::Instrumentation::kFieldRead:
+ instr->FieldReadEvent(self, obj, method, dex_pc, nullptr);
+ break;
+ case instrumentation::Instrumentation::kFieldWritten: {
+ JValue value;
+ instr->FieldWriteEvent(self, obj, method, dex_pc, nullptr, value);
+ break;
+ }
+ case instrumentation::Instrumentation::kExceptionCaught: {
+ ThrowArithmeticExceptionDivideByZero();
+ mirror::Throwable* event_exception = self->GetException();
+ instr->ExceptionCaughtEvent(self, event_exception);
+ self->ClearException();
+ break;
+ }
+ case instrumentation::Instrumentation::kBackwardBranch:
+ instr->BackwardBranch(self, method, dex_pc);
+ break;
+ default:
+ LOG(FATAL) << "Unknown instrumentation event " << event_type;
+ UNREACHABLE();
+ }
+ }
+
+ static bool DidListenerReceiveEvent(const TestInstrumentationListener& listener,
+ uint32_t event_type) {
+ switch (event_type) {
+ case instrumentation::Instrumentation::kMethodEntered:
+ return listener.received_method_enter_event;
+ case instrumentation::Instrumentation::kMethodExited:
+ return listener.received_method_exit_event;
+ case instrumentation::Instrumentation::kMethodUnwind:
+ return listener.received_method_unwind_event;
+ case instrumentation::Instrumentation::kDexPcMoved:
+ return listener.received_dex_pc_moved_event;
+ case instrumentation::Instrumentation::kFieldRead:
+ return listener.received_field_read_event;
+ case instrumentation::Instrumentation::kFieldWritten:
+ return listener.received_field_written_event;
+ case instrumentation::Instrumentation::kExceptionCaught:
+ return listener.received_exception_caught_event;
+ case instrumentation::Instrumentation::kBackwardBranch:
+ return listener.received_backward_branch_event;
+ default:
+ LOG(FATAL) << "Unknown instrumentation event " << event_type;
+ UNREACHABLE();
+ }
+ }
+};
+
+TEST_F(InstrumentationTest, NoInstrumentation) {
+ ScopedObjectAccess soa(Thread::Current());
+ instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
+ ASSERT_NE(instr, nullptr);
+
+ EXPECT_FALSE(instr->AreExitStubsInstalled());
+ EXPECT_FALSE(instr->AreAllMethodsDeoptimized());
+ EXPECT_FALSE(instr->IsActive());
+ EXPECT_FALSE(instr->ShouldNotifyMethodEnterExitEvents());
+
+ // Check that the interpreter handler table is the default one.
+ EXPECT_EQ(instrumentation::kMainHandlerTable, instr->GetInterpreterHandlerTable());
+
+ // Check there is no registered listener.
+ EXPECT_FALSE(instr->HasDexPcListeners());
+ EXPECT_FALSE(instr->HasExceptionCaughtListeners());
+ EXPECT_FALSE(instr->HasFieldReadListeners());
+ EXPECT_FALSE(instr->HasFieldWriteListeners());
+ EXPECT_FALSE(instr->HasMethodEntryListeners());
+ EXPECT_FALSE(instr->HasMethodExitListeners());
+ EXPECT_FALSE(instr->IsActive());
+}
+
+// Test instrumentation listeners for each event.
+TEST_F(InstrumentationTest, MethodEntryEvent) {
+ TestEvent(instrumentation::Instrumentation::kMethodEntered);
+}
+
+TEST_F(InstrumentationTest, MethodExitEvent) {
+ TestEvent(instrumentation::Instrumentation::kMethodExited);
+}
+
+TEST_F(InstrumentationTest, MethodUnwindEvent) {
+ TestEvent(instrumentation::Instrumentation::kMethodUnwind);
+}
+
+TEST_F(InstrumentationTest, DexPcMovedEvent) {
+ TestEvent(instrumentation::Instrumentation::kDexPcMoved);
+}
+
+TEST_F(InstrumentationTest, FieldReadEvent) {
+ TestEvent(instrumentation::Instrumentation::kFieldRead);
+}
+
+TEST_F(InstrumentationTest, FieldWriteEvent) {
+ TestEvent(instrumentation::Instrumentation::kFieldWritten);
+}
+
+TEST_F(InstrumentationTest, ExceptionCaughtEvent) {
+ TestEvent(instrumentation::Instrumentation::kExceptionCaught);
+}
+
+TEST_F(InstrumentationTest, BackwardBranchEvent) {
+ TestEvent(instrumentation::Instrumentation::kBackwardBranch);
+}
+
+TEST_F(InstrumentationTest, DeoptimizeDirectMethod) {
+ ScopedObjectAccess soa(Thread::Current());
+ jobject class_loader = LoadDex("Instrumentation");
+ Runtime* const runtime = Runtime::Current();
+ instrumentation::Instrumentation* instr = runtime->GetInstrumentation();
+ ClassLinker* class_linker = runtime->GetClassLinker();
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::ClassLoader> loader(hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader)));
+ mirror::Class* klass = class_linker->FindClass(soa.Self(), "LInstrumentation;", loader);
+ ASSERT_TRUE(klass != nullptr);
+ Handle<mirror::ArtMethod> method_to_deoptimize(
+ hs.NewHandle(klass->FindDeclaredDirectMethod("instanceMethod", "()V")));
+ ASSERT_TRUE(method_to_deoptimize.Get() != nullptr);
+
+ EXPECT_FALSE(instr->AreAllMethodsDeoptimized());
+ EXPECT_FALSE(instr->IsDeoptimized(method_to_deoptimize.Get()));
+
+ DeoptimizeMethod(soa.Self(), method_to_deoptimize, true);
+
+ EXPECT_FALSE(instr->AreAllMethodsDeoptimized());
+ EXPECT_TRUE(instr->AreExitStubsInstalled());
+ EXPECT_TRUE(instr->IsDeoptimized(method_to_deoptimize.Get()));
+
+ constexpr const char* instrumentation_key = "DeoptimizeDirectMethod";
+ UndeoptimizeMethod(soa.Self(), method_to_deoptimize, instrumentation_key, true);
+
+ EXPECT_FALSE(instr->AreAllMethodsDeoptimized());
+ EXPECT_FALSE(instr->IsDeoptimized(method_to_deoptimize.Get()));
+}
+
+TEST_F(InstrumentationTest, FullDeoptimization) {
+ ScopedObjectAccess soa(Thread::Current());
+ Runtime* const runtime = Runtime::Current();
+ instrumentation::Instrumentation* instr = runtime->GetInstrumentation();
+ EXPECT_FALSE(instr->AreAllMethodsDeoptimized());
+
+ constexpr const char* instrumentation_key = "FullDeoptimization";
+ DeoptimizeEverything(soa.Self(), instrumentation_key, true);
+
+ EXPECT_TRUE(instr->AreAllMethodsDeoptimized());
+ EXPECT_TRUE(instr->AreExitStubsInstalled());
+
+ UndeoptimizeEverything(soa.Self(), instrumentation_key, true);
+
+ EXPECT_FALSE(instr->AreAllMethodsDeoptimized());
+}
+
+TEST_F(InstrumentationTest, MixedDeoptimization) {
+ ScopedObjectAccess soa(Thread::Current());
+ jobject class_loader = LoadDex("Instrumentation");
+ Runtime* const runtime = Runtime::Current();
+ instrumentation::Instrumentation* instr = runtime->GetInstrumentation();
+ ClassLinker* class_linker = runtime->GetClassLinker();
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::ClassLoader> loader(hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader)));
+ mirror::Class* klass = class_linker->FindClass(soa.Self(), "LInstrumentation;", loader);
+ ASSERT_TRUE(klass != nullptr);
+ Handle<mirror::ArtMethod> method_to_deoptimize(
+ hs.NewHandle(klass->FindDeclaredDirectMethod("instanceMethod", "()V")));
+ ASSERT_TRUE(method_to_deoptimize.Get() != nullptr);
+
+ EXPECT_FALSE(instr->AreAllMethodsDeoptimized());
+ EXPECT_FALSE(instr->IsDeoptimized(method_to_deoptimize.Get()));
+
+ DeoptimizeMethod(soa.Self(), method_to_deoptimize, true);
+ // Deoptimizing a method does not change the instrumentation level.
+ EXPECT_EQ(Instrumentation::InstrumentationLevel::kInstrumentNothing,
+ GetCurrentInstrumentationLevel());
+ EXPECT_FALSE(instr->AreAllMethodsDeoptimized());
+ EXPECT_TRUE(instr->AreExitStubsInstalled());
+ EXPECT_TRUE(instr->IsDeoptimized(method_to_deoptimize.Get()));
+
+ constexpr const char* instrumentation_key = "MixedDeoptimization";
+ DeoptimizeEverything(soa.Self(), instrumentation_key, false);
+ EXPECT_EQ(Instrumentation::InstrumentationLevel::kInstrumentWithInterpreter,
+ GetCurrentInstrumentationLevel());
+ EXPECT_TRUE(instr->AreAllMethodsDeoptimized());
+ EXPECT_TRUE(instr->AreExitStubsInstalled());
+ EXPECT_TRUE(instr->IsDeoptimized(method_to_deoptimize.Get()));
+
+ UndeoptimizeEverything(soa.Self(), instrumentation_key, false);
+ EXPECT_EQ(Instrumentation::InstrumentationLevel::kInstrumentNothing,
+ GetCurrentInstrumentationLevel());
+ EXPECT_FALSE(instr->AreAllMethodsDeoptimized());
+ EXPECT_TRUE(instr->AreExitStubsInstalled());
+ EXPECT_TRUE(instr->IsDeoptimized(method_to_deoptimize.Get()));
+
+ UndeoptimizeMethod(soa.Self(), method_to_deoptimize, instrumentation_key, true);
+ EXPECT_EQ(Instrumentation::InstrumentationLevel::kInstrumentNothing,
+ GetCurrentInstrumentationLevel());
+ EXPECT_FALSE(instr->AreAllMethodsDeoptimized());
+ EXPECT_FALSE(instr->IsDeoptimized(method_to_deoptimize.Get()));
+}
+
+TEST_F(InstrumentationTest, MethodTracing_Interpreter) {
+ ScopedObjectAccess soa(Thread::Current());
+ Runtime* const runtime = Runtime::Current();
+ instrumentation::Instrumentation* instr = runtime->GetInstrumentation();
+ EXPECT_FALSE(instr->AreAllMethodsDeoptimized());
+
+ constexpr const char* instrumentation_key = "MethodTracing";
+ EnableMethodTracing(soa.Self(), instrumentation_key, true);
+ EXPECT_EQ(Instrumentation::InstrumentationLevel::kInstrumentWithInterpreter,
+ GetCurrentInstrumentationLevel());
+ EXPECT_TRUE(instr->AreAllMethodsDeoptimized());
+ EXPECT_TRUE(instr->AreExitStubsInstalled());
+
+ DisableMethodTracing(soa.Self(), instrumentation_key);
+ EXPECT_EQ(Instrumentation::InstrumentationLevel::kInstrumentNothing,
+ GetCurrentInstrumentationLevel());
+ EXPECT_FALSE(instr->AreAllMethodsDeoptimized());
+}
+
+TEST_F(InstrumentationTest, MethodTracing_InstrumentationEntryExitStubs) {
+ ScopedObjectAccess soa(Thread::Current());
+ Runtime* const runtime = Runtime::Current();
+ instrumentation::Instrumentation* instr = runtime->GetInstrumentation();
+ EXPECT_FALSE(instr->AreAllMethodsDeoptimized());
+
+ constexpr const char* instrumentation_key = "MethodTracing";
+ EnableMethodTracing(soa.Self(), instrumentation_key, false);
+ EXPECT_EQ(Instrumentation::InstrumentationLevel::kInstrumentWithInstrumentationStubs,
+ GetCurrentInstrumentationLevel());
+ EXPECT_FALSE(instr->AreAllMethodsDeoptimized());
+ EXPECT_TRUE(instr->AreExitStubsInstalled());
+
+ DisableMethodTracing(soa.Self(), instrumentation_key);
+ EXPECT_EQ(Instrumentation::InstrumentationLevel::kInstrumentNothing,
+ GetCurrentInstrumentationLevel());
+ EXPECT_FALSE(instr->AreAllMethodsDeoptimized());
+}
+
+// We use a macro so failures are reported with the line number of the calling test.
+#define CHECK_INSTRUMENTATION(_level, _user_count) \
+ do { \
+ Instrumentation* const instr = Runtime::Current()->GetInstrumentation(); \
+ bool interpreter = \
+ (_level == Instrumentation::InstrumentationLevel::kInstrumentWithInterpreter); \
+ EXPECT_EQ(_level, GetCurrentInstrumentationLevel()); \
+ EXPECT_EQ(_user_count, GetInstrumentationUserCount()); \
+ if (instr->IsForcedInterpretOnly()) { \
+ EXPECT_TRUE(instr->InterpretOnly()); \
+ } else if (interpreter) { \
+ EXPECT_TRUE(instr->InterpretOnly()); \
+ } else { \
+ EXPECT_FALSE(instr->InterpretOnly()); \
+ } \
+ if (interpreter) { \
+ EXPECT_TRUE(instr->AreAllMethodsDeoptimized()); \
+ } else { \
+ EXPECT_FALSE(instr->AreAllMethodsDeoptimized()); \
+ } \
+ } while (false)
+
+TEST_F(InstrumentationTest, ConfigureStubs_Nothing) {
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentNothing, 0U);
+
+ // Check no-op.
+ CheckConfigureStubs(kClientOneKey, Instrumentation::InstrumentationLevel::kInstrumentNothing);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentNothing, 0U);
+}
+
+TEST_F(InstrumentationTest, ConfigureStubs_InstrumentationStubs) {
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentNothing, 0U);
+
+ // Check we can switch to instrumentation stubs.
+ CheckConfigureStubs(kClientOneKey,
+ Instrumentation::InstrumentationLevel::kInstrumentWithInstrumentationStubs);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentWithInstrumentationStubs,
+ 1U);
+
+ // Check we can disable instrumentation.
+ CheckConfigureStubs(kClientOneKey, Instrumentation::InstrumentationLevel::kInstrumentNothing);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentNothing, 0U);
+}
+
+TEST_F(InstrumentationTest, ConfigureStubs_Interpreter) {
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentNothing, 0U);
+
+ // Check we can switch to the interpreter.
+ CheckConfigureStubs(kClientOneKey,
+ Instrumentation::InstrumentationLevel::kInstrumentWithInterpreter);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentWithInterpreter, 1U);
+
+ // Check we can disable instrumentation.
+ CheckConfigureStubs(kClientOneKey, Instrumentation::InstrumentationLevel::kInstrumentNothing);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentNothing, 0U);
+}
+
+TEST_F(InstrumentationTest, ConfigureStubs_InstrumentationStubsToInterpreter) {
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentNothing, 0U);
+
+ // Configure stubs with instrumentation stubs.
+ CheckConfigureStubs(kClientOneKey,
+ Instrumentation::InstrumentationLevel::kInstrumentWithInstrumentationStubs);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentWithInstrumentationStubs,
+ 1U);
+
+ // Configure stubs with interpreter.
+ CheckConfigureStubs(kClientOneKey,
+ Instrumentation::InstrumentationLevel::kInstrumentWithInterpreter);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentWithInterpreter, 1U);
+
+ // Check we can disable instrumentation.
+ CheckConfigureStubs(kClientOneKey, Instrumentation::InstrumentationLevel::kInstrumentNothing);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentNothing, 0U);
+}
+
+TEST_F(InstrumentationTest, ConfigureStubs_InterpreterToInstrumentationStubs) {
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentNothing, 0U);
+
+ // Configure stubs with interpreter.
+ CheckConfigureStubs(kClientOneKey,
+ Instrumentation::InstrumentationLevel::kInstrumentWithInterpreter);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentWithInterpreter, 1U);
+
+ // Configure stubs with instrumentation stubs.
+ CheckConfigureStubs(kClientOneKey,
+ Instrumentation::InstrumentationLevel::kInstrumentWithInstrumentationStubs);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentWithInstrumentationStubs,
+ 1U);
+
+ // Check we can disable instrumentation.
+ CheckConfigureStubs(kClientOneKey, Instrumentation::InstrumentationLevel::kInstrumentNothing);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentNothing, 0U);
+}
+
+TEST_F(InstrumentationTest,
+ ConfigureStubs_InstrumentationStubsToInterpreterToInstrumentationStubs) {
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentNothing, 0U);
+
+ // Configure stubs with instrumentation stubs.
+ CheckConfigureStubs(kClientOneKey,
+ Instrumentation::InstrumentationLevel::kInstrumentWithInstrumentationStubs);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentWithInstrumentationStubs,
+ 1U);
+
+ // Configure stubs with interpreter.
+ CheckConfigureStubs(kClientOneKey,
+ Instrumentation::InstrumentationLevel::kInstrumentWithInterpreter);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentWithInterpreter, 1U);
+
+ // Configure stubs with instrumentation stubs again.
+ CheckConfigureStubs(kClientOneKey,
+ Instrumentation::InstrumentationLevel::kInstrumentWithInstrumentationStubs);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentWithInstrumentationStubs,
+ 1U);
+
+ // Check we can disable instrumentation.
+ CheckConfigureStubs(kClientOneKey, Instrumentation::InstrumentationLevel::kInstrumentNothing);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentNothing, 0U);
+}
+
+TEST_F(InstrumentationTest, MultiConfigureStubs_Nothing) {
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentNothing, 0U);
+
+ // Check kInstrumentNothing with two clients.
+ CheckConfigureStubs(kClientOneKey, Instrumentation::InstrumentationLevel::kInstrumentNothing);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentNothing, 0U);
+
+ CheckConfigureStubs(kClientTwoKey, Instrumentation::InstrumentationLevel::kInstrumentNothing);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentNothing, 0U);
+}
+
+TEST_F(InstrumentationTest, MultiConfigureStubs_InstrumentationStubs) {
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentNothing, 0U);
+
+ // Configure stubs with instrumentation stubs for 1st client.
+ CheckConfigureStubs(kClientOneKey,
+ Instrumentation::InstrumentationLevel::kInstrumentWithInstrumentationStubs);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentWithInstrumentationStubs,
+ 1U);
+
+ // Configure stubs with instrumentation stubs for 2nd client.
+ CheckConfigureStubs(kClientTwoKey,
+ Instrumentation::InstrumentationLevel::kInstrumentWithInstrumentationStubs);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentWithInstrumentationStubs,
+ 2U);
+
+ // 1st client requests instrumentation deactivation but 2nd client still needs
+ // instrumentation stubs.
+ CheckConfigureStubs(kClientOneKey, Instrumentation::InstrumentationLevel::kInstrumentNothing);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentWithInstrumentationStubs,
+ 1U);
+
+ // 2nd client requests instrumentation deactivation.
+ CheckConfigureStubs(kClientTwoKey, Instrumentation::InstrumentationLevel::kInstrumentNothing);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentNothing, 0U);
+}
+
+TEST_F(InstrumentationTest, MultiConfigureStubs_Interpreter) {
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentNothing, 0U);
+
+ // Configure stubs with interpreter for 1st client.
+ CheckConfigureStubs(kClientOneKey,
+ Instrumentation::InstrumentationLevel::kInstrumentWithInterpreter);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentWithInterpreter, 1U);
+
+ // Configure stubs with interpreter for 2nd client.
+ CheckConfigureStubs(kClientTwoKey,
+ Instrumentation::InstrumentationLevel::kInstrumentWithInterpreter);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentWithInterpreter, 2U);
+
+ // 1st client requests instrumentation deactivation but 2nd client still needs interpreter.
+ CheckConfigureStubs(kClientOneKey, Instrumentation::InstrumentationLevel::kInstrumentNothing);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentWithInterpreter, 1U);
+
+ // 2nd client requests instrumentation deactivation.
+ CheckConfigureStubs(kClientTwoKey, Instrumentation::InstrumentationLevel::kInstrumentNothing);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentNothing, 0U);
+}
+
+TEST_F(InstrumentationTest, MultiConfigureStubs_InstrumentationStubsThenInterpreter) {
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentNothing, 0U);
+
+ // Configure stubs with instrumentation stubs for 1st client.
+ CheckConfigureStubs(kClientOneKey,
+ Instrumentation::InstrumentationLevel::kInstrumentWithInstrumentationStubs);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentWithInstrumentationStubs,
+ 1U);
+
+ // Configure stubs with interpreter for 2nd client.
+ CheckConfigureStubs(kClientTwoKey,
+ Instrumentation::InstrumentationLevel::kInstrumentWithInterpreter);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentWithInterpreter, 2U);
+
+ // 1st client requests instrumentation deactivation but 2nd client still needs interpreter.
+ CheckConfigureStubs(kClientOneKey, Instrumentation::InstrumentationLevel::kInstrumentNothing);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentWithInterpreter, 1U);
+
+ // 2nd client requests instrumentation deactivation.
+ CheckConfigureStubs(kClientTwoKey, Instrumentation::InstrumentationLevel::kInstrumentNothing);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentNothing, 0U);
+}
+
+TEST_F(InstrumentationTest, MultiConfigureStubs_InterpreterThenInstrumentationStubs) {
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentNothing, 0U);
+
+ // Configure stubs with interpreter for 1st client.
+ CheckConfigureStubs(kClientOneKey,
+ Instrumentation::InstrumentationLevel::kInstrumentWithInterpreter);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentWithInterpreter, 1U);
+
+ // Configure stubs with instrumentation stubs for 2nd client.
+ CheckConfigureStubs(kClientTwoKey,
+ Instrumentation::InstrumentationLevel::kInstrumentWithInstrumentationStubs);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentWithInterpreter, 2U);
+
+ // 1st client requests instrumentation deactivation but 2nd client still needs
+ // instrumentation stubs.
+ CheckConfigureStubs(kClientOneKey, Instrumentation::InstrumentationLevel::kInstrumentNothing);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentWithInstrumentationStubs,
+ 1U);
+
+ // 2nd client requests instrumentation deactivation.
+ CheckConfigureStubs(kClientTwoKey, Instrumentation::InstrumentationLevel::kInstrumentNothing);
+ CHECK_INSTRUMENTATION(Instrumentation::InstrumentationLevel::kInstrumentNothing, 0U);
+}
+
+} // namespace instrumentation
+} // namespace art
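
Each test helper above repeats the same suspend/resume dance around an instrumentation change. A hypothetical RAII guard, not part of this change but built only from calls the tests already make, could factor it out:

class ScopedAllThreadsSuspension {
 public:
  ScopedAllThreadsSuspension(Thread* self, const char* cause) : self_(self) {
    self_->TransitionFromRunnableToSuspended(kSuspended);
    Runtime::Current()->GetThreadList()->SuspendAll(cause);
  }

  ~ScopedAllThreadsSuspension() {
    Runtime::Current()->GetThreadList()->ResumeAll();
    self_->TransitionFromSuspendedToRunnable();
  }

 private:
  Thread* const self_;
  DISALLOW_COPY_AND_ASSIGN(ScopedAllThreadsSuspension);
};

With it, the body of the EnableMethodTracing() test helper would reduce to:

  ScopedAllThreadsSuspension suspend(self, "EnableMethodTracing");
  Runtime::Current()->GetInstrumentation()->EnableMethodTracing(key, needs_interpreter);
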
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 1f1f9e8..a85d10f 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -194,7 +194,7 @@
uint32_t string_idx = dex_file->GetIndexForStringId(*string_id);
// GetResolvedString() contains a RB.
mirror::String* image_string = dex_cache->GetResolvedString(string_idx);
- if (image_string != NULL) {
+ if (image_string != nullptr) {
return image_string;
}
}
@@ -236,11 +236,6 @@
if (strong != nullptr) {
return strong;
}
- // Check the image for a match.
- mirror::String* image = LookupStringFromImage(s);
- if (image != nullptr) {
- return is_strong ? InsertStrong(image) : InsertWeak(image);
- }
// There is no match in the strong table, check the weak table.
mirror::String* weak = LookupWeak(s);
if (weak != nullptr) {
@@ -251,6 +246,11 @@
}
return weak;
}
+ // Check the image for a match.
+ mirror::String* image = LookupStringFromImage(s);
+ if (image != nullptr) {
+ return is_strong ? InsertStrong(image) : InsertWeak(image);
+ }
// No match in the strong table or the weak table. Insert into the strong / weak table.
return is_strong ? InsertStrong(s) : InsertWeak(s);
}
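
The net effect of this reordering is that Insert() now prefers an existing weak intern over a boot-image string. A simplified standalone sketch of the resulting lookup order, with plain std:: containers standing in for ART's tables (the real promotion and read-barrier details differ):

#include <string>
#include <unordered_set>

struct SimpleInternTable {
  std::unordered_set<std::string> strong_, weak_;

  const std::string* LookupImage(const std::string& s) {
    (void)s;
    return nullptr;  // stand-in: this sketch has no boot image to consult
  }

  const std::string& Insert(const std::string& s, bool is_strong) {
    auto st = strong_.find(s);
    if (st != strong_.end()) {
      return *st;  // 1. already strongly interned
    }
    auto wk = weak_.find(s);
    if (wk != weak_.end()) {  // 2. weak table, now checked before the image
      if (!is_strong) {
        return *wk;
      }
      const std::string& promoted = *strong_.insert(*wk).first;  // promote
      weak_.erase(wk);
      return promoted;
    }
    const std::string* image = LookupImage(s);  // 3. boot image
    if (image != nullptr) {
      return *(is_strong ? strong_ : weak_).insert(*image).first;
    }
    return *(is_strong ? strong_ : weak_).insert(s).first;  // 4. fresh insert
  }
};
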
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 200a764..1e5d3c2 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -150,7 +150,7 @@
UnorderedSet post_zygote_table_;
};
- // Insert if non null, otherwise return nullptr.
+  // Insert if non-null, otherwise return null.
mirror::String* Insert(mirror::String* s, bool is_strong)
LOCKS_EXCLUDED(Locks::intern_table_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc
index d462e14..194d0af 100644
--- a/runtime/intern_table_test.cc
+++ b/runtime/intern_table_test.cc
@@ -38,8 +38,8 @@
EXPECT_TRUE(foo_1->Equals("foo"));
EXPECT_TRUE(foo_2->Equals("foo"));
EXPECT_TRUE(foo_3->Equals("foo"));
- EXPECT_TRUE(foo_1.Get() != NULL);
- EXPECT_TRUE(foo_2.Get() != NULL);
+ EXPECT_TRUE(foo_1.Get() != nullptr);
+ EXPECT_TRUE(foo_2.Get() != nullptr);
EXPECT_EQ(foo_1.Get(), foo_2.Get());
EXPECT_NE(foo_1.Get(), bar.Get());
EXPECT_NE(foo_2.Get(), bar.Get());
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 4801124..26860e7 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -78,7 +78,8 @@
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg0(soa.Env(),
- soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[0])));
+ soa.AddLocalReference<jobject>(
+ reinterpret_cast<Object*>(args[0])));
jobject jresult;
{
ScopedThreadStateChange tsc(self, kNative);
@@ -99,12 +100,14 @@
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg0(soa.Env(),
- soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[0])));
+ soa.AddLocalReference<jobject>(
+ reinterpret_cast<Object*>(args[0])));
ScopedThreadStateChange tsc(self, kNative);
result->SetI(fn(soa.Env(), klass.get(), arg0.get(), args[1]));
} else if (shorty == "SIZ") {
typedef jshort (fntype)(JNIEnv*, jclass, jint, jboolean);
- fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetEntryPointFromJni()));
+ fntype* const fn =
+ reinterpret_cast<fntype*>(const_cast<void*>(method->GetEntryPointFromJni()));
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
@@ -122,9 +125,11 @@
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg0(soa.Env(),
- soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[0])));
+ soa.AddLocalReference<jobject>(
+ reinterpret_cast<Object*>(args[0])));
ScopedLocalRef<jobject> arg1(soa.Env(),
- soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[1])));
+ soa.AddLocalReference<jobject>(
+ reinterpret_cast<Object*>(args[1])));
ScopedThreadStateChange tsc(self, kNative);
result->SetZ(fn(soa.Env(), klass.get(), arg0.get(), arg1.get()));
} else if (shorty == "ZILL") {
@@ -133,9 +138,11 @@
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg1(soa.Env(),
- soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[1])));
+ soa.AddLocalReference<jobject>(
+ reinterpret_cast<Object*>(args[1])));
ScopedLocalRef<jobject> arg2(soa.Env(),
- soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[2])));
+ soa.AddLocalReference<jobject>(
+ reinterpret_cast<Object*>(args[2])));
ScopedThreadStateChange tsc(self, kNative);
result->SetZ(fn(soa.Env(), klass.get(), args[0], arg1.get(), arg2.get()));
} else if (shorty == "VILII") {
@@ -144,7 +151,8 @@
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg1(soa.Env(),
- soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[1])));
+ soa.AddLocalReference<jobject>(
+ reinterpret_cast<Object*>(args[1])));
ScopedThreadStateChange tsc(self, kNative);
fn(soa.Env(), klass.get(), args[0], arg1.get(), args[2], args[3]);
} else if (shorty == "VLILII") {
@@ -153,9 +161,11 @@
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedLocalRef<jobject> arg0(soa.Env(),
- soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[0])));
+ soa.AddLocalReference<jobject>(
+ reinterpret_cast<Object*>(args[0])));
ScopedLocalRef<jobject> arg2(soa.Env(),
- soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[2])));
+ soa.AddLocalReference<jobject>(
+ reinterpret_cast<Object*>(args[2])));
ScopedThreadStateChange tsc(self, kNative);
fn(soa.Env(), klass.get(), arg0.get(), args[1], arg2.get(), args[3], args[4]);
} else {
@@ -187,7 +197,8 @@
ScopedLocalRef<jobject> rcvr(soa.Env(),
soa.AddLocalReference<jobject>(receiver));
ScopedLocalRef<jobject> arg0(soa.Env(),
- soa.AddLocalReference<jobject>(reinterpret_cast<Object*>(args[0])));
+ soa.AddLocalReference<jobject>(
+ reinterpret_cast<Object*>(args[0])));
jobject jresult;
{
ScopedThreadStateChange tsc(self, kNative);
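
All of the rewrapped branches above follow one pattern: the shorty string (return type followed by argument types, so "SIZ" means short(int, boolean)) selects a function-pointer type, the raw JNI entry point is cast to it, and the call is made with the marshalled arguments. A self-contained sketch of that dispatch shape — `RawFn` and `CallByShorty` are illustrative names, not ART's:

    #include <string>

    using RawFn = void (*)();  // type-erased entry point, as stored by the runtime

    int CallByShorty(const std::string& shorty, RawFn raw, int a0, int a1) {
      if (shorty == "III") {
        // Cast back to the true signature before calling; calling through the
        // wrong type would be undefined behavior, so the shorty must be exact.
        using Fn = int (*)(int, int);
        return reinterpret_cast<Fn>(raw)(a0, a1);
      }
      // ...one branch per supported shorty, as in the interpreter above.
      return 0;
    }
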
@@ -302,7 +313,7 @@
const DexFile::CodeItem* code_item = method->GetCodeItem();
uint16_t num_regs;
uint16_t num_ins;
- if (code_item != NULL) {
+ if (code_item != nullptr) {
num_regs = code_item->registers_size_;
num_ins = code_item->ins_size_;
} else if (method->IsAbstract()) {
@@ -325,7 +336,7 @@
size_t cur_reg = num_regs - num_ins;
if (!method->IsStatic()) {
- CHECK(receiver != NULL);
+ CHECK(receiver != nullptr);
shadow_frame->SetVRegReference(cur_reg, receiver);
++cur_reg;
}
@@ -365,7 +376,7 @@
}
if (LIKELY(!method->IsNative())) {
JValue r = Execute(self, code_item, *shadow_frame, JValue());
- if (result != NULL) {
+ if (result != nullptr) {
*result = r;
}
} else {
@@ -375,7 +386,7 @@
// references pointers due to moving GC.
args = shadow_frame->GetVRegArgs(method->IsStatic() ? 0 : 1);
if (!Runtime::Current()->IsStarted()) {
- UnstartedRuntimeJni(self, method, receiver, args, result);
+ UnstartedRuntime::Jni(self, method, receiver, args, result);
} else {
InterpreterJni(self, method, shorty, receiver, args, result);
}
@@ -386,8 +397,9 @@
void EnterInterpreterFromDeoptimize(Thread* self, ShadowFrame* shadow_frame, JValue* ret_val)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
JValue value;
- value.SetJ(ret_val->GetJ()); // Set value to last known result in case the shadow frame chain is empty.
- while (shadow_frame != NULL) {
+ // Set value to last known result in case the shadow frame chain is empty.
+ value.SetJ(ret_val->GetJ());
+ while (shadow_frame != nullptr) {
self->SetTopOfShadowStack(shadow_frame);
const DexFile::CodeItem* code_item = shadow_frame->GetMethod()->GetCodeItem();
const uint32_t dex_pc = shadow_frame->GetDexPC();
@@ -411,7 +423,7 @@
}
ShadowFrame* old_frame = shadow_frame;
shadow_frame = shadow_frame->GetLink();
- delete old_frame;
+ ShadowFrame::DeleteDeoptimizedFrame(old_frame);
}
ret_val->SetJ(value.GetJ());
}
@@ -462,7 +474,7 @@
CHECK(!Runtime::Current()->IsStarted());
Object* receiver = is_static ? nullptr : shadow_frame->GetVRegReference(0);
uint32_t* args = shadow_frame->GetVRegArgs(is_static ? 0 : 1);
- UnstartedRuntimeJni(self, shadow_frame->GetMethod(), receiver, args, result);
+ UnstartedRuntime::Jni(self, shadow_frame->GetMethod(), receiver, args, result);
}
self->PopShadowFrame();
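
The change from `delete old_frame` to `ShadowFrame::DeleteDeoptimizedFrame(old_frame)` matters because deoptimized frames need not come from plain `new`, so their release is routed through a dedicated hook. A stand-alone sketch of the unwind loop's shape — `Frame` and `UnwindDeoptimizedChain` are invented stand-ins:

    #include <cstdint>

    struct Frame {
      Frame* link = nullptr;   // caller's frame
      int64_t result = 0;      // value produced when this frame finishes
      // Release hook: the real runtime may use a non-default allocation path.
      static void DeleteDeoptimizedFrame(Frame* f) { delete f; }
    };

    int64_t UnwindDeoptimizedChain(Frame* frame, int64_t last_result) {
      int64_t value = last_result;  // used when the chain is already empty
      while (frame != nullptr) {
        value = frame->result;      // the real code re-executes from the saved dex pc
        Frame* old_frame = frame;
        frame = frame->link;        // advance before freeing the node
        Frame::DeleteDeoptimizedFrame(old_frame);
      }
      return value;
    }
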
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 3ae611b..363c65a 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -21,6 +21,7 @@
#include "debugger.h"
#include "mirror/array-inl.h"
#include "unstarted_runtime.h"
+#include "verifier/method_verifier.h"
namespace art {
namespace interpreter {
@@ -485,16 +486,29 @@
template<bool is_range, bool do_assignability_check>
bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
const Instruction* inst, uint16_t inst_data, JValue* result) {
+ bool string_init = false;
+  // Replace calls to String.<init> with the equivalent StringFactory call.
+ if (called_method->GetDeclaringClass()->IsStringClass() && called_method->IsConstructor()) {
+ ScopedObjectAccessUnchecked soa(self);
+ jmethodID mid = soa.EncodeMethod(called_method);
+ called_method = soa.DecodeMethod(WellKnownClasses::StringInitToStringFactoryMethodID(mid));
+ string_init = true;
+ }
+
// Compute method information.
const DexFile::CodeItem* code_item = called_method->GetCodeItem();
const uint16_t num_ins = (is_range) ? inst->VRegA_3rc(inst_data) : inst->VRegA_35c(inst_data);
uint16_t num_regs;
- if (LIKELY(code_item != NULL)) {
+ if (LIKELY(code_item != nullptr)) {
num_regs = code_item->registers_size_;
- DCHECK_EQ(num_ins, code_item->ins_size_);
+ DCHECK_EQ(string_init ? num_ins - 1 : num_ins, code_item->ins_size_);
} else {
DCHECK(called_method->IsNative() || called_method->IsProxyMethod());
num_regs = num_ins;
+ if (string_init) {
+ // The new StringFactory call is static and has one fewer argument.
+ num_regs--;
+ }
}
// Allocate shadow frame on the stack.
@@ -504,7 +518,7 @@
memory));
// Initialize new shadow frame.
- const size_t first_dest_reg = num_regs - num_ins;
+ size_t first_dest_reg = num_regs - num_ins;
if (do_assignability_check) {
// Slow path.
// We might need to do class loading, which incurs a thread state change to kNative. So
@@ -536,6 +550,10 @@
new_shadow_frame->SetVRegReference(dest_reg, shadow_frame.GetVRegReference(receiver_reg));
++dest_reg;
++arg_offset;
+ } else if (string_init) {
+      // Skip the receiver for the new static StringFactory call.
+ ++dest_reg;
+ ++arg_offset;
}
for (uint32_t shorty_pos = 0; dest_reg < num_regs; ++shorty_pos, ++dest_reg, ++arg_offset) {
DCHECK_LT(shorty_pos + 1, shorty_len);
@@ -543,11 +561,11 @@
switch (shorty[shorty_pos + 1]) {
case 'L': {
Object* o = shadow_frame.GetVRegReference(src_reg);
- if (do_assignability_check && o != NULL) {
+ if (do_assignability_check && o != nullptr) {
Class* arg_type =
new_shadow_frame->GetMethod()->GetClassFromTypeIndex(
params->GetTypeItem(shorty_pos).type_idx_, true);
- if (arg_type == NULL) {
+ if (arg_type == nullptr) {
CHECK(self->IsExceptionPending());
return false;
}
@@ -583,7 +601,12 @@
} else {
// Fast path: no extra checks.
if (is_range) {
- const uint16_t first_src_reg = inst->VRegC_3rc();
+ uint16_t first_src_reg = inst->VRegC_3rc();
+ if (string_init) {
+        // Skip the receiver for the new static StringFactory call.
+ ++first_src_reg;
+ ++first_dest_reg;
+ }
for (size_t src_reg = first_src_reg, dest_reg = first_dest_reg; dest_reg < num_regs;
++dest_reg, ++src_reg) {
AssignRegister(new_shadow_frame, shadow_frame, dest_reg, src_reg);
@@ -592,12 +615,19 @@
DCHECK_LE(num_ins, 5U);
uint16_t regList = inst->Fetch16(2);
uint16_t count = num_ins;
+ size_t arg_index = 0;
if (count == 5) {
AssignRegister(new_shadow_frame, shadow_frame, first_dest_reg + 4U,
(inst_data >> 8) & 0x0f);
--count;
- }
- for (size_t arg_index = 0; arg_index < count; ++arg_index, regList >>= 4) {
+ }
+ if (string_init) {
+        // Skip the receiver for the new static StringFactory call.
+ regList >>= 4;
+ ++first_dest_reg;
+ --count;
+ }
+ for (; arg_index < count; ++arg_index, regList >>= 4) {
AssignRegister(new_shadow_frame, shadow_frame, first_dest_reg + arg_index, regList & 0x0f);
}
}
@@ -629,8 +659,40 @@
}
entry(self, code_item, new_shadow_frame, result);
} else {
- UnstartedRuntimeInvoke(self, code_item, new_shadow_frame, result, first_dest_reg);
+ UnstartedRuntime::Invoke(self, code_item, new_shadow_frame, result, first_dest_reg);
}
+
+ if (string_init && !self->IsExceptionPending()) {
+ // Set the new string result of the StringFactory.
+ uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
+ shadow_frame.SetVRegReference(vregC, result->GetL());
+    // Overwrite every potential copy of the original new-instance-of-String result with the
+    // new StringFactory result. Use the verifier to find this set of registers.
+ mirror::ArtMethod* method = shadow_frame.GetMethod();
+ MethodReference method_ref = method->ToMethodReference();
+ SafeMap<uint32_t, std::set<uint32_t>> string_init_map;
+ SafeMap<uint32_t, std::set<uint32_t>>* string_init_map_ptr;
+ MethodRefToStringInitRegMap& method_to_string_init_map = Runtime::Current()->GetStringInitMap();
+ auto it = method_to_string_init_map.find(method_ref);
+ if (it == method_to_string_init_map.end()) {
+      string_init_map = verifier::MethodVerifier::FindStringInitMap(method);
+ method_to_string_init_map.Overwrite(method_ref, string_init_map);
+ string_init_map_ptr = &string_init_map;
+ } else {
+ string_init_map_ptr = &it->second;
+ }
+    if (!string_init_map_ptr->empty()) {
+ uint32_t dex_pc = shadow_frame.GetDexPC();
+ auto map_it = string_init_map_ptr->find(dex_pc);
+ if (map_it != string_init_map_ptr->end()) {
+ const std::set<uint32_t>& reg_set = map_it->second;
+ for (auto set_it = reg_set.begin(); set_it != reg_set.end(); ++set_it) {
+ shadow_frame.SetVRegReference(*set_it, result->GetL());
+ }
+ }
+ }
+ }
+
return !self->IsExceptionPending();
}
@@ -651,7 +713,7 @@
uint16_t type_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
Class* arrayClass = ResolveVerifyAndClinit(type_idx, shadow_frame.GetMethod(),
self, false, do_access_check);
- if (UNLIKELY(arrayClass == NULL)) {
+ if (UNLIKELY(arrayClass == nullptr)) {
DCHECK(self->IsExceptionPending());
return false;
}
@@ -671,7 +733,7 @@
Object* newArray = Array::Alloc<true>(self, arrayClass, length,
arrayClass->GetComponentSizeShift(),
Runtime::Current()->GetHeap()->GetCurrentAllocator());
- if (UNLIKELY(newArray == NULL)) {
+ if (UNLIKELY(newArray == nullptr)) {
DCHECK(self->IsExceptionPending());
return false;
}
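
The DoCall changes above implement the String.<init> rewrite in three steps: detect a constructor on the String class and swap in the matching static StringFactory method, shift the argument copy by one register so the dropped receiver is skipped, and, after the call, write the factory's result into every register the verifier found aliasing the uninitialized string. A self-contained sketch of that final fix-up step, using plain containers and invented names (`StringInitMap`, `PatchStringResult`):

    #include <cstdint>
    #include <map>
    #include <set>
    #include <vector>

    // dex pc of the invoke -> vregs that held copies of the uninitialized string
    using StringInitMap = std::map<uint32_t, std::set<uint32_t>>;

    void PatchStringResult(std::vector<const void*>& vregs, const StringInitMap& map,
                           uint32_t dex_pc, uint32_t vreg_c, const void* new_string) {
      vregs[vreg_c] = new_string;  // the register the invoke consumed directly
      auto it = map.find(dex_pc);
      if (it == map.end()) {
        return;  // the verifier found no other copies at this call site
      }
      for (uint32_t reg : it->second) {
        vregs[reg] = new_string;   // overwrite every tracked alias
      }
    }
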
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 0e0d56a..6acc72e 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -367,9 +367,9 @@
uint32_t raw_value = shadow_frame.GetVReg(i);
Object* ref_value = shadow_frame.GetVRegReference(i);
oss << StringPrintf(" vreg%u=0x%08X", i, raw_value);
- if (ref_value != NULL) {
+ if (ref_value != nullptr) {
if (ref_value->GetClass()->IsStringClass() &&
- ref_value->AsString()->GetCharArray() != NULL) {
+ ref_value->AsString()->GetValue() != nullptr) {
oss << "/java.lang.String \"" << ref_value->AsString()->ToModifiedUtf8() << "\"";
} else {
oss << "/" << PrettyTypeOf(ref_value);
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index cead26c..dd1f55e 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -55,7 +55,8 @@
} while (false)
#define UPDATE_HANDLER_TABLE() \
- currentHandlersTable = handlersTable[Runtime::Current()->GetInstrumentation()->GetInterpreterHandlerTable()]
+ currentHandlersTable = handlersTable[ \
+ Runtime::Current()->GetInstrumentation()->GetInterpreterHandlerTable()]
#define BACKWARD_BRANCH_INSTRUMENTATION(offset) \
do { \
@@ -155,7 +156,6 @@
const Instruction* inst = Instruction::At(code_item->insns_ + dex_pc);
uint16_t inst_data;
const void* const* currentHandlersTable;
- bool notified_method_entry_event = false;
UPDATE_HANDLER_TABLE();
if (LIKELY(dex_pc == 0)) { // We are entering the method as opposed to deoptimizing.
if (kIsDebugBuild) {
@@ -165,7 +165,6 @@
if (UNLIKELY(instrumentation->HasMethodEntryListeners())) {
instrumentation->MethodEnterEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), 0);
- notified_method_entry_event = true;
}
}
@@ -263,9 +262,6 @@
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), dex_pc,
result);
- } else if (UNLIKELY(instrumentation->HasDexPcListeners())) {
- instrumentation->DexPcMovedEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
- shadow_frame.GetMethod(), dex_pc);
}
return result;
}
@@ -280,9 +276,6 @@
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), dex_pc,
result);
- } else if (UNLIKELY(instrumentation->HasDexPcListeners())) {
- instrumentation->DexPcMovedEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
- shadow_frame.GetMethod(), dex_pc);
}
return result;
}
@@ -298,9 +291,6 @@
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), dex_pc,
result);
- } else if (UNLIKELY(instrumentation->HasDexPcListeners())) {
- instrumentation->DexPcMovedEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
- shadow_frame.GetMethod(), dex_pc);
}
return result;
}
@@ -315,9 +305,6 @@
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), dex_pc,
result);
- } else if (UNLIKELY(instrumentation->HasDexPcListeners())) {
- instrumentation->DexPcMovedEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
- shadow_frame.GetMethod(), dex_pc);
}
return result;
}
@@ -328,10 +315,10 @@
self->AllowThreadSuspension();
const uint8_t vreg_index = inst->VRegA_11x(inst_data);
Object* obj_result = shadow_frame.GetVRegReference(vreg_index);
- if (do_assignability_check && obj_result != NULL) {
+ if (do_assignability_check && obj_result != nullptr) {
Class* return_type = shadow_frame.GetMethod()->GetReturnType();
obj_result = shadow_frame.GetVRegReference(vreg_index);
- if (return_type == NULL) {
+ if (return_type == nullptr) {
// Return the pending exception.
HANDLE_PENDING_EXCEPTION();
}
@@ -351,9 +338,6 @@
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), dex_pc,
result);
- } else if (UNLIKELY(instrumentation->HasDexPcListeners())) {
- instrumentation->DexPcMovedEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
- shadow_frame.GetMethod(), dex_pc);
}
return result;
}
@@ -364,7 +348,7 @@
int32_t val = inst->VRegB_11n(inst_data);
shadow_frame.SetVReg(dst, val);
if (val == 0) {
- shadow_frame.SetVRegReference(dst, NULL);
+ shadow_frame.SetVRegReference(dst, nullptr);
}
ADVANCE(1);
}
@@ -375,7 +359,7 @@
int32_t val = inst->VRegB_21s();
shadow_frame.SetVReg(dst, val);
if (val == 0) {
- shadow_frame.SetVRegReference(dst, NULL);
+ shadow_frame.SetVRegReference(dst, nullptr);
}
ADVANCE(2);
}
@@ -386,7 +370,7 @@
int32_t val = inst->VRegB_31i();
shadow_frame.SetVReg(dst, val);
if (val == 0) {
- shadow_frame.SetVRegReference(dst, NULL);
+ shadow_frame.SetVRegReference(dst, nullptr);
}
ADVANCE(3);
}
@@ -397,7 +381,7 @@
int32_t val = static_cast<int32_t>(inst->VRegB_21h() << 16);
shadow_frame.SetVReg(dst, val);
if (val == 0) {
- shadow_frame.SetVRegReference(dst, NULL);
+ shadow_frame.SetVRegReference(dst, nullptr);
}
ADVANCE(2);
}
@@ -426,7 +410,7 @@
HANDLE_INSTRUCTION_START(CONST_STRING) {
String* s = ResolveString(self, shadow_frame, inst->VRegB_21c());
- if (UNLIKELY(s == NULL)) {
+ if (UNLIKELY(s == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), s);
@@ -437,7 +421,7 @@
HANDLE_INSTRUCTION_START(CONST_STRING_JUMBO) {
String* s = ResolveString(self, shadow_frame, inst->VRegB_31c());
- if (UNLIKELY(s == NULL)) {
+ if (UNLIKELY(s == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
shadow_frame.SetVRegReference(inst->VRegA_31c(inst_data), s);
@@ -449,7 +433,7 @@
HANDLE_INSTRUCTION_START(CONST_CLASS) {
Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
self, false, do_access_check);
- if (UNLIKELY(c == NULL)) {
+ if (UNLIKELY(c == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), c);
@@ -460,7 +444,7 @@
HANDLE_INSTRUCTION_START(MONITOR_ENTER) {
Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
- if (UNLIKELY(obj == NULL)) {
+ if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -472,7 +456,7 @@
HANDLE_INSTRUCTION_START(MONITOR_EXIT) {
Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
- if (UNLIKELY(obj == NULL)) {
+ if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -485,11 +469,11 @@
HANDLE_INSTRUCTION_START(CHECK_CAST) {
Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
self, false, do_access_check);
- if (UNLIKELY(c == NULL)) {
+ if (UNLIKELY(c == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
Object* obj = shadow_frame.GetVRegReference(inst->VRegA_21c(inst_data));
- if (UNLIKELY(obj != NULL && !obj->InstanceOf(c))) {
+ if (UNLIKELY(obj != nullptr && !obj->InstanceOf(c))) {
ThrowClassCastException(c, obj->GetClass());
HANDLE_PENDING_EXCEPTION();
} else {
@@ -502,11 +486,11 @@
HANDLE_INSTRUCTION_START(INSTANCE_OF) {
Class* c = ResolveVerifyAndClinit(inst->VRegC_22c(), shadow_frame.GetMethod(),
self, false, do_access_check);
- if (UNLIKELY(c == NULL)) {
+ if (UNLIKELY(c == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
- shadow_frame.SetVReg(inst->VRegA_22c(inst_data), (obj != NULL && obj->InstanceOf(c)) ? 1 : 0);
+ shadow_frame.SetVReg(inst->VRegA_22c(inst_data), (obj != nullptr && obj->InstanceOf(c)) ? 1 : 0);
ADVANCE(2);
}
}
@@ -514,7 +498,7 @@
HANDLE_INSTRUCTION_START(ARRAY_LENGTH) {
Object* array = shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data));
- if (UNLIKELY(array == NULL)) {
+ if (UNLIKELY(array == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -525,11 +509,21 @@
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(NEW_INSTANCE) {
- Runtime* runtime = Runtime::Current();
- Object* obj = AllocObjectFromCode<do_access_check, true>(
- inst->VRegB_21c(), shadow_frame.GetMethod(), self,
- runtime->GetHeap()->GetCurrentAllocator());
- if (UNLIKELY(obj == NULL)) {
+ Object* obj = nullptr;
+ Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
+ self, false, do_access_check);
+ if (LIKELY(c != nullptr)) {
+ if (UNLIKELY(c->IsStringClass())) {
+ gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
+ mirror::SetStringCountVisitor visitor(0);
+ obj = String::Alloc<true>(self, 0, allocator_type, visitor);
+ } else {
+ obj = AllocObjectFromCode<do_access_check, true>(
+ inst->VRegB_21c(), shadow_frame.GetMethod(), self,
+ Runtime::Current()->GetHeap()->GetCurrentAllocator());
+ }
+ }
+ if (UNLIKELY(obj == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
obj->GetClass()->AssertInitializedOrInitializingInThread(self);
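
The NEW_INSTANCE special case above exists because String's payload now lives inline after the object header, so its size is fixed at allocation time; the interpreter therefore allocates a zero-length string (the `SetStringCountVisitor(0)`) and lets the rewritten <init> call supply the real contents. A self-contained analogue of that layout constraint, with invented names:

    #include <cstddef>
    #include <cstdlib>
    #include <new>

    // Inline-payload string: the character data follows the header, so the
    // total size must be known when the object is created.
    struct InlineString {
      size_t count;   // number of chars stored inline
      char data[1];   // payload grows with the allocation
    };

    InlineString* AllocInlineString(size_t count) {
      // Header plus payload plus a terminator; caller releases with std::free.
      void* mem = std::malloc(offsetof(InlineString, data) + count + 1);
      return new (mem) InlineString{count, {0}};
    }
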
@@ -551,7 +545,7 @@
Object* obj = AllocArrayFromCode<do_access_check, true>(
inst->VRegC_22c(), length, shadow_frame.GetMethod(), self,
Runtime::Current()->GetHeap()->GetCurrentAllocator());
- if (UNLIKELY(obj == NULL)) {
+ if (UNLIKELY(obj == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
shadow_frame.SetVRegReference(inst->VRegA_22c(inst_data), obj);
@@ -591,7 +585,7 @@
HANDLE_INSTRUCTION_START(THROW) {
Object* exception = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
- if (UNLIKELY(exception == NULL)) {
+ if (UNLIKELY(exception == nullptr)) {
ThrowNullPointerException("throw with null exception");
} else if (do_assignability_check && !exception->GetClass()->IsThrowableClass()) {
// This should never happen.
@@ -778,7 +772,8 @@
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IF_NE) {
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) != shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) !=
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
BACKWARD_BRANCH_INSTRUMENTATION(offset);
@@ -795,7 +790,8 @@
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IF_LT) {
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) < shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
BACKWARD_BRANCH_INSTRUMENTATION(offset);
@@ -812,7 +808,8 @@
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IF_GE) {
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >=
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
BACKWARD_BRANCH_INSTRUMENTATION(offset);
@@ -829,7 +826,8 @@
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IF_GT) {
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) > shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
BACKWARD_BRANCH_INSTRUMENTATION(offset);
@@ -846,7 +844,8 @@
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IF_LE) {
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <=
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
BACKWARD_BRANCH_INSTRUMENTATION(offset);
@@ -966,7 +965,7 @@
HANDLE_INSTRUCTION_START(AGET_BOOLEAN) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -984,7 +983,7 @@
HANDLE_INSTRUCTION_START(AGET_BYTE) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1002,7 +1001,7 @@
HANDLE_INSTRUCTION_START(AGET_CHAR) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1020,7 +1019,7 @@
HANDLE_INSTRUCTION_START(AGET_SHORT) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1038,7 +1037,7 @@
HANDLE_INSTRUCTION_START(AGET) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1056,7 +1055,7 @@
HANDLE_INSTRUCTION_START(AGET_WIDE) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1074,7 +1073,7 @@
HANDLE_INSTRUCTION_START(AGET_OBJECT) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1092,7 +1091,7 @@
HANDLE_INSTRUCTION_START(APUT_BOOLEAN) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1111,7 +1110,7 @@
HANDLE_INSTRUCTION_START(APUT_BYTE) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1130,7 +1129,7 @@
HANDLE_INSTRUCTION_START(APUT_CHAR) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1149,7 +1148,7 @@
HANDLE_INSTRUCTION_START(APUT_SHORT) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1168,7 +1167,7 @@
HANDLE_INSTRUCTION_START(APUT) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1187,7 +1186,7 @@
HANDLE_INSTRUCTION_START(APUT_WIDE) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1206,7 +1205,7 @@
HANDLE_INSTRUCTION_START(APUT_OBJECT) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -1224,43 +1223,50 @@
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IGET_BOOLEAN) {
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimBoolean, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IGET_BYTE) {
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimByte, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IGET_CHAR) {
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimChar, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IGET_SHORT) {
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimShort, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IGET) {
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimInt, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IGET_WIDE) {
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimLong, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IGET_OBJECT) {
- bool success = DoFieldGet<InstanceObjectRead, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstanceObjectRead, Primitive::kPrimNot, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
@@ -1308,314 +1314,366 @@
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SGET_BOOLEAN) {
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimBoolean, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SGET_BYTE) {
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimByte, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SGET_CHAR) {
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimChar, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SGET_SHORT) {
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimShort, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SGET) {
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimInt, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SGET_WIDE) {
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimLong, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SGET_OBJECT) {
- bool success = DoFieldGet<StaticObjectRead, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticObjectRead, Primitive::kPrimNot, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_BOOLEAN) {
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimBoolean, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimBoolean, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_BYTE) {
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimByte, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimByte, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_CHAR) {
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimChar, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimChar, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_SHORT) {
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimShort, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimShort, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT) {
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimInt, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimInt, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_WIDE) {
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimLong, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimLong, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_OBJECT) {
- bool success = DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_QUICK) {
- bool success = DoIPutQuick<Primitive::kPrimInt, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimInt, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_BOOLEAN_QUICK) {
- bool success = DoIPutQuick<Primitive::kPrimBoolean, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimBoolean, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_BYTE_QUICK) {
- bool success = DoIPutQuick<Primitive::kPrimByte, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimByte, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_CHAR_QUICK) {
- bool success = DoIPutQuick<Primitive::kPrimChar, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimChar, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_SHORT_QUICK) {
- bool success = DoIPutQuick<Primitive::kPrimShort, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimShort, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_WIDE_QUICK) {
- bool success = DoIPutQuick<Primitive::kPrimLong, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimLong, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(IPUT_OBJECT_QUICK) {
- bool success = DoIPutQuick<Primitive::kPrimNot, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimNot, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SPUT_BOOLEAN) {
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimBoolean, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimBoolean, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SPUT_BYTE) {
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimByte, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimByte, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SPUT_CHAR) {
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimChar, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimChar, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SPUT_SHORT) {
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimShort, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimShort, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SPUT) {
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimInt, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimInt, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SPUT_WIDE) {
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimLong, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimLong, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(SPUT_OBJECT) {
- bool success = DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_VIRTUAL) {
- bool success = DoInvoke<kVirtual, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kVirtual, false, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_VIRTUAL_RANGE) {
- bool success = DoInvoke<kVirtual, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kVirtual, true, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_SUPER) {
- bool success = DoInvoke<kSuper, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kSuper, false, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_SUPER_RANGE) {
- bool success = DoInvoke<kSuper, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kSuper, true, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_DIRECT) {
- bool success = DoInvoke<kDirect, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kDirect, false, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_DIRECT_RANGE) {
- bool success = DoInvoke<kDirect, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kDirect, true, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_INTERFACE) {
- bool success = DoInvoke<kInterface, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kInterface, false, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_INTERFACE_RANGE) {
- bool success = DoInvoke<kInterface, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kInterface, true, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_STATIC) {
- bool success = DoInvoke<kStatic, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kStatic, false, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_STATIC_RANGE) {
- bool success = DoInvoke<kStatic, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kStatic, true, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_VIRTUAL_QUICK) {
- bool success = DoInvokeVirtualQuick<false>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvokeVirtualQuick<false>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INVOKE_VIRTUAL_RANGE_QUICK) {
- bool success = DoInvokeVirtualQuick<true>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvokeVirtualQuick<true>(
+ self, shadow_frame, inst, inst_data, &result_register);
UPDATE_HANDLER_TABLE();
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 3);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(NEG_INT)
- shadow_frame.SetVReg(inst->VRegA_12x(inst_data), -shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVReg(
+ inst->VRegA_12x(inst_data), -shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(NOT_INT)
- shadow_frame.SetVReg(inst->VRegA_12x(inst_data), ~shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVReg(
+ inst->VRegA_12x(inst_data), ~shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(NEG_LONG)
- shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegLong(
+ inst->VRegA_12x(inst_data), -shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(NOT_LONG)
- shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), ~shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegLong(
+ inst->VRegA_12x(inst_data), ~shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(NEG_FLOAT)
- shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegFloat(
+ inst->VRegA_12x(inst_data), -shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(NEG_DOUBLE)
- shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegDouble(
+ inst->VRegA_12x(inst_data), -shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INT_TO_LONG)
- shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegLong(
+ inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INT_TO_FLOAT)
- shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegFloat(
+ inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(INT_TO_DOUBLE)
- shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegDouble(
+ inst->VRegA_12x(inst_data), shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(LONG_TO_INT)
- shadow_frame.SetVReg(inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVReg(
+ inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(LONG_TO_FLOAT)
- shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegFloat(
+ inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(LONG_TO_DOUBLE)
- shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegDouble(
+ inst->VRegA_12x(inst_data), shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
@@ -1636,7 +1694,8 @@
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(FLOAT_TO_DOUBLE)
- shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegDouble(
+ inst->VRegA_12x(inst_data), shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
@@ -1657,7 +1716,8 @@
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(DOUBLE_TO_FLOAT)
- shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegFloat(
+ inst->VRegA_12x(inst_data), shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
ADVANCE(1);
HANDLE_INSTRUCTION_END();
@@ -2213,15 +2273,17 @@
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(DIV_INT_LIT16) {
- bool success = DoIntDivide(shadow_frame, inst->VRegA_22s(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22s(inst_data)), inst->VRegC_22s());
+ bool success = DoIntDivide(
+ shadow_frame, inst->VRegA_22s(inst_data), shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
+ inst->VRegC_22s());
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(REM_INT_LIT16) {
- bool success = DoIntRemainder(shadow_frame, inst->VRegA_22s(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22s(inst_data)), inst->VRegC_22s());
+ bool success = DoIntRemainder(
+ shadow_frame, inst->VRegA_22s(inst_data), shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
+ inst->VRegC_22s());
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
}
HANDLE_INSTRUCTION_END();
@@ -2431,26 +2493,16 @@
// Note: we do not use the kReturn instruction flag here (to test the instruction is a return). The
// compiler seems to not evaluate "(Instruction::FlagsOf(Instruction::code) & kReturn) != 0" to
// a constant condition that would remove the "if" statement so the test is free.
-#define INSTRUMENTATION_INSTRUCTION_HANDLER(o, code, n, f, r, i, a, v) \
- alt_op_##code: { \
- if (Instruction::code != Instruction::RETURN_VOID && \
- Instruction::code != Instruction::RETURN_VOID_NO_BARRIER && \
- Instruction::code != Instruction::RETURN && \
- Instruction::code != Instruction::RETURN_WIDE && \
- Instruction::code != Instruction::RETURN_OBJECT) { \
- if (LIKELY(!notified_method_entry_event)) { \
- Runtime* runtime = Runtime::Current(); \
- const instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation(); \
- if (UNLIKELY(instrumentation->HasDexPcListeners())) { \
- Object* this_object = shadow_frame.GetThisObject(code_item->ins_size_); \
- instrumentation->DexPcMovedEvent(self, this_object, shadow_frame.GetMethod(), dex_pc); \
- } \
- } else { \
- notified_method_entry_event = false; \
- } \
- } \
- UPDATE_HANDLER_TABLE(); \
- goto *handlersTable[instrumentation::kMainHandlerTable][Instruction::code]; \
+#define INSTRUMENTATION_INSTRUCTION_HANDLER(o, code, n, f, r, i, a, v) \
+ alt_op_##code: { \
+ Runtime* const runtime = Runtime::Current(); \
+ const instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation(); \
+ if (UNLIKELY(instrumentation->HasDexPcListeners())) { \
+ Object* this_object = shadow_frame.GetThisObject(code_item->ins_size_); \
+ instrumentation->DexPcMovedEvent(self, this_object, shadow_frame.GetMethod(), dex_pc); \
+ } \
+ UPDATE_HANDLER_TABLE(); \
+ goto *handlersTable[instrumentation::kMainHandlerTable][Instruction::code]; \
}
#include "dex_instruction_list.h"
DEX_INSTRUCTION_LIST(INSTRUMENTATION_INSTRUCTION_HANDLER)
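
With the suppression logic gone, the instrumentation ("alt") handler does exactly one thing before tailing into the main table: fire a dex-pc event if any listener is installed, even for return opcodes and for the first instruction after method entry. A small self-contained sketch of that wrapper shape — `Listeners` and `AltHandler` are invented plumbing:

    #include <cstdint>
    #include <functional>

    struct Listeners {
      std::function<void(uint32_t)> on_dex_pc;  // empty when nobody listens
    };

    template <typename Handler>
    void AltHandler(const Listeners& listeners, uint32_t dex_pc, Handler real) {
      if (listeners.on_dex_pc) {
        listeners.on_dex_pc(dex_pc);  // now unconditional: no entry-event suppression
      }
      real();  // jump to the main handler table entry for this opcode
    }

The matching PREAMBLE change in interpreter_switch_impl.cc below applies the same simplification to the switch-based interpreter, which is why the return opcodes there gain an explicit PREAMBLE() call and lose their DexPcMovedEvent fallback.
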
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index fe7ad77..0e3420f 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -47,10 +47,7 @@
// Code to run before each dex instruction.
#define PREAMBLE() \
do { \
- DCHECK(!inst->IsReturn()); \
- if (UNLIKELY(notified_method_entry_event)) { \
- notified_method_entry_event = false; \
- } else if (UNLIKELY(instrumentation->HasDexPcListeners())) { \
+ if (UNLIKELY(instrumentation->HasDexPcListeners())) { \
instrumentation->DexPcMovedEvent(self, shadow_frame.GetThisObject(code_item->ins_size_), \
shadow_frame.GetMethod(), dex_pc); \
} \
@@ -67,8 +64,7 @@
self->VerifyStack();
uint32_t dex_pc = shadow_frame.GetDexPC();
- bool notified_method_entry_event = false;
- const instrumentation::Instrumentation* const instrumentation = Runtime::Current()->GetInstrumentation();
+ const auto* const instrumentation = Runtime::Current()->GetInstrumentation();
if (LIKELY(dex_pc == 0)) { // We are entering the method as opposed to deoptimizing.
if (kIsDebugBuild) {
self->AssertNoPendingException();
@@ -76,7 +72,6 @@
if (UNLIKELY(instrumentation->HasMethodEntryListeners())) {
instrumentation->MethodEnterEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), 0);
- notified_method_entry_event = true;
}
}
const uint16_t* const insns = code_item->insns_;
@@ -171,19 +166,18 @@
break;
}
case Instruction::RETURN_VOID_NO_BARRIER: {
+ PREAMBLE();
JValue result;
self->AllowThreadSuspension();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), inst->GetDexPc(insns),
result);
- } else if (UNLIKELY(instrumentation->HasDexPcListeners())) {
- instrumentation->DexPcMovedEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
- shadow_frame.GetMethod(), dex_pc);
}
return result;
}
case Instruction::RETURN_VOID: {
+ PREAMBLE();
QuasiAtomic::ThreadFenceForConstructor();
JValue result;
self->AllowThreadSuspension();
@@ -191,13 +185,11 @@
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), inst->GetDexPc(insns),
result);
- } else if (UNLIKELY(instrumentation->HasDexPcListeners())) {
- instrumentation->DexPcMovedEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
- shadow_frame.GetMethod(), dex_pc);
}
return result;
}
case Instruction::RETURN: {
+ PREAMBLE();
JValue result;
result.SetJ(0);
result.SetI(shadow_frame.GetVReg(inst->VRegA_11x(inst_data)));
@@ -206,13 +198,11 @@
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), inst->GetDexPc(insns),
result);
- } else if (UNLIKELY(instrumentation->HasDexPcListeners())) {
- instrumentation->DexPcMovedEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
- shadow_frame.GetMethod(), dex_pc);
}
return result;
}
case Instruction::RETURN_WIDE: {
+ PREAMBLE();
JValue result;
result.SetJ(shadow_frame.GetVRegLong(inst->VRegA_11x(inst_data)));
self->AllowThreadSuspension();
@@ -220,22 +210,20 @@
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), inst->GetDexPc(insns),
result);
- } else if (UNLIKELY(instrumentation->HasDexPcListeners())) {
- instrumentation->DexPcMovedEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
- shadow_frame.GetMethod(), dex_pc);
}
return result;
}
case Instruction::RETURN_OBJECT: {
+ PREAMBLE();
JValue result;
self->AllowThreadSuspension();
const size_t ref_idx = inst->VRegA_11x(inst_data);
Object* obj_result = shadow_frame.GetVRegReference(ref_idx);
- if (do_assignability_check && obj_result != NULL) {
+ if (do_assignability_check && obj_result != nullptr) {
Class* return_type = shadow_frame.GetMethod()->GetReturnType();
// Re-load since it might have moved.
obj_result = shadow_frame.GetVRegReference(ref_idx);
- if (return_type == NULL) {
+ if (return_type == nullptr) {
// Return the pending exception.
HANDLE_PENDING_EXCEPTION();
}
@@ -254,9 +242,6 @@
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), inst->GetDexPc(insns),
result);
- } else if (UNLIKELY(instrumentation->HasDexPcListeners())) {
- instrumentation->DexPcMovedEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
- shadow_frame.GetMethod(), dex_pc);
}
return result;
}
@@ -266,7 +251,7 @@
int4_t val = inst->VRegB_11n(inst_data);
shadow_frame.SetVReg(dst, val);
if (val == 0) {
- shadow_frame.SetVRegReference(dst, NULL);
+ shadow_frame.SetVRegReference(dst, nullptr);
}
inst = inst->Next_1xx();
break;
@@ -277,7 +262,7 @@
int16_t val = inst->VRegB_21s();
shadow_frame.SetVReg(dst, val);
if (val == 0) {
- shadow_frame.SetVRegReference(dst, NULL);
+ shadow_frame.SetVRegReference(dst, nullptr);
}
inst = inst->Next_2xx();
break;
@@ -288,7 +273,7 @@
int32_t val = inst->VRegB_31i();
shadow_frame.SetVReg(dst, val);
if (val == 0) {
- shadow_frame.SetVRegReference(dst, NULL);
+ shadow_frame.SetVRegReference(dst, nullptr);
}
inst = inst->Next_3xx();
break;
@@ -299,7 +284,7 @@
int32_t val = static_cast<int32_t>(inst->VRegB_21h() << 16);
shadow_frame.SetVReg(dst, val);
if (val == 0) {
- shadow_frame.SetVRegReference(dst, NULL);
+ shadow_frame.SetVRegReference(dst, nullptr);
}
inst = inst->Next_2xx();
break;
@@ -328,7 +313,7 @@
case Instruction::CONST_STRING: {
PREAMBLE();
String* s = ResolveString(self, shadow_frame, inst->VRegB_21c());
- if (UNLIKELY(s == NULL)) {
+ if (UNLIKELY(s == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), s);
@@ -339,7 +324,7 @@
case Instruction::CONST_STRING_JUMBO: {
PREAMBLE();
String* s = ResolveString(self, shadow_frame, inst->VRegB_31c());
- if (UNLIKELY(s == NULL)) {
+ if (UNLIKELY(s == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
shadow_frame.SetVRegReference(inst->VRegA_31c(inst_data), s);
@@ -351,7 +336,7 @@
PREAMBLE();
Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
self, false, do_access_check);
- if (UNLIKELY(c == NULL)) {
+ if (UNLIKELY(c == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), c);
@@ -362,7 +347,7 @@
case Instruction::MONITOR_ENTER: {
PREAMBLE();
Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
- if (UNLIKELY(obj == NULL)) {
+ if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -374,7 +359,7 @@
case Instruction::MONITOR_EXIT: {
PREAMBLE();
Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
- if (UNLIKELY(obj == NULL)) {
+ if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -387,11 +372,11 @@
PREAMBLE();
Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
self, false, do_access_check);
- if (UNLIKELY(c == NULL)) {
+ if (UNLIKELY(c == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
Object* obj = shadow_frame.GetVRegReference(inst->VRegA_21c(inst_data));
- if (UNLIKELY(obj != NULL && !obj->InstanceOf(c))) {
+ if (UNLIKELY(obj != nullptr && !obj->InstanceOf(c))) {
ThrowClassCastException(c, obj->GetClass());
HANDLE_PENDING_EXCEPTION();
} else {
@@ -404,11 +389,12 @@
PREAMBLE();
Class* c = ResolveVerifyAndClinit(inst->VRegC_22c(), shadow_frame.GetMethod(),
self, false, do_access_check);
- if (UNLIKELY(c == NULL)) {
+ if (UNLIKELY(c == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
- shadow_frame.SetVReg(inst->VRegA_22c(inst_data), (obj != NULL && obj->InstanceOf(c)) ? 1 : 0);
+ shadow_frame.SetVReg(inst->VRegA_22c(inst_data),
+ (obj != nullptr && obj->InstanceOf(c)) ? 1 : 0);
inst = inst->Next_2xx();
}
break;
@@ -416,7 +402,7 @@
case Instruction::ARRAY_LENGTH: {
PREAMBLE();
Object* array = shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data));
- if (UNLIKELY(array == NULL)) {
+ if (UNLIKELY(array == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
@@ -427,11 +413,21 @@
}
case Instruction::NEW_INSTANCE: {
PREAMBLE();
- Runtime* runtime = Runtime::Current();
- Object* obj = AllocObjectFromCode<do_access_check, true>(
- inst->VRegB_21c(), shadow_frame.GetMethod(), self,
- runtime->GetHeap()->GetCurrentAllocator());
- if (UNLIKELY(obj == NULL)) {
+ Object* obj = nullptr;
+ Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame.GetMethod(),
+ self, false, do_access_check);
+ if (LIKELY(c != nullptr)) {
+ if (UNLIKELY(c->IsStringClass())) {
+ gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
+ mirror::SetStringCountVisitor visitor(0);
+ obj = String::Alloc<true>(self, 0, allocator_type, visitor);
+ } else {
+ obj = AllocObjectFromCode<do_access_check, true>(
+ inst->VRegB_21c(), shadow_frame.GetMethod(), self,
+ Runtime::Current()->GetHeap()->GetCurrentAllocator());
+ }
+ }
+ if (UNLIKELY(obj == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
obj->GetClass()->AssertInitializedOrInitializingInThread(self);
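
NEW_INSTANCE now resolves the class first and sends java.lang.String through String::Alloc with a SetStringCountVisitor, because the new String layout carries its character payload inline and cannot be allocated as a fixed-size object. A simplified sketch of the split, with toy allocation helpers standing in for ART's:

    #include <cstdio>

    struct ToyClass { bool is_string; };

    // Stand-ins for the two allocation paths (hypothetical helpers).
    void* AllocFixedSize(ToyClass* c) { std::puts("fixed-size object"); return c; }
    void* AllocStringWithCount(int count) {
      std::printf("string, inline payload for %d chars\n", count);
      static char storage[16];
      return storage;
    }

    void* ToyNewInstance(ToyClass* c) {
      if (c == nullptr) {
        return nullptr;  // resolution failed; caller sees the pending exception
      }
      if (c->is_string) {
        return AllocStringWithCount(0);  // empty string; chars filled in later
      }
      return AllocFixedSize(c);
    }

    int main() {
      ToyClass str{true}, plain{false};
      ToyNewInstance(&str);
      ToyNewInstance(&plain);
    }
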
@@ -454,7 +450,7 @@
Object* obj = AllocArrayFromCode<do_access_check, true>(
inst->VRegC_22c(), length, shadow_frame.GetMethod(), self,
Runtime::Current()->GetHeap()->GetCurrentAllocator());
- if (UNLIKELY(obj == NULL)) {
+ if (UNLIKELY(obj == nullptr)) {
HANDLE_PENDING_EXCEPTION();
} else {
shadow_frame.SetVRegReference(inst->VRegA_22c(inst_data), obj);
@@ -498,7 +494,7 @@
case Instruction::THROW: {
PREAMBLE();
Object* exception = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
- if (UNLIKELY(exception == NULL)) {
+ if (UNLIKELY(exception == nullptr)) {
ThrowNullPointerException("throw with null exception");
} else if (do_assignability_check && !exception->GetClass()->IsThrowableClass()) {
// This should never happen.
@@ -651,7 +647,8 @@
}
case Instruction::IF_EQ: {
PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) == shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) ==
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
self->AllowThreadSuspension();
@@ -664,7 +661,8 @@
}
case Instruction::IF_NE: {
PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) != shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) !=
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
self->AllowThreadSuspension();
@@ -677,7 +675,8 @@
}
case Instruction::IF_LT: {
PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) < shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
self->AllowThreadSuspension();
@@ -690,7 +689,8 @@
}
case Instruction::IF_GE: {
PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >=
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
self->AllowThreadSuspension();
@@ -703,7 +703,8 @@
}
case Instruction::IF_GT: {
PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) > shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
self->AllowThreadSuspension();
@@ -716,7 +717,8 @@
}
case Instruction::IF_LE: {
PREAMBLE();
- if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
+ if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <=
+ shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
if (IsBackwardBranch(offset)) {
self->AllowThreadSuspension();
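
Each IF_* opcode keeps the existing policy: only backward branches poll for suspension, which guarantees that even a tight loop reaches a suspend point every iteration. A minimal sketch of that policy with hypothetical helpers:

    #include <cstdint>
    #include <cstdio>

    bool IsBackwardBranch(int32_t offset) { return offset <= 0; }

    struct ToyThread {
      void AllowThreadSuspension() { std::puts("suspend check"); }
    };

    int32_t TakeBranch(ToyThread* self, int32_t pc, int32_t offset) {
      if (IsBackwardBranch(offset)) {
        self->AllowThreadSuspension();  // every loop iteration hits a suspend point
      }
      return pc + offset;
    }

    int main() {
      ToyThread t;
      int32_t pc = 10;
      pc = TakeBranch(&t, pc, -4);  // loop edge: polls
      pc = TakeBranch(&t, pc, 7);   // forward edge: no poll
      std::printf("pc=%d\n", pc);
    }
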
@@ -808,7 +810,7 @@
case Instruction::AGET_BOOLEAN: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -826,7 +828,7 @@
case Instruction::AGET_BYTE: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -844,7 +846,7 @@
case Instruction::AGET_CHAR: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -862,7 +864,7 @@
case Instruction::AGET_SHORT: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -880,7 +882,7 @@
case Instruction::AGET: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -898,7 +900,7 @@
case Instruction::AGET_WIDE: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -916,7 +918,7 @@
case Instruction::AGET_OBJECT: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -934,7 +936,7 @@
case Instruction::APUT_BOOLEAN: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -953,7 +955,7 @@
case Instruction::APUT_BYTE: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -972,7 +974,7 @@
case Instruction::APUT_CHAR: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -991,7 +993,7 @@
case Instruction::APUT_SHORT: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -1010,7 +1012,7 @@
case Instruction::APUT: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -1029,7 +1031,7 @@
case Instruction::APUT_WIDE: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -1048,7 +1050,7 @@
case Instruction::APUT_OBJECT: {
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
- if (UNLIKELY(a == NULL)) {
+ if (UNLIKELY(a == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
@@ -1066,43 +1068,50 @@
}
case Instruction::IGET_BOOLEAN: {
PREAMBLE();
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimBoolean, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IGET_BYTE: {
PREAMBLE();
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimByte, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IGET_CHAR: {
PREAMBLE();
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimChar, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IGET_SHORT: {
PREAMBLE();
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimShort, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IGET: {
PREAMBLE();
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimInt, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IGET_WIDE: {
PREAMBLE();
- bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimLong, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IGET_OBJECT: {
PREAMBLE();
- bool success = DoFieldGet<InstanceObjectRead, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<InstanceObjectRead, Primitive::kPrimNot, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
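
The IGET family differs only in the template arguments passed to DoFieldGet, so the reflow above is purely cosmetic. The dispatch idea, sketched with a hypothetical template rather than ART's actual signature:

    #include <cstdint>
    #include <cstdio>

    enum class PrimType { kBool, kInt, kLong };

    // Hypothetical stand-in for DoFieldGet: the primitive kind is a template
    // parameter, so every opcode gets its own specialized loader and no
    // per-instruction runtime switch on field type is needed.
    template <PrimType kType>
    bool ToyFieldGet(const void* field_addr, int64_t* out) {
      switch (kType) {  // resolved per instantiation at compile time
        case PrimType::kBool: *out = *static_cast<const bool*>(field_addr); break;
        case PrimType::kInt:  *out = *static_cast<const int32_t*>(field_addr); break;
        case PrimType::kLong: *out = *static_cast<const int64_t*>(field_addr); break;
      }
      return true;
    }

    int main() {
      int32_t field = 42;
      int64_t out = 0;
      ToyFieldGet<PrimType::kInt>(&field, &out);
      std::printf("%lld\n", static_cast<long long>(out));
    }
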
@@ -1150,272 +1159,318 @@
}
case Instruction::SGET_BOOLEAN: {
PREAMBLE();
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimBoolean, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SGET_BYTE: {
PREAMBLE();
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimByte, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SGET_CHAR: {
PREAMBLE();
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimChar, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SGET_SHORT: {
PREAMBLE();
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimShort, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SGET: {
PREAMBLE();
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimInt, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SGET_WIDE: {
PREAMBLE();
- bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimLong, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SGET_OBJECT: {
PREAMBLE();
- bool success = DoFieldGet<StaticObjectRead, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldGet<StaticObjectRead, Primitive::kPrimNot, do_access_check>(
+ self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_BOOLEAN: {
PREAMBLE();
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimBoolean, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimBoolean, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_BYTE: {
PREAMBLE();
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimByte, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimByte, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_CHAR: {
PREAMBLE();
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimChar, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimChar, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_SHORT: {
PREAMBLE();
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimShort, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimShort, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT: {
PREAMBLE();
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimInt, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimInt, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_WIDE: {
PREAMBLE();
- bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimLong, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimLong, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_OBJECT: {
PREAMBLE();
- bool success = DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_QUICK: {
PREAMBLE();
- bool success = DoIPutQuick<Primitive::kPrimInt, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimInt, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_BOOLEAN_QUICK: {
PREAMBLE();
- bool success = DoIPutQuick<Primitive::kPrimBoolean, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimBoolean, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_BYTE_QUICK: {
PREAMBLE();
- bool success = DoIPutQuick<Primitive::kPrimByte, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimByte, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_CHAR_QUICK: {
PREAMBLE();
- bool success = DoIPutQuick<Primitive::kPrimChar, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimChar, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_SHORT_QUICK: {
PREAMBLE();
- bool success = DoIPutQuick<Primitive::kPrimShort, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimShort, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_WIDE_QUICK: {
PREAMBLE();
- bool success = DoIPutQuick<Primitive::kPrimLong, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimLong, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::IPUT_OBJECT_QUICK: {
PREAMBLE();
- bool success = DoIPutQuick<Primitive::kPrimNot, transaction_active>(shadow_frame, inst, inst_data);
+ bool success = DoIPutQuick<Primitive::kPrimNot, transaction_active>(
+ shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SPUT_BOOLEAN: {
PREAMBLE();
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimBoolean, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimBoolean, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SPUT_BYTE: {
PREAMBLE();
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimByte, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimByte, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SPUT_CHAR: {
PREAMBLE();
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimChar, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimChar, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SPUT_SHORT: {
PREAMBLE();
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimShort, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimShort, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SPUT: {
PREAMBLE();
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimInt, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimInt, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SPUT_WIDE: {
PREAMBLE();
- bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimLong, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimLong, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::SPUT_OBJECT: {
PREAMBLE();
- bool success = DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, do_access_check, transaction_active>(self, shadow_frame, inst, inst_data);
+ bool success = DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, do_access_check,
+ transaction_active>(self, shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::INVOKE_VIRTUAL: {
PREAMBLE();
- bool success = DoInvoke<kVirtual, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kVirtual, false, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_VIRTUAL_RANGE: {
PREAMBLE();
- bool success = DoInvoke<kVirtual, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kVirtual, true, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_SUPER: {
PREAMBLE();
- bool success = DoInvoke<kSuper, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kSuper, false, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_SUPER_RANGE: {
PREAMBLE();
- bool success = DoInvoke<kSuper, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kSuper, true, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_DIRECT: {
PREAMBLE();
- bool success = DoInvoke<kDirect, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kDirect, false, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_DIRECT_RANGE: {
PREAMBLE();
- bool success = DoInvoke<kDirect, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kDirect, true, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_INTERFACE: {
PREAMBLE();
- bool success = DoInvoke<kInterface, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kInterface, false, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_INTERFACE_RANGE: {
PREAMBLE();
- bool success = DoInvoke<kInterface, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kInterface, true, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_STATIC: {
PREAMBLE();
- bool success = DoInvoke<kStatic, false, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kStatic, false, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_STATIC_RANGE: {
PREAMBLE();
- bool success = DoInvoke<kStatic, true, do_access_check>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvoke<kStatic, true, do_access_check>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_VIRTUAL_QUICK: {
PREAMBLE();
- bool success = DoInvokeVirtualQuick<false>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvokeVirtualQuick<false>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
PREAMBLE();
- bool success = DoInvokeVirtualQuick<true>(self, shadow_frame, inst, inst_data, &result_register);
+ bool success = DoInvokeVirtualQuick<true>(
+ self, shadow_frame, inst, inst_data, &result_register);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::NEG_INT:
PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_12x(inst_data), -shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVReg(
+ inst->VRegA_12x(inst_data), -shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
inst = inst->Next_1xx();
break;
case Instruction::NOT_INT:
PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_12x(inst_data), ~shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVReg(
+ inst->VRegA_12x(inst_data), ~shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
inst = inst->Next_1xx();
break;
case Instruction::NEG_LONG:
PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegLong(
+ inst->VRegA_12x(inst_data), -shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
inst = inst->Next_1xx();
break;
case Instruction::NOT_LONG:
PREAMBLE();
- shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), ~shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegLong(
+ inst->VRegA_12x(inst_data), ~shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
inst = inst->Next_1xx();
break;
case Instruction::NEG_FLOAT:
PREAMBLE();
- shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegFloat(
+ inst->VRegA_12x(inst_data), -shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
inst = inst->Next_1xx();
break;
case Instruction::NEG_DOUBLE:
PREAMBLE();
- shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data), -shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
+ shadow_frame.SetVRegDouble(
+ inst->VRegA_12x(inst_data), -shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
inst = inst->Next_1xx();
break;
case Instruction::INT_TO_LONG:
@@ -1500,20 +1555,20 @@
break;
case Instruction::INT_TO_BYTE:
PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
- static_cast<int8_t>(shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data), static_cast<int8_t>(
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
inst = inst->Next_1xx();
break;
case Instruction::INT_TO_CHAR:
PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
- static_cast<uint16_t>(shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data), static_cast<uint16_t>(
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
inst = inst->Next_1xx();
break;
case Instruction::INT_TO_SHORT:
PREAMBLE();
- shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
- static_cast<int16_t>(shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
+ shadow_frame.SetVReg(inst->VRegA_12x(inst_data), static_cast<int16_t>(
+ shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
inst = inst->Next_1xx();
break;
case Instruction::ADD_INT: {
@@ -2050,14 +2105,16 @@
case Instruction::DIV_INT_LIT16: {
PREAMBLE();
bool success = DoIntDivide(shadow_frame, inst->VRegA_22s(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22s(inst_data)), inst->VRegC_22s());
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
+ inst->VRegC_22s());
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
case Instruction::REM_INT_LIT16: {
PREAMBLE();
bool success = DoIntRemainder(shadow_frame, inst->VRegA_22s(inst_data),
- shadow_frame.GetVReg(inst->VRegB_22s(inst_data)), inst->VRegC_22s());
+ shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
+ inst->VRegC_22s());
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
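
DoIntDivide and DoIntRemainder (only re-wrapped here) have to guard the two cases C++ leaves undefined: division by zero, which must raise ArithmeticException, and INT_MIN / -1, which Dex defines to wrap. A sketch of those guards under that convention, with toy error reporting:

    #include <cstdint>
    #include <cstdio>

    // Returns false to signal ArithmeticException, matching the interpreter's
    // pending-exception convention; INT_MIN / -1 is defined by Dex semantics
    // but undefined in C++, so it is special-cased.
    bool ToyIntDivide(int32_t dividend, int32_t divisor, int32_t* out) {
      if (divisor == 0) {
        std::fprintf(stderr, "ArithmeticException: divide by zero\n");
        return false;
      }
      if (dividend == INT32_MIN && divisor == -1) {
        *out = INT32_MIN;
        return true;
      }
      *out = dividend / divisor;
      return true;
    }

    int main() {
      int32_t r;
      if (ToyIntDivide(INT32_MIN, -1, &r)) std::printf("%d\n", r);
      ToyIntDivide(1, 0, &r);
    }
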
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 61def35..738e52b 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -120,7 +120,7 @@
return param->AsString();
}
-static void UnstartedClassForName(
+void UnstartedRuntime::UnstartedClassForName(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::String* class_name = GetClassName(self, shadow_frame, arg_offset);
@@ -134,7 +134,7 @@
CheckExceptionGenerateClassNotFound(self);
}
-static void UnstartedClassForNameLong(
+void UnstartedRuntime::UnstartedClassForNameLong(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::String* class_name = GetClassName(self, shadow_frame, arg_offset);
@@ -152,7 +152,7 @@
CheckExceptionGenerateClassNotFound(self);
}
-static void UnstartedClassClassForName(
+void UnstartedRuntime::UnstartedClassClassForName(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::String* class_name = GetClassName(self, shadow_frame, arg_offset);
@@ -170,7 +170,7 @@
CheckExceptionGenerateClassNotFound(self);
}
-static void UnstartedClassNewInstance(
+void UnstartedRuntime::UnstartedClassNewInstance(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
StackHandleScope<3> hs(self); // Class, constructor, object.
@@ -226,7 +226,7 @@
}
}
-static void UnstartedClassGetDeclaredField(
+void UnstartedRuntime::UnstartedClassGetDeclaredField(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Special managed code cut-out to allow field lookup in an un-started runtime that'd fail
@@ -265,7 +265,7 @@
}
}
-static void UnstartedVmClassLoaderFindLoadedClass(
+void UnstartedRuntime::UnstartedVmClassLoaderFindLoadedClass(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::String* class_name = shadow_frame->GetVRegReference(arg_offset + 1)->AsString();
@@ -286,7 +286,7 @@
}
}
-static void UnstartedVoidLookupType(Thread* self ATTRIBUTE_UNUSED,
+void UnstartedRuntime::UnstartedVoidLookupType(Thread* self ATTRIBUTE_UNUSED,
ShadowFrame* shadow_frame ATTRIBUTE_UNUSED,
JValue* result,
size_t arg_offset ATTRIBUTE_UNUSED)
@@ -323,7 +323,7 @@
}
}
-static void UnstartedSystemArraycopy(
+void UnstartedRuntime::UnstartedSystemArraycopy(
Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Special case array copying without initializing System.
@@ -409,7 +409,21 @@
}
}
-static void UnstartedThreadLocalGet(
+void UnstartedRuntime::UnstartedSystemArraycopyChar(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Just forward.
+ UnstartedRuntime::UnstartedSystemArraycopy(self, shadow_frame, result, arg_offset);
+}
+
+void UnstartedRuntime::UnstartedSystemArraycopyInt(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Just forward.
+ UnstartedRuntime::UnstartedSystemArraycopy(self, shadow_frame, result, arg_offset);
+}
+
+void UnstartedRuntime::UnstartedThreadLocalGet(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset ATTRIBUTE_UNUSED)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::string caller(PrettyMethod(shadow_frame->GetLink()->GetMethod()));
@@ -459,7 +473,7 @@
}
}
-static void UnstartedMathCeil(
+void UnstartedRuntime::UnstartedMathCeil(
Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
double in = shadow_frame->GetVRegDouble(arg_offset);
double out;
@@ -474,21 +488,21 @@
result->SetD(out);
}
-static void UnstartedArtMethodGetMethodName(
+void UnstartedRuntime::UnstartedArtMethodGetMethodName(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* method = shadow_frame->GetVRegReference(arg_offset)->AsArtMethod();
result->SetL(method->GetNameAsString(self));
}
-static void UnstartedObjectHashCode(
+void UnstartedRuntime::UnstartedObjectHashCode(
Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset);
result->SetI(obj->IdentityHashCode());
}
-static void UnstartedDoubleDoubleToRawLongBits(
+void UnstartedRuntime::UnstartedDoubleDoubleToRawLongBits(
Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
double in = shadow_frame->GetVRegDouble(arg_offset);
result->SetJ(bit_cast<int64_t, double>(in));
@@ -522,7 +536,7 @@
return self->DecodeJObject(dex.get());
}
-static void UnstartedDexCacheGetDexNative(
+void UnstartedRuntime::UnstartedDexCacheGetDexNative(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// We will create the Dex object, but the image writer will release it before creating the
@@ -555,17 +569,20 @@
}
case Primitive::kPrimShort: {
- result->SetS(*reinterpret_cast<int16_t*>(static_cast<intptr_t>(address)));
+ typedef int16_t unaligned_short __attribute__ ((aligned (1)));
+ result->SetS(*reinterpret_cast<unaligned_short*>(static_cast<intptr_t>(address)));
return;
}
case Primitive::kPrimInt: {
- result->SetI(*reinterpret_cast<int32_t*>(static_cast<intptr_t>(address)));
+ typedef int32_t unaligned_int __attribute__ ((aligned (1)));
+ result->SetI(*reinterpret_cast<unaligned_int*>(static_cast<intptr_t>(address)));
return;
}
case Primitive::kPrimLong: {
- result->SetJ(*reinterpret_cast<int64_t*>(static_cast<intptr_t>(address)));
+ typedef int64_t unaligned_long __attribute__ ((aligned (1)));
+ result->SetJ(*reinterpret_cast<unaligned_long*>(static_cast<intptr_t>(address)));
return;
}
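
The aligned(1) typedefs above tell GCC/Clang that the load may be misaligned, so the compiler emits an access sequence that is safe on strict-alignment targets; a memcpy is the portable equivalent. A small sketch of both forms:

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    // GCC/Clang: an int32_t alias whose alignment requirement is 1 byte.
    typedef int32_t unaligned_int32 __attribute__((aligned(1)));

    int32_t ReadUnalignedAttr(const void* p) {
      return *reinterpret_cast<const unaligned_int32*>(p);
    }

    // Portable alternative: memcpy compiles down to the same safe load.
    int32_t ReadUnalignedMemcpy(const void* p) {
      int32_t v;
      std::memcpy(&v, p, sizeof(v));
      return v;
    }

    int main() {
      unsigned char buf[8] = {0, 0x78, 0x56, 0x34, 0x12};  // value at odd offset 1
      std::printf("%x %x\n",
                  static_cast<uint32_t>(ReadUnalignedAttr(buf + 1)),
                  static_cast<uint32_t>(ReadUnalignedMemcpy(buf + 1)));
    }
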
@@ -582,22 +599,28 @@
UNREACHABLE();
}
-static void UnstartedMemoryPeekEntry(
+void UnstartedRuntime::UnstartedMemoryPeekByte(
Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- std::string name(PrettyMethod(shadow_frame->GetMethod()));
- if (name == "byte libcore.io.Memory.peekByte(long)") {
- UnstartedMemoryPeek(Primitive::kPrimByte, shadow_frame, result, arg_offset);
- } else if (name == "short libcore.io.Memory.peekShortNative(long)") {
- UnstartedMemoryPeek(Primitive::kPrimShort, shadow_frame, result, arg_offset);
- } else if (name == "int libcore.io.Memory.peekIntNative(long)") {
- UnstartedMemoryPeek(Primitive::kPrimInt, shadow_frame, result, arg_offset);
- } else if (name == "long libcore.io.Memory.peekLongNative(long)") {
- UnstartedMemoryPeek(Primitive::kPrimLong, shadow_frame, result, arg_offset);
- } else {
- LOG(FATAL) << "Unsupported Memory.peek entry: " << name;
- UNREACHABLE();
- }
+ UnstartedMemoryPeek(Primitive::kPrimByte, shadow_frame, result, arg_offset);
+}
+
+void UnstartedRuntime::UnstartedMemoryPeekShort(
+ Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UnstartedMemoryPeek(Primitive::kPrimShort, shadow_frame, result, arg_offset);
+}
+
+void UnstartedRuntime::UnstartedMemoryPeekInt(
+ Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UnstartedMemoryPeek(Primitive::kPrimInt, shadow_frame, result, arg_offset);
+}
+
+void UnstartedRuntime::UnstartedMemoryPeekLong(
+ Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UnstartedMemoryPeek(Primitive::kPrimLong, shadow_frame, result, arg_offset);
}
static void UnstartedMemoryPeekArray(
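
Splitting UnstartedMemoryPeekEntry into per-width handlers moves the method-name comparison from every invocation into a single registration-time map insert. A toy version of the resulting dispatch, with a hypothetical handler table:

    #include <cstdio>
    #include <string>
    #include <unordered_map>

    using Handler = void (*)();

    void PeekByte()  { std::puts("peek byte"); }
    void PeekShort() { std::puts("peek short"); }

    // After the change: each signature maps straight to its handler; the
    // invoke path does one hash lookup instead of chained string compares.
    std::unordered_map<std::string, Handler> handlers = {
        {"byte libcore.io.Memory.peekByte(long)", &PeekByte},
        {"short libcore.io.Memory.peekShortNative(long)", &PeekShort},
    };

    int main() {
      auto it = handlers.find("byte libcore.io.Memory.peekByte(long)");
      if (it != handlers.end()) it->second();
    }
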
@@ -649,20 +672,14 @@
UNREACHABLE();
}
-static void UnstartedMemoryPeekArrayEntry(
+void UnstartedRuntime::UnstartedMemoryPeekByteArray(
Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- std::string name(PrettyMethod(shadow_frame->GetMethod()));
- if (name == "void libcore.io.Memory.peekByteArray(long, byte[], int, int)") {
- UnstartedMemoryPeekArray(Primitive::kPrimByte, self, shadow_frame, arg_offset);
- } else {
- LOG(FATAL) << "Unsupported Memory.peekArray entry: " << name;
- UNREACHABLE();
- }
+ UnstartedMemoryPeekArray(Primitive::kPrimByte, self, shadow_frame, arg_offset);
}
// This allows reading security.properties in an unstarted runtime and initializing Security.
-static void UnstartedSecurityGetSecurityPropertiesReader(
+void UnstartedRuntime::UnstartedSecurityGetSecurityPropertiesReader(
Thread* self,
ShadowFrame* shadow_frame ATTRIBUTE_UNUSED,
JValue* result,
@@ -755,7 +772,114 @@
result->SetL(h_obj.Get());
}
-static void UnstartedJNIVMRuntimeNewUnpaddedArray(Thread* self,
+// This allows reading the new style of String objects during compilation.
+void UnstartedRuntime::UnstartedStringGetCharsNoCheck(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ jint start = shadow_frame->GetVReg(arg_offset + 1);
+ jint end = shadow_frame->GetVReg(arg_offset + 2);
+ jint index = shadow_frame->GetVReg(arg_offset + 4);
+ mirror::String* string = shadow_frame->GetVRegReference(arg_offset)->AsString();
+ if (string == nullptr) {
+ AbortTransactionOrFail(self, "String.getCharsNoCheck with null object");
+ return;
+ }
+ DCHECK_GE(start, 0);
+ DCHECK_LE(end, string->GetLength());
+ StackHandleScope<1> hs(self);
+ Handle<mirror::CharArray> h_char_array(
+     hs.NewHandle(shadow_frame->GetVRegReference(arg_offset + 3)->AsCharArray()));
+ DCHECK_LE(index, h_char_array->GetLength());
+ DCHECK_LE(end - start, h_char_array->GetLength() - index);
+ string->GetChars(start, end, h_char_array, index);
+}
+
+// This allows reading chars from the new style of String objects during compilation.
+void UnstartedRuntime::UnstartedStringCharAt(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ jint index = shadow_frame->GetVReg(arg_offset + 1);
+ mirror::String* string = shadow_frame->GetVRegReference(arg_offset)->AsString();
+ if (string == nullptr) {
+ AbortTransactionOrFail(self, "String.charAt with null object");
+ return;
+ }
+ result->SetC(string->CharAt(index));
+}
+
+// This allows setting chars from the new style of String objects during compilation.
+void UnstartedRuntime::UnstartedStringSetCharAt(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ jint index = shadow_frame->GetVReg(arg_offset + 1);
+ jchar c = shadow_frame->GetVReg(arg_offset + 2);
+ mirror::String* string = shadow_frame->GetVRegReference(arg_offset)->AsString();
+ if (string == nullptr) {
+ AbortTransactionOrFail(self, "String.setCharAt with null object");
+ return;
+ }
+ string->SetCharAt(index, c);
+}
+
+// This allows creating the new style of String objects during compilation.
+void UnstartedRuntime::UnstartedStringFactoryNewStringFromChars(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ jint offset = shadow_frame->GetVReg(arg_offset);
+ jint char_count = shadow_frame->GetVReg(arg_offset + 1);
+ DCHECK_GE(char_count, 0);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::CharArray> h_char_array(
+     hs.NewHandle(shadow_frame->GetVRegReference(arg_offset + 2)->AsCharArray()));
+ Runtime* runtime = Runtime::Current();
+ gc::AllocatorType allocator = runtime->GetHeap()->GetCurrentAllocator();
+ result->SetL(mirror::String::AllocFromCharArray<true>(self, char_count, h_char_array,
+                                                       offset, allocator));
+}
+
+// This allows creating the new style of String objects during compilation.
+void UnstartedRuntime::UnstartedStringFactoryNewStringFromString(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::String* to_copy = shadow_frame->GetVRegReference(arg_offset)->AsString();
+ if (to_copy == nullptr) {
+ AbortTransactionOrFail(self, "StringFactory.newStringFromString with null object");
+ return;
+ }
+ StackHandleScope<1> hs(self);
+ Handle<mirror::String> h_string(hs.NewHandle(to_copy));
+ Runtime* runtime = Runtime::Current();
+ gc::AllocatorType allocator = runtime->GetHeap()->GetCurrentAllocator();
+ result->SetL(mirror::String::AllocFromString<true>(self, h_string->GetLength(), h_string, 0,
+ allocator));
+}
+
+void UnstartedRuntime::UnstartedStringFastSubstring(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ jint start = shadow_frame->GetVReg(arg_offset + 1);
+ jint length = shadow_frame->GetVReg(arg_offset + 2);
+ DCHECK_GE(start, 0);
+ DCHECK_GE(length, 0);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::String> h_string(
+     hs.NewHandle(shadow_frame->GetVRegReference(arg_offset)->AsString()));
+ DCHECK_LE(start, h_string->GetLength());
+ DCHECK_LE(start + length, h_string->GetLength());
+ Runtime* runtime = Runtime::Current();
+ gc::AllocatorType allocator = runtime->GetHeap()->GetCurrentAllocator();
+ result->SetL(mirror::String::AllocFromString<true>(self, length, h_string, start, allocator));
+}
+
+// This allows getting the char array for the new style of String objects during compilation.
+void UnstartedRuntime::UnstartedStringToCharArray(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::String* string = shadow_frame->GetVRegReference(arg_offset)->AsString();
+ if (string == nullptr) {
+ AbortTransactionOrFail(self, "String.toCharArray with null object");
+ return;
+ }
+ result->SetL(string->ToCharArray(self));
+}
+
+void UnstartedRuntime::UnstartedJNIVMRuntimeNewUnpaddedArray(Thread* self,
mirror::ArtMethod* method ATTRIBUTE_UNUSED,
mirror::Object* receiver ATTRIBUTE_UNUSED,
uint32_t* args,
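
All the handlers promoted to UnstartedRuntime members share one calling convention: arguments are read from the shadow frame starting at arg_offset (receiver first for instance methods) and the return value is written into a JValue. A stripped-down sketch with toy frame and value types, not ART's:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Toy stand-ins for ART's ShadowFrame and JValue.
    struct ToyFrame {
      std::vector<int32_t> vregs;
      int32_t GetVReg(size_t i) const { return vregs[i]; }
    };
    struct ToyValue { int32_t i = 0; void SetI(int32_t v) { i = v; } };

    // Handler convention: read args at arg_offset, write the result.
    void ToyStringCharAt(ToyFrame* frame, ToyValue* result, size_t arg_offset) {
      // arg_offset + 0 holds the receiver; +1 holds the index argument.
      int32_t index = frame->GetVReg(arg_offset + 1);
      result->SetI('a' + index);  // pretend the receiver is "abc..."
    }

    int main() {
      ToyFrame f{{/*receiver*/ 0, /*index*/ 2}};
      ToyValue r;
      ToyStringCharAt(&f, &r, 0);
      std::printf("%c\n", static_cast<char>(r.i));
    }
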
@@ -772,7 +896,7 @@
array_class->GetComponentSizeShift(), allocator));
}
-static void UnstartedJNIVMStackGetCallingClassLoader(Thread* self ATTRIBUTE_UNUSED,
+void UnstartedRuntime::UnstartedJNIVMStackGetCallingClassLoader(Thread* self ATTRIBUTE_UNUSED,
mirror::ArtMethod* method ATTRIBUTE_UNUSED,
mirror::Object* receiver ATTRIBUTE_UNUSED,
uint32_t* args ATTRIBUTE_UNUSED,
@@ -780,7 +904,7 @@
result->SetL(nullptr);
}
-static void UnstartedJNIVMStackGetStackClass2(Thread* self,
+void UnstartedRuntime::UnstartedJNIVMStackGetStackClass2(Thread* self,
mirror::ArtMethod* method ATTRIBUTE_UNUSED,
mirror::Object* receiver ATTRIBUTE_UNUSED,
uint32_t* args ATTRIBUTE_UNUSED,
@@ -793,7 +917,7 @@
}
}
-static void UnstartedJNIMathLog(Thread* self ATTRIBUTE_UNUSED,
+void UnstartedRuntime::UnstartedJNIMathLog(Thread* self ATTRIBUTE_UNUSED,
mirror::ArtMethod* method ATTRIBUTE_UNUSED,
mirror::Object* receiver ATTRIBUTE_UNUSED,
uint32_t* args,
@@ -803,7 +927,7 @@
result->SetD(log(value.GetD()));
}
-static void UnstartedJNIMathExp(Thread* self ATTRIBUTE_UNUSED,
+void UnstartedRuntime::UnstartedJNIMathExp(Thread* self ATTRIBUTE_UNUSED,
mirror::ArtMethod* method ATTRIBUTE_UNUSED,
mirror::Object* receiver ATTRIBUTE_UNUSED,
uint32_t* args,
@@ -813,7 +937,7 @@
result->SetD(exp(value.GetD()));
}
-static void UnstartedJNIClassGetNameNative(Thread* self,
+void UnstartedRuntime::UnstartedJNIClassGetNameNative(Thread* self,
mirror::ArtMethod* method ATTRIBUTE_UNUSED,
mirror::Object* receiver,
uint32_t* args ATTRIBUTE_UNUSED,
@@ -823,7 +947,7 @@
result->SetL(mirror::Class::ComputeName(hs.NewHandle(receiver->AsClass())));
}
-static void UnstartedJNIFloatFloatToRawIntBits(Thread* self ATTRIBUTE_UNUSED,
+void UnstartedRuntime::UnstartedJNIFloatFloatToRawIntBits(Thread* self ATTRIBUTE_UNUSED,
mirror::ArtMethod* method ATTRIBUTE_UNUSED,
mirror::Object* receiver ATTRIBUTE_UNUSED,
uint32_t* args,
@@ -831,7 +955,7 @@
result->SetI(args[0]);
}
-static void UnstartedJNIFloatIntBitsToFloat(Thread* self ATTRIBUTE_UNUSED,
+void UnstartedRuntime::UnstartedJNIFloatIntBitsToFloat(Thread* self ATTRIBUTE_UNUSED,
mirror::ArtMethod* method ATTRIBUTE_UNUSED,
mirror::Object* receiver ATTRIBUTE_UNUSED,
uint32_t* args,
@@ -839,7 +963,7 @@
result->SetI(args[0]);
}
-static void UnstartedJNIObjectInternalClone(Thread* self,
+void UnstartedRuntime::UnstartedJNIObjectInternalClone(Thread* self,
mirror::ArtMethod* method ATTRIBUTE_UNUSED,
mirror::Object* receiver,
uint32_t* args ATTRIBUTE_UNUSED,
@@ -848,7 +972,7 @@
result->SetL(receiver->Clone(self));
}
-static void UnstartedJNIObjectNotifyAll(Thread* self,
+void UnstartedRuntime::UnstartedJNIObjectNotifyAll(Thread* self,
mirror::ArtMethod* method ATTRIBUTE_UNUSED,
mirror::Object* receiver,
uint32_t* args ATTRIBUTE_UNUSED,
@@ -857,7 +981,7 @@
receiver->NotifyAll(self);
}
-static void UnstartedJNIStringCompareTo(Thread* self,
+void UnstartedRuntime::UnstartedJNIStringCompareTo(Thread* self,
mirror::ArtMethod* method ATTRIBUTE_UNUSED,
mirror::Object* receiver,
uint32_t* args,
@@ -870,7 +994,7 @@
result->SetI(receiver->AsString()->CompareTo(rhs));
}
-static void UnstartedJNIStringIntern(Thread* self ATTRIBUTE_UNUSED,
+void UnstartedRuntime::UnstartedJNIStringIntern(Thread* self ATTRIBUTE_UNUSED,
mirror::ArtMethod* method ATTRIBUTE_UNUSED,
mirror::Object* receiver,
uint32_t* args ATTRIBUTE_UNUSED,
@@ -879,7 +1003,7 @@
result->SetL(receiver->AsString()->Intern());
}
-static void UnstartedJNIStringFastIndexOf(Thread* self ATTRIBUTE_UNUSED,
+void UnstartedRuntime::UnstartedJNIStringFastIndexOf(Thread* self ATTRIBUTE_UNUSED,
mirror::ArtMethod* method ATTRIBUTE_UNUSED,
mirror::Object* receiver,
uint32_t* args,
@@ -888,7 +1012,7 @@
result->SetI(receiver->AsString()->FastIndexOf(args[0], args[1]));
}
-static void UnstartedJNIArrayCreateMultiArray(Thread* self,
+void UnstartedRuntime::UnstartedJNIArrayCreateMultiArray(Thread* self,
mirror::ArtMethod* method ATTRIBUTE_UNUSED,
mirror::Object* receiver ATTRIBUTE_UNUSED,
uint32_t* args,
@@ -900,7 +1024,7 @@
result->SetL(mirror::Array::CreateMultiArray(self, h_class, h_dimensions));
}
-static void UnstartedJNIArrayCreateObjectArray(Thread* self,
+void UnstartedRuntime::UnstartedJNIArrayCreateObjectArray(Thread* self,
mirror::ArtMethod* method ATTRIBUTE_UNUSED,
mirror::Object* receiver ATTRIBUTE_UNUSED,
uint32_t* args,
@@ -915,7 +1039,7 @@
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
mirror::Class* array_class = class_linker->FindArrayClass(self, &element_class);
- if (UNLIKELY(array_class == NULL)) {
+ if (UNLIKELY(array_class == nullptr)) {
CHECK(self->IsExceptionPending());
return;
}
@@ -925,7 +1049,7 @@
result->SetL(new_array);
}
-static void UnstartedJNIThrowableNativeFillInStackTrace(Thread* self,
+void UnstartedRuntime::UnstartedJNIThrowableNativeFillInStackTrace(Thread* self,
mirror::ArtMethod* method ATTRIBUTE_UNUSED,
mirror::Object* receiver ATTRIBUTE_UNUSED,
uint32_t* args ATTRIBUTE_UNUSED,
@@ -939,7 +1063,7 @@
}
}
-static void UnstartedJNISystemIdentityHashCode(Thread* self ATTRIBUTE_UNUSED,
+void UnstartedRuntime::UnstartedJNISystemIdentityHashCode(Thread* self ATTRIBUTE_UNUSED,
mirror::ArtMethod* method ATTRIBUTE_UNUSED,
mirror::Object* receiver ATTRIBUTE_UNUSED,
uint32_t* args,
@@ -949,7 +1073,7 @@
result->SetI((obj != nullptr) ? obj->IdentityHashCode() : 0);
}
-static void UnstartedJNIByteOrderIsLittleEndian(Thread* self ATTRIBUTE_UNUSED,
+void UnstartedRuntime::UnstartedJNIByteOrderIsLittleEndian(Thread* self ATTRIBUTE_UNUSED,
mirror::ArtMethod* method ATTRIBUTE_UNUSED,
mirror::Object* receiver ATTRIBUTE_UNUSED,
uint32_t* args ATTRIBUTE_UNUSED,
@@ -957,7 +1081,7 @@
result->SetZ(JNI_TRUE);
}
-static void UnstartedJNIUnsafeCompareAndSwapInt(Thread* self ATTRIBUTE_UNUSED,
+void UnstartedRuntime::UnstartedJNIUnsafeCompareAndSwapInt(Thread* self ATTRIBUTE_UNUSED,
mirror::ArtMethod* method ATTRIBUTE_UNUSED,
mirror::Object* receiver ATTRIBUTE_UNUSED,
uint32_t* args,
@@ -978,7 +1102,7 @@
result->SetZ(success ? JNI_TRUE : JNI_FALSE);
}
-static void UnstartedJNIUnsafePutObject(Thread* self ATTRIBUTE_UNUSED,
+void UnstartedRuntime::UnstartedJNIUnsafePutObject(Thread* self ATTRIBUTE_UNUSED,
mirror::ArtMethod* method ATTRIBUTE_UNUSED,
mirror::Object* receiver ATTRIBUTE_UNUSED,
uint32_t* args,
@@ -994,7 +1118,7 @@
}
}
-static void UnstartedJNIUnsafeGetArrayBaseOffsetForComponentType(
+void UnstartedRuntime::UnstartedJNIUnsafeGetArrayBaseOffsetForComponentType(
Thread* self ATTRIBUTE_UNUSED,
mirror::ArtMethod* method ATTRIBUTE_UNUSED,
mirror::Object* receiver ATTRIBUTE_UNUSED,
@@ -1006,7 +1130,7 @@
result->SetI(mirror::Array::DataOffset(Primitive::ComponentSize(primitive_type)).Int32Value());
}
-static void UnstartedJNIUnsafeGetArrayIndexScaleForComponentType(
+void UnstartedRuntime::UnstartedJNIUnsafeGetArrayIndexScaleForComponentType(
Thread* self ATTRIBUTE_UNUSED,
mirror::ArtMethod* method ATTRIBUTE_UNUSED,
mirror::Object* receiver ATTRIBUTE_UNUSED,
@@ -1028,133 +1152,37 @@
static std::unordered_map<std::string, InvokeHandler> invoke_handlers_;
static std::unordered_map<std::string, JNIHandler> jni_handlers_;
-static void UnstartedRuntimeInitializeInvokeHandlers() {
- struct InvokeHandlerDef {
- std::string name;
- InvokeHandler function;
- };
-
- InvokeHandlerDef defs[] {
- { "java.lang.Class java.lang.Class.forName(java.lang.String)",
- &UnstartedClassForName },
- { "java.lang.Class java.lang.Class.forName(java.lang.String, boolean, java.lang.ClassLoader)",
- &UnstartedClassForNameLong },
- { "java.lang.Class java.lang.Class.classForName(java.lang.String, boolean, java.lang.ClassLoader)",
- &UnstartedClassClassForName },
- { "java.lang.Class java.lang.VMClassLoader.findLoadedClass(java.lang.ClassLoader, java.lang.String)",
- &UnstartedVmClassLoaderFindLoadedClass },
- { "java.lang.Class java.lang.Void.lookupType()",
- &UnstartedVoidLookupType },
- { "java.lang.Object java.lang.Class.newInstance()",
- &UnstartedClassNewInstance },
- { "java.lang.reflect.Field java.lang.Class.getDeclaredField(java.lang.String)",
- &UnstartedClassGetDeclaredField },
- { "int java.lang.Object.hashCode()",
- &UnstartedObjectHashCode },
- { "java.lang.String java.lang.reflect.ArtMethod.getMethodName(java.lang.reflect.ArtMethod)",
- &UnstartedArtMethodGetMethodName },
- { "void java.lang.System.arraycopy(java.lang.Object, int, java.lang.Object, int, int)",
- &UnstartedSystemArraycopy},
- { "void java.lang.System.arraycopy(char[], int, char[], int, int)",
- &UnstartedSystemArraycopy },
- { "void java.lang.System.arraycopy(int[], int, int[], int, int)",
- &UnstartedSystemArraycopy },
- { "long java.lang.Double.doubleToRawLongBits(double)",
- &UnstartedDoubleDoubleToRawLongBits },
- { "double java.lang.Math.ceil(double)",
- &UnstartedMathCeil },
- { "java.lang.Object java.lang.ThreadLocal.get()",
- &UnstartedThreadLocalGet },
- { "com.android.dex.Dex java.lang.DexCache.getDexNative()",
- &UnstartedDexCacheGetDexNative },
- { "byte libcore.io.Memory.peekByte(long)",
- &UnstartedMemoryPeekEntry },
- { "short libcore.io.Memory.peekShortNative(long)",
- &UnstartedMemoryPeekEntry },
- { "int libcore.io.Memory.peekIntNative(long)",
- &UnstartedMemoryPeekEntry },
- { "long libcore.io.Memory.peekLongNative(long)",
- &UnstartedMemoryPeekEntry },
- { "void libcore.io.Memory.peekByteArray(long, byte[], int, int)",
- &UnstartedMemoryPeekArrayEntry },
- { "java.io.Reader java.security.Security.getSecurityPropertiesReader()",
- &UnstartedSecurityGetSecurityPropertiesReader },
- };
-
- for (auto& def : defs) {
- invoke_handlers_.insert(std::make_pair(def.name, def.function));
- }
+void UnstartedRuntime::InitializeInvokeHandlers() {
+#define UNSTARTED_DIRECT(ShortName, Sig) \
+ invoke_handlers_.insert(std::make_pair(Sig, & UnstartedRuntime::Unstarted ## ShortName));
+#include "unstarted_runtime_list.h"
+ UNSTARTED_RUNTIME_DIRECT_LIST(UNSTARTED_DIRECT)
+#undef UNSTARTED_RUNTIME_DIRECT_LIST
+#undef UNSTARTED_RUNTIME_JNI_LIST
+#undef UNSTARTED_DIRECT
}
-static void UnstartedRuntimeInitializeJNIHandlers() {
- struct JNIHandlerDef {
- std::string name;
- JNIHandler function;
- };
-
- JNIHandlerDef defs[] {
- { "java.lang.Object dalvik.system.VMRuntime.newUnpaddedArray(java.lang.Class, int)",
- &UnstartedJNIVMRuntimeNewUnpaddedArray },
- { "java.lang.ClassLoader dalvik.system.VMStack.getCallingClassLoader()",
- &UnstartedJNIVMStackGetCallingClassLoader },
- { "java.lang.Class dalvik.system.VMStack.getStackClass2()",
- &UnstartedJNIVMStackGetStackClass2 },
- { "double java.lang.Math.log(double)",
- &UnstartedJNIMathLog },
- { "java.lang.String java.lang.Class.getNameNative()",
- &UnstartedJNIClassGetNameNative },
- { "int java.lang.Float.floatToRawIntBits(float)",
- &UnstartedJNIFloatFloatToRawIntBits },
- { "float java.lang.Float.intBitsToFloat(int)",
- &UnstartedJNIFloatIntBitsToFloat },
- { "double java.lang.Math.exp(double)",
- &UnstartedJNIMathExp },
- { "java.lang.Object java.lang.Object.internalClone()",
- &UnstartedJNIObjectInternalClone },
- { "void java.lang.Object.notifyAll()",
- &UnstartedJNIObjectNotifyAll},
- { "int java.lang.String.compareTo(java.lang.String)",
- &UnstartedJNIStringCompareTo },
- { "java.lang.String java.lang.String.intern()",
- &UnstartedJNIStringIntern },
- { "int java.lang.String.fastIndexOf(int, int)",
- &UnstartedJNIStringFastIndexOf },
- { "java.lang.Object java.lang.reflect.Array.createMultiArray(java.lang.Class, int[])",
- &UnstartedJNIArrayCreateMultiArray },
- { "java.lang.Object java.lang.reflect.Array.createObjectArray(java.lang.Class, int)",
- &UnstartedJNIArrayCreateObjectArray },
- { "java.lang.Object java.lang.Throwable.nativeFillInStackTrace()",
- &UnstartedJNIThrowableNativeFillInStackTrace },
- { "int java.lang.System.identityHashCode(java.lang.Object)",
- &UnstartedJNISystemIdentityHashCode },
- { "boolean java.nio.ByteOrder.isLittleEndian()",
- &UnstartedJNIByteOrderIsLittleEndian },
- { "boolean sun.misc.Unsafe.compareAndSwapInt(java.lang.Object, long, int, int)",
- &UnstartedJNIUnsafeCompareAndSwapInt },
- { "void sun.misc.Unsafe.putObject(java.lang.Object, long, java.lang.Object)",
- &UnstartedJNIUnsafePutObject },
- { "int sun.misc.Unsafe.getArrayBaseOffsetForComponentType(java.lang.Class)",
- &UnstartedJNIUnsafeGetArrayBaseOffsetForComponentType },
- { "int sun.misc.Unsafe.getArrayIndexScaleForComponentType(java.lang.Class)",
- &UnstartedJNIUnsafeGetArrayIndexScaleForComponentType },
- };
-
- for (auto& def : defs) {
- jni_handlers_.insert(std::make_pair(def.name, def.function));
- }
+void UnstartedRuntime::InitializeJNIHandlers() {
+#define UNSTARTED_JNI(ShortName, Sig) \
+ jni_handlers_.insert(std::make_pair(Sig, & UnstartedRuntime::UnstartedJNI ## ShortName));
+#include "unstarted_runtime_list.h"
+ UNSTARTED_RUNTIME_JNI_LIST(UNSTARTED_JNI)
+#undef UNSTARTED_RUNTIME_DIRECT_LIST
+#undef UNSTARTED_RUNTIME_JNI_LIST
+#undef UNSTARTED_JNI
}
-void UnstartedRuntimeInitialize() {
+void UnstartedRuntime::Initialize() {
CHECK(!tables_initialized_);
- UnstartedRuntimeInitializeInvokeHandlers();
- UnstartedRuntimeInitializeJNIHandlers();
+ InitializeInvokeHandlers();
+ InitializeJNIHandlers();
tables_initialized_ = true;
}
-void UnstartedRuntimeInvoke(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+void UnstartedRuntime::Invoke(Thread* self, const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
// In a runtime that's not started we intercept certain methods to avoid complicated dependency
// problems in core libraries.
CHECK(tables_initialized_);
@@ -1162,6 +1190,8 @@
std::string name(PrettyMethod(shadow_frame->GetMethod()));
const auto& iter = invoke_handlers_.find(name);
if (iter != invoke_handlers_.end()) {
+ // Clear out the result in case it's not zeroed out.
+ result->SetL(0);
(*iter->second)(self, shadow_frame, result, arg_offset);
} else {
// Not special, continue with regular interpreter execution.
@@ -1170,11 +1200,13 @@
}
// Hand select a number of methods to be run in a not yet started runtime without using JNI.
-void UnstartedRuntimeJni(Thread* self, mirror::ArtMethod* method, mirror::Object* receiver,
- uint32_t* args, JValue* result) {
+void UnstartedRuntime::Jni(Thread* self, mirror::ArtMethod* method, mirror::Object* receiver,
+ uint32_t* args, JValue* result) {
std::string name(PrettyMethod(method));
const auto& iter = jni_handlers_.find(name);
if (iter != jni_handlers_.end()) {
+ // Clear out the result in case it's not zeroed out.
+ result->SetL(0);
(*iter->second)(self, method, receiver, args, result);
} else if (Runtime::Current()->IsActiveTransaction()) {
AbortTransactionF(self, "Attempt to invoke native method in non-started runtime: %s",
diff --git a/runtime/interpreter/unstarted_runtime.h b/runtime/interpreter/unstarted_runtime.h
index 2d7d380..a361af0 100644
--- a/runtime/interpreter/unstarted_runtime.h
+++ b/runtime/interpreter/unstarted_runtime.h
@@ -36,16 +36,69 @@
namespace interpreter {
-void UnstartedRuntimeInitialize();
+// Support for an unstarted runtime. These are special handwritten implementations for select
+// libcore native and non-native methods so we can compile-time initialize classes in the boot
+// image.
+//
+// While it would technically be OK to only expose the public functions, a class was chosen to
+// wrap this so the actual implementations are exposed for testing. This is also why the private
+// methods are not documented here - they are not intended to be used directly except in
+// testing.
-void UnstartedRuntimeInvoke(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame* shadow_frame,
- JValue* result, size_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+class UnstartedRuntime {
+ public:
+ static void Initialize();
-void UnstartedRuntimeJni(Thread* self, mirror::ArtMethod* method, mirror::Object* receiver,
- uint32_t* args, JValue* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void Invoke(Thread* self,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame,
+ JValue* result,
+ size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static void Jni(Thread* self,
+ mirror::ArtMethod* method,
+ mirror::Object* receiver,
+ uint32_t* args,
+ JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ private:
+ // Methods that intercept available libcore implementations.
+#define UNSTARTED_DIRECT(ShortName, SigIgnored) \
+ static void Unstarted ## ShortName(Thread* self, \
+ ShadowFrame* shadow_frame, \
+ JValue* result, \
+ size_t arg_offset) \
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+#include "unstarted_runtime_list.h"
+ UNSTARTED_RUNTIME_DIRECT_LIST(UNSTARTED_DIRECT)
+#undef UNSTARTED_RUNTIME_DIRECT_LIST
+#undef UNSTARTED_RUNTIME_JNI_LIST
+#undef UNSTARTED_DIRECT
+
+ // Methods that are native.
+#define UNSTARTED_JNI(ShortName, SigIgnored) \
+ static void UnstartedJNI ## ShortName(Thread* self, \
+ mirror::ArtMethod* method, \
+ mirror::Object* receiver, \
+ uint32_t* args, \
+ JValue* result) \
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+#include "unstarted_runtime_list.h"
+ UNSTARTED_RUNTIME_JNI_LIST(UNSTARTED_JNI)
+#undef UNSTARTED_RUNTIME_DIRECT_LIST
+#undef UNSTARTED_RUNTIME_JNI_LIST
+#undef UNSTARTED_JNI
+
+ static void InitializeInvokeHandlers();
+ static void InitializeJNIHandlers();
+
+ friend class UnstartedRuntimeTest;
+
+ DISALLOW_ALLOCATION();
+ DISALLOW_COPY_AND_ASSIGN(UnstartedRuntime);
+};
} // namespace interpreter
} // namespace art
diff --git a/runtime/interpreter/unstarted_runtime_list.h b/runtime/interpreter/unstarted_runtime_list.h
new file mode 100644
index 0000000..8f6014c
--- /dev/null
+++ b/runtime/interpreter/unstarted_runtime_list.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_INTERPRETER_UNSTARTED_RUNTIME_LIST_H_
+#define ART_RUNTIME_INTERPRETER_UNSTARTED_RUNTIME_LIST_H_
+
+// Methods that intercept available libcore implementations.
+#define UNSTARTED_RUNTIME_DIRECT_LIST(V) \
+ V(ClassForName, "java.lang.Class java.lang.Class.forName(java.lang.String)") \
+ V(ClassForNameLong, "java.lang.Class java.lang.Class.forName(java.lang.String, boolean, java.lang.ClassLoader)") \
+ V(ClassClassForName, "java.lang.Class java.lang.Class.classForName(java.lang.String, boolean, java.lang.ClassLoader)") \
+ V(ClassNewInstance, "java.lang.Object java.lang.Class.newInstance()") \
+ V(ClassGetDeclaredField, "java.lang.reflect.Field java.lang.Class.getDeclaredField(java.lang.String)") \
+ V(VmClassLoaderFindLoadedClass, "java.lang.Class java.lang.VMClassLoader.findLoadedClass(java.lang.ClassLoader, java.lang.String)") \
+ V(VoidLookupType, "java.lang.Class java.lang.Void.lookupType()") \
+ V(SystemArraycopy, "void java.lang.System.arraycopy(java.lang.Object, int, java.lang.Object, int, int)") \
+ V(SystemArraycopyChar, "void java.lang.System.arraycopy(char[], int, char[], int, int)") \
+ V(SystemArraycopyInt, "void java.lang.System.arraycopy(int[], int, int[], int, int)") \
+ V(ThreadLocalGet, "java.lang.Object java.lang.ThreadLocal.get()") \
+ V(MathCeil, "double java.lang.Math.ceil(double)") \
+ V(ArtMethodGetMethodName, "java.lang.String java.lang.reflect.ArtMethod.getMethodName(java.lang.reflect.ArtMethod)") \
+ V(ObjectHashCode, "int java.lang.Object.hashCode()") \
+ V(DoubleDoubleToRawLongBits, "long java.lang.Double.doubleToRawLongBits(double)") \
+ V(DexCacheGetDexNative, "com.android.dex.Dex java.lang.DexCache.getDexNative()") \
+ V(MemoryPeekByte, "byte libcore.io.Memory.peekByte(long)") \
+ V(MemoryPeekShort, "short libcore.io.Memory.peekShortNative(long)") \
+ V(MemoryPeekInt, "int libcore.io.Memory.peekIntNative(long)") \
+ V(MemoryPeekLong, "long libcore.io.Memory.peekLongNative(long)") \
+ V(MemoryPeekByteArray, "void libcore.io.Memory.peekByteArray(long, byte[], int, int)") \
+ V(SecurityGetSecurityPropertiesReader, "java.io.Reader java.security.Security.getSecurityPropertiesReader()") \
+ V(StringGetCharsNoCheck, "void java.lang.String.getCharsNoCheck(int, int, char[], int)") \
+ V(StringCharAt, "char java.lang.String.charAt(int)") \
+ V(StringSetCharAt, "void java.lang.String.setCharAt(int, char)") \
+ V(StringFactoryNewStringFromChars, "java.lang.String java.lang.StringFactory.newStringFromChars(int, int, char[])") \
+ V(StringFactoryNewStringFromString, "java.lang.String java.lang.StringFactory.newStringFromString(java.lang.String)") \
+ V(StringFastSubstring, "java.lang.String java.lang.String.fastSubstring(int, int)") \
+ V(StringToCharArray, "char[] java.lang.String.toCharArray()")
+
+// Methods that are native.
+#define UNSTARTED_RUNTIME_JNI_LIST(V) \
+ V(VMRuntimeNewUnpaddedArray, "java.lang.Object dalvik.system.VMRuntime.newUnpaddedArray(java.lang.Class, int)") \
+ V(VMStackGetCallingClassLoader, "java.lang.ClassLoader dalvik.system.VMStack.getCallingClassLoader()") \
+ V(VMStackGetStackClass2, "java.lang.Class dalvik.system.VMStack.getStackClass2()") \
+ V(MathLog, "double java.lang.Math.log(double)") \
+ V(MathExp, "double java.lang.Math.exp(double)") \
+ V(ClassGetNameNative, "java.lang.String java.lang.Class.getNameNative()") \
+ V(FloatFloatToRawIntBits, "int java.lang.Float.floatToRawIntBits(float)") \
+ V(FloatIntBitsToFloat, "float java.lang.Float.intBitsToFloat(int)") \
+ V(ObjectInternalClone, "java.lang.Object java.lang.Object.internalClone()") \
+ V(ObjectNotifyAll, "void java.lang.Object.notifyAll()") \
+ V(StringCompareTo, "int java.lang.String.compareTo(java.lang.String)") \
+ V(StringIntern, "java.lang.String java.lang.String.intern()") \
+ V(StringFastIndexOf, "int java.lang.String.fastIndexOf(int, int)") \
+ V(ArrayCreateMultiArray, "java.lang.Object java.lang.reflect.Array.createMultiArray(java.lang.Class, int[])") \
+ V(ArrayCreateObjectArray, "java.lang.Object java.lang.reflect.Array.createObjectArray(java.lang.Class, int)") \
+ V(ThrowableNativeFillInStackTrace, "java.lang.Object java.lang.Throwable.nativeFillInStackTrace()") \
+ V(SystemIdentityHashCode, "int java.lang.System.identityHashCode(java.lang.Object)") \
+ V(ByteOrderIsLittleEndian, "boolean java.nio.ByteOrder.isLittleEndian()") \
+ V(UnsafeCompareAndSwapInt, "boolean sun.misc.Unsafe.compareAndSwapInt(java.lang.Object, long, int, int)") \
+ V(UnsafePutObject, "void sun.misc.Unsafe.putObject(java.lang.Object, long, java.lang.Object)") \
+ V(UnsafeGetArrayBaseOffsetForComponentType, "int sun.misc.Unsafe.getArrayBaseOffsetForComponentType(java.lang.Class)") \
+ V(UnsafeGetArrayIndexScaleForComponentType, "int sun.misc.Unsafe.getArrayIndexScaleForComponentType(java.lang.Class)")
+
+#endif // ART_RUNTIME_INTERPRETER_UNSTARTED_RUNTIME_LIST_H_
+// The guard in this file is just for cpplint.
+#undef ART_RUNTIME_INTERPRETER_UNSTARTED_RUNTIME_LIST_H_
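// Aside: a minimal, self-contained sketch of the X-macro pattern the list above
// relies on (not part of this change; all Example* names are illustrative).
// Each include site defines a per-entry macro, expands the list through it,
// and undefines it again, exactly as InitializeInvokeHandlers() and
// InitializeJNIHandlers() do.

#include <iostream>
#include <string>
#include <unordered_map>

#define EXAMPLE_LIST(V) \
  V(MathCeil, "double java.lang.Math.ceil(double)") \
  V(ObjectHashCode, "int java.lang.Object.hashCode()")

static int ExampleMathCeil() { return 1; }
static int ExampleObjectHashCode() { return 2; }

int main() {
  std::unordered_map<std::string, int (*)()> handlers;
  // Expands to one insert per list entry, keyed by the signature string.
#define EXAMPLE_ENTRY(ShortName, Sig) \
  handlers.insert(std::make_pair(Sig, &Example##ShortName));
  EXAMPLE_LIST(EXAMPLE_ENTRY)
#undef EXAMPLE_ENTRY
  std::cout << handlers.size() << " handlers registered\n";  // prints 2
  return 0;
}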
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
new file mode 100644
index 0000000..34ab277
--- /dev/null
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -0,0 +1,251 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "unstarted_runtime.h"
+
+#include "class_linker.h"
+#include "common_runtime_test.h"
+#include "handle.h"
+#include "handle_scope-inl.h"
+#include "mirror/class_loader.h"
+#include "mirror/string-inl.h"
+#include "runtime.h"
+#include "scoped_thread_state_change.h"
+#include "thread.h"
+
+namespace art {
+namespace interpreter {
+
+class UnstartedRuntimeTest : public CommonRuntimeTest {
+ protected:
+ // Re-expose all UnstartedRuntime implementations so we don't need to declare a million
+ // test friends.
+
+ // Methods that intercept available libcore implementations.
+#define UNSTARTED_DIRECT(Name, SigIgnored) \
+ static void Unstarted ## Name(Thread* self, \
+ ShadowFrame* shadow_frame, \
+ JValue* result, \
+ size_t arg_offset) \
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ interpreter::UnstartedRuntime::Unstarted ## Name(self, shadow_frame, result, arg_offset); \
+ }
+#include "unstarted_runtime_list.h"
+ UNSTARTED_RUNTIME_DIRECT_LIST(UNSTARTED_DIRECT)
+#undef UNSTARTED_RUNTIME_DIRECT_LIST
+#undef UNSTARTED_RUNTIME_JNI_LIST
+#undef UNSTARTED_DIRECT
+
+ // Methods that are native.
+#define UNSTARTED_JNI(Name, SigIgnored) \
+ static void UnstartedJNI ## Name(Thread* self, \
+ mirror::ArtMethod* method, \
+ mirror::Object* receiver, \
+ uint32_t* args, \
+ JValue* result) \
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ interpreter::UnstartedRuntime::UnstartedJNI ## Name(self, method, receiver, args, result); \
+ }
+#include "unstarted_runtime_list.h"
+ UNSTARTED_RUNTIME_JNI_LIST(UNSTARTED_JNI)
+#undef UNSTARTED_RUNTIME_DIRECT_LIST
+#undef UNSTARTED_RUNTIME_JNI_LIST
+#undef UNSTARTED_JNI
+};
+
+TEST_F(UnstartedRuntimeTest, MemoryPeekByte) {
+ Thread* self = Thread::Current();
+
+ ScopedObjectAccess soa(self);
+ constexpr const uint8_t base_array[] = "abcdefghijklmnop";
+ constexpr int32_t kBaseLen = sizeof(base_array) / sizeof(uint8_t);
+ const uint8_t* base_ptr = base_array;
+
+ JValue result;
+ ShadowFrame* tmp = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+
+ for (int32_t i = 0; i < kBaseLen; ++i) {
+ tmp->SetVRegLong(0, static_cast<int64_t>(reinterpret_cast<intptr_t>(base_ptr + i)));
+
+ UnstartedMemoryPeekByte(self, tmp, &result, 0);
+
+ EXPECT_EQ(result.GetB(), static_cast<int8_t>(base_array[i]));
+ }
+
+ ShadowFrame::DeleteDeoptimizedFrame(tmp);
+}
+
+TEST_F(UnstartedRuntimeTest, MemoryPeekShort) {
+ Thread* self = Thread::Current();
+
+ ScopedObjectAccess soa(self);
+ constexpr const uint8_t base_array[] = "abcdefghijklmnop";
+ constexpr int32_t kBaseLen = sizeof(base_array) / sizeof(uint8_t);
+ const uint8_t* base_ptr = base_array;
+
+ JValue result;
+ ShadowFrame* tmp = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+
+ int32_t adjusted_length = kBaseLen - sizeof(int16_t);
+ for (int32_t i = 0; i < adjusted_length; ++i) {
+ tmp->SetVRegLong(0, static_cast<int64_t>(reinterpret_cast<intptr_t>(base_ptr + i)));
+
+ UnstartedMemoryPeekShort(self, tmp, &result, 0);
+
+ typedef int16_t unaligned_short __attribute__ ((aligned (1)));
+ const unaligned_short* short_ptr = reinterpret_cast<const unaligned_short*>(base_ptr + i);
+ EXPECT_EQ(result.GetS(), *short_ptr);
+ }
+
+ ShadowFrame::DeleteDeoptimizedFrame(tmp);
+}
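// Aside: the aligned(1) typedef in the peek tests above is a GCC/Clang
// extension that makes unaligned loads well-defined. A standalone sketch,
// with illustrative names not taken from this change:

#include <cstdint>
#include <iostream>

typedef int32_t unaligned_int32 __attribute__((aligned(1)));

int main() {
  const uint8_t bytes[] = {0x01, 0x02, 0x03, 0x04, 0x05};
  // Reading a 32-bit value at an odd address is fine through the aligned(1)
  // typedef; the compiler emits alignment-safe loads on strict-alignment
  // targets, mirroring the Memory.peek* tests.
  const unaligned_int32* p = reinterpret_cast<const unaligned_int32*>(bytes + 1);
  std::cout << std::hex << *p << std::endl;
  return 0;
}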
+
+TEST_F(UnstartedRuntimeTest, MemoryPeekInt) {
+ Thread* self = Thread::Current();
+
+ ScopedObjectAccess soa(self);
+ constexpr const uint8_t base_array[] = "abcdefghijklmnop";
+ constexpr int32_t kBaseLen = sizeof(base_array) / sizeof(uint8_t);
+ const uint8_t* base_ptr = base_array;
+
+ JValue result;
+ ShadowFrame* tmp = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+
+ int32_t adjusted_length = kBaseLen - sizeof(int32_t);
+ for (int32_t i = 0; i < adjusted_length; ++i) {
+ tmp->SetVRegLong(0, static_cast<int64_t>(reinterpret_cast<intptr_t>(base_ptr + i)));
+
+ UnstartedMemoryPeekInt(self, tmp, &result, 0);
+
+ typedef int32_t unaligned_int __attribute__ ((aligned (1)));
+ const unaligned_int* int_ptr = reinterpret_cast<const unaligned_int*>(base_ptr + i);
+ EXPECT_EQ(result.GetI(), *int_ptr);
+ }
+
+ ShadowFrame::DeleteDeoptimizedFrame(tmp);
+}
+
+TEST_F(UnstartedRuntimeTest, MemoryPeekLong) {
+ Thread* self = Thread::Current();
+
+ ScopedObjectAccess soa(self);
+ constexpr const uint8_t base_array[] = "abcdefghijklmnop";
+ constexpr int32_t kBaseLen = sizeof(base_array) / sizeof(uint8_t);
+ const uint8_t* base_ptr = base_array;
+
+ JValue result;
+ ShadowFrame* tmp = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+
+ int32_t adjusted_length = kBaseLen - sizeof(int64_t);
+ for (int32_t i = 0; i < adjusted_length; ++i) {
+ tmp->SetVRegLong(0, static_cast<int64_t>(reinterpret_cast<intptr_t>(base_ptr + i)));
+
+ UnstartedMemoryPeekLong(self, tmp, &result, 0);
+
+ typedef int64_t unaligned_long __attribute__ ((aligned (1)));
+ const unaligned_long* long_ptr = reinterpret_cast<const unaligned_long*>(base_ptr + i);
+ EXPECT_EQ(result.GetJ(), *long_ptr);
+ }
+
+ ShadowFrame::DeleteDeoptimizedFrame(tmp);
+}
+
+TEST_F(UnstartedRuntimeTest, StringGetCharsNoCheck) {
+ Thread* self = Thread::Current();
+
+ ScopedObjectAccess soa(self);
+ StackHandleScope<2> hs(self);
+ // TODO: Actual UTF.
+ constexpr const char base_string[] = "abcdefghijklmnop";
+ Handle<mirror::String> h_test_string(hs.NewHandle(
+ mirror::String::AllocFromModifiedUtf8(self, base_string)));
+ constexpr int32_t kBaseLen = sizeof(base_string) / sizeof(char) - 1;
+ Handle<mirror::CharArray> h_char_array(hs.NewHandle(
+ mirror::CharArray::Alloc(self, kBaseLen)));
+ // A buffer so we can make sure we only modify the elements targeted.
+ uint16_t buf[kBaseLen];
+
+ JValue result;
+ ShadowFrame* tmp = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+
+ for (int32_t start_index = 0; start_index < kBaseLen; ++start_index) {
+ for (int32_t count = 0; count <= kBaseLen; ++count) {
+ for (int32_t trg_offset = 0; trg_offset < kBaseLen; ++trg_offset) {
+ // Only do it when in bounds.
+ if (start_index + count <= kBaseLen && trg_offset + count <= kBaseLen) {
+ tmp->SetVRegReference(0, h_test_string.Get());
+ tmp->SetVReg(1, start_index);
+ tmp->SetVReg(2, count);
+ tmp->SetVRegReference(3, h_char_array.Get());
+ tmp->SetVReg(4, trg_offset);
+
+ // Copy the char_array into buf.
+ memcpy(buf, h_char_array->GetData(), kBaseLen * sizeof(uint16_t));
+
+ UnstartedStringGetCharsNoCheck(self, tmp, &result, 0);
+
+ uint16_t* data = h_char_array->GetData();
+
+ bool success = true;
+
+ // First segment should be unchanged.
+ for (int32_t i = 0; i < trg_offset; ++i) {
+ success = success && (data[i] == buf[i]);
+ }
+ // Second segment should be a copy.
+ for (int32_t i = trg_offset; i < trg_offset + count; ++i) {
+ success = success && (data[i] == buf[i - trg_offset + start_index]);
+ }
+ // Third segment should be unchanged.
+ for (int32_t i = trg_offset + count; i < kBaseLen; ++i) {
+ success = success && (data[i] == buf[i]);
+ }
+
+ EXPECT_TRUE(success);
+ }
+ }
+ }
+ }
+
+ ShadowFrame::DeleteDeoptimizedFrame(tmp);
+}
+
+TEST_F(UnstartedRuntimeTest, StringCharAt) {
+ Thread* self = Thread::Current();
+
+ ScopedObjectAccess soa(self);
+ // TODO: Actual UTF.
+ constexpr const char* base_string = "abcdefghijklmnop";
+ int32_t base_len = static_cast<int32_t>(strlen(base_string));
+ mirror::String* test_string = mirror::String::AllocFromModifiedUtf8(self, base_string);
+
+ JValue result;
+ ShadowFrame* tmp = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+
+ for (int32_t i = 0; i < base_len; ++i) {
+ tmp->SetVRegReference(0, test_string);
+ tmp->SetVReg(1, i);
+
+ UnstartedStringCharAt(self, tmp, &result, 0);
+
+ EXPECT_EQ(result.GetI(), base_string[i]);
+ }
+
+ ShadowFrame::DeleteDeoptimizedFrame(tmp);
+}
+
+} // namespace interpreter
+} // namespace art
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
index 8dffee6..55441c9 100644
--- a/runtime/jdwp/jdwp.h
+++ b/runtime/jdwp/jdwp.h
@@ -127,7 +127,7 @@
* Among other things, this binds to a port to listen for a connection from
* the debugger.
*
- * Returns a newly-allocated JdwpState struct on success, or NULL on failure.
+ * Returns a newly-allocated JdwpState struct on success, or nullptr on failure.
*/
static JdwpState* Create(const JdwpOptions* options)
LOCKS_EXCLUDED(Locks::mutator_lock_);
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 1ec800f..ff75268 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -32,6 +32,8 @@
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
+#include "handle_scope-inl.h"
+
/*
General notes:
@@ -108,20 +110,32 @@
* Stuff to compare against when deciding if a mod matches. Only the
* values for mods valid for the event being evaluated will be filled in.
* The rest will be zeroed.
+ * Must be allocated on the stack only. This is enforced by deleting
+ * operator new.
*/
struct ModBasket {
- ModBasket() : pLoc(nullptr), thread(nullptr), locationClass(nullptr), exceptionClass(nullptr),
- caught(false), field(nullptr), thisPtr(nullptr) { }
+ explicit ModBasket(Thread* self)
+ : hs(self), pLoc(nullptr), thread(self),
+ locationClass(hs.NewHandle<mirror::Class>(nullptr)),
+ exceptionClass(hs.NewHandle<mirror::Class>(nullptr)),
+ caught(false),
+ field(nullptr),
+ thisPtr(hs.NewHandle<mirror::Object>(nullptr)) { }
- const EventLocation* pLoc; /* LocationOnly */
- std::string className; /* ClassMatch/ClassExclude */
- Thread* thread; /* ThreadOnly */
- mirror::Class* locationClass; /* ClassOnly */
- mirror::Class* exceptionClass; /* ExceptionOnly */
- bool caught; /* ExceptionOnly */
- ArtField* field; /* FieldOnly */
- mirror::Object* thisPtr; /* InstanceOnly */
+ StackHandleScope<3> hs;
+ const EventLocation* pLoc; /* LocationOnly */
+ std::string className; /* ClassMatch/ClassExclude */
+ Thread* const thread; /* ThreadOnly */
+ MutableHandle<mirror::Class> locationClass; /* ClassOnly */
+ MutableHandle<mirror::Class> exceptionClass; /* ExceptionOnly */
+ bool caught; /* ExceptionOnly */
+ ArtField* field; /* FieldOnly */
+ MutableHandle<mirror::Object> thisPtr; /* InstanceOnly */
/* nothing for StepOnly -- handled differently */
+
+ private:
+ DISALLOW_ALLOCATION(); // forbids allocation on the heap.
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ModBasket);
};
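// Aside: the stack-only idiom ModBasket now uses boils down to deleting
// operator new, which is roughly what DISALLOW_ALLOCATION expands to.
// A minimal sketch, with illustrative names:

#include <cstddef>

struct StackOnly {
  int value = 0;

 private:
  // Heap allocation is rejected at compile time; automatic (stack) and
  // static storage still work.
  void* operator new(std::size_t) = delete;
  void* operator new[](std::size_t) = delete;
};

int main() {
  StackOnly ok;  // fine: lives on the stack
  // StackOnly* bad = new StackOnly();  // error: use of deleted operator new
  return ok.value;
}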
static bool NeedsFullDeoptimization(JdwpEventKind eventKind) {
@@ -141,6 +155,8 @@
}
}
+// Returns the instrumentation event the DebugInstrumentationListener must
+// listen to in order to properly report the given JDWP event to the debugger.
static uint32_t GetInstrumentationEventFor(JdwpEventKind eventKind) {
switch (eventKind) {
case EK_BREAKPOINT:
@@ -455,7 +471,7 @@
}
break;
case MK_CLASS_ONLY:
- if (!Dbg::MatchType(basket.locationClass, pMod->classOnly.refTypeId)) {
+ if (!Dbg::MatchType(basket.locationClass.Get(), pMod->classOnly.refTypeId)) {
return false;
}
break;
@@ -476,7 +492,7 @@
break;
case MK_EXCEPTION_ONLY:
if (pMod->exceptionOnly.refTypeId != 0 &&
- !Dbg::MatchType(basket.exceptionClass, pMod->exceptionOnly.refTypeId)) {
+ !Dbg::MatchType(basket.exceptionClass.Get(), pMod->exceptionOnly.refTypeId)) {
return false;
}
if ((basket.caught && !pMod->exceptionOnly.caught) ||
@@ -495,7 +511,7 @@
}
break;
case MK_INSTANCE_ONLY:
- if (!Dbg::MatchInstance(pMod->instanceOnly.objectId, basket.thisPtr)) {
+ if (!Dbg::MatchInstance(pMod->instanceOnly.objectId, basket.thisPtr.Get())) {
return false;
}
break;
@@ -823,12 +839,11 @@
DCHECK(pLoc->method != nullptr);
DCHECK_EQ(pLoc->method->IsStatic(), thisPtr == nullptr);
- ModBasket basket;
+ ModBasket basket(Thread::Current());
basket.pLoc = pLoc;
- basket.locationClass = pLoc->method->GetDeclaringClass();
- basket.thisPtr = thisPtr;
- basket.thread = Thread::Current();
- basket.className = Dbg::GetClassName(basket.locationClass);
+ basket.locationClass.Assign(pLoc->method->GetDeclaringClass());
+ basket.thisPtr.Assign(thisPtr);
+ basket.className = Dbg::GetClassName(basket.locationClass.Get());
/*
* On rare occasions we may need to execute interpreted code in the VM
@@ -922,16 +937,15 @@
DCHECK_EQ(fieldValue != nullptr, is_modification);
DCHECK_EQ(field->IsStatic(), this_object == nullptr);
- ModBasket basket;
+ ModBasket basket(Thread::Current());
basket.pLoc = pLoc;
- basket.locationClass = pLoc->method->GetDeclaringClass();
- basket.thisPtr = this_object;
- basket.thread = Thread::Current();
- basket.className = Dbg::GetClassName(basket.locationClass);
+ basket.locationClass.Assign(pLoc->method->GetDeclaringClass());
+ basket.thisPtr.Assign(this_object);
+ basket.className = Dbg::GetClassName(basket.locationClass.Get());
basket.field = field;
if (InvokeInProgress()) {
- VLOG(jdwp) << "Not posting field event during invoke";
+ VLOG(jdwp) << "Not posting field event during invoke (" << basket.className << ")";
return;
}
@@ -973,7 +987,7 @@
uint8_t tag;
{
ScopedObjectAccessUnchecked soa(Thread::Current());
- tag = Dbg::TagFromObject(soa, basket.thisPtr);
+ tag = Dbg::TagFromObject(soa, basket.thisPtr.Get());
}
for (const JdwpEvent* pEvent : match_list) {
@@ -1026,8 +1040,7 @@
return;
}
- ModBasket basket;
- basket.thread = thread;
+ ModBasket basket(thread);
std::vector<JdwpEvent*> match_list;
const JdwpEventKind match_kind = (start) ? EK_THREAD_START : EK_THREAD_DEATH;
@@ -1104,18 +1117,15 @@
VLOG(jdwp) << "Unexpected: exception event with empty throw location";
}
- ModBasket basket;
+ ModBasket basket(Thread::Current());
basket.pLoc = pThrowLoc;
if (pThrowLoc->method != nullptr) {
- basket.locationClass = pThrowLoc->method->GetDeclaringClass();
- } else {
- basket.locationClass = nullptr;
+ basket.locationClass.Assign(pThrowLoc->method->GetDeclaringClass());
}
- basket.thread = Thread::Current();
- basket.className = Dbg::GetClassName(basket.locationClass);
- basket.exceptionClass = exception_object->GetClass();
+ basket.className = Dbg::GetClassName(basket.locationClass.Get());
+ basket.exceptionClass.Assign(exception_object->GetClass());
basket.caught = (pCatchLoc->method != 0);
- basket.thisPtr = thisPtr;
+ basket.thisPtr.Assign(thisPtr);
/* don't try to post an exception caused by the debugger */
if (InvokeInProgress()) {
@@ -1186,10 +1196,9 @@
void JdwpState::PostClassPrepare(mirror::Class* klass) {
DCHECK(klass != nullptr);
- ModBasket basket;
- basket.locationClass = klass;
- basket.thread = Thread::Current();
- basket.className = Dbg::GetClassName(basket.locationClass);
+ ModBasket basket(Thread::Current());
+ basket.locationClass.Assign(klass);
+ basket.className = Dbg::GetClassName(basket.locationClass.Get());
/* suppress class prep caused by debugger */
if (InvokeInProgress()) {
@@ -1212,7 +1221,7 @@
// debuggers seem to like that. There might be some advantage to honesty,
// since the class may not yet be verified.
int status = JDWP::CS_VERIFIED | JDWP::CS_PREPARED;
- JDWP::JdwpTypeTag tag = Dbg::GetTypeTag(basket.locationClass);
+ JDWP::JdwpTypeTag tag = Dbg::GetTypeTag(basket.locationClass.Get());
std::string temp;
std::string signature(basket.locationClass->GetDescriptor(&temp));
diff --git a/runtime/jdwp/jdwp_expand_buf.cc b/runtime/jdwp/jdwp_expand_buf.cc
index cc85cdd..e492d7e 100644
--- a/runtime/jdwp/jdwp_expand_buf.cc
+++ b/runtime/jdwp/jdwp_expand_buf.cc
@@ -156,7 +156,7 @@
}
/*
- * Add a UTF8 string as a 4-byte length followed by a non-NULL-terminated
+ * Add a UTF8 string as a 4-byte length followed by a non-NUL-terminated
* string.
*
* Because these strings are coming out of the VM, it's safe to assume that
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 2457f14..8e9ab32 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -133,7 +133,7 @@
if (is_constructor) {
// If we invoked a constructor (which actually returns void), return the receiver,
- // unless we threw, in which case we return NULL.
+ // unless we threw, in which case we return null.
resultTag = JT_OBJECT;
resultValue = (exceptObjId == 0) ? object_id : 0;
}
diff --git a/runtime/jdwp/object_registry.cc b/runtime/jdwp/object_registry.cc
index a42a58f..2b28f7d 100644
--- a/runtime/jdwp/object_registry.cc
+++ b/runtime/jdwp/object_registry.cc
@@ -36,17 +36,45 @@
}
JDWP::RefTypeId ObjectRegistry::AddRefType(mirror::Class* c) {
- return InternalAdd(c);
+ return Add(c);
+}
+
+JDWP::RefTypeId ObjectRegistry::AddRefType(Handle<mirror::Class> c_h) {
+ return Add(c_h);
}
JDWP::ObjectId ObjectRegistry::Add(mirror::Object* o) {
- return InternalAdd(o);
-}
-
-JDWP::ObjectId ObjectRegistry::InternalAdd(mirror::Object* o) {
if (o == nullptr) {
return 0;
}
+ Thread* const self = Thread::Current();
+ StackHandleScope<1> hs(self);
+ return InternalAdd(hs.NewHandle(o));
+}
+
+// Template instantiations must be declared below.
+template<class T>
+JDWP::ObjectId ObjectRegistry::Add(Handle<T> obj_h) {
+ if (obj_h.Get() == nullptr) {
+ return 0;
+ }
+ return InternalAdd(obj_h);
+}
+
+// Explicit template instantiation.
+template
+SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::thread_suspend_count_lock_)
+JDWP::ObjectId ObjectRegistry::Add(Handle<mirror::Object> obj_h);
+
+template
+SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::thread_suspend_count_lock_)
+JDWP::ObjectId ObjectRegistry::Add(Handle<mirror::Throwable> obj_h);
+
+template<class T>
+JDWP::ObjectId ObjectRegistry::InternalAdd(Handle<T> obj_h) {
+ CHECK(obj_h.Get() != nullptr);
Thread* const self = Thread::Current();
self->AssertNoPendingException();
@@ -55,9 +83,6 @@
Locks::thread_list_lock_->AssertNotHeld(self);
Locks::thread_suspend_count_lock_->AssertNotHeld(self);
- StackHandleScope<1> hs(self);
- Handle<mirror::Object> obj_h(hs.NewHandle(o));
-
// Call IdentityHashCode here to avoid a lock level violation between lock_ and monitor_lock.
int32_t identity_hash_code = obj_h->IdentityHashCode();
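// Aside: the pattern above, a template defined in the .cc file plus explicit
// instantiations, keeps the body out of the header while still giving other
// translation units concrete symbols to link against. A minimal sketch
// (Identify is illustrative, not from this change):

#include <cstdint>

template <class T>
uint64_t Identify(T* obj) {
  return reinterpret_cast<uint64_t>(obj);
}

// The compiler emits these two definitions into this object file; callers
// elsewhere only need the declaration.
template uint64_t Identify<int>(int*);
template uint64_t Identify<double>(double*);

int main() {
  int x = 0;
  return Identify(&x) != 0 ? 0 : 1;
}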
diff --git a/runtime/jdwp/object_registry.h b/runtime/jdwp/object_registry.h
index 27a4e55..4c149cd 100644
--- a/runtime/jdwp/object_registry.h
+++ b/runtime/jdwp/object_registry.h
@@ -23,6 +23,7 @@
#include <map>
#include "base/casts.h"
+#include "handle.h"
#include "jdwp/jdwp.h"
#include "safe_map.h"
@@ -65,11 +66,23 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
LOCKS_EXCLUDED(Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_);
+
JDWP::RefTypeId AddRefType(mirror::Class* c)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
LOCKS_EXCLUDED(Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_);
+ template<class T>
+ JDWP::ObjectId Add(Handle<T> obj_h)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ LOCKS_EXCLUDED(Locks::thread_list_lock_,
+ Locks::thread_suspend_count_lock_);
+
+ JDWP::RefTypeId AddRefType(Handle<mirror::Class> c_h)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ LOCKS_EXCLUDED(Locks::thread_list_lock_,
+ Locks::thread_suspend_count_lock_);
+
template<typename T> T Get(JDWP::ObjectId id, JDWP::JdwpError* error)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (id == 0) {
@@ -98,7 +111,8 @@
jobject GetJObject(JDWP::ObjectId id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
- JDWP::ObjectId InternalAdd(mirror::Object* o)
+ template<class T>
+ JDWP::ObjectId InternalAdd(Handle<T> obj_h)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
LOCKS_EXCLUDED(lock_,
Locks::thread_list_lock_,
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 3e80aef..c698cfc 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -19,8 +19,6 @@
#include <unordered_map>
-#include "instrumentation.h"
-
#include "atomic.h"
#include "base/macros.h"
#include "base/mutex.h"
@@ -86,6 +84,8 @@
std::unique_ptr<jit::JitInstrumentationCache> instrumentation_cache_;
std::unique_ptr<jit::JitCodeCache> code_cache_;
CompilerCallbacks* compiler_callbacks_; // Owned by the jit compiler.
+
+ DISALLOW_COPY_AND_ASSIGN(Jit);
};
class JitOptions {
@@ -114,8 +114,9 @@
bool dump_info_on_shutdown_;
JitOptions() : use_jit_(false), code_cache_capacity_(0), compile_threshold_(0),
- dump_info_on_shutdown_(false) {
- }
+ dump_info_on_shutdown_(false) { }
+
+ DISALLOW_COPY_AND_ASSIGN(JitOptions);
};
} // namespace jit
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 8a20e39..8b76647 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -86,10 +86,10 @@
// Return true if the code cache contains a code ptr.
bool ContainsCodePtr(const void* ptr) const;
- // Reserve a region of code of size at least "size". Returns nullptr if there is no more room.
+ // Reserve a region of code of size at least "size". Returns null if there is no more room.
uint8_t* ReserveCode(Thread* self, size_t size) LOCKS_EXCLUDED(lock_);
- // Add a data array of size (end - begin) with the associated contents, returns nullptr if there
+ // Add a data array of size (end - begin) with the associated contents, returns null if there
// is no more room.
uint8_t* AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end)
LOCKS_EXCLUDED(lock_);
@@ -130,7 +130,7 @@
// required since we have to implement ClassLinker::GetQuickOatCodeFor for walking stacks.
SafeMap<mirror::ArtMethod*, const void*> method_code_map_ GUARDED_BY(lock_);
- DISALLOW_COPY_AND_ASSIGN(JitCodeCache);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
};
diff --git a/runtime/jit/jit_instrumentation.cc b/runtime/jit/jit_instrumentation.cc
index 160e678..3232674 100644
--- a/runtime/jit/jit_instrumentation.cc
+++ b/runtime/jit/jit_instrumentation.cc
@@ -47,6 +47,8 @@
private:
mirror::ArtMethod* const method_;
JitInstrumentationCache* const cache_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JitCompileTask);
};
JitInstrumentationCache::JitInstrumentationCache(size_t hot_method_threshold)
@@ -75,7 +77,7 @@
ScopedObjectAccessUnchecked soa(self);
// Since we don't have on-stack replacement, some methods can remain in the interpreter longer
// than we want resulting in samples even after the method is compiled.
- if (method->IsClassInitializer() ||
+ if (method->IsClassInitializer() || method->IsNative() ||
Runtime::Current()->GetJit()->GetCodeCache()->ContainsMethod(method)) {
return;
}
diff --git a/runtime/jit/jit_instrumentation.h b/runtime/jit/jit_instrumentation.h
index 9d5d74f..72acaef 100644
--- a/runtime/jit/jit_instrumentation.h
+++ b/runtime/jit/jit_instrumentation.h
@@ -58,6 +58,8 @@
std::unordered_map<jmethodID, size_t> samples_;
size_t hot_method_threshold_;
std::unique_ptr<ThreadPool> thread_pool_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JitInstrumentationCache);
};
class JitInstrumentationListener : public instrumentation::InstrumentationListener {
@@ -97,6 +99,8 @@
private:
JitInstrumentationCache* const instrumentation_cache_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JitInstrumentationListener);
};
} // namespace jit
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 8a5461b..f435467 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -42,6 +42,7 @@
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/field-inl.h"
+#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
@@ -310,6 +311,30 @@
return; \
}
+template <bool kNative>
+static mirror::ArtMethod* FindMethod(mirror::Class* c,
+ const StringPiece& name,
+ const StringPiece& sig)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ for (size_t i = 0; i < c->NumDirectMethods(); ++i) {
+ mirror::ArtMethod* method = c->GetDirectMethod(i);
+ if (kNative == method->IsNative() &&
+ name == method->GetName() && method->GetSignature() == sig) {
+ return method;
+ }
+ }
+
+ for (size_t i = 0; i < c->NumVirtualMethods(); ++i) {
+ mirror::ArtMethod* method = c->GetVirtualMethod(i);
+ if (kNative == method->IsNative() &&
+ name == method->GetName() && method->GetSignature() == sig) {
+ return method;
+ }
+ }
+
+ return nullptr;
+}
+
class JNI {
public:
static jint GetVersion(JNIEnv*) {
@@ -349,7 +374,7 @@
ScopedObjectAccess soa(env);
mirror::Object* obj_field = soa.Decode<mirror::Object*>(jlr_field);
if (obj_field->GetClass() != mirror::Field::StaticClass()) {
- // Not even a java.lang.reflect.Field, return nullptr.
+ // Not even a java.lang.reflect.Field, return null. TODO: is this check necessary?
return nullptr;
}
auto* field = static_cast<mirror::Field*>(obj_field);
@@ -361,19 +386,13 @@
ScopedObjectAccess soa(env);
mirror::ArtMethod* m = soa.DecodeMethod(mid);
CHECK(!kMovingMethods);
- ScopedLocalRef<jobject> art_method(env, soa.AddLocalReference<jobject>(m));
- jobject reflect_method;
+ mirror::AbstractMethod* method;
if (m->IsConstructor()) {
- reflect_method = env->AllocObject(WellKnownClasses::java_lang_reflect_Constructor);
+ method = mirror::Constructor::CreateFromArtMethod(soa.Self(), m);
} else {
- reflect_method = env->AllocObject(WellKnownClasses::java_lang_reflect_Method);
+ method = mirror::Method::CreateFromArtMethod(soa.Self(), m);
}
- if (env->ExceptionCheck()) {
- return nullptr;
- }
- SetObjectField(env, reflect_method,
- WellKnownClasses::java_lang_reflect_AbstractMethod_artMethod, art_method.get());
- return reflect_method;
+ return soa.AddLocalReference<jobject>(method);
}
static jobject ToReflectedField(JNIEnv* env, jclass, jfieldID fid, jboolean) {
@@ -578,6 +597,12 @@
if (c == nullptr) {
return nullptr;
}
+ if (c->IsStringClass()) {
+ gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
+ mirror::SetStringCountVisitor visitor(0);
+ return soa.AddLocalReference<jobject>(mirror::String::Alloc<true>(soa.Self(), 0,
+ allocator_type, visitor));
+ }
return soa.AddLocalReference<jobject>(c->AllocObject(soa.Self()));
}
@@ -599,6 +624,11 @@
if (c == nullptr) {
return nullptr;
}
+ if (c->IsStringClass()) {
+ // Replace calls to String.<init> with equivalent StringFactory call.
+ jmethodID sf_mid = WellKnownClasses::StringInitToStringFactoryMethodID(mid);
+ return CallStaticObjectMethodV(env, WellKnownClasses::java_lang_StringFactory, sf_mid, args);
+ }
mirror::Object* result = c->AllocObject(soa.Self());
if (result == nullptr) {
return nullptr;
@@ -619,6 +649,11 @@
if (c == nullptr) {
return nullptr;
}
+ if (c->IsStringClass()) {
+ // Replace calls to String.<init> with equivalent StringFactory call.
+ jmethodID sf_mid = WellKnownClasses::StringInitToStringFactoryMethodID(mid);
+ return CallStaticObjectMethodA(env, WellKnownClasses::java_lang_StringFactory, sf_mid, args);
+ }
mirror::Object* result = c->AllocObject(soa.Self());
if (result == nullptr) {
return nullptr;
@@ -671,8 +706,7 @@
CHECK_NON_NULL_ARGUMENT(obj);
CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
- JValue result(InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
- args));
+ JValue result(InvokeVirtualOrInterfaceWithJValues(soa, obj, mid, args));
return soa.AddLocalReference<jobject>(result.GetL());
}
@@ -698,8 +732,7 @@
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
- return InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
- args).GetZ();
+ return InvokeVirtualOrInterfaceWithJValues(soa, obj, mid, args).GetZ();
}
static jbyte CallByteMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
@@ -724,8 +757,7 @@
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
- return InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
- args).GetB();
+ return InvokeVirtualOrInterfaceWithJValues(soa, obj, mid, args).GetB();
}
static jchar CallCharMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
@@ -750,8 +782,7 @@
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
- return InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
- args).GetC();
+ return InvokeVirtualOrInterfaceWithJValues(soa, obj, mid, args).GetC();
}
static jdouble CallDoubleMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
@@ -776,8 +807,7 @@
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
- return InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
- args).GetD();
+ return InvokeVirtualOrInterfaceWithJValues(soa, obj, mid, args).GetD();
}
static jfloat CallFloatMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
@@ -802,8 +832,7 @@
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
- return InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
- args).GetF();
+ return InvokeVirtualOrInterfaceWithJValues(soa, obj, mid, args).GetF();
}
static jint CallIntMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
@@ -828,8 +857,7 @@
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
- return InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
- args).GetI();
+ return InvokeVirtualOrInterfaceWithJValues(soa, obj, mid, args).GetI();
}
static jlong CallLongMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
@@ -854,8 +882,7 @@
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
- return InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
- args).GetJ();
+ return InvokeVirtualOrInterfaceWithJValues(soa, obj, mid, args).GetJ();
}
static jshort CallShortMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
@@ -880,8 +907,7 @@
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
- return InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
- args).GetS();
+ return InvokeVirtualOrInterfaceWithJValues(soa, obj, mid, args).GetS();
}
static void CallVoidMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
@@ -905,7 +931,7 @@
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(mid);
ScopedObjectAccess soa(env);
- InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args);
+ InvokeVirtualOrInterfaceWithJValues(soa, obj, mid, args);
}
static jobject CallNonvirtualObjectMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
@@ -934,7 +960,7 @@
CHECK_NON_NULL_ARGUMENT(obj);
CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
- JValue result(InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args));
+ JValue result(InvokeWithJValues(soa, obj, mid, args));
return soa.AddLocalReference<jobject>(result.GetL());
}
@@ -963,7 +989,7 @@
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
- return InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args).GetZ();
+ return InvokeWithJValues(soa, obj, mid, args).GetZ();
}
static jbyte CallNonvirtualByteMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
@@ -990,7 +1016,7 @@
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
- return InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args).GetB();
+ return InvokeWithJValues(soa, obj, mid, args).GetB();
}
static jchar CallNonvirtualCharMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
@@ -1017,7 +1043,7 @@
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
- return InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args).GetC();
+ return InvokeWithJValues(soa, obj, mid, args).GetC();
}
static jshort CallNonvirtualShortMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
@@ -1044,7 +1070,7 @@
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
- return InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args).GetS();
+ return InvokeWithJValues(soa, obj, mid, args).GetS();
}
static jint CallNonvirtualIntMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
@@ -1071,7 +1097,7 @@
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
- return InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args).GetI();
+ return InvokeWithJValues(soa, obj, mid, args).GetI();
}
static jlong CallNonvirtualLongMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
@@ -1098,7 +1124,7 @@
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
- return InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args).GetJ();
+ return InvokeWithJValues(soa, obj, mid, args).GetJ();
}
static jfloat CallNonvirtualFloatMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
@@ -1125,7 +1151,7 @@
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
- return InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args).GetF();
+ return InvokeWithJValues(soa, obj, mid, args).GetF();
}
static jdouble CallNonvirtualDoubleMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
@@ -1152,7 +1178,7 @@
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
- return InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args).GetD();
+ return InvokeWithJValues(soa, obj, mid, args).GetD();
}
static void CallNonvirtualVoidMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
@@ -1178,7 +1204,7 @@
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(mid);
ScopedObjectAccess soa(env);
- InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args);
+ InvokeWithJValues(soa, obj, mid, args);
}
static jfieldID GetFieldID(JNIEnv* env, jclass java_class, const char* name, const char* sig) {
@@ -1654,7 +1680,7 @@
ThrowSIOOBE(soa, start, length, s->GetLength());
} else {
CHECK_NON_NULL_MEMCPY_ARGUMENT(length, buf);
- const jchar* chars = s->GetCharArray()->GetData() + s->GetOffset();
+ const jchar* chars = s->GetValue();
memcpy(buf, chars + start, length * sizeof(jchar));
}
}
@@ -1668,7 +1694,7 @@
ThrowSIOOBE(soa, start, length, s->GetLength());
} else {
CHECK_NON_NULL_MEMCPY_ARGUMENT(length, buf);
- const jchar* chars = s->GetCharArray()->GetData() + s->GetOffset();
+ const jchar* chars = s->GetValue();
ConvertUtf16ToModifiedUtf8(buf, chars + start, length);
}
}
@@ -1677,33 +1703,26 @@
CHECK_NON_NULL_ARGUMENT(java_string);
ScopedObjectAccess soa(env);
mirror::String* s = soa.Decode<mirror::String*>(java_string);
- mirror::CharArray* chars = s->GetCharArray();
gc::Heap* heap = Runtime::Current()->GetHeap();
- if (heap->IsMovableObject(chars)) {
+ if (heap->IsMovableObject(s)) {
+ jchar* chars = new jchar[s->GetLength()];
+ memcpy(chars, s->GetValue(), sizeof(jchar) * s->GetLength());
if (is_copy != nullptr) {
*is_copy = JNI_TRUE;
}
- int32_t char_count = s->GetLength();
- int32_t offset = s->GetOffset();
- jchar* bytes = new jchar[char_count];
- for (int32_t i = 0; i < char_count; i++) {
- bytes[i] = chars->Get(i + offset);
- }
- return bytes;
- } else {
- if (is_copy != nullptr) {
- *is_copy = JNI_FALSE;
- }
- return static_cast<jchar*>(chars->GetData() + s->GetOffset());
+ return chars;
}
+ if (is_copy != nullptr) {
+ *is_copy = JNI_FALSE;
+ }
+ return static_cast<jchar*>(s->GetValue());
}
static void ReleaseStringChars(JNIEnv* env, jstring java_string, const jchar* chars) {
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(java_string);
ScopedObjectAccess soa(env);
mirror::String* s = soa.Decode<mirror::String*>(java_string);
- mirror::CharArray* s_chars = s->GetCharArray();
- if (chars != (s_chars->GetData() + s->GetOffset())) {
+ if (chars != s->GetValue()) {
delete[] chars;
}
}
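// Aside: a sketch of the Get/ReleaseStringChars contract the hunks above
// implement (FakeString and friends are illustrative stand-ins). If the heap
// may move the string, callers get a copy and is_copy is set; Release only
// frees pointers that are not the live backing store.

#include <cstring>

struct FakeString {
  const unsigned short* data;
  int length;
  bool movable;
};

const unsigned short* GetChars(const FakeString& s, bool* is_copy) {
  if (s.movable) {
    // A moving GC could relocate the string, so hand out a stable copy.
    unsigned short* copy = new unsigned short[s.length];
    std::memcpy(copy, s.data, sizeof(unsigned short) * s.length);
    if (is_copy != nullptr) {
      *is_copy = true;
    }
    return copy;
  }
  if (is_copy != nullptr) {
    *is_copy = false;
  }
  return s.data;  // direct pointer into the non-moving string
}

void ReleaseChars(const FakeString& s, const unsigned short* chars) {
  if (chars != s.data) {  // only copies get freed, mirroring the patch
    delete[] chars;
  }
}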
@@ -1712,18 +1731,16 @@
CHECK_NON_NULL_ARGUMENT(java_string);
ScopedObjectAccess soa(env);
mirror::String* s = soa.Decode<mirror::String*>(java_string);
- mirror::CharArray* chars = s->GetCharArray();
- int32_t offset = s->GetOffset();
gc::Heap* heap = Runtime::Current()->GetHeap();
- if (heap->IsMovableObject(chars)) {
+ if (heap->IsMovableObject(s)) {
StackHandleScope<1> hs(soa.Self());
- HandleWrapper<mirror::CharArray> h(hs.NewHandleWrapper(&chars));
+ HandleWrapper<mirror::String> h(hs.NewHandleWrapper(&s));
heap->IncrementDisableMovingGC(soa.Self());
}
if (is_copy != nullptr) {
*is_copy = JNI_FALSE;
}
- return static_cast<jchar*>(chars->GetData() + offset);
+ return static_cast<jchar*>(s->GetValue());
}
static void ReleaseStringCritical(JNIEnv* env, jstring java_string, const jchar* chars) {
@@ -1732,8 +1749,7 @@
ScopedObjectAccess soa(env);
gc::Heap* heap = Runtime::Current()->GetHeap();
mirror::String* s = soa.Decode<mirror::String*>(java_string);
- mirror::CharArray* s_chars = s->GetCharArray();
- if (heap->IsMovableObject(s_chars)) {
+ if (heap->IsMovableObject(s)) {
heap->DecrementDisableMovingGC(soa.Self());
}
}
@@ -1750,7 +1766,7 @@
size_t byte_count = s->GetUtfLength();
char* bytes = new char[byte_count + 1];
CHECK(bytes != nullptr); // bionic aborts anyway.
- const uint16_t* chars = s->GetCharArray()->GetData() + s->GetOffset();
+ const uint16_t* chars = s->GetValue();
ConvertUtf16ToModifiedUtf8(bytes, chars, s->GetLength());
bytes[byte_count] = '\0';
return bytes;
@@ -2098,20 +2114,74 @@
return JNI_ERR;
}
bool is_fast = false;
+ // Notes about fast JNI calls:
+ //
+ // On a normal JNI call, the calling thread usually transitions
+ // from the kRunnable state to the kNative state. But if the
+ // called native function needs to access any Java object, it
+ // will have to transition back to the kRunnable state.
+ //
+ // There is a cost to this double transition. For a JNI call
+ // that should be quick, this cost may dominate the call cost.
+ //
+ // On a fast JNI call, the calling thread avoids this double
+ // transition by not transitioning from kRunnable to kNative and
+ // stays in the kRunnable state.
+ //
+ // There are risks to using a fast JNI call because it can delay
+ // a response to a thread suspension request, which is typically
+ // used for GC root scanning and the like. If a fast JNI call
+ // takes a long time, it could cause longer thread suspension
+ // latency and GC pauses.
+ //
+ // Thus, fast JNI should be used with care. It should be used
+ // for a JNI call that takes a short amount of time (e.g., no
+ // long-running loop) and does not block (e.g., no locks, I/O,
+ // etc.).
+ //
+ // A '!' prefix in the signature in the JNINativeMethod
+ // indicates that it's a fast JNI call and the runtime omits the
+ // thread state transition from kRunnable to kNative at the
+ // entry.
if (*sig == '!') {
is_fast = true;
++sig;
}
- mirror::ArtMethod* m = c->FindDirectMethod(name, sig);
- if (m == nullptr) {
- m = c->FindVirtualMethod(name, sig);
+ // Note: the right order is to try to find the method locally
+ // first, either as a direct or a virtual method. Then move to
+ // the parent.
+ mirror::ArtMethod* m = nullptr;
+ bool warn_on_going_to_parent = down_cast<JNIEnvExt*>(env)->vm->IsCheckJniEnabled();
+ for (mirror::Class* current_class = c;
+ current_class != nullptr;
+ current_class = current_class->GetSuperClass()) {
+ // Search first only comparing methods which are native.
+ m = FindMethod<true>(current_class, name, sig);
+ if (m != nullptr) {
+ break;
+ }
+
+ // Search again comparing to all methods, to find non-native methods that match.
+ m = FindMethod<false>(current_class, name, sig);
+ if (m != nullptr) {
+ break;
+ }
+
+ if (warn_on_going_to_parent) {
+ LOG(WARNING) << "CheckJNI: method to register \"" << name << "\" not in the given class. "
+ << "This is slow, consider changing your RegisterNatives calls.";
+ warn_on_going_to_parent = false;
+ }
}
+
if (m == nullptr) {
- c->DumpClass(LOG(ERROR), mirror::Class::kDumpClassFullDetail);
- LOG(return_errors ? ERROR : FATAL) << "Failed to register native method "
+ LOG(return_errors ? ERROR : INTERNAL_FATAL) << "Failed to register native method "
<< PrettyDescriptor(c) << "." << name << sig << " in "
<< c->GetDexCache()->GetLocation()->ToModifiedUtf8();
+ // Safe to pass in LOG(FATAL) since the log object aborts in its destructor, and only
+ // goes out of scope after DumpClass is done executing.
+ c->DumpClass(LOG(return_errors ? ERROR : FATAL), mirror::Class::kDumpClassFullDetail);
ThrowNoSuchMethodError(soa, c, name, sig, "static or non-static");
return JNI_ERR;
} else if (!m->IsNative()) {
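// Aside: what the '!' fast-JNI prefix described above looks like at a
// RegisterNatives call site. A hedged sketch; class and method names are
// illustrative, not from this change:

#include <jni.h>

// Short and non-blocking: a reasonable fast-JNI candidate.
static jint AddInts(JNIEnv*, jclass, jint a, jint b) {
  return a + b;
}

static const JNINativeMethod gMethods[] = {
  // The leading '!' asks the runtime to skip the kRunnable -> kNative
  // transition on entry; use only for quick calls that never block.
  { "add", "!(II)I", reinterpret_cast<void*>(AddInts) },
};

// Registration would then happen from JNI_OnLoad, e.g.:
//   env->RegisterNatives(clazz, gMethods, sizeof(gMethods) / sizeof(gMethods[0]));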
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 5516eab..581ef0e 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -625,8 +625,6 @@
// ...whose fields haven't been initialized because
// we didn't call a constructor.
ASSERT_EQ(0, env_->GetIntField(o, env_->GetFieldID(c, "count", "I")));
- ASSERT_EQ(0, env_->GetIntField(o, env_->GetFieldID(c, "offset", "I")));
- ASSERT_TRUE(env_->GetObjectField(o, env_->GetFieldID(c, "value", "[C")) == nullptr);
}
TEST_F(JniInternalTest, GetVersion) {
@@ -861,6 +859,7 @@
ASSERT_NE(s, nullptr);
env_->CallVoidMethod(s, mid2);
ASSERT_EQ(JNI_FALSE, env_->ExceptionCheck());
+ env_->ExceptionClear();
mid = env_->GetMethodID(c, "length", "()I");
ASSERT_NE(mid, nullptr);
@@ -882,7 +881,7 @@
}
static void BogusMethod() {
- // You can't pass nullptr function pointers to RegisterNatives.
+ // You can't pass null function pointers to RegisterNatives.
}
TEST_F(JniInternalTest, RegisterAndUnregisterNatives) {
@@ -1025,13 +1024,13 @@
env_->set_region_fn(a, size - 1, size, nullptr); \
ExpectException(aioobe_); \
\
- /* It's okay for the buffer to be nullptr as long as the length is 0. */ \
+ /* It's okay for the buffer to be null as long as the length is 0. */ \
env_->get_region_fn(a, 2, 0, nullptr); \
/* Even if the offset is invalid... */ \
env_->get_region_fn(a, 123, 0, nullptr); \
ExpectException(aioobe_); \
\
- /* It's okay for the buffer to be nullptr as long as the length is 0. */ \
+ /* It's okay for the buffer to be null as long as the length is 0. */ \
env_->set_region_fn(a, 2, 0, nullptr); \
/* Even if the offset is invalid... */ \
env_->set_region_fn(a, 123, 0, nullptr); \
@@ -1200,7 +1199,7 @@
}
TEST_F(JniInternalTest, GetArrayLength) {
- // Already tested in NewObjectArray/NewPrimitiveArray except for NULL.
+ // Already tested in NewObjectArray/NewPrimitiveArray except for null.
CheckJniAbortCatcher jni_abort_catcher;
bool old_check_jni = vm_->SetCheckJniEnabled(false);
EXPECT_EQ(0, env_->GetArrayLength(nullptr));
@@ -1463,7 +1462,7 @@
EXPECT_EQ('l', chars[2]);
EXPECT_EQ('x', chars[3]);
- // It's okay for the buffer to be nullptr as long as the length is 0.
+ // It's okay for the buffer to be null as long as the length is 0.
env_->GetStringRegion(s, 2, 0, nullptr);
// Even if the offset is invalid...
env_->GetStringRegion(s, 123, 0, nullptr);
@@ -1485,7 +1484,7 @@
EXPECT_EQ('l', bytes[2]);
EXPECT_EQ('x', bytes[3]);
- // It's okay for the buffer to be nullptr as long as the length is 0.
+ // It's okay for the buffer to be null as long as the length is 0.
env_->GetStringUTFRegion(s, 2, 0, nullptr);
// Even if the offset is invalid...
env_->GetStringUTFRegion(s, 123, 0, nullptr);
@@ -1493,7 +1492,7 @@
}
TEST_F(JniInternalTest, GetStringUTFChars_ReleaseStringUTFChars) {
- // Passing in a nullptr jstring is ignored normally, but caught by -Xcheck:jni.
+ // Passing in a null jstring is ignored normally, but caught by -Xcheck:jni.
bool old_check_jni = vm_->SetCheckJniEnabled(false);
{
CheckJniAbortCatcher check_jni_abort_catcher;
@@ -1538,7 +1537,7 @@
jboolean is_copy = JNI_FALSE;
chars = env_->GetStringChars(s, &is_copy);
- if (Runtime::Current()->GetHeap()->IsMovableObject(s_m->GetCharArray())) {
+ if (Runtime::Current()->GetHeap()->IsMovableObject(s_m)) {
EXPECT_EQ(JNI_TRUE, is_copy);
} else {
EXPECT_EQ(JNI_FALSE, is_copy);
@@ -2102,7 +2101,7 @@
env_->ExceptionClear();
EXPECT_TRUE(env_->IsInstanceOf(thrown_exception, imse_class));
- // It's an error to call MonitorEnter or MonitorExit on nullptr.
+ // It's an error to call MonitorEnter or MonitorExit on null.
{
CheckJniAbortCatcher check_jni_abort_catcher;
env_->MonitorEnter(nullptr);
diff --git a/runtime/linear_alloc.h b/runtime/linear_alloc.h
index 6d8eda6..fcabcc8 100644
--- a/runtime/linear_alloc.h
+++ b/runtime/linear_alloc.h
@@ -42,6 +42,8 @@
private:
mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
ArenaAllocator allocator_ GUARDED_BY(lock_);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(LinearAlloc);
};
} // namespace art
diff --git a/runtime/lock_word-inl.h b/runtime/lock_word-inl.h
index d831bfb..341501b 100644
--- a/runtime/lock_word-inl.h
+++ b/runtime/lock_word-inl.h
@@ -53,6 +53,7 @@
inline LockWord::LockWord(Monitor* mon, uint32_t rb_state)
: value_(mon->GetMonitorId() | (rb_state << kReadBarrierStateShift) |
(kStateFat << kStateShift)) {
+ DCHECK_EQ(rb_state & ~kReadBarrierStateMask, 0U);
#ifndef __LP64__
DCHECK_ALIGNED(mon, kMonitorIdAlignment);
#endif
diff --git a/runtime/lock_word.h b/runtime/lock_word.h
index 46c3bd4..655aa3a 100644
--- a/runtime/lock_word.h
+++ b/runtime/lock_word.h
@@ -94,6 +94,7 @@
kReadBarrierStateMaskShiftedToggled = ~kReadBarrierStateMaskShifted,
// When the state is kHashCode, the non-state bits hold the hashcode.
+ // Note that Object.hashCode() hardcodes this hash code layout.
kHashShift = 0,
kHashSize = 32 - kStateSize - kReadBarrierStateSize,
kHashMask = (1 << kHashSize) - 1,
@@ -110,6 +111,7 @@
static LockWord FromThinLockId(uint32_t thread_id, uint32_t count, uint32_t rb_state) {
CHECK_LE(thread_id, static_cast<uint32_t>(kThinLockMaxOwner));
CHECK_LE(count, static_cast<uint32_t>(kThinLockMaxCount));
+ DCHECK_EQ(rb_state & ~kReadBarrierStateMask, 0U);
return LockWord((thread_id << kThinLockOwnerShift) | (count << kThinLockCountShift) |
(rb_state << kReadBarrierStateShift) |
(kStateThinOrUnlocked << kStateShift));
@@ -122,12 +124,14 @@
static LockWord FromHashCode(uint32_t hash_code, uint32_t rb_state) {
CHECK_LE(hash_code, static_cast<uint32_t>(kMaxHash));
+ DCHECK_EQ(rb_state & ~kReadBarrierStateMask, 0U);
return LockWord((hash_code << kHashShift) |
(rb_state << kReadBarrierStateShift) |
(kStateHash << kStateShift));
}
static LockWord FromDefault(uint32_t rb_state) {
+ DCHECK_EQ(rb_state & ~kReadBarrierStateMask, 0U);
return LockWord(rb_state << kReadBarrierStateShift);
}
@@ -149,7 +153,8 @@
LockState GetState() const {
CheckReadBarrierState();
- if (UNLIKELY(value_ == 0)) {
+ if ((!kUseReadBarrier && UNLIKELY(value_ == 0)) ||
+ (kUseReadBarrier && UNLIKELY((value_ & kReadBarrierStateMaskShiftedToggled) == 0))) {
return kUnlocked;
} else {
uint32_t internal_state = (value_ >> kStateShift) & kStateMask;
@@ -171,6 +176,14 @@
return (value_ >> kReadBarrierStateShift) & kReadBarrierStateMask;
}
+ void SetReadBarrierState(uint32_t rb_state) {
+ DCHECK_EQ(rb_state & ~kReadBarrierStateMask, 0U);
+ DCHECK_NE(static_cast<uint32_t>(GetState()), static_cast<uint32_t>(kForwardingAddress));
+ // Clear the old bits, then OR in the new state.
+ value_ &= ~(kReadBarrierStateMask << kReadBarrierStateShift);
+ value_ |= (rb_state & kReadBarrierStateMask) << kReadBarrierStateShift;
+ }
+
// Return the owner thin lock thread id.
uint32_t ThinLockOwner() const;
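SetReadBarrierState above is the usual clear-then-OR bitfield update. A standalone sketch of the same pattern, with stand-in shift and mask values (the real kReadBarrierStateShift/kReadBarrierStateMask are defined earlier in this header):

    #include <cstdint>

    constexpr uint32_t kShift = 28;  // stand-in for kReadBarrierStateShift
    constexpr uint32_t kMask = 0x3;  // stand-in for kReadBarrierStateMask

    uint32_t WithReadBarrierState(uint32_t lock_word, uint32_t rb_state) {
      lock_word &= ~(kMask << kShift);            // clear the old state bits
      lock_word |= (rb_state & kMask) << kShift;  // OR in the new state
      return lock_word;
    }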
diff --git a/runtime/mapping_table.h b/runtime/mapping_table.h
index 79e6e94..dcd5f00 100644
--- a/runtime/mapping_table.h
+++ b/runtime/mapping_table.h
@@ -106,7 +106,7 @@
const MappingTable* const table_; // The original table.
uint32_t element_; // A value in the range 0 to end_.
const uint32_t end_; // Equal to table_->DexToPcSize().
- const uint8_t* encoded_table_ptr_; // Either nullptr or points to encoded data after this entry.
+ const uint8_t* encoded_table_ptr_; // Either null or points to encoded data after this entry.
uint32_t native_pc_offset_; // The current value of native pc offset.
uint32_t dex_pc_; // The current value of dex pc.
};
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index edd2888..cf4233c 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -153,7 +153,7 @@
return true;
}
}
- PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
+ PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
*error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " does not overlap "
"any existing map. See process maps in the log.", begin, end);
return false;
@@ -190,7 +190,7 @@
// the expected value, calling munmap if validation fails, giving the
// reason in error_msg.
//
-// If the expected_ptr is nullptr, nothing is checked beyond the fact
+// If the expected_ptr is null, nothing is checked beyond the fact
// that the actual_ptr is not MAP_FAILED. However, if expected_ptr is
// non-null, we check that actual_ptr == expected_ptr,
// and if not, report in error_msg what the conflict mapping was if
@@ -256,7 +256,7 @@
// Only use this if you actually made the page reservation yourself.
CHECK(expected_ptr != nullptr);
- DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg)) << error_msg;
+ DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg)) << *error_msg;
flags |= MAP_FIXED;
}
@@ -398,8 +398,8 @@
page_aligned_byte_count, prot, false);
}
-MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr, size_t byte_count, int prot, int flags, int fd,
- off_t start, bool reuse, const char* filename,
+MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr, size_t byte_count, int prot, int flags,
+ int fd, off_t start, bool reuse, const char* filename,
std::string* error_msg) {
CHECK_NE(0, prot);
CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));
@@ -411,7 +411,7 @@
// Only use this if you actually made the page reservation yourself.
CHECK(expected_ptr != nullptr);
- DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg)) << error_msg;
+ DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg)) << *error_msg;
flags |= MAP_FIXED;
} else {
CHECK_EQ(0, flags & MAP_FIXED);
@@ -429,7 +429,8 @@
size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
// The 'expected_ptr' is modified (if specified, i.e. non-null) to be page aligned to the file but
// not necessarily to virtual memory. mmap will page align 'expected' for us.
- uint8_t* page_aligned_expected = (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);
+ uint8_t* page_aligned_expected =
+ (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);
uint8_t* actual = reinterpret_cast<uint8_t*>(mmap(page_aligned_expected,
page_aligned_byte_count,
@@ -616,13 +617,68 @@
return true;
}
-void MemMap::DumpMaps(std::ostream& os) {
+void MemMap::DumpMaps(std::ostream& os, bool terse) {
MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
- DumpMapsLocked(os);
+ DumpMapsLocked(os, terse);
}
-void MemMap::DumpMapsLocked(std::ostream& os) {
- os << *maps_;
+void MemMap::DumpMapsLocked(std::ostream& os, bool terse) {
+ const auto& mem_maps = *maps_;
+ if (!terse) {
+ os << mem_maps;
+ return;
+ }
+
+ // Terse output example:
+ // [MemMap: 0x409be000+0x20P~0x11dP+0x20P~0x61cP+0x20P prot=0x3 LinearAlloc]
+ // [MemMap: 0x451d6000+0x6bP(3) prot=0x3 large object space allocation]
+ // The details:
+ // "+0x20P" means 0x20 pages taken by a single mapping,
+ // "~0x11dP" means a gap of 0x11d pages,
+ // "+0x6bP(3)" means 3 mappings one after another, together taking 0x6b pages.
+ os << "MemMap:" << std::endl;
+ for (auto it = mem_maps.begin(), maps_end = mem_maps.end(); it != maps_end;) {
+ MemMap* map = it->second;
+ void* base = it->first;
+ CHECK_EQ(base, map->BaseBegin());
+ os << "[MemMap: " << base;
+ ++it;
+ // Merge consecutive maps with the same protect flags and name.
+ constexpr size_t kMaxGaps = 9;
+ size_t num_gaps = 0;
+ size_t num = 1u;
+ size_t size = map->BaseSize();
+ CHECK(IsAligned<kPageSize>(size));
+ void* end = map->BaseEnd();
+ while (it != maps_end &&
+ it->second->GetProtect() == map->GetProtect() &&
+ it->second->GetName() == map->GetName() &&
+ (it->second->BaseBegin() == end || num_gaps < kMaxGaps)) {
+ if (it->second->BaseBegin() != end) {
+ ++num_gaps;
+ os << "+0x" << std::hex << (size / kPageSize) << "P";
+ if (num != 1u) {
+ os << "(" << std::dec << num << ")";
+ }
+ size_t gap =
+ reinterpret_cast<uintptr_t>(it->second->BaseBegin()) - reinterpret_cast<uintptr_t>(end);
+ CHECK(IsAligned<kPageSize>(gap));
+ os << "~0x" << std::hex << (gap / kPageSize) << "P";
+ num = 0u;
+ size = 0u;
+ }
+ CHECK(IsAligned<kPageSize>(it->second->BaseSize()));
+ ++num;
+ size += it->second->BaseSize();
+ end = it->second->BaseEnd();
+ ++it;
+ }
+ os << "+0x" << std::hex << (size / kPageSize) << "P";
+ if (num != 1u) {
+ os << "(" << std::dec << num << ")";
+ }
+ os << " prot=0x" << std::hex << map->GetProtect() << " " << map->GetName() << "]" << std::endl;
+ }
}
bool MemMap::HasMemMap(MemMap* map) {
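To make the terse notation concrete, here is a small worked decoding of the second example line, assuming the usual 4 KiB kPageSize:

    #include <cstddef>

    constexpr size_t kPageSize = 4096;  // assumed page size

    constexpr size_t PagesToBytes(size_t pages) { return pages * kPageSize; }

    // "[MemMap: 0x451d6000+0x6bP(3) ...]" means three back-to-back mappings
    // starting at 0x451d6000 that together cover PagesToBytes(0x6b)
    // == 0x6b000 bytes, i.e. 428 KiB.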
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 11b2569..6023a70 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -53,24 +53,25 @@
class MemMap {
public:
// Request an anonymous region of length 'byte_count' and a requested base address.
- // Use NULL as the requested base address if you don't care.
+ // Use null as the requested base address if you don't care.
// "reuse" allows re-mapping an address range from an existing mapping.
//
// The word "anonymous" in this context means "not backed by a file". The supplied
// 'ashmem_name' will be used -- on systems that support it -- to give the mapping
// a name.
//
- // On success, returns returns a MemMap instance. On failure, returns a NULL;
+ // On success, returns a MemMap instance. On failure, returns null.
static MemMap* MapAnonymous(const char* ashmem_name, uint8_t* addr, size_t byte_count, int prot,
bool low_4gb, bool reuse, std::string* error_msg);
// Map part of a file, taking care of non-page aligned offsets. The
// "start" offset is absolute, not relative.
//
- // On success, returns returns a MemMap instance. On failure, returns a NULL;
+ // On success, returns a MemMap instance. On failure, returns null.
static MemMap* MapFile(size_t byte_count, int prot, int flags, int fd, off_t start,
const char* filename, std::string* error_msg) {
- return MapFileAtAddress(NULL, byte_count, prot, flags, fd, start, false, filename, error_msg);
+ return MapFileAtAddress(
+ nullptr, byte_count, prot, flags, fd, start, false, filename, error_msg);
}
// Map part of a file, taking care of non-page aligned offsets. The
@@ -79,13 +80,12 @@
// mapping. "reuse" allows us to create a view into an existing
// mapping where we do not take ownership of the memory.
//
- // On success, returns returns a MemMap instance. On failure, returns a
- // nullptr;
+ // On success, returns a MemMap instance. On failure, returns null.
static MemMap* MapFileAtAddress(uint8_t* addr, size_t byte_count, int prot, int flags, int fd,
off_t start, bool reuse, const char* filename,
std::string* error_msg);
- // Releases the memory mapping
+ // Releases the memory mapping.
~MemMap() LOCKS_EXCLUDED(Locks::mem_maps_lock_);
const std::string& GetName() const {
@@ -137,7 +137,7 @@
static bool CheckNoGaps(MemMap* begin_map, MemMap* end_map)
LOCKS_EXCLUDED(Locks::mem_maps_lock_);
- static void DumpMaps(std::ostream& os)
+ static void DumpMaps(std::ostream& os, bool terse = false)
LOCKS_EXCLUDED(Locks::mem_maps_lock_);
typedef AllocationTrackingMultiMap<void*, MemMap*, kAllocatorTagMaps> Maps;
@@ -149,7 +149,7 @@
MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin, size_t base_size,
int prot, bool reuse) LOCKS_EXCLUDED(Locks::mem_maps_lock_);
- static void DumpMapsLocked(std::ostream& os)
+ static void DumpMapsLocked(std::ostream& os, bool terse)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mem_maps_lock_);
static bool HasMemMap(MemMap* map)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mem_maps_lock_);
diff --git a/runtime/memory_region.cc b/runtime/memory_region.cc
index 06eba0f..a5c70c3 100644
--- a/runtime/memory_region.cc
+++ b/runtime/memory_region.cc
@@ -25,7 +25,7 @@
namespace art {
void MemoryRegion::CopyFrom(size_t offset, const MemoryRegion& from) const {
- CHECK(from.pointer() != NULL);
+ CHECK(from.pointer() != nullptr);
CHECK_GT(from.size(), 0U);
CHECK_GE(this->size(), from.size());
CHECK_LE(offset, this->size() - from.size());
diff --git a/runtime/mirror/abstract_method.cc b/runtime/mirror/abstract_method.cc
new file mode 100644
index 0000000..81c656b
--- /dev/null
+++ b/runtime/mirror/abstract_method.cc
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "abstract_method.h"
+
+#include "mirror/art_method-inl.h"
+
+namespace art {
+namespace mirror {
+
+bool AbstractMethod::CreateFromArtMethod(mirror::ArtMethod* method) {
+ auto* interface_method = method->GetInterfaceMethodIfProxy();
+ SetFieldObject<false>(ArtMethodOffset(), method);
+ SetFieldObject<false>(DeclaringClassOffset(), method->GetDeclaringClass());
+ SetFieldObject<false>(
+ DeclaringClassOfOverriddenMethodOffset(), interface_method->GetDeclaringClass());
+ SetField32<false>(AccessFlagsOffset(), method->GetAccessFlags());
+ SetField32<false>(DexMethodIndexOffset(), method->GetDexMethodIndex());
+ return true;
+}
+
+mirror::ArtMethod* AbstractMethod::GetArtMethod() {
+ return GetFieldObject<mirror::ArtMethod>(ArtMethodOffset());
+}
+
+mirror::Class* AbstractMethod::GetDeclaringClass() {
+ return GetFieldObject<mirror::Class>(DeclaringClassOffset());
+}
+
+} // namespace mirror
+} // namespace art
diff --git a/runtime/mirror/abstract_method.h b/runtime/mirror/abstract_method.h
new file mode 100644
index 0000000..ef51d7f
--- /dev/null
+++ b/runtime/mirror/abstract_method.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MIRROR_ABSTRACT_METHOD_H_
+#define ART_RUNTIME_MIRROR_ABSTRACT_METHOD_H_
+
+#include "accessible_object.h"
+#include "gc_root.h"
+#include "object.h"
+#include "object_callbacks.h"
+#include "read_barrier_option.h"
+
+namespace art {
+
+struct AbstractMethodOffsets;
+
+namespace mirror {
+
+class ArtMethod;
+
+// C++ mirror of java.lang.reflect.AbstractMethod.
+class MANAGED AbstractMethod : public AccessibleObject {
+ public:
+ // Called from Constructor::CreateFromArtMethod, Method::CreateFromArtMethod.
+ bool CreateFromArtMethod(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ mirror::ArtMethod* GetArtMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ private:
+ static MemberOffset ArtMethodOffset() {
+ return MemberOffset(OFFSETOF_MEMBER(AbstractMethod, art_method_));
+ }
+ static MemberOffset DeclaringClassOffset() {
+ return MemberOffset(OFFSETOF_MEMBER(AbstractMethod, declaring_class_));
+ }
+ static MemberOffset DeclaringClassOfOverriddenMethodOffset() {
+ return MemberOffset(OFFSETOF_MEMBER(AbstractMethod, declaring_class_of_overridden_method_));
+ }
+ static MemberOffset AccessFlagsOffset() {
+ return MemberOffset(OFFSETOF_MEMBER(AbstractMethod, access_flags_));
+ }
+ static MemberOffset DexMethodIndexOffset() {
+ return MemberOffset(OFFSETOF_MEMBER(AbstractMethod, dex_method_index_));
+ }
+
+ HeapReference<mirror::ArtMethod> art_method_;
+ HeapReference<mirror::Class> declaring_class_;
+ HeapReference<mirror::Class> declaring_class_of_overridden_method_;
+ uint32_t access_flags_;
+ uint32_t dex_method_index_;
+
+ friend struct art::AbstractMethodOffsets; // for verifying offset information
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AbstractMethod);
+};
+
+} // namespace mirror
+} // namespace art
+
+#endif // ART_RUNTIME_MIRROR_ABSTRACT_METHOD_H_
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index a300d52..0f306e8 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -36,7 +36,7 @@
namespace mirror {
inline uint32_t ArtMethod::ClassSize() {
- uint32_t vtable_entries = Object::kVTableLength + 7;
+ uint32_t vtable_entries = Object::kVTableLength;
return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0);
}
@@ -48,7 +48,7 @@
inline Class* ArtMethod::GetDeclaringClass() {
Class* result = GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, declaring_class_));
- DCHECK(result != NULL) << this;
+ DCHECK(result != nullptr) << this;
DCHECK(result->IsIdxLoaded() || result->IsErroneous()) << this;
return result;
}
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index 92aea1f..9518c9d 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -16,6 +16,7 @@
#include "art_method.h"
+#include "abstract_method.h"
#include "arch/context.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
@@ -53,14 +54,11 @@
ArtMethod* ArtMethod::FromReflectedMethod(const ScopedObjectAccessAlreadyRunnable& soa,
jobject jlr_method) {
- ArtField* f =
- soa.DecodeField(WellKnownClasses::java_lang_reflect_AbstractMethod_artMethod);
- mirror::ArtMethod* method = f->GetObject(soa.Decode<mirror::Object*>(jlr_method))->AsArtMethod();
- DCHECK(method != nullptr);
- return method;
+ auto* abstract_method = soa.Decode<mirror::AbstractMethod*>(jlr_method);
+ DCHECK(abstract_method != nullptr);
+ return abstract_method->GetArtMethod();
}
-
void ArtMethod::VisitRoots(RootVisitor* visitor) {
java_lang_reflect_ArtMethod_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
}
@@ -364,7 +362,7 @@
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
const void* code = runtime->GetInstrumentation()->GetQuickCodeFor(this, pointer_size);
- // On failure, instead of nullptr we get the quick-generic-jni-trampoline for native method
+ // On failure, instead of null we get the quick-generic-jni-trampoline for native method
// indicating the generic JNI, or the quick-to-interpreter-bridge (but not the trampoline)
// for non-native methods.
if (class_linker->IsQuickToInterpreterBridge(code) ||
@@ -505,7 +503,7 @@
const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(this, sizeof(void*));
ClassLinker* class_linker = runtime->GetClassLinker();
- // On failure, instead of nullptr we get the quick-generic-jni-trampoline for native method
+ // On failure, instead of null we get the quick-generic-jni-trampoline for native method
// indicating the generic JNI, or the quick-to-interpreter-bridge (but not the trampoline)
// for non-native methods. And we really shouldn't see a failure for non-native methods here.
DCHECK(!class_linker->IsQuickToInterpreterBridge(entry_point));
@@ -513,7 +511,6 @@
if (class_linker->IsQuickGenericJniStub(entry_point)) {
// Generic JNI frame.
DCHECK(IsNative());
- StackHandleScope<1> hs(Thread::Current());
uint32_t handle_refs = GetNumberOfReferenceArgsWithoutReceiver(this) + 1;
size_t scope_size = HandleScope::SizeOf(handle_refs);
QuickMethodFrameInfo callee_info = runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
@@ -547,5 +544,31 @@
RegisterNative(GetJniDlsymLookupStub(), false);
}
+bool ArtMethod::EqualParameters(Handle<mirror::ObjectArray<mirror::Class>> params) {
+ auto* dex_cache = GetDexCache();
+ auto* dex_file = dex_cache->GetDexFile();
+ const auto& method_id = dex_file->GetMethodId(GetDexMethodIndex());
+ const auto& proto_id = dex_file->GetMethodPrototype(method_id);
+ const DexFile::TypeList* proto_params = dex_file->GetProtoParameters(proto_id);
+ auto count = proto_params != nullptr ? proto_params->Size() : 0u;
+ auto param_len = params.Get() != nullptr ? params->GetLength() : 0u;
+ if (param_len != count) {
+ return false;
+ }
+ auto* cl = Runtime::Current()->GetClassLinker();
+ for (size_t i = 0; i < count; ++i) {
+ auto type_idx = proto_params->GetTypeItem(i).type_idx_;
+ auto* type = cl->ResolveType(type_idx, this);
+ if (type == nullptr) {
+ Thread::Current()->AssertPendingException();
+ return false;
+ }
+ if (type != params->GetWithoutChecks(i)) {
+ return false;
+ }
+ }
+ return true;
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index 55b8068..0da5925 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -341,10 +341,10 @@
return reinterpret_cast<const void*>(code);
}
- // Actual entry point pointer to compiled oat code or nullptr.
+ // Actual entry point pointer to compiled oat code or null.
const void* GetQuickOatEntryPoint(size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Actual pointer to compiled oat code or nullptr.
+ // Actual pointer to compiled oat code or null.
const void* GetQuickOatCodePointer(size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return EntryPointToCodePointer(GetQuickOatEntryPoint(pointer_size));
@@ -541,6 +541,10 @@
ALWAYS_INLINE ArtMethod* GetInterfaceMethodIfProxy() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // May cause thread suspension due to class resolution.
+ bool EqualParameters(Handle<mirror::ObjectArray<mirror::Class>> params)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
static size_t SizeWithoutPointerFields(size_t pointer_size) {
size_t total = sizeof(ArtMethod) - sizeof(PtrSizedFields);
#ifdef ART_METHOD_HAS_PADDING_FIELD_ON_64_BIT
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index aaa66f9..cc6f5c4 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -66,7 +66,7 @@
inline void Class::SetDirectMethods(ObjectArray<ArtMethod>* new_direct_methods)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(NULL == GetFieldObject<ObjectArray<ArtMethod>>(
+ DCHECK(nullptr == GetFieldObject<ObjectArray<ArtMethod>>(
OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_)));
DCHECK_NE(0, new_direct_methods->GetLength());
SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_), new_direct_methods);
@@ -85,7 +85,7 @@
// Returns the number of static, private, and constructor methods.
inline uint32_t Class::NumDirectMethods() {
- return (GetDirectMethods() != NULL) ? GetDirectMethods()->GetLength() : 0;
+ return (GetDirectMethods() != nullptr) ? GetDirectMethods()->GetLength() : 0;
}
template<VerifyObjectFlags kVerifyFlags>
@@ -102,7 +102,7 @@
}
inline uint32_t Class::NumVirtualMethods() {
- return (GetVirtualMethods() != NULL) ? GetVirtualMethods()->GetLength() : 0;
+ return (GetVirtualMethods() != nullptr) ? GetVirtualMethods()->GetLength() : 0;
}
template<VerifyObjectFlags kVerifyFlags>
@@ -186,7 +186,7 @@
}
inline bool Class::Implements(Class* klass) {
- DCHECK(klass != NULL);
+ DCHECK(klass != nullptr);
DCHECK(klass->IsInterface()) << PrettyClass(this);
// All interfaces implemented directly and by our superclass, and
// recursively all super-interfaces of those interfaces, are listed
@@ -233,8 +233,8 @@
// If "this" is not also an array, it must be Object.
// src's super should be java_lang_Object, since it is an array.
Class* java_lang_Object = src->GetSuperClass();
- DCHECK(java_lang_Object != NULL) << PrettyClass(src);
- DCHECK(java_lang_Object->GetSuperClass() == NULL) << PrettyClass(src);
+ DCHECK(java_lang_Object != nullptr) << PrettyClass(src);
+ DCHECK(java_lang_Object->GetSuperClass() == nullptr) << PrettyClass(src);
return this == java_lang_Object;
}
return IsArrayAssignableFromArray(src);
@@ -335,13 +335,13 @@
return true;
}
current = current->GetSuperClass();
- } while (current != NULL);
+ } while (current != nullptr);
return false;
}
inline ArtMethod* Class::FindVirtualMethodForInterface(ArtMethod* method) {
Class* declaring_class = method->GetDeclaringClass();
- DCHECK(declaring_class != NULL) << PrettyClass(this);
+ DCHECK(declaring_class != nullptr) << PrettyClass(this);
DCHECK(declaring_class->IsInterface()) << PrettyMethod(method);
// TODO cache to improve lookup speed
int32_t iftable_count = GetIfTableCount();
@@ -351,7 +351,7 @@
return iftable->GetMethodArray(i)->Get(method->GetMethodIndex());
}
}
- return NULL;
+ return nullptr;
}
inline ArtMethod* Class::FindVirtualMethodForVirtual(ArtMethod* method) {
@@ -382,7 +382,7 @@
inline int32_t Class::GetIfTableCount() {
IfTable* iftable = GetIfTable();
- if (iftable == NULL) {
+ if (iftable == nullptr) {
return 0;
}
return iftable->Count();
@@ -484,7 +484,7 @@
}
inline void Class::SetVerifyErrorClass(Class* klass) {
- CHECK(klass != NULL) << PrettyClass(this);
+ CHECK(klass != nullptr) << PrettyClass(this);
if (Runtime::Current()->IsActiveTransaction()) {
SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_class_), klass);
} else {
@@ -548,6 +548,10 @@
<< PrettyClass(this)
<< "A class object shouldn't be allocated through this "
<< "as it requires a pre-fence visitor that sets the class size.";
+ DCHECK(!IsStringClass())
+ << PrettyClass(this)
+ << "A string shouldn't be allocated through this "
+ << "as it requires a pre-fence visitor that sets the class size.";
DCHECK(IsInstantiable()) << PrettyClass(this);
// TODO: decide whether we want this check. It currently fails during bootstrap.
// DCHECK(!Runtime::Current()->IsStarted() || IsInitializing()) << PrettyClass(this);
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 2afb4af..56c586a 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -25,6 +25,7 @@
#include "dex_file-inl.h"
#include "gc/accounting/card_table-inl.h"
#include "handle_scope-inl.h"
+#include "method.h"
#include "object_array-inl.h"
#include "object-inl.h"
#include "runtime.h"
@@ -329,10 +330,6 @@
return IsInSamePackage(klass1->GetDescriptor(&temp1), klass2->GetDescriptor(&temp2));
}
-bool Class::IsStringClass() const {
- return this == String::GetJavaLangString();
-}
-
bool Class::IsThrowableClass() {
return WellKnownClasses::ToClass(WellKnownClasses::java_lang_Throwable)->IsAssignableFrom(this);
}
@@ -876,5 +873,26 @@
return Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(this) == match;
}
+mirror::ArtMethod* Class::GetDeclaredConstructor(
+ Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args) {
+ auto* direct_methods = GetDirectMethods();
+ size_t count = direct_methods != nullptr ? direct_methods->GetLength() : 0u;
+ for (size_t i = 0; i < count; ++i) {
+ auto* m = direct_methods->GetWithoutChecks(i);
+ // Skip <clinit> which is a static constructor, as well as non constructors.
+ if (m->IsStatic() || !m->IsConstructor()) {
+ continue;
+ }
+ // May cause thread suspension and exceptions.
+ if (m->EqualParameters(args)) {
+ return m;
+ }
+ if (self->IsExceptionPending()) {
+ return nullptr;
+ }
+ }
+ return nullptr;
+}
+
} // namespace mirror
} // namespace art
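A hedged sketch of how a runtime caller might drive the new lookup; FindCtor and its parameter names are illustrative, not part of this change:

    mirror::ArtMethod* FindCtor(Thread* self, mirror::Class* klass,
                                Handle<mirror::ObjectArray<mirror::Class>> arg_types)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      mirror::ArtMethod* ctor = klass->GetDeclaredConstructor(self, arg_types);
      if (ctor == nullptr && self->IsExceptionPending()) {
        // EqualParameters had to resolve a parameter type and that resolution
        // threw; the caller decides whether to clear or propagate it.
        return nullptr;
      }
      return ctor;  // Null without a pending exception means no match.
    }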
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 20f2387..d3cfd01 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -47,6 +47,7 @@
class ArtMethod;
class ClassLoader;
+class Constructor;
class DexCache;
class IfTable;
@@ -234,6 +235,15 @@
SetAccessFlags(flags | kAccClassIsFinalizable);
}
+ ALWAYS_INLINE bool IsStringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return (GetField32(AccessFlagsOffset()) & kAccClassIsStringClass) != 0;
+ }
+
+ ALWAYS_INLINE void SetStringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
+ SetAccessFlags(flags | kAccClassIsStringClass);
+ }
+
// Returns true if the class is abstract.
ALWAYS_INLINE bool IsAbstract() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccAbstract) != 0;
@@ -399,7 +409,7 @@
// Depth of class from java.lang.Object
uint32_t Depth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
uint32_t depth = 0;
- for (Class* klass = this; klass->GetSuperClass() != NULL; klass = klass->GetSuperClass()) {
+ for (Class* klass = this; klass->GetSuperClass() != nullptr; klass = klass->GetSuperClass()) {
depth++;
}
return depth;
@@ -408,15 +418,13 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
bool IsArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetComponentType<kVerifyFlags, kReadBarrierOption>() != NULL;
+ return GetComponentType<kVerifyFlags, kReadBarrierOption>() != nullptr;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
bool IsClassClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsStringClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
bool IsThrowableClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
@@ -436,8 +444,8 @@
}
void SetComponentType(Class* new_component_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(GetComponentType() == NULL);
- DCHECK(new_component_type != NULL);
+ DCHECK(GetComponentType() == nullptr);
+ DCHECK(new_component_type != nullptr);
// Component type is invariant: use non-transactional mode without check.
SetFieldObject<false, false>(ComponentTypeOffset(), new_component_type);
}
@@ -453,7 +461,7 @@
}
bool IsObjectClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return !IsPrimitive() && GetSuperClass() == NULL;
+ return !IsPrimitive() && GetSuperClass() == nullptr;
}
bool IsInstantiableNonArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -483,10 +491,10 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
bool IsVariableSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // Classes and arrays vary in size, and so the object_size_ field cannot
+ // Classes, arrays, and strings vary in size, and so the object_size_ field cannot
// be used to Get their instance size
return IsClassClass<kVerifyFlags, kReadBarrierOption>() ||
- IsArrayClass<kVerifyFlags, kReadBarrierOption>();
+ IsArrayClass<kVerifyFlags, kReadBarrierOption>() || IsStringClass();
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -610,7 +618,7 @@
// that extends) another can be assigned to its parent, but not vice-versa. All Classes may assign
// to themselves. Classes for primitive types may not assign to each other.
ALWAYS_INLINE bool IsAssignableFrom(Class* src) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(src != NULL);
+ DCHECK(src != nullptr);
if (this == src) {
// Can always assign to things of the same type.
return true;
@@ -637,7 +645,7 @@
}
bool HasSuperClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetSuperClass() != NULL;
+ return GetSuperClass() != nullptr;
}
static MemberOffset SuperClassOffset() {
@@ -1052,6 +1060,11 @@
return OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_strings_);
}
+ // May cause thread suspension due to EqualParameters.
+ mirror::ArtMethod* GetDeclaredConstructor(
+ Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Used to initialize a class in the allocation code path to ensure it is guarded by a StoreStore
// fence.
class InitializeClassVisitor {
@@ -1097,14 +1110,14 @@
bool ProxyDescriptorEquals(const char* match) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // defining class loader, or NULL for the "bootstrap" system loader
+ // Defining class loader, or null for the "bootstrap" system loader.
HeapReference<ClassLoader> class_loader_;
// For array classes, the component class object for instanceof/checkcast
- // (for String[][][], this will be String[][]). NULL for non-array classes.
+ // (for String[][][], this will be String[][]). null for non-array classes.
HeapReference<Class> component_type_;
- // DexCache of resolved constant pool entries (will be NULL for classes generated by the
+ // DexCache of resolved constant pool entries (will be null for classes generated by the
// runtime such as arrays and primitive classes).
HeapReference<DexCache> dex_cache_;
@@ -1130,7 +1143,7 @@
// Descriptor for the class such as "java.lang.Class" or "[C". Lazily initialized by ComputeName
HeapReference<String> name_;
- // The superclass, or NULL if this is java.lang.Object, an interface or primitive type.
+ // The superclass, or null if this is java.lang.Object, an interface or primitive type.
HeapReference<Class> super_class_;
// If class verify fails, we must return same error on subsequent tries.
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index 1d6846b..228fce5 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -34,10 +34,10 @@
TEST_F(DexCacheTest, Open) {
ScopedObjectAccess soa(Thread::Current());
StackHandleScope<1> hs(soa.Self());
- ASSERT_TRUE(java_lang_dex_file_ != NULL);
+ ASSERT_TRUE(java_lang_dex_file_ != nullptr);
Handle<DexCache> dex_cache(
hs.NewHandle(class_linker_->AllocDexCache(soa.Self(), *java_lang_dex_file_)));
- ASSERT_TRUE(dex_cache.Get() != NULL);
+ ASSERT_TRUE(dex_cache.Get() != nullptr);
EXPECT_EQ(java_lang_dex_file_->NumStringIds(), dex_cache->NumStrings());
EXPECT_EQ(java_lang_dex_file_->NumTypeIds(), dex_cache->NumResolvedTypes());
diff --git a/runtime/mirror/field-inl.h b/runtime/mirror/field-inl.h
index 9820db7..388921b 100644
--- a/runtime/mirror/field-inl.h
+++ b/runtime/mirror/field-inl.h
@@ -30,10 +30,11 @@
template <bool kTransactionActive>
inline mirror::Field* Field::CreateFromArtField(Thread* self, ArtField* field,
bool force_resolve) {
+ StackHandleScope<2> hs(self);
// Try to resolve type before allocating since this is a thread suspension point.
- mirror::Class* type = field->GetType<true>();
+ Handle<mirror::Class> type = hs.NewHandle(field->GetType<true>());
- if (type == nullptr) {
+ if (type.Get() == nullptr) {
if (force_resolve) {
if (kIsDebugBuild) {
self->AssertPendingException();
@@ -48,7 +49,6 @@
self->ClearException();
}
}
- StackHandleScope<1> hs(self);
auto ret = hs.NewHandle(static_cast<Field*>(StaticClass()->AllocObject(self)));
if (ret.Get() == nullptr) {
if (kIsDebugBuild) {
@@ -58,14 +58,22 @@
}
auto dex_field_index = field->GetDexFieldIndex();
auto* resolved_field = field->GetDexCache()->GetResolvedField(dex_field_index, sizeof(void*));
- if (resolved_field != nullptr) {
- DCHECK_EQ(resolved_field, field);
+ if (field->GetDeclaringClass()->IsProxyClass()) {
+ DCHECK(field->IsStatic());
+ DCHECK_LT(dex_field_index, 2U);
+ // The two static fields (interfaces, throws) of all proxy classes
+ // share the same dex file indices 0 and 1. So, we can't resolve
+ // them in the dex cache.
} else {
- // We rely on the field being resolved so that we can back to the ArtField
- // (i.e. FromReflectedMethod).
- field->GetDexCache()->SetResolvedField(dex_field_index, field, sizeof(void*));
+ if (resolved_field != nullptr) {
+ DCHECK_EQ(resolved_field, field);
+ } else {
+ // We rely on the field being resolved so that we can map back to the
+ // ArtField (i.e. FromReflectedMethod).
+ field->GetDexCache()->SetResolvedField(dex_field_index, field, sizeof(void*));
+ }
}
- ret->SetType<kTransactionActive>(type);
+ ret->SetType<kTransactionActive>(type.Get());
ret->SetDeclaringClass<kTransactionActive>(field->GetDeclaringClass());
ret->SetAccessFlags<kTransactionActive>(field->GetAccessFlags());
ret->SetDexFieldIndex<kTransactionActive>(dex_field_index);
diff --git a/runtime/mirror/field.cc b/runtime/mirror/field.cc
index 70311bb..933784e 100644
--- a/runtime/mirror/field.cc
+++ b/runtime/mirror/field.cc
@@ -54,7 +54,19 @@
}
ArtField* Field::GetArtField() {
- mirror::DexCache* const dex_cache = GetDeclaringClass()->GetDexCache();
+ mirror::Class* declaring_class = GetDeclaringClass();
+ if (UNLIKELY(declaring_class->IsProxyClass())) {
+ DCHECK(IsStatic());
+ DCHECK_EQ(declaring_class->NumStaticFields(), 2U);
+ // 0 == Class[] interfaces; 1 == Class[][] throws;
+ if (GetDexFieldIndex() == 0) {
+ return &declaring_class->GetSFields()[0];
+ } else {
+ DCHECK_EQ(GetDexFieldIndex(), 1U);
+ return &declaring_class->GetSFields()[1];
+ }
+ }
+ mirror::DexCache* const dex_cache = declaring_class->GetDexCache();
ArtField* const art_field = dex_cache->GetResolvedField(GetDexFieldIndex(), sizeof(void*));
CHECK(art_field != nullptr);
return art_field;
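The proxy special case above boils down to a fixed two-field layout; a sketch restating it, where GetProxyStaticField is a hypothetical helper mirroring the logic in Field::GetArtField:

    ArtField* GetProxyStaticField(mirror::Class* proxy_class, uint32_t dex_field_index)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      // All generated proxy classes have exactly two static fields sharing dex
      // field indices 0 and 1, so the dex cache cannot disambiguate them:
      //   index 0: Class[]   interfaces
      //   index 1: Class[][] throws
      DCHECK_EQ(proxy_class->NumStaticFields(), 2U);
      DCHECK_LT(dex_field_index, 2U);
      return &proxy_class->GetSFields()[dex_field_index];
    }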
diff --git a/runtime/mirror/field.h b/runtime/mirror/field.h
index 9988f84..d927f0c 100644
--- a/runtime/mirror/field.h
+++ b/runtime/mirror/field.h
@@ -82,15 +82,12 @@
}
static void SetClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- static void SetArrayClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
static void ResetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void SetArrayClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void ResetArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void VisitRoots(RootVisitor* visitor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Slow, try to use only for PrettyField and such.
ArtField* GetArtField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/mirror/iftable-inl.h b/runtime/mirror/iftable-inl.h
index d1309d2..b465d07 100644
--- a/runtime/mirror/iftable-inl.h
+++ b/runtime/mirror/iftable-inl.h
@@ -23,7 +23,7 @@
namespace mirror {
inline void IfTable::SetInterface(int32_t i, Class* interface) {
- DCHECK(interface != NULL);
+ DCHECK(interface != nullptr);
DCHECK(interface->IsInterface());
const size_t idx = i * kMax + kInterface;
DCHECK_EQ(Get(idx), static_cast<Object*>(nullptr));
diff --git a/runtime/mirror/iftable.h b/runtime/mirror/iftable.h
index 4d899d2..1c1c7b3 100644
--- a/runtime/mirror/iftable.h
+++ b/runtime/mirror/iftable.h
@@ -27,7 +27,7 @@
public:
ALWAYS_INLINE Class* GetInterface(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Class* interface = GetWithoutChecks((i * kMax) + kInterface)->AsClass();
- DCHECK(interface != NULL);
+ DCHECK(interface != nullptr);
return interface;
}
@@ -37,14 +37,14 @@
ObjectArray<ArtMethod>* GetMethodArray(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ObjectArray<ArtMethod>* method_array =
down_cast<ObjectArray<ArtMethod>*>(Get((i * kMax) + kMethodArray));
- DCHECK(method_array != NULL);
+ DCHECK(method_array != nullptr);
return method_array;
}
size_t GetMethodArrayCount(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ObjectArray<ArtMethod>* method_array =
down_cast<ObjectArray<ArtMethod>*>(Get((i * kMax) + kMethodArray));
- if (method_array == NULL) {
+ if (method_array == nullptr) {
return 0;
}
return method_array->GetLength();
@@ -52,8 +52,8 @@
void SetMethodArray(int32_t i, ObjectArray<ArtMethod>* new_ma)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(new_ma != NULL);
- DCHECK(Get((i * kMax) + kMethodArray) == NULL);
+ DCHECK(new_ma != nullptr);
+ DCHECK(Get((i * kMax) + kMethodArray) == nullptr);
Set<false>((i * kMax) + kMethodArray, new_ma);
}
diff --git a/runtime/mirror/method.cc b/runtime/mirror/method.cc
new file mode 100644
index 0000000..81530bb
--- /dev/null
+++ b/runtime/mirror/method.cc
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "method.h"
+
+#include "mirror/art_method.h"
+#include "mirror/object-inl.h"
+
+namespace art {
+namespace mirror {
+
+GcRoot<Class> Method::static_class_;
+GcRoot<Class> Method::array_class_;
+GcRoot<Class> Constructor::static_class_;
+GcRoot<Class> Constructor::array_class_;
+
+void Method::SetClass(Class* klass) {
+ CHECK(static_class_.IsNull()) << static_class_.Read() << " " << klass;
+ CHECK(klass != nullptr);
+ static_class_ = GcRoot<Class>(klass);
+}
+
+void Method::ResetClass() {
+ CHECK(!static_class_.IsNull());
+ static_class_ = GcRoot<Class>(nullptr);
+}
+
+void Method::SetArrayClass(Class* klass) {
+ CHECK(array_class_.IsNull()) << array_class_.Read() << " " << klass;
+ CHECK(klass != nullptr);
+ array_class_ = GcRoot<Class>(klass);
+}
+
+void Method::ResetArrayClass() {
+ CHECK(!array_class_.IsNull());
+ array_class_ = GcRoot<Class>(nullptr);
+}
+
+Method* Method::CreateFromArtMethod(Thread* self, mirror::ArtMethod* method) {
+ DCHECK(!method->IsConstructor()) << PrettyMethod(method);
+ auto* ret = down_cast<Method*>(StaticClass()->AllocObject(self));
+ if (LIKELY(ret != nullptr)) {
+ static_cast<AbstractMethod*>(ret)->CreateFromArtMethod(method);
+ }
+ return ret;
+}
+
+void Method::VisitRoots(RootVisitor* visitor) {
+ static_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
+ array_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
+}
+
+void Constructor::SetClass(Class* klass) {
+ CHECK(static_class_.IsNull()) << static_class_.Read() << " " << klass;
+ CHECK(klass != nullptr);
+ static_class_ = GcRoot<Class>(klass);
+}
+
+void Constructor::ResetClass() {
+ CHECK(!static_class_.IsNull());
+ static_class_ = GcRoot<Class>(nullptr);
+}
+
+void Constructor::SetArrayClass(Class* klass) {
+ CHECK(array_class_.IsNull()) << array_class_.Read() << " " << klass;
+ CHECK(klass != nullptr);
+ array_class_ = GcRoot<Class>(klass);
+}
+
+void Constructor::ResetArrayClass() {
+ CHECK(!array_class_.IsNull());
+ array_class_ = GcRoot<Class>(nullptr);
+}
+
+void Constructor::VisitRoots(RootVisitor* visitor) {
+ static_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
+ array_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
+}
+
+Constructor* Constructor::CreateFromArtMethod(Thread* self, mirror::ArtMethod* method) {
+ DCHECK(method->IsConstructor()) << PrettyMethod(method);
+ auto* ret = down_cast<Constructor*>(StaticClass()->AllocObject(self));
+ if (LIKELY(ret != nullptr)) {
+ static_cast<AbstractMethod*>(ret)->CreateFromArtMethod(method);
+ }
+ return ret;
+}
+
+} // namespace mirror
+} // namespace art
diff --git a/runtime/mirror/method.h b/runtime/mirror/method.h
new file mode 100644
index 0000000..88100f0
--- /dev/null
+++ b/runtime/mirror/method.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MIRROR_METHOD_H_
+#define ART_RUNTIME_MIRROR_METHOD_H_
+
+#include "abstract_method.h"
+#include "gc_root.h"
+
+namespace art {
+namespace mirror {
+
+class Class;
+
+// C++ mirror of java.lang.reflect.Method.
+class MANAGED Method : public AbstractMethod {
+ public:
+ static Method* CreateFromArtMethod(Thread* self, mirror::ArtMethod* method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static mirror::Class* StaticClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return static_class_.Read();
+ }
+
+ static void SetClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static void ResetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static mirror::Class* ArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return array_class_.Read();
+ }
+
+ static void SetArrayClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static void ResetArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ private:
+ static GcRoot<Class> static_class_; // java.lang.reflect.Method.class.
+ static GcRoot<Class> array_class_; // [java.lang.reflect.Method.class.
+
+ DISALLOW_COPY_AND_ASSIGN(Method);
+};
+
+// C++ mirror of java.lang.reflect.Constructor.
+class MANAGED Constructor : public AbstractMethod {
+ public:
+ static Constructor* CreateFromArtMethod(Thread* self, mirror::ArtMethod* method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static mirror::Class* StaticClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return static_class_.Read();
+ }
+
+ static void SetClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static void ResetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static mirror::Class* ArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return array_class_.Read();
+ }
+
+ static void SetArrayClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static void ResetArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ private:
+ static GcRoot<Class> static_class_; // java.lang.reflect.Constructor.class.
+ static GcRoot<Class> array_class_; // [java.lang.reflect.Constructor.class.
+
+ DISALLOW_COPY_AND_ASSIGN(Constructor);
+};
+
+} // namespace mirror
+} // namespace art
+
+#endif // ART_RUNTIME_MIRROR_METHOD_H_
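Putting the two new mirror types together, a minimal usage sketch; BoxMethod is a hypothetical helper, while the CreateFromArtMethod entry points are the ones declared above:

    mirror::Object* BoxMethod(Thread* self, mirror::ArtMethod* m)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      // Constructors and plain methods get distinct java.lang.reflect mirrors.
      if (m->IsConstructor()) {
        return mirror::Constructor::CreateFromArtMethod(self, m);
      }
      return mirror::Method::CreateFromArtMethod(self, m);  // May be null on OOME.
    }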
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index af0e856..39d0f56 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -28,8 +28,9 @@
#include "monitor.h"
#include "object_array-inl.h"
#include "read_barrier-inl.h"
-#include "runtime.h"
#include "reference.h"
+#include "runtime.h"
+#include "string-inl.h"
#include "throwable.h"
namespace art {
@@ -48,7 +49,7 @@
template<VerifyObjectFlags kVerifyFlags>
inline void Object::SetClass(Class* new_klass) {
- // new_klass may be NULL prior to class linker initialization.
+ // new_klass may be null prior to class linker initialization.
// We don't mark the card as this occurs as part of object allocation. Not all objects have
// backing cards, such as large objects.
// We use non transactional version since we can't undo this write. We also disable checking as
@@ -115,8 +116,11 @@
}
inline Object* Object::GetReadBarrierPointer() {
-#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
- DCHECK(kUseBakerOrBrooksReadBarrier);
+#ifdef USE_BAKER_READ_BARRIER
+ DCHECK(kUseBakerReadBarrier);
+ return reinterpret_cast<Object*>(GetLockWord(false).ReadBarrierState());
+#elif USE_BROOKS_READ_BARRIER
+ DCHECK(kUseBrooksReadBarrier);
return GetFieldObject<Object, kVerifyNone, kWithoutReadBarrier>(
OFFSET_OF_OBJECT_MEMBER(Object, x_rb_ptr_));
#else
@@ -126,8 +130,14 @@
}
inline void Object::SetReadBarrierPointer(Object* rb_ptr) {
-#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
- DCHECK(kUseBakerOrBrooksReadBarrier);
+#ifdef USE_BAKER_READ_BARRIER
+ DCHECK(kUseBakerReadBarrier);
+ DCHECK_EQ(reinterpret_cast<uint64_t>(rb_ptr) >> 32, 0U);
+ LockWord lw = GetLockWord(false);
+ lw.SetReadBarrierState(static_cast<uint32_t>(reinterpret_cast<uintptr_t>(rb_ptr)));
+ SetLockWord(lw, false);
+#elif USE_BROOKS_READ_BARRIER
+ DCHECK(kUseBrooksReadBarrier);
// We don't mark the card as this occurs as part of object allocation. Not all objects have
// backing cards, such as large objects.
SetFieldObjectWithoutWriteBarrier<false, false, kVerifyNone>(
@@ -140,8 +150,27 @@
}
inline bool Object::AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object* rb_ptr) {
-#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
- DCHECK(kUseBakerOrBrooksReadBarrier);
+#ifdef USE_BAKER_READ_BARRIER
+ DCHECK(kUseBakerReadBarrier);
+ DCHECK_EQ(reinterpret_cast<uint64_t>(expected_rb_ptr) >> 32, 0U);
+ DCHECK_EQ(reinterpret_cast<uint64_t>(rb_ptr) >> 32, 0U);
+ LockWord expected_lw;
+ LockWord new_lw;
+ do {
+ LockWord lw = GetLockWord(false);
+ if (UNLIKELY(reinterpret_cast<Object*>(lw.ReadBarrierState()) != expected_rb_ptr)) {
+ // Lost the race.
+ return false;
+ }
+ expected_lw = lw;
+ expected_lw.SetReadBarrierState(
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(expected_rb_ptr)));
+ new_lw = lw;
+ new_lw.SetReadBarrierState(static_cast<uint32_t>(reinterpret_cast<uintptr_t>(rb_ptr)));
+ } while (!CasLockWordWeakSequentiallyConsistent(expected_lw, new_lw));
+ return true;
+#elif USE_BROOKS_READ_BARRIER
+ DCHECK(kUseBrooksReadBarrier);
MemberOffset offset = OFFSET_OF_OBJECT_MEMBER(Object, x_rb_ptr_);
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + offset.SizeValue();
Atomic<uint32_t>* atomic_rb_ptr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
@@ -179,15 +208,15 @@
template<VerifyObjectFlags kVerifyFlags>
inline bool Object::VerifierInstanceOf(Class* klass) {
- DCHECK(klass != NULL);
- DCHECK(GetClass<kVerifyFlags>() != NULL);
+ DCHECK(klass != nullptr);
+ DCHECK(GetClass<kVerifyFlags>() != nullptr);
return klass->IsInterface() || InstanceOf(klass);
}
template<VerifyObjectFlags kVerifyFlags>
inline bool Object::InstanceOf(Class* klass) {
- DCHECK(klass != NULL);
- DCHECK(GetClass<kVerifyNone>() != NULL);
+ DCHECK(klass != nullptr);
+ DCHECK(GetClass<kVerifyNone>() != nullptr);
return klass->IsAssignableFrom(GetClass<kVerifyFlags>());
}
@@ -337,9 +366,14 @@
return down_cast<DoubleArray*>(this);
}
-template<VerifyObjectFlags kVerifyFlags>
+template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+inline bool Object::IsString() {
+ return GetClass<kVerifyFlags, kReadBarrierOption>()->IsStringClass();
+}
+
+template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline String* Object::AsString() {
- DCHECK(GetClass<kVerifyFlags>()->IsStringClass());
+ DCHECK((IsString<kVerifyFlags, kReadBarrierOption>()));
return down_cast<String*>(this);
}
@@ -385,6 +419,9 @@
} else if (IsClass<kNewFlags, kReadBarrierOption>()) {
result = AsClass<kNewFlags, kReadBarrierOption>()->
template SizeOf<kNewFlags, kReadBarrierOption>();
+ } else if (GetClass<kNewFlags, kReadBarrierOption>()->IsStringClass()) {
+ result = AsString<kNewFlags, kReadBarrierOption>()->
+ template SizeOf<kNewFlags>();
} else {
result = GetClass<kNewFlags, kReadBarrierOption>()->
template GetObjectSize<kNewFlags, kReadBarrierOption>();
@@ -947,7 +984,7 @@
mirror::Class* klass = GetClass<kVerifyFlags>();
if (klass == Class::GetJavaLangClass()) {
AsClass<kVerifyNone>()->VisitReferences<kVisitClass>(klass, visitor);
- } else if (klass->IsArrayClass()) {
+ } else if (klass->IsArrayClass() || klass->IsStringClass()) {
if (klass->IsObjectArrayClass<kVerifyNone>()) {
AsObjectArray<mirror::Object, kVerifyNone>()->VisitReferences<kVisitClass>(visitor);
} else if (kVisitClass) {
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 04d0cd8..f9740bb 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -203,7 +203,7 @@
!runtime->GetHeap()->IsObjectValidationEnabled() || !c->IsResolved()) {
return;
}
- for (Class* cur = c; cur != NULL; cur = cur->GetSuperClass()) {
+ for (Class* cur = c; cur != nullptr; cur = cur->GetSuperClass()) {
ArtField* fields = cur->GetIFields();
for (size_t i = 0, count = cur->NumInstanceFields(); i < count; ++i) {
StackHandleScope<1> hs(Thread::Current());
@@ -244,5 +244,10 @@
UNREACHABLE();
}
+ArtField* Object::FindFieldByOffset(MemberOffset offset) {
+ return IsClass() ? ArtField::FindStaticFieldWithOffset(AsClass(), offset.Uint32Value())
+ : ArtField::FindInstanceFieldWithOffset(GetClass(), offset.Uint32Value());
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 343c9bc..5afe99f 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -62,7 +62,7 @@
static constexpr bool kCheckFieldAssignments = false;
// Size of Object.
-static constexpr uint32_t kObjectHeaderSize = kUseBakerOrBrooksReadBarrier ? 16 : 8;
+static constexpr uint32_t kObjectHeaderSize = kUseBrooksReadBarrier ? 16 : 8;
// C++ mirror of java.lang.Object
class MANAGED LOCKABLE Object {
@@ -94,6 +94,9 @@
NO_RETURN
#endif
void SetReadBarrierPointer(Object* rb_ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
+ NO_RETURN
+#endif
bool AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object* rb_ptr)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void AssertReadBarrierPointer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -182,7 +185,12 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
DoubleArray* AsDoubleArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ bool IsString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
String* AsString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -431,6 +439,8 @@
void VisitReferences(const Visitor& visitor, const JavaLangRefVisitor& ref_visitor)
NO_THREAD_SAFETY_ANALYSIS;
+ ArtField* FindFieldByOffset(MemberOffset offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Used by object_test.
static void SetHashCodeSeed(uint32_t new_seed);
// Generate an identity hash code. Public for object test.
@@ -502,11 +512,11 @@
// Monitor and hash code information.
uint32_t monitor_;
-#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
+#ifdef USE_BROOKS_READ_BARRIER
// Note names use an 'x' prefix and the x_rb_ptr_ is of type int
// instead of Object to go with the alphabetical/by-type field order
// on the Java side.
- uint32_t x_rb_ptr_; // For the Baker or Brooks pointer.
+ uint32_t x_rb_ptr_; // For the Brooks pointer.
uint32_t x_xpadding_; // For 8-byte alignment. TODO: get rid of this.
#endif
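Two build-configuration details in this header travel together. First, the object header is 16 bytes only for the Brooks read-barrier variant, which embeds a forwarding pointer in every instance; the Baker variant evidently no longer claims the extra word, so kObjectHeaderSize stops depending on it. Second, read-barrier entry points that are compiled out of a build keep their declarations but gain NO_RETURN, since the surviving bodies reduce to a fatal log. A condensed sketch of that second pattern (stand-alone; the attribute is spelled in C++11 form here):

    class Object;  // forward declaration for the sketch
    #ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
    [[noreturn]]   // the stub body aborts if ever reached
    #endif
    bool AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object* rb_ptr);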
diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h
index 30bc1cd..d473816 100644
--- a/runtime/mirror/object_array-inl.h
+++ b/runtime/mirror/object_array-inl.h
@@ -57,14 +57,14 @@
inline T* ObjectArray<T>::Get(int32_t i) {
if (!CheckIsValidIndex(i)) {
DCHECK(Thread::Current()->IsExceptionPending());
- return NULL;
+ return nullptr;
}
return GetFieldObject<T>(OffsetOfElement(i));
}
template<class T> template<VerifyObjectFlags kVerifyFlags>
inline bool ObjectArray<T>::CheckAssignable(T* object) {
- if (object != NULL) {
+ if (object != nullptr) {
Class* element_class = GetClass<kVerifyFlags>()->GetComponentType();
if (UNLIKELY(!object->InstanceOf(element_class))) {
ThrowArrayStoreException(object);
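Besides the nullptr sweep, these lines spell out ObjectArray::Get's error contract: an out-of-range index returns null with an ArrayIndexOutOfBoundsException already pending, so a null result alone is ambiguous, because a slot that simply holds null looks identical. A hedged caller sketch ('self' is the current Thread):

    mirror::Object* element = array->Get(i);
    if (element == nullptr && self->IsExceptionPending()) {
      return;  // the index was invalid; let the exception propagate
    }
    // Reaching here, a null element means the slot genuinely held null.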
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index 747a008..8e50a7a 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -61,13 +61,13 @@
Handle<String> string(
hs.NewHandle(String::AllocFromModifiedUtf8(self, expected_utf16_length, utf8_in)));
ASSERT_EQ(expected_utf16_length, string->GetLength());
- ASSERT_TRUE(string->GetCharArray() != NULL);
- ASSERT_TRUE(string->GetCharArray()->GetData() != NULL);
+ ASSERT_TRUE(string->GetValue() != nullptr);
// strlen is necessary because the 1-character string "\x00\x00" is interpreted as ""
ASSERT_TRUE(string->Equals(utf8_in) || (expected_utf16_length == 1 && strlen(utf8_in) == 0));
- ASSERT_TRUE(string->Equals(StringPiece(utf8_in)) || (expected_utf16_length == 1 && strlen(utf8_in) == 0));
+ ASSERT_TRUE(string->Equals(StringPiece(utf8_in)) ||
+ (expected_utf16_length == 1 && strlen(utf8_in) == 0));
for (int32_t i = 0; i < expected_utf16_length; i++) {
- EXPECT_EQ(utf16_expected[i], string->UncheckedCharAt(i));
+ EXPECT_EQ(utf16_expected[i], string->CharAt(i));
}
EXPECT_EQ(expected_hash, string->GetHashCode());
}
@@ -110,11 +110,11 @@
Handle<ObjectArray<Object>> oa(
hs.NewHandle(class_linker_->AllocObjectArray<Object>(soa.Self(), 2)));
EXPECT_EQ(2, oa->GetLength());
- EXPECT_TRUE(oa->Get(0) == NULL);
- EXPECT_TRUE(oa->Get(1) == NULL);
+ EXPECT_TRUE(oa->Get(0) == nullptr);
+ EXPECT_TRUE(oa->Get(1) == nullptr);
oa->Set<false>(0, oa.Get());
EXPECT_TRUE(oa->Get(0) == oa.Get());
- EXPECT_TRUE(oa->Get(1) == NULL);
+ EXPECT_TRUE(oa->Get(1) == nullptr);
oa->Set<false>(1, oa.Get());
EXPECT_TRUE(oa->Get(0) == oa.Get());
EXPECT_TRUE(oa->Get(1) == oa.Get());
@@ -122,17 +122,17 @@
Class* aioobe = class_linker_->FindSystemClass(soa.Self(),
"Ljava/lang/ArrayIndexOutOfBoundsException;");
- EXPECT_TRUE(oa->Get(-1) == NULL);
+ EXPECT_TRUE(oa->Get(-1) == nullptr);
EXPECT_TRUE(soa.Self()->IsExceptionPending());
EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass());
soa.Self()->ClearException();
- EXPECT_TRUE(oa->Get(2) == NULL);
+ EXPECT_TRUE(oa->Get(2) == nullptr);
EXPECT_TRUE(soa.Self()->IsExceptionPending());
EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass());
soa.Self()->ClearException();
- ASSERT_TRUE(oa->GetClass() != NULL);
+ ASSERT_TRUE(oa->GetClass() != nullptr);
Handle<mirror::Class> klass(hs.NewHandle(oa->GetClass()));
ASSERT_EQ(2U, klass->NumDirectInterfaces());
EXPECT_EQ(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Cloneable;"),
@@ -308,13 +308,14 @@
Class* java_util_Arrays = class_linker_->FindSystemClass(soa.Self(), "Ljava/util/Arrays;");
ArtMethod* sort = java_util_Arrays->FindDirectMethod("sort", "([I)V");
const DexFile::StringId* string_id = java_lang_dex_file_->FindStringId("[I");
- ASSERT_TRUE(string_id != NULL);
+ ASSERT_TRUE(string_id != nullptr);
const DexFile::TypeId* type_id = java_lang_dex_file_->FindTypeId(
java_lang_dex_file_->GetIndexForStringId(*string_id));
- ASSERT_TRUE(type_id != NULL);
+ ASSERT_TRUE(type_id != nullptr);
uint32_t type_idx = java_lang_dex_file_->GetIndexForTypeId(*type_id);
- Object* array = CheckAndAllocArrayFromCodeInstrumented(type_idx, 3, sort, Thread::Current(), false,
- Runtime::Current()->GetHeap()->GetCurrentAllocator());
+ Object* array = CheckAndAllocArrayFromCodeInstrumented(
+ type_idx, 3, sort, Thread::Current(), false,
+ Runtime::Current()->GetHeap()->GetCurrentAllocator());
EXPECT_TRUE(array->IsArrayInstance());
EXPECT_EQ(3, array->AsArray()->GetLength());
EXPECT_TRUE(array->GetClass()->IsArrayClass());
@@ -367,36 +368,36 @@
Class* klass = class_linker_->FindClass(soa.Self(), "LStaticsFromCode;", loader);
ArtMethod* clinit = klass->FindClassInitializer();
const DexFile::StringId* klass_string_id = dex_file->FindStringId("LStaticsFromCode;");
- ASSERT_TRUE(klass_string_id != NULL);
+ ASSERT_TRUE(klass_string_id != nullptr);
const DexFile::TypeId* klass_type_id = dex_file->FindTypeId(
dex_file->GetIndexForStringId(*klass_string_id));
- ASSERT_TRUE(klass_type_id != NULL);
+ ASSERT_TRUE(klass_type_id != nullptr);
const DexFile::StringId* type_string_id = dex_file->FindStringId("Ljava/lang/Object;");
- ASSERT_TRUE(type_string_id != NULL);
+ ASSERT_TRUE(type_string_id != nullptr);
const DexFile::TypeId* type_type_id = dex_file->FindTypeId(
dex_file->GetIndexForStringId(*type_string_id));
- ASSERT_TRUE(type_type_id != NULL);
+ ASSERT_TRUE(type_type_id != nullptr);
const DexFile::StringId* name_str_id = dex_file->FindStringId("s0");
- ASSERT_TRUE(name_str_id != NULL);
+ ASSERT_TRUE(name_str_id != nullptr);
const DexFile::FieldId* field_id = dex_file->FindFieldId(
*klass_type_id, *name_str_id, *type_type_id);
- ASSERT_TRUE(field_id != NULL);
+ ASSERT_TRUE(field_id != nullptr);
uint32_t field_idx = dex_file->GetIndexForFieldId(*field_id);
ArtField* field = FindFieldFromCode<StaticObjectRead, true>(field_idx, clinit, Thread::Current(),
sizeof(HeapReference<Object>));
Object* s0 = field->GetObj(klass);
- EXPECT_TRUE(s0 != NULL);
+ EXPECT_TRUE(s0 != nullptr);
Handle<CharArray> char_array(hs.NewHandle(CharArray::Alloc(soa.Self(), 0)));
field->SetObj<false>(field->GetDeclaringClass(), char_array.Get());
EXPECT_EQ(char_array.Get(), field->GetObj(klass));
- field->SetObj<false>(field->GetDeclaringClass(), NULL);
- EXPECT_EQ(NULL, field->GetObj(klass));
+ field->SetObj<false>(field->GetDeclaringClass(), nullptr);
+ EXPECT_EQ(nullptr, field->GetObj(klass));
// TODO: more exhaustive tests of all 6 cases of ArtField::*FromCode
}
@@ -416,13 +417,15 @@
AssertString(1, "\xc2\x80", "\x00\x80", 0x80);
AssertString(1, "\xd9\xa6", "\x06\x66", 0x0666);
AssertString(1, "\xdf\xbf", "\x07\xff", 0x07ff);
- AssertString(3, "h\xd9\xa6i", "\x00\x68\x06\x66\x00\x69", (31 * ((31 * 0x68) + 0x0666)) + 0x69);
+ AssertString(3, "h\xd9\xa6i", "\x00\x68\x06\x66\x00\x69",
+ (31 * ((31 * 0x68) + 0x0666)) + 0x69);
// Test three-byte characters.
AssertString(1, "\xe0\xa0\x80", "\x08\x00", 0x0800);
AssertString(1, "\xe1\x88\xb4", "\x12\x34", 0x1234);
AssertString(1, "\xef\xbf\xbf", "\xff\xff", 0xffff);
- AssertString(3, "h\xe1\x88\xb4i", "\x00\x68\x12\x34\x00\x69", (31 * ((31 * 0x68) + 0x1234)) + 0x69);
+ AssertString(3, "h\xe1\x88\xb4i", "\x00\x68\x12\x34\x00\x69",
+ (31 * ((31 * 0x68) + 0x1234)) + 0x69);
// Test four-byte characters.
AssertString(2, "\xf0\x9f\x8f\xa0", "\xd8\x3c\xdf\xe0", (31 * 0xd83c) + 0xdfe0);
@@ -487,12 +490,6 @@
Handle<String> string(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "android")));
EXPECT_EQ(string->GetLength(), 7);
EXPECT_EQ(string->GetUtfLength(), 7);
-
- string->SetOffset(2);
- string->SetCount(5);
- EXPECT_TRUE(string->Equals("droid"));
- EXPECT_EQ(string->GetLength(), 5);
- EXPECT_EQ(string->GetUtfLength(), 5);
}
TEST_F(ObjectTest, DescriptorCompare) {
@@ -507,9 +504,9 @@
Handle<ClassLoader> class_loader_2(hs.NewHandle(soa.Decode<ClassLoader*>(jclass_loader_2)));
Class* klass1 = linker->FindClass(soa.Self(), "LProtoCompare;", class_loader_1);
- ASSERT_TRUE(klass1 != NULL);
+ ASSERT_TRUE(klass1 != nullptr);
Class* klass2 = linker->FindClass(soa.Self(), "LProtoCompare2;", class_loader_2);
- ASSERT_TRUE(klass2 != NULL);
+ ASSERT_TRUE(klass2 != nullptr);
ArtMethod* m1_1 = klass1->GetVirtualMethod(0);
EXPECT_STREQ(m1_1->GetName(), "m1");
@@ -550,13 +547,13 @@
Class* X = class_linker_->FindClass(soa.Self(), "LX;", class_loader);
Class* Y = class_linker_->FindClass(soa.Self(), "LY;", class_loader);
- ASSERT_TRUE(X != NULL);
- ASSERT_TRUE(Y != NULL);
+ ASSERT_TRUE(X != nullptr);
+ ASSERT_TRUE(Y != nullptr);
Handle<Object> x(hs.NewHandle(X->AllocObject(soa.Self())));
Handle<Object> y(hs.NewHandle(Y->AllocObject(soa.Self())));
- ASSERT_TRUE(x.Get() != NULL);
- ASSERT_TRUE(y.Get() != NULL);
+ ASSERT_TRUE(x.Get() != nullptr);
+ ASSERT_TRUE(y.Get() != nullptr);
EXPECT_TRUE(x->InstanceOf(X));
EXPECT_FALSE(x->InstanceOf(Y));
@@ -571,8 +568,10 @@
// All array classes implement Cloneable and Serializable.
Object* array = ObjectArray<Object>::Alloc(soa.Self(), Object_array_class, 1);
- Class* java_lang_Cloneable = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Cloneable;");
- Class* java_io_Serializable = class_linker_->FindSystemClass(soa.Self(), "Ljava/io/Serializable;");
+ Class* java_lang_Cloneable =
+ class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Cloneable;");
+ Class* java_io_Serializable =
+ class_linker_->FindSystemClass(soa.Self(), "Ljava/io/Serializable;");
EXPECT_TRUE(array->InstanceOf(java_lang_Cloneable));
EXPECT_TRUE(array->InstanceOf(java_io_Serializable));
}
@@ -622,35 +621,35 @@
Handle<ClassLoader> class_loader(hs.NewHandle(soa.Decode<ClassLoader*>(jclass_loader)));
Class* X = class_linker_->FindClass(soa.Self(), "LX;", class_loader);
Class* Y = class_linker_->FindClass(soa.Self(), "LY;", class_loader);
- ASSERT_TRUE(X != NULL);
- ASSERT_TRUE(Y != NULL);
+ ASSERT_TRUE(X != nullptr);
+ ASSERT_TRUE(Y != nullptr);
Class* YA = class_linker_->FindClass(soa.Self(), "[LY;", class_loader);
Class* YAA = class_linker_->FindClass(soa.Self(), "[[LY;", class_loader);
- ASSERT_TRUE(YA != NULL);
- ASSERT_TRUE(YAA != NULL);
+ ASSERT_TRUE(YA != nullptr);
+ ASSERT_TRUE(YAA != nullptr);
Class* XAA = class_linker_->FindClass(soa.Self(), "[[LX;", class_loader);
- ASSERT_TRUE(XAA != NULL);
+ ASSERT_TRUE(XAA != nullptr);
Class* O = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;");
Class* OA = class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;");
Class* OAA = class_linker_->FindSystemClass(soa.Self(), "[[Ljava/lang/Object;");
Class* OAAA = class_linker_->FindSystemClass(soa.Self(), "[[[Ljava/lang/Object;");
- ASSERT_TRUE(O != NULL);
- ASSERT_TRUE(OA != NULL);
- ASSERT_TRUE(OAA != NULL);
- ASSERT_TRUE(OAAA != NULL);
+ ASSERT_TRUE(O != nullptr);
+ ASSERT_TRUE(OA != nullptr);
+ ASSERT_TRUE(OAA != nullptr);
+ ASSERT_TRUE(OAAA != nullptr);
Class* S = class_linker_->FindSystemClass(soa.Self(), "Ljava/io/Serializable;");
Class* SA = class_linker_->FindSystemClass(soa.Self(), "[Ljava/io/Serializable;");
Class* SAA = class_linker_->FindSystemClass(soa.Self(), "[[Ljava/io/Serializable;");
- ASSERT_TRUE(S != NULL);
- ASSERT_TRUE(SA != NULL);
- ASSERT_TRUE(SAA != NULL);
+ ASSERT_TRUE(S != nullptr);
+ ASSERT_TRUE(SA != nullptr);
+ ASSERT_TRUE(SAA != nullptr);
Class* IA = class_linker_->FindSystemClass(soa.Self(), "[I");
- ASSERT_TRUE(IA != NULL);
+ ASSERT_TRUE(IA != nullptr);
EXPECT_TRUE(YAA->IsAssignableFrom(YAA)); // identity
EXPECT_TRUE(XAA->IsAssignableFrom(YAA)); // element superclass
@@ -673,60 +672,62 @@
ScopedObjectAccess soa(Thread::Current());
StackHandleScope<1> hs(soa.Self());
Handle<String> s(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "ABC")));
- ASSERT_TRUE(s.Get() != NULL);
+ ASSERT_TRUE(s.Get() != nullptr);
Class* c = s->GetClass();
- ASSERT_TRUE(c != NULL);
+ ASSERT_TRUE(c != nullptr);
// Wrong type.
- EXPECT_TRUE(c->FindDeclaredInstanceField("count", "J") == NULL);
- EXPECT_TRUE(c->FindInstanceField("count", "J") == NULL);
+ EXPECT_TRUE(c->FindDeclaredInstanceField("count", "J") == nullptr);
+ EXPECT_TRUE(c->FindInstanceField("count", "J") == nullptr);
// Wrong name.
- EXPECT_TRUE(c->FindDeclaredInstanceField("Count", "I") == NULL);
- EXPECT_TRUE(c->FindInstanceField("Count", "I") == NULL);
+ EXPECT_TRUE(c->FindDeclaredInstanceField("Count", "I") == nullptr);
+ EXPECT_TRUE(c->FindInstanceField("Count", "I") == nullptr);
// Right name and type.
ArtField* f1 = c->FindDeclaredInstanceField("count", "I");
ArtField* f2 = c->FindInstanceField("count", "I");
- EXPECT_TRUE(f1 != NULL);
- EXPECT_TRUE(f2 != NULL);
+ EXPECT_TRUE(f1 != nullptr);
+ EXPECT_TRUE(f2 != nullptr);
EXPECT_EQ(f1, f2);
// TODO: check that s.count == 3.
// Ensure that we handle superclass fields correctly...
c = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/StringBuilder;");
- ASSERT_TRUE(c != NULL);
+ ASSERT_TRUE(c != nullptr);
// No StringBuilder.count...
- EXPECT_TRUE(c->FindDeclaredInstanceField("count", "I") == NULL);
+ EXPECT_TRUE(c->FindDeclaredInstanceField("count", "I") == nullptr);
// ...but there is an AbstractStringBuilder.count.
- EXPECT_TRUE(c->FindInstanceField("count", "I") != NULL);
+ EXPECT_TRUE(c->FindInstanceField("count", "I") != nullptr);
}
TEST_F(ObjectTest, FindStaticField) {
ScopedObjectAccess soa(Thread::Current());
StackHandleScope<4> hs(soa.Self());
Handle<String> s(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "ABC")));
- ASSERT_TRUE(s.Get() != NULL);
+ ASSERT_TRUE(s.Get() != nullptr);
Handle<Class> c(hs.NewHandle(s->GetClass()));
- ASSERT_TRUE(c.Get() != NULL);
+ ASSERT_TRUE(c.Get() != nullptr);
// Wrong type.
- EXPECT_TRUE(c->FindDeclaredStaticField("CASE_INSENSITIVE_ORDER", "I") == NULL);
- EXPECT_TRUE(mirror::Class::FindStaticField(soa.Self(), c, "CASE_INSENSITIVE_ORDER", "I") == NULL);
+ EXPECT_TRUE(c->FindDeclaredStaticField("CASE_INSENSITIVE_ORDER", "I") == nullptr);
+ EXPECT_TRUE(mirror::Class::FindStaticField(
+ soa.Self(), c, "CASE_INSENSITIVE_ORDER", "I") == nullptr);
// Wrong name.
- EXPECT_TRUE(c->FindDeclaredStaticField("cASE_INSENSITIVE_ORDER", "Ljava/util/Comparator;") == NULL);
+ EXPECT_TRUE(c->FindDeclaredStaticField(
+ "cASE_INSENSITIVE_ORDER", "Ljava/util/Comparator;") == nullptr);
EXPECT_TRUE(
mirror::Class::FindStaticField(soa.Self(), c, "cASE_INSENSITIVE_ORDER",
- "Ljava/util/Comparator;") == NULL);
+ "Ljava/util/Comparator;") == nullptr);
// Right name and type.
ArtField* f1 = c->FindDeclaredStaticField("CASE_INSENSITIVE_ORDER", "Ljava/util/Comparator;");
ArtField* f2 = mirror::Class::FindStaticField(soa.Self(), c, "CASE_INSENSITIVE_ORDER",
"Ljava/util/Comparator;");
- EXPECT_TRUE(f1 != NULL);
- EXPECT_TRUE(f2 != NULL);
+ EXPECT_TRUE(f1 != nullptr);
+ EXPECT_TRUE(f2 != nullptr);
EXPECT_EQ(f1, f2);
// TODO: test static fields via superclasses.
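The test updates above track the String API change: GetCharArray() and UncheckedCharAt() are gone, CharAt() is the bounds-checked accessor, and the deleted offset/count test is obsolete because a string can no longer present a window into a shared char[]. Going by the CharAt definition later in this patch, an out-of-range index returns 0 and leaves a StringIndexOutOfBoundsException pending; an illustrative (uncommitted) assertion sequence, assuming the usual 'soa' and 'hs' fixtures from this file:

    Handle<String> s(hs.NewHandle(String::AllocFromModifiedUtf8(soa.Self(), "abc")));
    EXPECT_EQ('b', s->CharAt(1));
    EXPECT_EQ(0, s->CharAt(42));                    // out of range: returns 0...
    EXPECT_TRUE(soa.Self()->IsExceptionPending());  // ...with the exception set
    soa.Self()->ClearException();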
diff --git a/runtime/mirror/stack_trace_element.cc b/runtime/mirror/stack_trace_element.cc
index ec2b495..96f6a53 100644
--- a/runtime/mirror/stack_trace_element.cc
+++ b/runtime/mirror/stack_trace_element.cc
@@ -30,7 +30,7 @@
void StackTraceElement::SetClass(Class* java_lang_StackTraceElement) {
CHECK(java_lang_StackTraceElement_.IsNull());
- CHECK(java_lang_StackTraceElement != NULL);
+ CHECK(java_lang_StackTraceElement != nullptr);
java_lang_StackTraceElement_ = GcRoot<Class>(java_lang_StackTraceElement);
}
@@ -44,7 +44,7 @@
int32_t line_number) {
StackTraceElement* trace =
down_cast<StackTraceElement*>(GetStackTraceElement()->AllocObject(self));
- if (LIKELY(trace != NULL)) {
+ if (LIKELY(trace != nullptr)) {
if (Runtime::Current()->IsActiveTransaction()) {
trace->Init<true>(declaring_class, method_name, file_name, line_number);
} else {
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index 4a95519..cd5d2f6 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -19,6 +19,7 @@
#include "array.h"
#include "class.h"
+#include "gc/heap-inl.h"
#include "intern_table.h"
#include "runtime.h"
#include "string.h"
@@ -29,41 +30,173 @@
namespace mirror {
inline uint32_t String::ClassSize() {
- uint32_t vtable_entries = Object::kVTableLength + 51;
+ uint32_t vtable_entries = Object::kVTableLength + 52;
return Class::ComputeClassSize(true, vtable_entries, 0, 1, 0, 1, 2);
}
-inline uint16_t String::UncheckedCharAt(int32_t index) {
- return GetCharArray()->Get(index + GetOffset());
-}
+// Sets string count in the allocation code path to ensure it is guarded by a CAS.
+class SetStringCountVisitor {
+ public:
+ explicit SetStringCountVisitor(int32_t count) : count_(count) {
+ }
-inline CharArray* String::GetCharArray() {
- return GetFieldObject<CharArray>(ValueOffset());
-}
+ void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Avoid AsString as object is not yet in live bitmap or allocation stack.
+ String* string = down_cast<String*>(obj);
+ string->SetCount(count_);
+ }
-inline int32_t String::GetLength() {
- int32_t result = GetField32(OFFSET_OF_OBJECT_MEMBER(String, count_));
- DCHECK(result >= 0 && result <= GetCharArray()->GetLength());
- return result;
-}
+ private:
+ const int32_t count_;
+};
-inline void String::SetArray(CharArray* new_array) {
- // Array is invariant so use non-transactional mode. Also disable check as we may run inside
- // a transaction.
- DCHECK(new_array != NULL);
- SetFieldObject<false, false>(OFFSET_OF_OBJECT_MEMBER(String, array_), new_array);
-}
+// Sets string count and value in the allocation code path to ensure it is guarded by a CAS.
+class SetStringCountAndBytesVisitor {
+ public:
+ SetStringCountAndBytesVisitor(int32_t count, Handle<ByteArray> src_array, int32_t offset,
+ int32_t high_byte)
+ : count_(count), src_array_(src_array), offset_(offset), high_byte_(high_byte) {
+ }
+
+ void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Avoid AsString as object is not yet in live bitmap or allocation stack.
+ String* string = down_cast<String*>(obj);
+ string->SetCount(count_);
+ uint16_t* value = string->GetValue();
+ const uint8_t* const src = reinterpret_cast<uint8_t*>(src_array_->GetData()) + offset_;
+ for (int i = 0; i < count_; i++) {
+ value[i] = high_byte_ + (src[i] & 0xFF);
+ }
+ }
+
+ private:
+ const int32_t count_;
+ Handle<ByteArray> src_array_;
+ const int32_t offset_;
+ const int32_t high_byte_;
+};
+
+// Sets string count and value in the allocation code path to ensure it is guarded by a CAS.
+class SetStringCountAndValueVisitorFromCharArray {
+ public:
+ SetStringCountAndValueVisitorFromCharArray(int32_t count, Handle<CharArray> src_array,
+ int32_t offset) :
+ count_(count), src_array_(src_array), offset_(offset) {
+ }
+
+ void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Avoid AsString as object is not yet in live bitmap or allocation stack.
+ String* string = down_cast<String*>(obj);
+ string->SetCount(count_);
+ const uint16_t* const src = src_array_->GetData() + offset_;
+ memcpy(string->GetValue(), src, count_ * sizeof(uint16_t));
+ }
+
+ private:
+ const int32_t count_;
+ Handle<CharArray> src_array_;
+ const int32_t offset_;
+};
+
+// Sets string count and value in the allocation code path to ensure it is guarded by a CAS.
+class SetStringCountAndValueVisitorFromString {
+ public:
+ SetStringCountAndValueVisitorFromString(int32_t count, Handle<String> src_string,
+ int32_t offset) :
+ count_(count), src_string_(src_string), offset_(offset) {
+ }
+
+ void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Avoid AsString as object is not yet in live bitmap or allocation stack.
+ String* string = down_cast<String*>(obj);
+ string->SetCount(count_);
+ const uint16_t* const src = src_string_->GetValue() + offset_;
+ memcpy(string->GetValue(), src, count_ * sizeof(uint16_t));
+ }
+
+ private:
+ const int32_t count_;
+ Handle<String> src_string_;
+ const int32_t offset_;
+};
inline String* String::Intern() {
return Runtime::Current()->GetInternTable()->InternWeak(this);
}
+inline uint16_t String::CharAt(int32_t index) {
+ int32_t count = GetField32(OFFSET_OF_OBJECT_MEMBER(String, count_));
+ if (UNLIKELY((index < 0) || (index >= count))) {
+ Thread* self = Thread::Current();
+ self->ThrowNewExceptionF("Ljava/lang/StringIndexOutOfBoundsException;",
+ "length=%i; index=%i", count, index);
+ return 0;
+ }
+ return GetValue()[index];
+}
+
+template<VerifyObjectFlags kVerifyFlags>
+inline size_t String::SizeOf() {
+ return sizeof(String) + (sizeof(uint16_t) * GetLength<kVerifyFlags>());
+}
+
+template <bool kIsInstrumented, typename PreFenceVisitor>
+inline String* String::Alloc(Thread* self, int32_t utf16_length, gc::AllocatorType allocator_type,
+ const PreFenceVisitor& pre_fence_visitor) {
+ size_t header_size = sizeof(String);
+ size_t data_size = sizeof(uint16_t) * utf16_length;
+ size_t size = header_size + data_size;
+ Class* string_class = GetJavaLangString();
+
+ // Check for overflow and throw OutOfMemoryError if this was an unreasonable request.
+ if (UNLIKELY(size < data_size)) {
+ self->ThrowOutOfMemoryError(StringPrintf("%s of length %d would overflow",
+ PrettyDescriptor(string_class).c_str(),
+ utf16_length).c_str());
+ return nullptr;
+ }
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ return down_cast<String*>(
+ heap->AllocObjectWithAllocator<kIsInstrumented, false>(self, string_class, size,
+ allocator_type, pre_fence_visitor));
+}
+
+template <bool kIsInstrumented>
+inline String* String::AllocFromByteArray(Thread* self, int32_t byte_length,
+ Handle<ByteArray> array, int32_t offset,
+ int32_t high_byte, gc::AllocatorType allocator_type) {
+ SetStringCountAndBytesVisitor visitor(byte_length, array, offset, high_byte << 8);
+ String* string = Alloc<kIsInstrumented>(self, byte_length, allocator_type, visitor);
+ return string;
+}
+
+template <bool kIsInstrumented>
+inline String* String::AllocFromCharArray(Thread* self, int32_t array_length,
+ Handle<CharArray> array, int32_t offset,
+ gc::AllocatorType allocator_type) {
+ SetStringCountAndValueVisitorFromCharArray visitor(array_length, array, offset);
+ String* new_string = Alloc<kIsInstrumented>(self, array_length, allocator_type, visitor);
+ return new_string;
+}
+
+template <bool kIsInstrumented>
+inline String* String::AllocFromString(Thread* self, int32_t string_length, Handle<String> string,
+ int32_t offset, gc::AllocatorType allocator_type) {
+ SetStringCountAndValueVisitorFromString visitor(string_length, string, offset);
+ String* new_string = Alloc<kIsInstrumented>(self, string_length, allocator_type, visitor);
+ return new_string;
+}
+
inline int32_t String::GetHashCode() {
int32_t result = GetField32(OFFSET_OF_OBJECT_MEMBER(String, hash_code_));
if (UNLIKELY(result == 0)) {
result = ComputeHashCode();
}
- DCHECK(result != 0 || ComputeUtf16Hash(GetCharArray(), GetOffset(), GetLength()) == 0)
+ DCHECK(result != 0 || ComputeUtf16Hash(GetValue(), GetLength()) == 0)
<< ToModifiedUtf8() << " " << result;
return result;
}
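All four visitor classes above exist for the same reason: with the payload allocated inline, a string's count (and, where known, its contents) must be written inside the allocation path, before the allocator's publication fence makes the object visible to other threads; that is what the PreFenceVisitor parameter of Alloc is for. Direct use, modeled on AllocFromUtf16 later in this patch:

    // Allocate a string of 'length' uninitialized characters, setting its
    // count before publication. kIsInstrumented = true as in the callers here.
    gc::AllocatorType allocator = Runtime::Current()->GetHeap()->GetCurrentAllocator();
    SetStringCountVisitor visitor(length);
    String* s = String::Alloc<true>(self, length, allocator, visitor);
    if (UNLIKELY(s == nullptr)) {
      return nullptr;  // OutOfMemoryError is already pending on 'self'
    }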
diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc
index bd6a63c..b6236b1 100644
--- a/runtime/mirror/string.cc
+++ b/runtime/mirror/string.cc
@@ -20,10 +20,11 @@
#include "array.h"
#include "class-inl.h"
#include "gc/accounting/card_table-inl.h"
+#include "handle_scope-inl.h"
#include "intern_table.h"
#include "object-inl.h"
#include "runtime.h"
-#include "handle_scope-inl.h"
+#include "string-inl.h"
#include "thread.h"
#include "utf-inl.h"
@@ -40,7 +41,7 @@
} else if (start > count) {
start = count;
}
- const uint16_t* chars = GetCharArray()->GetData() + GetOffset();
+ const uint16_t* chars = GetValue();
const uint16_t* p = chars + start;
const uint16_t* end = chars + count;
while (p < end) {
@@ -53,7 +54,7 @@
void String::SetClass(Class* java_lang_String) {
CHECK(java_lang_String_.IsNull());
- CHECK(java_lang_String != NULL);
+ CHECK(java_lang_String != nullptr);
java_lang_String_ = GcRoot<Class>(java_lang_String);
}
@@ -62,36 +63,46 @@
java_lang_String_ = GcRoot<Class>(nullptr);
}
-int32_t String::ComputeHashCode() {
- const int32_t hash_code = ComputeUtf16Hash(GetCharArray(), GetOffset(), GetLength());
+int String::ComputeHashCode() {
+ const int32_t hash_code = ComputeUtf16Hash(GetValue(), GetLength());
SetHashCode(hash_code);
return hash_code;
}
int32_t String::GetUtfLength() {
- return CountUtf8Bytes(GetCharArray()->GetData() + GetOffset(), GetLength());
+ return CountUtf8Bytes(GetValue(), GetLength());
}
-String* String::AllocFromUtf16(Thread* self,
- int32_t utf16_length,
- const uint16_t* utf16_data_in,
- int32_t hash_code) {
+void String::SetCharAt(int32_t index, uint16_t c) {
+ DCHECK((index >= 0) && (index < count_));
+ GetValue()[index] = c;
+}
+
+String* String::AllocFromStrings(Thread* self, Handle<String> string, Handle<String> string2) {
+ int32_t length = string->GetLength();
+ int32_t length2 = string2->GetLength();
+ gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
+ SetStringCountVisitor visitor(length + length2);
+ String* new_string = Alloc<true>(self, length + length2, allocator_type, visitor);
+ if (UNLIKELY(new_string == nullptr)) {
+ return nullptr;
+ }
+ uint16_t* new_value = new_string->GetValue();
+ memcpy(new_value, string->GetValue(), length * sizeof(uint16_t));
+ memcpy(new_value + length, string2->GetValue(), length2 * sizeof(uint16_t));
+ return new_string;
+}
+
+String* String::AllocFromUtf16(Thread* self, int32_t utf16_length, const uint16_t* utf16_data_in) {
CHECK(utf16_data_in != nullptr || utf16_length == 0);
- String* string = Alloc(self, utf16_length);
+ gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
+ SetStringCountVisitor visitor(utf16_length);
+ String* string = Alloc<true>(self, utf16_length, allocator_type, visitor);
if (UNLIKELY(string == nullptr)) {
return nullptr;
}
- CharArray* array = const_cast<CharArray*>(string->GetCharArray());
- if (UNLIKELY(array == nullptr)) {
- return nullptr;
- }
- memcpy(array->GetData(), utf16_data_in, utf16_length * sizeof(uint16_t));
- if (hash_code != 0) {
- DCHECK_EQ(hash_code, ComputeUtf16Hash(utf16_data_in, utf16_length));
- string->SetHashCode(hash_code);
- } else {
- string->ComputeHashCode();
- }
+ uint16_t* array = string->GetValue();
+ memcpy(array, utf16_data_in, utf16_length * sizeof(uint16_t));
return string;
}
@@ -103,33 +114,14 @@
String* String::AllocFromModifiedUtf8(Thread* self, int32_t utf16_length,
const char* utf8_data_in) {
- String* string = Alloc(self, utf16_length);
+ gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
+ SetStringCountVisitor visitor(utf16_length);
+ String* string = Alloc<true>(self, utf16_length, allocator_type, visitor);
if (UNLIKELY(string == nullptr)) {
return nullptr;
}
- uint16_t* utf16_data_out =
- const_cast<uint16_t*>(string->GetCharArray()->GetData());
+ uint16_t* utf16_data_out = string->GetValue();
ConvertModifiedUtf8ToUtf16(utf16_data_out, utf8_data_in);
- string->ComputeHashCode();
- return string;
-}
-
-String* String::Alloc(Thread* self, int32_t utf16_length) {
- StackHandleScope<1> hs(self);
- Handle<CharArray> array(hs.NewHandle(CharArray::Alloc(self, utf16_length)));
- if (UNLIKELY(array.Get() == nullptr)) {
- return nullptr;
- }
- return Alloc(self, array);
-}
-
-String* String::Alloc(Thread* self, Handle<CharArray> array) {
- // Hold reference in case AllocObject causes GC.
- String* string = down_cast<String*>(GetJavaLangString()->AllocObject(self));
- if (LIKELY(string != nullptr)) {
- string->SetArray(array.Get());
- string->SetCount(array->GetLength());
- }
return string;
}
@@ -137,7 +129,7 @@
if (this == that) {
// Quick reference equality test
return true;
- } else if (that == NULL) {
+ } else if (that == nullptr) {
// Null isn't an instanceof anything
return false;
} else if (this->GetLength() != that->GetLength()) {
@@ -147,7 +139,7 @@
// Note: don't short circuit on hash code as we're presumably here as the
// hash code was already equal
for (int32_t i = 0; i < that->GetLength(); ++i) {
- if (this->UncheckedCharAt(i) != that->UncheckedCharAt(i)) {
+ if (this->CharAt(i) != that->CharAt(i)) {
return false;
}
}
@@ -160,7 +152,7 @@
return false;
} else {
for (int32_t i = 0; i < that_length; ++i) {
- if (this->UncheckedCharAt(i) != that_chars[that_offset + i]) {
+ if (this->CharAt(i) != that_chars[that_offset + i]) {
return false;
}
}
@@ -177,7 +169,7 @@
return false;
}
- if (GetLeadingUtf16Char(ch) != UncheckedCharAt(i++)) {
+ if (GetLeadingUtf16Char(ch) != CharAt(i++)) {
return false;
}
@@ -187,7 +179,7 @@
return false;
}
- if (UncheckedCharAt(i++) != trailing) {
+ if (CharAt(i++) != trailing) {
return false;
}
}
@@ -201,7 +193,7 @@
for (int32_t i = 0; i < length; ++i) {
uint32_t ch = GetUtf16FromUtf8(&p);
- if (GetLeadingUtf16Char(ch) != UncheckedCharAt(i)) {
+ if (GetLeadingUtf16Char(ch) != CharAt(i)) {
return false;
}
@@ -211,7 +203,7 @@
return false;
}
- if (UncheckedCharAt(++i) != trailing) {
+ if (CharAt(++i) != trailing) {
return false;
}
}
@@ -221,7 +213,7 @@
// Create a modified UTF-8 encoded std::string from a java/lang/String object.
std::string String::ToModifiedUtf8() {
- const uint16_t* chars = GetCharArray()->GetData() + GetOffset();
+ const uint16_t* chars = GetValue();
size_t byte_count = GetUtfLength();
std::string result(byte_count, static_cast<char>(0));
ConvertUtf16ToModifiedUtf8(&result[0], chars, GetLength());
@@ -244,8 +236,8 @@
int32_t rhsCount = rhs->GetLength();
int32_t countDiff = lhsCount - rhsCount;
int32_t minCount = (countDiff < 0) ? lhsCount : rhsCount;
- const uint16_t* lhsChars = lhs->GetCharArray()->GetData() + lhs->GetOffset();
- const uint16_t* rhsChars = rhs->GetCharArray()->GetData() + rhs->GetOffset();
+ const uint16_t* lhsChars = lhs->GetValue();
+ const uint16_t* rhsChars = rhs->GetValue();
int32_t otherRes = MemCmp16(lhsChars, rhsChars, minCount);
if (otherRes != 0) {
return otherRes;
@@ -257,5 +249,19 @@
java_lang_String_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
}
+CharArray* String::ToCharArray(Thread* self) {
+ StackHandleScope<1> hs(self);
+ Handle<String> string(hs.NewHandle(this));
+ CharArray* result = CharArray::Alloc(self, GetLength());
+ memcpy(result->GetData(), string->GetValue(), string->GetLength() * sizeof(uint16_t));
+ return result;
+}
+
+void String::GetChars(int32_t start, int32_t end, Handle<CharArray> array, int32_t index) {
+ uint16_t* data = array->GetData() + index;
+ uint16_t* value = GetValue() + start;
+ memcpy(data, value, (end - start) * sizeof(uint16_t));
+}
+
} // namespace mirror
} // namespace art
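Of the new out-of-line helpers, GetChars is worth flagging: it is a raw memcpy of [start, end) with no range validation of its own, so callers (the managed String.getChars path, for instance) are expected to have checked bounds already. A hypothetical guarded wrapper; the name and checks are assumptions, not part of the patch:

    bool SafeGetChars(Handle<String> src, int32_t start, int32_t end,
                      Handle<CharArray> dst, int32_t dst_index)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      if (start < 0 || end < start || end > src->GetLength()) return false;
      if (dst_index < 0 || dst->GetLength() - dst_index < end - start) return false;
      src->GetChars(start, end, dst, dst_index);
      return true;
    }

ToCharArray, for its part, parks 'this' in a handle before CharArray::Alloc precisely because that allocation can trigger a moving GC.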
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index 0670d0b..fcfe976 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_MIRROR_STRING_H_
#include "gc_root.h"
+#include "gc/allocator_type.h"
#include "object.h"
#include "object_callbacks.h"
@@ -45,22 +46,27 @@
}
static MemberOffset ValueOffset() {
- return OFFSET_OF_OBJECT_MEMBER(String, array_);
+ return OFFSET_OF_OBJECT_MEMBER(String, value_);
}
- static MemberOffset OffsetOffset() {
- return OFFSET_OF_OBJECT_MEMBER(String, offset_);
+ uint16_t* GetValue() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return &value_[0];
}
- CharArray* GetCharArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ size_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- int32_t GetOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- int32_t result = GetField32(OffsetOffset());
- DCHECK_LE(0, result);
- return result;
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ int32_t GetLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(String, count_));
}
- int32_t GetLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetCount(int32_t new_count) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Count is invariant so use non-transactional mode. Also disable check as we may run inside
+ // a transaction.
+ DCHECK_LE(0, new_count);
+ SetField32<false, false>(OFFSET_OF_OBJECT_MEMBER(String, count_), new_count);
+ }
int32_t GetHashCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -69,19 +75,47 @@
int32_t GetUtfLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint16_t CharAt(int32_t index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void SetCharAt(int32_t index, uint16_t c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
String* Intern() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static String* AllocFromUtf16(Thread* self,
- int32_t utf16_length,
- const uint16_t* utf16_data_in,
- int32_t hash_code = 0)
+ template <bool kIsInstrumented, typename PreFenceVisitor>
+ ALWAYS_INLINE static String* Alloc(Thread* self, int32_t utf16_length,
+ gc::AllocatorType allocator_type,
+ const PreFenceVisitor& pre_fence_visitor)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ template <bool kIsInstrumented>
+ ALWAYS_INLINE static String* AllocFromByteArray(Thread* self, int32_t byte_length,
+ Handle<ByteArray> array, int32_t offset,
+ int32_t high_byte,
+ gc::AllocatorType allocator_type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ template <bool kIsInstrumented>
+ ALWAYS_INLINE static String* AllocFromCharArray(Thread* self, int32_t array_length,
+ Handle<CharArray> array, int32_t offset,
+ gc::AllocatorType allocator_type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ template <bool kIsInstrumented>
+ ALWAYS_INLINE static String* AllocFromString(Thread* self, int32_t string_length,
+ Handle<String> string, int32_t offset,
+ gc::AllocatorType allocator_type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static String* AllocFromStrings(Thread* self, Handle<String> string, Handle<String> string2)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static String* AllocFromUtf16(Thread* self, int32_t utf16_length, const uint16_t* utf16_data_in)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static String* AllocFromModifiedUtf8(Thread* self, const char* utf)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static String* AllocFromModifiedUtf8(Thread* self, int32_t utf16_length,
- const char* utf8_data_in)
+ static String* AllocFromModifiedUtf8(Thread* self, int32_t utf16_length, const char* utf8_data_in)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// TODO: This is only used in the interpreter to compare against
@@ -112,13 +146,10 @@
int32_t CompareTo(String* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetOffset(int32_t new_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // Offset is only used during testing so use non-transactional mode.
- DCHECK_LE(0, new_offset);
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(String, offset_), new_offset);
- }
+ CharArray* ToCharArray(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetArray(CharArray* new_array) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void GetChars(int32_t start, int32_t end, Handle<CharArray> array, int32_t index)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static Class* GetJavaLangString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(!java_lang_String_.IsNull());
@@ -130,9 +161,6 @@
static void VisitRoots(RootVisitor* visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // TODO: Make this private. It's only used on ObjectTest at the moment.
- uint16_t UncheckedCharAt(int32_t index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
private:
void SetHashCode(int32_t new_hash_code) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Hash code is invariant so use non-transactional mode. Also disable check as we may run inside
@@ -141,27 +169,12 @@
SetField32<false, false>(OFFSET_OF_OBJECT_MEMBER(String, hash_code_), new_hash_code);
}
- void SetCount(int32_t new_count) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // Count is invariant so use non-transactional mode. Also disable check as we may run inside
- // a transaction.
- DCHECK_LE(0, new_count);
- SetField32<false, false>(OFFSET_OF_OBJECT_MEMBER(String, count_), new_count);
- }
-
- static String* Alloc(Thread* self, int32_t utf16_length)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- static String* Alloc(Thread* self, Handle<CharArray> array)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
- HeapReference<CharArray> array_;
-
int32_t count_;
uint32_t hash_code_;
- int32_t offset_;
+ uint16_t value_[0];
static GcRoot<Class> java_lang_String_;
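The field change at the bottom is the heart of the patch: the HeapReference<CharArray> array_ and the int32_t offset_ are gone, and the UTF-16 payload now follows the header directly as a zero-length trailing array, replacing the offset/array pair that once let several strings share one char[]. One allocation instead of two, one dereference instead of two. A stand-alone illustration of the idiom (stand-in type; zero-length arrays are a GCC/Clang extension, as in the patch):

    #include <cstdint>
    #include <cstdlib>
    #include <cstring>

    struct MiniString {
      int32_t  count_;      // character count
      uint32_t hash_code_;  // lazily computed; 0 until first use
      uint16_t value_[0];   // payload starts right after the header
    };

    MiniString* NewMiniString(const uint16_t* chars, int32_t n) {
      void* mem = std::malloc(sizeof(MiniString) + n * sizeof(uint16_t));
      if (mem == nullptr) return nullptr;
      MiniString* s = static_cast<MiniString*>(mem);
      s->count_ = n;
      s->hash_code_ = 0;
      std::memcpy(s->value_, chars, n * sizeof(uint16_t));
      return s;
    }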
diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc
index b564649..782b9c0 100644
--- a/runtime/mirror/throwable.cc
+++ b/runtime/mirror/throwable.cc
@@ -45,7 +45,7 @@
CHECK(cause != nullptr);
CHECK(cause != this);
Throwable* current_cause = GetFieldObject<Throwable>(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_));
- CHECK(current_cause == NULL || current_cause == this);
+ CHECK(current_cause == nullptr || current_cause == this);
if (Runtime::Current()->IsActiveTransaction()) {
SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_), cause);
} else {
@@ -80,7 +80,7 @@
std::string result(PrettyTypeOf(this));
result += ": ";
String* msg = GetDetailMessage();
- if (msg != NULL) {
+ if (msg != nullptr) {
result += msg->ToModifiedUtf8();
}
result += "\n";
@@ -115,10 +115,14 @@
} else {
for (int32_t i = 0; i < ste_array->GetLength(); ++i) {
StackTraceElement* ste = ste_array->Get(i);
- result += StringPrintf(" at %s (%s:%d)\n",
- ste->GetMethodName()->ToModifiedUtf8().c_str(),
- ste->GetFileName()->ToModifiedUtf8().c_str(),
- ste->GetLineNumber());
+ DCHECK(ste != nullptr);
+ auto* method_name = ste->GetMethodName();
+ auto* file_name = ste->GetFileName();
+ result += StringPrintf(
+ " at %s (%s:%d)\n",
+ method_name != nullptr ? method_name->ToModifiedUtf8().c_str() : "<unknown method>",
+ file_name != nullptr ? file_name->ToModifiedUtf8().c_str() : "(Unknown Source)",
+ ste->GetLineNumber());
}
}
} else {
@@ -135,7 +139,7 @@
void Throwable::SetClass(Class* java_lang_Throwable) {
CHECK(java_lang_Throwable_.IsNull());
- CHECK(java_lang_Throwable != NULL);
+ CHECK(java_lang_Throwable != nullptr);
java_lang_Throwable_ = GcRoot<Class>(java_lang_Throwable);
}
diff --git a/runtime/modifiers.h b/runtime/modifiers.h
index e7bd207..8586dd1 100644
--- a/runtime/modifiers.h
+++ b/runtime/modifiers.h
@@ -65,6 +65,8 @@
static constexpr uint32_t kAccClassIsFinalizerReference = 0x02000000;
// class is a phantom reference
static constexpr uint32_t kAccClassIsPhantomReference = 0x01000000;
+// class is the string class
+static constexpr uint32_t kAccClassIsStringClass = 0x00800000;
static constexpr uint32_t kAccReferenceFlagsMask = (kAccClassIsReference
| kAccClassIsWeakReference
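kAccClassIsStringClass joins the other synthetic class flags in the high bits of the access-flags word, bits that dex files never populate, so the runtime can presumably answer Class::IsStringClass() with a single mask test rather than comparing against the cached java.lang.String class. A minimal sketch of that presumed predicate:

    #include <cstdint>

    constexpr uint32_t kAccClassIsStringClass = 0x00800000;  // value from the hunk

    // Hypothetical check in the style of the other kAccClassIs* predicates.
    inline bool IsStringClassFlags(uint32_t class_access_flags) {
      return (class_access_flags & kAccClassIsStringClass) != 0;
    }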
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 1a80ded..4b41225 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -68,11 +68,11 @@
* at any given time.
*/
-bool (*Monitor::is_sensitive_thread_hook_)() = NULL;
+bool (*Monitor::is_sensitive_thread_hook_)() = nullptr;
uint32_t Monitor::lock_profiling_threshold_ = 0;
bool Monitor::IsSensitiveThread() {
- if (is_sensitive_thread_hook_ != NULL) {
+ if (is_sensitive_thread_hook_ != nullptr) {
return (*is_sensitive_thread_hook_)();
}
return false;
@@ -90,9 +90,9 @@
owner_(owner),
lock_count_(0),
obj_(GcRoot<mirror::Object>(obj)),
- wait_set_(NULL),
+ wait_set_(nullptr),
hash_code_(hash_code),
- locking_method_(NULL),
+ locking_method_(nullptr),
locking_dex_pc_(0),
monitor_id_(MonitorPool::ComputeMonitorId(this, self)) {
#ifdef __LP64__
@@ -113,9 +113,9 @@
owner_(owner),
lock_count_(0),
obj_(GcRoot<mirror::Object>(obj)),
- wait_set_(NULL),
+ wait_set_(nullptr),
hash_code_(hash_code),
- locking_method_(NULL),
+ locking_method_(nullptr),
locking_dex_pc_(0),
monitor_id_(id) {
#ifdef __LP64__
@@ -183,9 +183,9 @@
void Monitor::AppendToWaitSet(Thread* thread) {
DCHECK(owner_ == Thread::Current());
- DCHECK(thread != NULL);
+ DCHECK(thread != nullptr);
DCHECK(thread->GetWaitNext() == nullptr) << thread->GetWaitNext();
- if (wait_set_ == NULL) {
+ if (wait_set_ == nullptr) {
wait_set_ = thread;
return;
}
@@ -200,8 +200,8 @@
void Monitor::RemoveFromWaitSet(Thread *thread) {
DCHECK(owner_ == Thread::Current());
- DCHECK(thread != NULL);
- if (wait_set_ == NULL) {
+ DCHECK(thread != nullptr);
+ if (wait_set_ == nullptr) {
return;
}
if (wait_set_ == thread) {
@@ -211,7 +211,7 @@
}
Thread* t = wait_set_;
- while (t->GetWaitNext() != NULL) {
+ while (t->GetWaitNext() != nullptr) {
if (t->GetWaitNext() == thread) {
t->SetWaitNext(thread->GetWaitNext());
thread->SetWaitNext(nullptr);
@@ -253,7 +253,8 @@
self->SetMonitorEnterObject(GetObject());
{
ScopedThreadStateChange tsc(self, kBlocked); // Change to blocked and give up mutator_lock_.
- MutexLock mu2(self, monitor_lock_); // Reacquire monitor_lock_ without mutator_lock_ for Wait.
+ // Reacquire monitor_lock_ without mutator_lock_ for Wait.
+ MutexLock mu2(self, monitor_lock_);
if (owner_ != nullptr) { // Did the owner_ give the lock up?
if (ATRACE_ENABLED()) {
std::string name;
@@ -311,8 +312,8 @@
}
static std::string ThreadToString(Thread* thread) {
- if (thread == NULL) {
- return "NULL";
+ if (thread == nullptr) {
+ return "nullptr";
}
std::ostringstream oss;
// TODO: alternatively, we could just return the thread's name.
@@ -322,7 +323,7 @@
void Monitor::FailedUnlock(mirror::Object* o, Thread* expected_owner, Thread* found_owner,
Monitor* monitor) {
- Thread* current_owner = NULL;
+ Thread* current_owner = nullptr;
std::string current_owner_string;
std::string expected_owner_string;
std::string found_owner_string;
@@ -331,14 +332,14 @@
// Acquire thread list lock so threads won't disappear from under us.
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
// Re-read owner now that we hold lock.
- current_owner = (monitor != NULL) ? monitor->GetOwner() : NULL;
+ current_owner = (monitor != nullptr) ? monitor->GetOwner() : nullptr;
// Get short descriptions of the threads involved.
current_owner_string = ThreadToString(current_owner);
expected_owner_string = ThreadToString(expected_owner);
found_owner_string = ThreadToString(found_owner);
}
- if (current_owner == NULL) {
- if (found_owner == NULL) {
+ if (current_owner == nullptr) {
+ if (found_owner == nullptr) {
ThrowIllegalMonitorStateExceptionF("unlock of unowned monitor on object of type '%s'"
" on thread '%s'",
PrettyTypeOf(o).c_str(),
@@ -352,7 +353,7 @@
expected_owner_string.c_str());
}
} else {
- if (found_owner == NULL) {
+ if (found_owner == nullptr) {
// Race: originally there was no owner, there is now
ThrowIllegalMonitorStateExceptionF("unlock of monitor owned by '%s' on object of type '%s'"
" (originally believed to be unowned) on thread '%s'",
@@ -380,14 +381,14 @@
}
bool Monitor::Unlock(Thread* self) {
- DCHECK(self != NULL);
+ DCHECK(self != nullptr);
MutexLock mu(self, monitor_lock_);
Thread* owner = owner_;
if (owner == self) {
// We own the monitor, so nobody else can be in here.
if (lock_count_ == 0) {
- owner_ = NULL;
- locking_method_ = NULL;
+ owner_ = nullptr;
+ locking_method_ = nullptr;
locking_dex_pc_ = 0;
// Wake a contender.
monitor_contenders_.Signal(self);
@@ -406,7 +407,7 @@
void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
bool interruptShouldThrow, ThreadState why) {
- DCHECK(self != NULL);
+ DCHECK(self != nullptr);
DCHECK(why == kTimedWaiting || why == kWaiting || why == kSleeping);
monitor_lock_.Lock(self);
@@ -446,9 +447,9 @@
++num_waiters_;
int prev_lock_count = lock_count_;
lock_count_ = 0;
- owner_ = NULL;
+ owner_ = nullptr;
mirror::ArtMethod* saved_method = locking_method_;
- locking_method_ = NULL;
+ locking_method_ = nullptr;
uintptr_t saved_dex_pc = locking_dex_pc_;
locking_dex_pc_ = 0;
@@ -465,7 +466,7 @@
MutexLock mu(self, *self->GetWaitMutex());
// Set wait_monitor_ to the monitor object we will be waiting on. When wait_monitor_ is
- // non-NULL a notifying or interrupting thread must signal the thread's wait_cond_ to wake it
+ // non-null a notifying or interrupting thread must signal the thread's wait_cond_ to wake it
// up.
DCHECK(self->GetWaitMonitor() == nullptr);
self->SetWaitMonitor(this);
@@ -538,13 +539,13 @@
self->SetInterruptedLocked(false);
}
if (interruptShouldThrow) {
- self->ThrowNewException("Ljava/lang/InterruptedException;", NULL);
+ self->ThrowNewException("Ljava/lang/InterruptedException;", nullptr);
}
}
}
void Monitor::Notify(Thread* self) {
- DCHECK(self != NULL);
+ DCHECK(self != nullptr);
MutexLock mu(self, monitor_lock_);
// Make sure that we hold the lock.
if (owner_ != self) {
@@ -552,7 +553,7 @@
return;
}
// Signal the first waiting thread in the wait set.
- while (wait_set_ != NULL) {
+ while (wait_set_ != nullptr) {
Thread* thread = wait_set_;
wait_set_ = thread->GetWaitNext();
thread->SetWaitNext(nullptr);
@@ -567,7 +568,7 @@
}
void Monitor::NotifyAll(Thread* self) {
- DCHECK(self != NULL);
+ DCHECK(self != nullptr);
MutexLock mu(self, monitor_lock_);
// Make sure that we hold the lock.
if (owner_ != self) {
@@ -575,7 +576,7 @@
return;
}
// Signal all threads in the wait set.
- while (wait_set_ != NULL) {
+ while (wait_set_ != nullptr) {
Thread* thread = wait_set_;
wait_set_ = thread->GetWaitNext();
thread->SetWaitNext(nullptr);
@@ -625,7 +626,7 @@
obj->SetLockWord(new_lw, false);
VLOG(monitor) << "Deflated" << obj << " to empty lock word";
}
- // The monitor is deflated, mark the object as nullptr so that we know to delete it during the
+ // The monitor is deflated, mark the object as null so that we know to delete it during the
// next GC.
monitor->obj_ = GcRoot<mirror::Object>(nullptr);
}
@@ -697,8 +698,8 @@
}
mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) {
- DCHECK(self != NULL);
- DCHECK(obj != NULL);
+ DCHECK(self != nullptr);
+ DCHECK(obj != nullptr);
obj = FakeLock(obj);
uint32_t thread_id = self->GetThreadId();
size_t contention_count = 0;
@@ -772,8 +773,8 @@
}
bool Monitor::MonitorExit(Thread* self, mirror::Object* obj) {
- DCHECK(self != NULL);
- DCHECK(obj != NULL);
+ DCHECK(self != nullptr);
+ DCHECK(obj != nullptr);
obj = FakeUnlock(obj);
StackHandleScope<1> hs(self);
Handle<mirror::Object> h_obj(hs.NewHandle(obj));
@@ -979,11 +980,11 @@
// This is used to implement JDWP's ThreadReference.CurrentContendedMonitor, and has a bizarre
// definition of contended that includes a monitor a thread is trying to enter...
mirror::Object* result = thread->GetMonitorEnterObject();
- if (result == NULL) {
+ if (result == nullptr) {
// ...but also a monitor that the thread is waiting on.
MutexLock mu(Thread::Current(), *thread->GetWaitMutex());
Monitor* monitor = thread->GetWaitMonitor();
- if (monitor != NULL) {
+ if (monitor != nullptr) {
result = monitor->GetObject();
}
}
@@ -993,7 +994,7 @@
void Monitor::VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::Object*, void*),
void* callback_context, bool abort_on_failure) {
mirror::ArtMethod* m = stack_visitor->GetMethod();
- CHECK(m != NULL);
+ CHECK(m != nullptr);
// Native methods are an easy special case.
// TODO: use the JNI implementation's table of explicit MonitorEnter calls and dump those too.
@@ -1013,7 +1014,7 @@
// Is there any reason to believe there's any synchronization in this method?
const DexFile::CodeItem* code_item = m->GetCodeItem();
- CHECK(code_item != NULL) << PrettyMethod(m);
+ CHECK(code_item != nullptr) << PrettyMethod(m);
if (code_item->tries_size_ == 0) {
return; // No "tries" implies no synchronization, so no held locks to report.
}
@@ -1088,13 +1089,13 @@
void Monitor::TranslateLocation(mirror::ArtMethod* method, uint32_t dex_pc,
const char** source_file, uint32_t* line_number) const {
// If method is null, location is unknown
- if (method == NULL) {
+ if (method == nullptr) {
*source_file = "";
*line_number = 0;
return;
}
*source_file = method->GetDeclaringClassSourceFile();
- if (*source_file == NULL) {
+ if (*source_file == nullptr) {
*source_file = "";
}
*line_number = method->GetLineNumFromDexPC(dex_pc);
@@ -1103,7 +1104,7 @@
uint32_t Monitor::GetOwnerThreadId() {
MutexLock mu(Thread::Current(), monitor_lock_);
Thread* owner = owner_;
- if (owner != NULL) {
+ if (owner != nullptr) {
return owner->GetThreadId();
} else {
return ThreadList::kInvalidThreadId;
@@ -1185,7 +1186,7 @@
if (Monitor::Deflate(args->self, object)) {
DCHECK_NE(object->GetLockWord(true).GetState(), LockWord::kFatLocked);
++args->deflate_count;
- // If we deflated, return nullptr so that the monitor gets removed from the array.
+ // If we deflated, return null so that the monitor gets removed from the array.
return nullptr;
}
return object; // Monitor was not deflated.
@@ -1198,7 +1199,7 @@
return args.deflate_count;
}
-MonitorInfo::MonitorInfo(mirror::Object* obj) : owner_(NULL), entry_count_(0) {
+MonitorInfo::MonitorInfo(mirror::Object* obj) : owner_(nullptr), entry_count_(0) {
DCHECK(obj != nullptr);
LockWord lock_word = obj->GetLockWord(true);
switch (lock_word.GetState()) {
@@ -1217,7 +1218,7 @@
Monitor* mon = lock_word.FatLockMonitor();
owner_ = mon->owner_;
entry_count_ = 1 + mon->lock_count_;
- for (Thread* waiter = mon->wait_set_; waiter != NULL; waiter = waiter->GetWaitNext()) {
+ for (Thread* waiter = mon->wait_set_; waiter != nullptr; waiter = waiter->GetWaitNext()) {
waiters_.push_back(waiter);
}
break;
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 95e4460..b7245c1 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -141,6 +141,10 @@
CHECK_EQ(error, 0) << strerror(error);
return result;
}
+
+ void operator delete(void* ptr) {
+ free(ptr);
+ }
#endif
private:
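The added operator delete pairs with the aligned operator new visible just above it (the posix_memalign result checked by CHECK_EQ). Memory obtained from posix_memalign must go back through free(), and a class that overrides operator new without a matching operator delete would route deletions to the global deallocator. A stand-alone sketch of the pairing (the alignment value is illustrative; the monitor code derives its own):

    #include <cstdlib>

    struct AlignedThing {
      static void* operator new(size_t size) {
        void* result = nullptr;
        if (posix_memalign(&result, 64, size) != 0) {
          abort();  // ART CHECKs the return code instead
        }
        return result;
      }
      static void operator delete(void* ptr) { free(ptr); }
    };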
diff --git a/runtime/monitor_android.cc b/runtime/monitor_android.cc
index d89290b..48c9cce 100644
--- a/runtime/monitor_android.cc
+++ b/runtime/monitor_android.cc
@@ -88,7 +88,7 @@
cp = EventLogWriteInt(cp, line_number);
// Emit the lock owner source code file name, <= 37 bytes.
- if (owner_filename == NULL) {
+ if (owner_filename == nullptr) {
owner_filename = "";
} else if (strcmp(filename, owner_filename) == 0) {
// Common case, so save on log space.
diff --git a/runtime/monitor_pool.h b/runtime/monitor_pool.h
index 8ae5a54..4ab4e86 100644
--- a/runtime/monitor_pool.h
+++ b/runtime/monitor_pool.h
@@ -138,7 +138,8 @@
for (size_t index = 0; index < num_chunks_; ++index) {
uintptr_t chunk_addr = *(monitor_chunks_.LoadRelaxed() + index);
if (IsInChunk(chunk_addr, mon)) {
- return OffsetToMonitorId(reinterpret_cast<uintptr_t>(mon) - chunk_addr + index * kChunkSize);
+ return OffsetToMonitorId(
+ reinterpret_cast<uintptr_t>(mon) - chunk_addr + index * kChunkSize);
}
}
LOG(FATAL) << "Did not find chunk that contains monitor.";
diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc
index 2351463..30cb2d8 100644
--- a/runtime/monitor_test.cc
+++ b/runtime/monitor_test.cc
@@ -116,8 +116,8 @@
ScopedObjectAccess soa(self);
monitor_test_->thread_ = self; // Pass the Thread.
- monitor_test_->object_.Get()->MonitorEnter(self); // Lock the object. This should transition
- LockWord lock_after = monitor_test_->object_.Get()->GetLockWord(false); // it to thinLocked.
+ monitor_test_->object_.Get()->MonitorEnter(self); // Lock the object. This should transition
+ LockWord lock_after = monitor_test_->object_.Get()->GetLockWord(false); // it to thinLocked.
LockWord::LockState new_state = lock_after.GetState();
// Cannot use ASSERT only, as analysis thinks we'll keep holding the mutex.
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 87ae64d..4f97d20 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -109,7 +109,7 @@
//
// NullableScopedUtfChars name(env, javaName);
// if (env->ExceptionCheck()) {
-// return NULL;
+// return nullptr;
// }
// // ... use name.c_str()
//
@@ -117,7 +117,7 @@
class NullableScopedUtfChars {
public:
NullableScopedUtfChars(JNIEnv* env, jstring s) : mEnv(env), mString(s) {
- mUtfChars = (s != NULL) ? env->GetStringUTFChars(s, NULL) : NULL;
+ mUtfChars = (s != nullptr) ? env->GetStringUTFChars(s, nullptr) : nullptr;
}
~NullableScopedUtfChars() {
@@ -149,9 +149,10 @@
void operator=(const NullableScopedUtfChars&);
};
-static jobject DexFile_openDexFileNative(JNIEnv* env, jclass, jstring javaSourceName, jstring javaOutputName, jint) {
+static jobject DexFile_openDexFileNative(
+ JNIEnv* env, jclass, jstring javaSourceName, jstring javaOutputName, jint) {
ScopedUtfChars sourceName(env, javaSourceName);
- if (sourceName.c_str() == NULL) {
+ if (sourceName.c_str() == nullptr) {
return 0;
}
NullableScopedUtfChars outputName(env, javaOutputName);
@@ -224,9 +225,9 @@
}
ScopedUtfChars class_name(env, javaName);
- if (class_name.c_str() == NULL) {
+ if (class_name.c_str() == nullptr) {
VLOG(class_linker) << "Failed to find class_name";
- return NULL;
+ return nullptr;
}
const std::string descriptor(DotToDescriptor(class_name.c_str()));
const size_t hash(ComputeModifiedUtf8Hash(descriptor.c_str()));
@@ -367,7 +368,7 @@
instruction_set.c_str(), defer);
}
-// public API, NULL pkgname
+// public API, null pkgname
static jboolean DexFile_isDexOptNeeded(JNIEnv* env, jclass, jstring javaFilename) {
const char* instruction_set = GetInstructionSetString(kRuntimeISA);
ScopedUtfChars filename(env, javaFilename);
@@ -378,11 +379,14 @@
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(DexFile, closeDexFile, "(Ljava/lang/Object;)V"),
- NATIVE_METHOD(DexFile, defineClassNative, "(Ljava/lang/String;Ljava/lang/ClassLoader;Ljava/lang/Object;)Ljava/lang/Class;"),
+ NATIVE_METHOD(DexFile, defineClassNative,
+ "(Ljava/lang/String;Ljava/lang/ClassLoader;Ljava/lang/Object;)Ljava/lang/Class;"),
NATIVE_METHOD(DexFile, getClassNameList, "(Ljava/lang/Object;)[Ljava/lang/String;"),
NATIVE_METHOD(DexFile, isDexOptNeeded, "(Ljava/lang/String;)Z"),
- NATIVE_METHOD(DexFile, getDexOptNeeded, "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Z)I"),
- NATIVE_METHOD(DexFile, openDexFileNative, "(Ljava/lang/String;Ljava/lang/String;I)Ljava/lang/Object;"),
+ NATIVE_METHOD(DexFile, getDexOptNeeded,
+ "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Z)I"),
+ NATIVE_METHOD(DexFile, openDexFileNative,
+ "(Ljava/lang/String;Ljava/lang/String;I)Ljava/lang/Object;"),
};
void register_dalvik_system_DexFile(JNIEnv* env) {
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 876e29a..46881b0 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -104,7 +104,7 @@
}
ScopedUtfChars traceFilename(env, javaTraceFilename);
- if (traceFilename.c_str() == NULL) {
+ if (traceFilename.c_str() == nullptr) {
return;
}
Trace::Start(traceFilename.c_str(), fd, bufferSize, flags, Trace::TraceOutputMode::kFile,
@@ -116,7 +116,7 @@
jint bufferSize, jint flags,
jboolean samplingEnabled, jint intervalUs) {
ScopedUtfChars traceFilename(env, javaTraceFilename);
- if (traceFilename.c_str() == NULL) {
+ if (traceFilename.c_str() == nullptr) {
return;
}
Trace::Start(traceFilename.c_str(), -1, bufferSize, flags, Trace::TraceOutputMode::kFile,
@@ -156,7 +156,7 @@
static void ThrowUnsupportedOperationException(JNIEnv* env) {
ScopedObjectAccess soa(env);
- soa.Self()->ThrowNewException("Ljava/lang/UnsupportedOperationException;", NULL);
+ soa.Self()->ThrowNewException("Ljava/lang/UnsupportedOperationException;", nullptr);
}
static void VMDebug_startInstructionCounting(JNIEnv* env, jclass) {
@@ -200,15 +200,15 @@
* error occurs during file handling.
*/
static void VMDebug_dumpHprofData(JNIEnv* env, jclass, jstring javaFilename, jobject javaFd) {
- // Only one of these may be NULL.
- if (javaFilename == NULL && javaFd == NULL) {
+ // Only one of these may be null.
+ if (javaFilename == nullptr && javaFd == nullptr) {
ScopedObjectAccess soa(env);
ThrowNullPointerException("fileName == null && fd == null");
return;
}
std::string filename;
- if (javaFilename != NULL) {
+ if (javaFilename != nullptr) {
ScopedUtfChars chars(env, javaFilename);
if (env->ExceptionCheck()) {
return;
@@ -219,7 +219,7 @@
}
int fd = -1;
- if (javaFd != NULL) {
+ if (javaFd != nullptr) {
fd = jniGetFDFromFileDescriptor(env, javaFd);
if (fd < 0) {
ScopedObjectAccess soa(env);
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 196a231..9736e15 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -114,7 +114,7 @@
}
static jlong VMRuntime_addressOf(JNIEnv* env, jobject, jobject javaArray) {
- if (javaArray == NULL) { // Most likely allocation failed
+ if (javaArray == nullptr) { // Most likely allocation failed
return 0;
}
ScopedFastNativeObjectAccess soa(env);
@@ -223,7 +223,7 @@
}
static void VMRuntime_concurrentGC(JNIEnv* env, jobject) {
- Runtime::Current()->GetHeap()->ConcurrentGC(ThreadForEnv(env));
+ Runtime::Current()->GetHeap()->ConcurrentGC(ThreadForEnv(env), true);
}
static void VMRuntime_requestHeapTrim(JNIEnv* env, jobject) {
@@ -231,7 +231,7 @@
}
static void VMRuntime_requestConcurrentGC(JNIEnv* env, jobject) {
- Runtime::Current()->GetHeap()->RequestConcurrentGC(ThreadForEnv(env));
+ Runtime::Current()->GetHeap()->RequestConcurrentGC(ThreadForEnv(env), true);
}
static void VMRuntime_startHeapTaskProcessor(JNIEnv* env, jobject) {
@@ -263,17 +263,17 @@
};
// Based on ClassLinker::ResolveString.
-static void PreloadDexCachesResolveString(Handle<mirror::DexCache> dex_cache, uint32_t string_idx,
- StringTable& strings)
+static void PreloadDexCachesResolveString(
+ Handle<mirror::DexCache> dex_cache, uint32_t string_idx, StringTable& strings)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::String* string = dex_cache->GetResolvedString(string_idx);
- if (string != NULL) {
+ if (string != nullptr) {
return;
}
const DexFile* dex_file = dex_cache->GetDexFile();
const char* utf8 = dex_file->StringDataByIdx(string_idx);
string = strings[utf8];
- if (string == NULL) {
+ if (string == nullptr) {
return;
}
// LOG(INFO) << "VMRuntime.preloadDexCaches resolved string=" << utf8;
@@ -281,10 +281,11 @@
}
// Based on ClassLinker::ResolveType.
-static void PreloadDexCachesResolveType(Thread* self, mirror::DexCache* dex_cache, uint32_t type_idx)
+static void PreloadDexCachesResolveType(
+ Thread* self, mirror::DexCache* dex_cache, uint32_t type_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::Class* klass = dex_cache->GetResolvedType(type_idx);
- if (klass != NULL) {
+ if (klass != nullptr) {
return;
}
const DexFile* dex_file = dex_cache->GetDexFile();
@@ -293,9 +294,9 @@
if (class_name[1] == '\0') {
klass = linker->FindPrimitiveClass(class_name[0]);
} else {
- klass = linker->LookupClass(self, class_name, ComputeModifiedUtf8Hash(class_name), NULL);
+ klass = linker->LookupClass(self, class_name, ComputeModifiedUtf8Hash(class_name), nullptr);
}
- if (klass == NULL) {
+ if (klass == nullptr) {
return;
}
// LOG(INFO) << "VMRuntime.preloadDexCaches resolved klass=" << class_name;
@@ -321,7 +322,7 @@
Thread* const self = Thread::Current();
StackHandleScope<1> hs(self);
Handle<mirror::Class> klass(hs.NewHandle(dex_cache->GetResolvedType(field_id.class_idx_)));
- if (klass.Get() == NULL) {
+ if (klass.Get() == nullptr) {
return;
}
if (is_static) {
@@ -329,7 +330,7 @@
} else {
field = klass->FindInstanceField(dex_cache.Get(), field_idx);
}
- if (field == NULL) {
+ if (field == nullptr) {
return;
}
// LOG(INFO) << "VMRuntime.preloadDexCaches resolved field " << PrettyField(field);
@@ -341,13 +342,13 @@
InvokeType invoke_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* method = dex_cache->GetResolvedMethod(method_idx);
- if (method != NULL) {
+ if (method != nullptr) {
return;
}
const DexFile* dex_file = dex_cache->GetDexFile();
const DexFile::MethodId& method_id = dex_file->GetMethodId(method_idx);
mirror::Class* klass = dex_cache->GetResolvedType(method_id.class_idx_);
- if (klass == NULL) {
+ if (klass == nullptr) {
return;
}
switch (invoke_type) {
@@ -366,7 +367,7 @@
LOG(FATAL) << "Unreachable - invocation type: " << invoke_type;
UNREACHABLE();
}
- if (method == NULL) {
+ if (method == nullptr) {
return;
}
// LOG(INFO) << "VMRuntime.preloadDexCaches resolved method " << PrettyMethod(method);
@@ -404,7 +405,7 @@
const std::vector<const DexFile*>& boot_class_path = linker->GetBootClassPath();
for (size_t i = 0; i < boot_class_path.size(); i++) {
const DexFile* dex_file = boot_class_path[i];
- CHECK(dex_file != NULL);
+ CHECK(dex_file != nullptr);
total->num_strings += dex_file->NumStringIds();
total->num_fields += dex_file->NumFieldIds();
total->num_methods += dex_file->NumMethodIds();
@@ -421,29 +422,29 @@
const std::vector<const DexFile*>& boot_class_path = linker->GetBootClassPath();
for (size_t i = 0; i < boot_class_path.size(); i++) {
const DexFile* dex_file = boot_class_path[i];
- CHECK(dex_file != NULL);
+ CHECK(dex_file != nullptr);
mirror::DexCache* dex_cache = linker->FindDexCache(*dex_file);
for (size_t j = 0; j < dex_cache->NumStrings(); j++) {
mirror::String* string = dex_cache->GetResolvedString(j);
- if (string != NULL) {
+ if (string != nullptr) {
filled->num_strings++;
}
}
for (size_t j = 0; j < dex_cache->NumResolvedTypes(); j++) {
mirror::Class* klass = dex_cache->GetResolvedType(j);
- if (klass != NULL) {
+ if (klass != nullptr) {
filled->num_types++;
}
}
for (size_t j = 0; j < dex_cache->NumResolvedFields(); j++) {
ArtField* field = linker->GetResolvedField(j, dex_cache);
- if (field != NULL) {
+ if (field != nullptr) {
filled->num_fields++;
}
}
for (size_t j = 0; j < dex_cache->NumResolvedMethods(); j++) {
mirror::ArtMethod* method = dex_cache->GetResolvedMethod(j);
- if (method != NULL) {
+ if (method != nullptr) {
filled->num_methods++;
}
}
@@ -482,7 +483,7 @@
const std::vector<const DexFile*>& boot_class_path = linker->GetBootClassPath();
for (size_t i = 0; i < boot_class_path.size(); i++) {
const DexFile* dex_file = boot_class_path[i];
- CHECK(dex_file != NULL);
+ CHECK(dex_file != nullptr);
StackHandleScope<1> hs(soa.Self());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->FindDexCache(*dex_file)));
@@ -504,7 +505,7 @@
class_def_index++) {
const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
const uint8_t* class_data = dex_file->GetClassData(class_def);
- if (class_data == NULL) {
+ if (class_data == nullptr) {
continue;
}
ClassDataItemIterator it(*dex_file, class_data);
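
The filled-cache statistics above reduce to one pattern repeated four times: walk a resolution table and count the non-null slots. A minimal standalone sketch of that pattern, with a plain std::vector standing in for the dex cache's string/type/field/method tables:

#include <cstddef>
#include <vector>

// Count the filled (non-null) slots of a resolution table, as the
// filled-stats loops above do for strings, types, fields, and methods.
template <typename T>
size_t CountResolved(const std::vector<T*>& table) {
  size_t filled = 0;
  for (T* entry : table) {
    if (entry != nullptr) {
      ++filled;
    }
  }
  return filled;
}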
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index 2cdc68f..1d7d853 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -81,33 +81,27 @@
return soa.AddLocalReference<jobject>(visitor.caller->GetDeclaringClass()->GetClassLoader());
}
-static jobject VMStack_getClosestUserClassLoader(JNIEnv* env, jclass, jobject javaBootstrap,
- jobject javaSystem) {
+static jobject VMStack_getClosestUserClassLoader(JNIEnv* env, jclass) {
struct ClosestUserClassLoaderVisitor : public StackVisitor {
- ClosestUserClassLoaderVisitor(Thread* thread, mirror::Object* bootstrap_in,
- mirror::Object* system_in)
- : StackVisitor(thread, NULL), bootstrap(bootstrap_in), system(system_in),
- class_loader(NULL) {}
+ explicit ClosestUserClassLoaderVisitor(Thread* thread)
+ : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ class_loader(nullptr) {}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(class_loader == NULL);
+ DCHECK(class_loader == nullptr);
mirror::Class* c = GetMethod()->GetDeclaringClass();
mirror::Object* cl = c->GetClassLoader();
- if (cl != NULL && cl != bootstrap && cl != system) {
+ if (cl != nullptr) {
class_loader = cl;
return false;
}
return true;
}
- mirror::Object* bootstrap;
- mirror::Object* system;
mirror::Object* class_loader;
};
ScopedFastNativeObjectAccess soa(env);
- mirror::Object* bootstrap = soa.Decode<mirror::Object*>(javaBootstrap);
- mirror::Object* system = soa.Decode<mirror::Object*>(javaSystem);
- ClosestUserClassLoaderVisitor visitor(soa.Self(), bootstrap, system);
+ ClosestUserClassLoaderVisitor visitor(soa.Self());
visitor.WalkStack();
return soa.AddLocalReference<jobject>(visitor.class_loader);
}
@@ -136,7 +130,7 @@
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(VMStack, fillStackTraceElements, "!(Ljava/lang/Thread;[Ljava/lang/StackTraceElement;)I"),
NATIVE_METHOD(VMStack, getCallingClassLoader, "!()Ljava/lang/ClassLoader;"),
- NATIVE_METHOD(VMStack, getClosestUserClassLoader, "!(Ljava/lang/ClassLoader;Ljava/lang/ClassLoader;)Ljava/lang/ClassLoader;"),
+ NATIVE_METHOD(VMStack, getClosestUserClassLoader, "!()Ljava/lang/ClassLoader;"),
NATIVE_METHOD(VMStack, getStackClass2, "!()Ljava/lang/Class;"),
NATIVE_METHOD(VMStack, getThreadStackTrace, "!(Ljava/lang/Thread;)[Ljava/lang/StackTraceElement;"),
};
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index af01a02..1a7a3e5 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -65,6 +65,7 @@
DEBUG_ENABLE_SAFEMODE = 1 << 3,
DEBUG_ENABLE_JNI_LOGGING = 1 << 4,
DEBUG_ENABLE_JIT = 1 << 5,
+ DEBUG_GENERATE_CFI = 1 << 6,
};
Runtime* const runtime = Runtime::Current();
@@ -111,6 +112,12 @@
}
runtime->GetJITOptions()->SetUseJIT(use_jit);
+ const bool generate_cfi = (debug_flags & DEBUG_GENERATE_CFI) != 0;
+ if (generate_cfi) {
+ runtime->AddCompilerOption("--include-cfi");
+ debug_flags &= ~DEBUG_GENERATE_CFI;
+ }
+
// This is for backwards compatibility with Dalvik.
debug_flags &= ~DEBUG_ENABLE_ASSERT;
@@ -145,6 +152,7 @@
if (Trace::GetMethodTracingMode() != TracingMode::kTracingInactive) {
Trace::TraceOutputMode output_mode = Trace::GetOutputMode();
Trace::TraceMode trace_mode = Trace::GetMode();
+ size_t buffer_size = Trace::GetBufferSize();
// Just drop it.
Trace::Abort();
@@ -169,7 +177,7 @@
proc_name.c_str());
Trace::Start(trace_file.c_str(),
-1,
- -1, // TODO: Expose buffer size.
+ buffer_size,
0, // TODO: Expose flags.
output_mode,
trace_mode,
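
The zygote hook above follows the usual pattern for decoding packed debug flags: test a bit, act on it, then clear it so any bits still set at the end can be flagged as unknown. A standalone sketch of that pattern, assuming flag values that mirror the enum above (the compiler-option sink is a stand-in for Runtime::AddCompilerOption):

#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

enum DebugFlags : uint32_t {
  DEBUG_ENABLE_JIT   = 1u << 5,
  DEBUG_GENERATE_CFI = 1u << 6,
};

// Stand-in for Runtime::AddCompilerOption().
static std::vector<std::string> compiler_options;

static void DecodeDebugFlags(uint32_t debug_flags) {
  if ((debug_flags & DEBUG_GENERATE_CFI) != 0) {
    compiler_options.push_back("--include-cfi");
    debug_flags &= ~DEBUG_GENERATE_CFI;  // Consume the bit once it is handled.
  }
  // ... each known flag is tested and cleared the same way ...
  if (debug_flags != 0) {
    std::fprintf(stderr, "Unknown bits set in debug_flags: %#x\n",
                 static_cast<unsigned>(debug_flags));
  }
}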
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 5ad18f8..795a0ea 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -25,9 +25,11 @@
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/field-inl.h"
+#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
+#include "reflection.h"
#include "scoped_thread_state_change.h"
#include "scoped_fast_native_object_access.h"
#include "ScopedLocalRef.h"
@@ -41,7 +43,7 @@
const ScopedFastNativeObjectAccess& soa, jobject java_class)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::Class* c = soa.Decode<mirror::Class*>(java_class);
- DCHECK(c != NULL);
+ DCHECK(c != nullptr);
DCHECK(c->IsClass());
// TODO: we could EnsureInitialized here, rather than on every reflective get/set or invoke.
// For now, we conservatively preserve the old dalvik behavior. A quick "IsInitialized" check
@@ -91,18 +93,6 @@
return soa.AddLocalReference<jclass>(c.Get());
}
-static jobject Class_findOverriddenMethodIfProxy(JNIEnv* env, jclass, jobject art_method) {
- ScopedFastNativeObjectAccess soa(env);
- mirror::ArtMethod* method = soa.Decode<mirror::ArtMethod*>(art_method);
- mirror::Class* declaring_klass = method->GetDeclaringClass();
- if (!declaring_klass->IsProxyClass()) {
- return art_method;
- }
- uint32_t dex_method_index = method->GetDexMethodIndex();
- mirror::ArtMethod* overriden_method = method->GetDexCacheResolvedMethods()->Get(dex_method_index);
- return soa.AddLocalReference<jobject>(overriden_method);
-}
-
static jstring Class_getNameNative(JNIEnv* env, jobject javaThis) {
ScopedFastNativeObjectAccess soa(env);
StackHandleScope<1> hs(soa.Self());
@@ -202,7 +192,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
size_t low = 0;
size_t high = num_fields;
- const uint16_t* const data = name->GetCharArray()->GetData() + name->GetOffset();
+ const uint16_t* const data = name->GetValue();
const size_t length = name->GetLength();
while (low < high) {
auto mid = (low + high) / 2;
@@ -252,7 +242,7 @@
static jobject Class_getDeclaredField(JNIEnv* env, jobject javaThis, jstring name) {
ScopedFastNativeObjectAccess soa(env);
auto* name_string = soa.Decode<mirror::String*>(name);
- if (name == nullptr) {
+ if (name_string == nullptr) {
ThrowNullPointerException("name == null");
return nullptr;
}
@@ -262,24 +252,309 @@
std::string name_str = name_string->ToModifiedUtf8();
// We may have a pending exception if we failed to resolve.
if (!soa.Self()->IsExceptionPending()) {
- soa.Self()->ThrowNewException("Ljava/lang/NoSuchFieldException;", name_str.c_str());
+ ThrowNoSuchFieldException(DecodeClass(soa, javaThis), name_str.c_str());
}
return nullptr;
}
return soa.AddLocalReference<jobject>(result);
}
+static jobject Class_getDeclaredConstructorInternal(
+ JNIEnv* env, jobject javaThis, jobjectArray args) {
+ ScopedFastNativeObjectAccess soa(env);
+ auto* klass = DecodeClass(soa, javaThis);
+ auto* params = soa.Decode<mirror::ObjectArray<mirror::Class>*>(args);
+ StackHandleScope<1> hs(soa.Self());
+ auto* declared_constructor = klass->GetDeclaredConstructor(soa.Self(), hs.NewHandle(params));
+ if (declared_constructor != nullptr) {
+ return soa.AddLocalReference<jobject>(
+ mirror::Constructor::CreateFromArtMethod(soa.Self(), declared_constructor));
+ }
+ return nullptr;
+}
+
+static ALWAYS_INLINE inline bool MethodMatchesConstructor(mirror::ArtMethod* m, bool public_only)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(m != nullptr);
+ return (!public_only || m->IsPublic()) && !m->IsStatic() && m->IsConstructor();
+}
+
+static jobjectArray Class_getDeclaredConstructorsInternal(
+ JNIEnv* env, jobject javaThis, jboolean publicOnly) {
+ ScopedFastNativeObjectAccess soa(env);
+ auto* klass = DecodeClass(soa, javaThis);
+ StackHandleScope<2> hs(soa.Self());
+ auto h_direct_methods = hs.NewHandle(klass->GetDirectMethods());
+ size_t constructor_count = 0;
+ auto count = h_direct_methods.Get() != nullptr ? h_direct_methods->GetLength() : 0u;
+ // Two pass approach for speed.
+ for (size_t i = 0; i < count; ++i) {
+ constructor_count += MethodMatchesConstructor(h_direct_methods->GetWithoutChecks(i),
+ publicOnly != JNI_FALSE) ? 1u : 0u;
+ }
+ auto h_constructors = hs.NewHandle(mirror::ObjectArray<mirror::Constructor>::Alloc(
+ soa.Self(), mirror::Constructor::ArrayClass(), constructor_count));
+ if (UNLIKELY(h_constructors.Get() == nullptr)) {
+ soa.Self()->AssertPendingException();
+ return nullptr;
+ }
+ constructor_count = 0;
+ for (size_t i = 0; i < count; ++i) {
+ auto* method = h_direct_methods->GetWithoutChecks(i);
+ if (MethodMatchesConstructor(method, publicOnly != JNI_FALSE)) {
+ auto* constructor = mirror::Constructor::CreateFromArtMethod(soa.Self(), method);
+ if (UNLIKELY(constructor == nullptr)) {
+ soa.Self()->AssertPendingException();
+ return nullptr;
+ }
+ h_constructors->SetWithoutChecks<false>(constructor_count++, constructor);
+ }
+ }
+ return soa.AddLocalReference<jobjectArray>(h_constructors.Get());
+}
+
+static jobject Class_getDeclaredMethodInternal(JNIEnv* env, jobject javaThis,
+ jobject name, jobjectArray args) {
+ // Covariant return types permit the class to define multiple
+ // methods with the same name and parameter types. Prefer to
+ // return a non-synthetic method in such situations. We may
+ // still return a synthetic method to handle situations like
+ // escalated visibility. We never return miranda methods that
+ // were synthesized by the runtime.
+ constexpr uint32_t kSkipModifiers = kAccMiranda | kAccSynthetic;
+ ScopedFastNativeObjectAccess soa(env);
+ StackHandleScope<5> hs(soa.Self());
+ auto h_method_name = hs.NewHandle(soa.Decode<mirror::String*>(name));
+ if (UNLIKELY(h_method_name.Get() == nullptr)) {
+ ThrowNullPointerException("name == null");
+ return nullptr;
+ }
+ auto h_args = hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Class>*>(args));
+ auto* klass = DecodeClass(soa, javaThis);
+ mirror::ArtMethod* result = nullptr;
+ auto* virtual_methods = klass->GetVirtualMethods();
+ if (virtual_methods != nullptr) {
+ auto h_virtual_methods = hs.NewHandle(virtual_methods);
+ for (size_t i = 0, count = virtual_methods->GetLength(); i < count; ++i) {
+ auto* m = h_virtual_methods->GetWithoutChecks(i);
+ auto* np_method = m->GetInterfaceMethodIfProxy();
+ // May cause thread suspension.
+ mirror::String* np_name = np_method->GetNameAsString(soa.Self());
+ if (!np_name->Equals(h_method_name.Get()) || !np_method->EqualParameters(h_args)) {
+ if (UNLIKELY(soa.Self()->IsExceptionPending())) {
+ return nullptr;
+ }
+ continue;
+ }
+ auto modifiers = m->GetAccessFlags();
+ if ((modifiers & kSkipModifiers) == 0) {
+ return soa.AddLocalReference<jobject>(mirror::Method::CreateFromArtMethod(soa.Self(), m));
+ }
+ if ((modifiers & kAccMiranda) == 0) {
+ result = m; // Remember as potential result if it's not a miranda method.
+ }
+ }
+ }
+ if (result == nullptr) {
+ auto* direct_methods = klass->GetDirectMethods();
+ if (direct_methods != nullptr) {
+ auto h_direct_methods = hs.NewHandle(direct_methods);
+ for (size_t i = 0, count = direct_methods->GetLength(); i < count; ++i) {
+ auto* m = h_direct_methods->GetWithoutChecks(i);
+ auto modifiers = m->GetAccessFlags();
+ if ((modifiers & kAccConstructor) != 0) {
+ continue;
+ }
+ auto* np_method = m->GetInterfaceMethodIfProxy();
+ // May cause thread suspension.
+ mirror::String* np_name = np_method->GetNameAsString(soa.Self());
+ if (np_name == nullptr) {
+ soa.Self()->AssertPendingException();
+ return nullptr;
+ }
+ if (!np_name->Equals(h_method_name.Get()) || !np_method->EqualParameters(h_args)) {
+ if (UNLIKELY(soa.Self()->IsExceptionPending())) {
+ return nullptr;
+ }
+ continue;
+ }
+ if ((modifiers & kSkipModifiers) == 0) {
+ return soa.AddLocalReference<jobject>(mirror::Method::CreateFromArtMethod(
+ soa.Self(), m));
+ }
+ // Direct methods cannot be miranda methods, so this potential result must be synthetic.
+ result = m;
+ }
+ }
+ }
+ return result != nullptr ?
+ soa.AddLocalReference<jobject>(mirror::Method::CreateFromArtMethod(soa.Self(), result)) :
+ nullptr;
+}
+
+static jobjectArray Class_getDeclaredMethodsUnchecked(JNIEnv* env, jobject javaThis,
+ jboolean publicOnly) {
+ ScopedFastNativeObjectAccess soa(env);
+ StackHandleScope<5> hs(soa.Self());
+ auto* klass = DecodeClass(soa, javaThis);
+ auto virtual_methods = hs.NewHandle(klass->GetVirtualMethods());
+ auto direct_methods = hs.NewHandle(klass->GetDirectMethods());
+ size_t num_methods = 0;
+ if (virtual_methods.Get() != nullptr) {
+ for (size_t i = 0, count = virtual_methods->GetLength(); i < count; ++i) {
+ auto* m = virtual_methods->GetWithoutChecks(i);
+ auto modifiers = m->GetAccessFlags();
+ if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) &&
+ (modifiers & kAccMiranda) == 0) {
+ ++num_methods;
+ }
+ }
+ }
+ if (direct_methods.Get() != nullptr) {
+ for (size_t i = 0, count = direct_methods->GetLength(); i < count; ++i) {
+ auto* m = direct_methods->GetWithoutChecks(i);
+ auto modifiers = m->GetAccessFlags();
+ // Add non-constructor direct/static methods.
+ if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) &&
+ (modifiers & kAccConstructor) == 0) {
+ ++num_methods;
+ }
+ }
+ }
+ auto ret = hs.NewHandle(mirror::ObjectArray<mirror::Method>::Alloc(
+ soa.Self(), mirror::Method::ArrayClass(), num_methods));
+ num_methods = 0;
+ if (virtual_methods.Get() != nullptr) {
+ for (size_t i = 0, count = virtual_methods->GetLength(); i < count; ++i) {
+ auto* m = virtual_methods->GetWithoutChecks(i);
+ auto modifiers = m->GetAccessFlags();
+ if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) &&
+ (modifiers & kAccMiranda) == 0) {
+ auto* method = mirror::Method::CreateFromArtMethod(soa.Self(), m);
+ if (method == nullptr) {
+ soa.Self()->AssertPendingException();
+ return nullptr;
+ }
+ ret->SetWithoutChecks<false>(num_methods++, method);
+ }
+ }
+ }
+ if (direct_methods.Get() != nullptr) {
+ for (size_t i = 0, count = direct_methods->GetLength(); i < count; ++i) {
+ auto* m = direct_methods->GetWithoutChecks(i);
+ auto modifiers = m->GetAccessFlags();
+ // Add non-constructor direct/static methods.
+ if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) &&
+ (modifiers & kAccConstructor) == 0) {
+ auto* method = mirror::Method::CreateFromArtMethod(soa.Self(), m);
+ if (method == nullptr) {
+ soa.Self()->AssertPendingException();
+ return nullptr;
+ }
+ ret->SetWithoutChecks<false>(num_methods++, method);
+ }
+ }
+ }
+ return soa.AddLocalReference<jobjectArray>(ret.Get());
+}
+
+static jobject Class_newInstance(JNIEnv* env, jobject javaThis) {
+ ScopedFastNativeObjectAccess soa(env);
+ StackHandleScope<4> hs(soa.Self());
+ auto klass = hs.NewHandle(DecodeClass(soa, javaThis));
+ if (UNLIKELY(klass->GetPrimitiveType() != 0 || klass->IsInterface() || klass->IsArrayClass() ||
+ klass->IsAbstract())) {
+ soa.Self()->ThrowNewExceptionF("Ljava/lang/InstantiationException;",
+ "%s cannot be instantiated", PrettyClass(klass.Get()).c_str());
+ return nullptr;
+ }
+ auto caller = hs.NewHandle<mirror::Class>(nullptr);
+ // Verify that we can access the class.
+ if (!klass->IsPublic()) {
+ caller.Assign(GetCallingClass(soa.Self(), 1));
+ if (caller.Get() != nullptr && !caller->CanAccess(klass.Get())) {
+ soa.Self()->ThrowNewExceptionF(
+ "Ljava/lang/IllegalAccessException;", "%s is not accessible from %s",
+ PrettyClass(klass.Get()).c_str(), PrettyClass(caller.Get()).c_str());
+ return nullptr;
+ }
+ }
+ auto* constructor = klass->GetDeclaredConstructor(
+ soa.Self(), NullHandle<mirror::ObjectArray<mirror::Class>>());
+ if (UNLIKELY(constructor == nullptr)) {
+ soa.Self()->ThrowNewExceptionF("Ljava/lang/InstantiationException;",
+ "%s has no zero argument constructor",
+ PrettyClass(klass.Get()).c_str());
+ return nullptr;
+ }
+ // Invoke the string allocator to return an empty string for the string class.
+ if (klass->IsStringClass()) {
+ gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
+ mirror::SetStringCountVisitor visitor(0);
+ mirror::Object* obj = mirror::String::Alloc<true>(soa.Self(), 0, allocator_type, visitor);
+ if (UNLIKELY(soa.Self()->IsExceptionPending())) {
+ return nullptr;
+ } else {
+ return soa.AddLocalReference<jobject>(obj);
+ }
+ }
+ auto receiver = hs.NewHandle(klass->AllocObject(soa.Self()));
+ if (UNLIKELY(receiver.Get() == nullptr)) {
+ soa.Self()->AssertPendingOOMException();
+ return nullptr;
+ }
+ // Verify that we can access the constructor.
+ auto* declaring_class = constructor->GetDeclaringClass();
+ if (!constructor->IsPublic()) {
+ if (caller.Get() == nullptr) {
+ caller.Assign(GetCallingClass(soa.Self(), 1));
+ }
+ if (UNLIKELY(caller.Get() != nullptr && !VerifyAccess(
+ soa.Self(), receiver.Get(), declaring_class, constructor->GetAccessFlags(),
+ caller.Get()))) {
+ soa.Self()->ThrowNewExceptionF(
+ "Ljava/lang/IllegalAccessException;", "%s is not accessible from %s",
+ PrettyMethod(constructor).c_str(), PrettyClass(caller.Get()).c_str());
+ return nullptr;
+ }
+ }
+ // Ensure that we are initialized.
+ if (UNLIKELY(!declaring_class->IsInitialized())) {
+ if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(
+ soa.Self(), hs.NewHandle(declaring_class), true, true)) {
+ soa.Self()->AssertPendingException();
+ return nullptr;
+ }
+ }
+ // Invoke the constructor.
+ JValue result;
+ uint32_t args[1] = { static_cast<uint32_t>(reinterpret_cast<uintptr_t>(receiver.Get())) };
+ constructor->Invoke(soa.Self(), args, sizeof(args), &result, "V");
+ if (UNLIKELY(soa.Self()->IsExceptionPending())) {
+ return nullptr;
+ }
+ // Constructors are ()V methods, so we shouldn't touch the result of InvokeMethod.
+ return soa.AddLocalReference<jobject>(receiver.Get());
+}
+
static JNINativeMethod gMethods[] = {
- NATIVE_METHOD(Class, classForName, "!(Ljava/lang/String;ZLjava/lang/ClassLoader;)Ljava/lang/Class;"),
- NATIVE_METHOD(Class, findOverriddenMethodIfProxy,
- "!(Ljava/lang/reflect/ArtMethod;)Ljava/lang/reflect/ArtMethod;"),
+ NATIVE_METHOD(Class, classForName,
+ "!(Ljava/lang/String;ZLjava/lang/ClassLoader;)Ljava/lang/Class;"),
+ NATIVE_METHOD(Class, getDeclaredConstructorInternal,
+ "!([Ljava/lang/Class;)Ljava/lang/reflect/Constructor;"),
+ NATIVE_METHOD(Class, getDeclaredConstructorsInternal, "!(Z)[Ljava/lang/reflect/Constructor;"),
+ NATIVE_METHOD(Class, getDeclaredField, "!(Ljava/lang/String;)Ljava/lang/reflect/Field;"),
+ NATIVE_METHOD(Class, getDeclaredFieldInternal, "!(Ljava/lang/String;)Ljava/lang/reflect/Field;"),
+ NATIVE_METHOD(Class, getDeclaredFields, "!()[Ljava/lang/reflect/Field;"),
+ NATIVE_METHOD(Class, getDeclaredFieldsUnchecked, "!(Z)[Ljava/lang/reflect/Field;"),
+ NATIVE_METHOD(Class, getDeclaredMethodInternal,
+ "!(Ljava/lang/String;[Ljava/lang/Class;)Ljava/lang/reflect/Method;"),
+ NATIVE_METHOD(Class, getDeclaredMethodsUnchecked,
+ "!(Z)[Ljava/lang/reflect/Method;"),
NATIVE_METHOD(Class, getNameNative, "!()Ljava/lang/String;"),
NATIVE_METHOD(Class, getProxyInterfaces, "!()[Ljava/lang/Class;"),
- NATIVE_METHOD(Class, getDeclaredFields, "!()[Ljava/lang/reflect/Field;"),
NATIVE_METHOD(Class, getPublicDeclaredFields, "!()[Ljava/lang/reflect/Field;"),
- NATIVE_METHOD(Class, getDeclaredFieldsUnchecked, "!(Z)[Ljava/lang/reflect/Field;"),
- NATIVE_METHOD(Class, getDeclaredFieldInternal, "!(Ljava/lang/String;)Ljava/lang/reflect/Field;"),
- NATIVE_METHOD(Class, getDeclaredField, "!(Ljava/lang/String;)Ljava/lang/reflect/Field;"),
+ NATIVE_METHOD(Class, newInstance, "!()Ljava/lang/Object;"),
};
void register_java_lang_Class(JNIEnv* env) {
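
Both getDeclaredConstructorsInternal and getDeclaredMethodsUnchecked use the two-pass scheme called out in the "Two pass approach for speed" comment: count the matching methods first so the result array can be allocated at its exact final size, then fill it with no resizing. A rough standalone illustration of the same idea, with a plain struct standing in for ArtMethod and flag values assumed to match ART's access flags (kAccPublic = 0x0001, kAccStatic = 0x0008, kAccConstructor = 0x10000):

#include <cstddef>
#include <cstdint>
#include <vector>

struct Method {
  uint32_t access_flags;
  bool IsPublic() const { return (access_flags & 0x0001u) != 0; }        // kAccPublic
  bool IsStatic() const { return (access_flags & 0x0008u) != 0; }        // kAccStatic
  bool IsConstructor() const { return (access_flags & 0x10000u) != 0; }  // kAccConstructor (assumed)
};

// Two-pass build, as in Class_getDeclaredConstructorsInternal above: count
// first so the result is allocated at its exact size, then fill it.
std::vector<const Method*> DeclaredConstructors(const std::vector<Method>& direct_methods,
                                                bool public_only) {
  auto matches = [public_only](const Method& m) {
    return (!public_only || m.IsPublic()) && !m.IsStatic() && m.IsConstructor();
  };
  size_t count = 0;
  for (const Method& m : direct_methods) {
    count += matches(m) ? 1u : 0u;
  }
  std::vector<const Method*> result;
  result.reserve(count);  // One allocation, sized exactly.
  for (const Method& m : direct_methods) {
    if (matches(m)) {
      result.push_back(&m);
    }
  }
  return result;
}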
diff --git a/runtime/native/java_lang_DexCache.cc b/runtime/native/java_lang_DexCache.cc
index 1198c2e..b9f8d01 100644
--- a/runtime/native/java_lang_DexCache.cc
+++ b/runtime/native/java_lang_DexCache.cc
@@ -31,14 +31,14 @@
// Should only be called while holding the lock on the dex cache.
DCHECK_EQ(dex_cache->GetLockOwnerThreadId(), soa.Self()->GetThreadId());
const DexFile* dex_file = dex_cache->GetDexFile();
- if (dex_file == NULL) {
- return NULL;
+ if (dex_file == nullptr) {
+ return nullptr;
}
void* address = const_cast<void*>(reinterpret_cast<const void*>(dex_file->Begin()));
jobject byte_buffer = env->NewDirectByteBuffer(address, dex_file->Size());
- if (byte_buffer == NULL) {
+ if (byte_buffer == nullptr) {
DCHECK(soa.Self()->IsExceptionPending());
- return NULL;
+ return nullptr;
}
jvalue args[1];
diff --git a/runtime/native/java_lang_String.cc b/runtime/native/java_lang_String.cc
index 6afe83b..aa64b79 100644
--- a/runtime/native/java_lang_String.cc
+++ b/runtime/native/java_lang_String.cc
@@ -18,6 +18,9 @@
#include "common_throws.h"
#include "jni_internal.h"
+#include "mirror/array.h"
+#include "mirror/object-inl.h"
+#include "mirror/string.h"
#include "mirror/string-inl.h"
#include "scoped_fast_native_object_access.h"
#include "scoped_thread_state_change.h"
@@ -26,36 +29,93 @@
namespace art {
-static jint String_compareTo(JNIEnv* env, jobject javaThis, jobject javaRhs) {
+static jchar String_charAt(JNIEnv* env, jobject java_this, jint index) {
ScopedFastNativeObjectAccess soa(env);
- if (UNLIKELY(javaRhs == NULL)) {
+ return soa.Decode<mirror::String*>(java_this)->CharAt(index);
+}
+
+static jint String_compareTo(JNIEnv* env, jobject java_this, jobject java_rhs) {
+ ScopedFastNativeObjectAccess soa(env);
+ if (UNLIKELY(java_rhs == nullptr)) {
ThrowNullPointerException("rhs == null");
return -1;
} else {
- return soa.Decode<mirror::String*>(javaThis)->CompareTo(soa.Decode<mirror::String*>(javaRhs));
+ return soa.Decode<mirror::String*>(java_this)->CompareTo(soa.Decode<mirror::String*>(java_rhs));
}
}
+static jstring String_concat(JNIEnv* env, jobject java_this, jobject java_string_arg) {
+ ScopedFastNativeObjectAccess soa(env);
+ if (UNLIKELY(java_string_arg == nullptr)) {
+ ThrowNullPointerException("string arg == null");
+ return nullptr;
+ }
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::String> string_this(hs.NewHandle(soa.Decode<mirror::String*>(java_this)));
+ Handle<mirror::String> string_arg(hs.NewHandle(soa.Decode<mirror::String*>(java_string_arg)));
+ int32_t length_this = string_this->GetLength();
+ int32_t length_arg = string_arg->GetLength();
+ if (length_arg > 0 && length_this > 0) {
+ mirror::String* result = mirror::String::AllocFromStrings(soa.Self(), string_this, string_arg);
+ return soa.AddLocalReference<jstring>(result);
+ }
+ jobject string_original = (length_this == 0) ? java_string_arg : java_this;
+ return reinterpret_cast<jstring>(string_original);
+}
+
static jint String_fastIndexOf(JNIEnv* env, jobject java_this, jint ch, jint start) {
ScopedFastNativeObjectAccess soa(env);
// This method does not handle supplementary characters. They're dealt with in managed code.
DCHECK_LE(ch, 0xffff);
-
- mirror::String* s = soa.Decode<mirror::String*>(java_this);
- return s->FastIndexOf(ch, start);
+ return soa.Decode<mirror::String*>(java_this)->FastIndexOf(ch, start);
}
-static jstring String_intern(JNIEnv* env, jobject javaThis) {
+static jstring String_fastSubstring(JNIEnv* env, jobject java_this, jint start, jint length) {
ScopedFastNativeObjectAccess soa(env);
- mirror::String* s = soa.Decode<mirror::String*>(javaThis);
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::String> string_this(hs.NewHandle(soa.Decode<mirror::String*>(java_this)));
+ gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
+ mirror::String* result = mirror::String::AllocFromString<true>(soa.Self(), length, string_this,
+ start, allocator_type);
+ return soa.AddLocalReference<jstring>(result);
+}
+
+static void String_getCharsNoCheck(JNIEnv* env, jobject java_this, jint start, jint end,
+ jcharArray buffer, jint index) {
+ ScopedFastNativeObjectAccess soa(env);
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::CharArray> char_array(hs.NewHandle(soa.Decode<mirror::CharArray*>(buffer)));
+ soa.Decode<mirror::String*>(java_this)->GetChars(start, end, char_array, index);
+}
+
+static jstring String_intern(JNIEnv* env, jobject java_this) {
+ ScopedFastNativeObjectAccess soa(env);
+ mirror::String* s = soa.Decode<mirror::String*>(java_this);
mirror::String* result = s->Intern();
return soa.AddLocalReference<jstring>(result);
}
+static void String_setCharAt(JNIEnv* env, jobject java_this, jint index, jchar c) {
+ ScopedFastNativeObjectAccess soa(env);
+ soa.Decode<mirror::String*>(java_this)->SetCharAt(index, c);
+}
+
+static jcharArray String_toCharArray(JNIEnv* env, jobject java_this) {
+ ScopedFastNativeObjectAccess soa(env);
+ mirror::String* s = soa.Decode<mirror::String*>(java_this);
+ return soa.AddLocalReference<jcharArray>(s->ToCharArray(soa.Self()));
+}
+
static JNINativeMethod gMethods[] = {
+ NATIVE_METHOD(String, charAt, "!(I)C"),
NATIVE_METHOD(String, compareTo, "!(Ljava/lang/String;)I"),
+ NATIVE_METHOD(String, concat, "!(Ljava/lang/String;)Ljava/lang/String;"),
NATIVE_METHOD(String, fastIndexOf, "!(II)I"),
+ NATIVE_METHOD(String, fastSubstring, "!(II)Ljava/lang/String;"),
+ NATIVE_METHOD(String, getCharsNoCheck, "!(II[CI)V"),
NATIVE_METHOD(String, intern, "!()Ljava/lang/String;"),
+ NATIVE_METHOD(String, setCharAt, "!(IC)V"),
+ NATIVE_METHOD(String, toCharArray, "!()[C"),
};
void register_java_lang_String(JNIEnv* env) {
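
String_concat above takes a fast path when either operand is empty: it returns the other operand directly rather than allocating a new string, which is safe because java.lang.String is immutable. A minimal sketch of the same shortcut over std::string (safe here because the result is returned by value):

#include <string>

// Concatenation with the same fast path as String_concat above: when either
// operand is empty, hand back the other operand instead of building a copy.
std::string Concat(const std::string& lhs, const std::string& rhs) {
  if (lhs.empty()) {
    return rhs;
  }
  if (rhs.empty()) {
    return lhs;
  }
  std::string result;
  result.reserve(lhs.size() + rhs.size());  // One allocation for the combined length.
  result.append(lhs);
  result.append(rhs);
  return result;
}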
diff --git a/runtime/native/java_lang_StringFactory.cc b/runtime/native/java_lang_StringFactory.cc
new file mode 100644
index 0000000..34d6a37
--- /dev/null
+++ b/runtime/native/java_lang_StringFactory.cc
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "java_lang_StringFactory.h"
+
+#include "common_throws.h"
+#include "jni_internal.h"
+#include "mirror/object-inl.h"
+#include "mirror/string.h"
+#include "scoped_fast_native_object_access.h"
+#include "scoped_thread_state_change.h"
+#include "ScopedLocalRef.h"
+#include "ScopedPrimitiveArray.h"
+
+namespace art {
+
+static jstring StringFactory_newStringFromBytes(JNIEnv* env, jclass, jbyteArray java_data,
+ jint high, jint offset, jint byte_count) {
+ ScopedFastNativeObjectAccess soa(env);
+ if (UNLIKELY(java_data == nullptr)) {
+ ThrowNullPointerException("data == null");
+ return nullptr;
+ }
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ByteArray> byte_array(hs.NewHandle(soa.Decode<mirror::ByteArray*>(java_data)));
+ int32_t data_size = byte_array->GetLength();
+ if ((offset | byte_count) < 0 || byte_count > data_size - offset) {
+ soa.Self()->ThrowNewExceptionF("Ljava/lang/StringIndexOutOfBoundsException;",
+ "length=%d; regionStart=%d; regionLength=%d", data_size,
+ offset, byte_count);
+ return nullptr;
+ }
+ gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
+ mirror::String* result = mirror::String::AllocFromByteArray<true>(soa.Self(), byte_count,
+ byte_array, offset, high,
+ allocator_type);
+ return soa.AddLocalReference<jstring>(result);
+}
+
+static jstring StringFactory_newStringFromChars(JNIEnv* env, jclass, jint offset,
+ jint char_count, jcharArray java_data) {
+ ScopedFastNativeObjectAccess soa(env);
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::CharArray> char_array(hs.NewHandle(soa.Decode<mirror::CharArray*>(java_data)));
+ gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
+ mirror::String* result = mirror::String::AllocFromCharArray<true>(soa.Self(), char_count,
+ char_array, offset,
+ allocator_type);
+ return soa.AddLocalReference<jstring>(result);
+}
+
+static jstring StringFactory_newStringFromString(JNIEnv* env, jclass, jstring to_copy) {
+ ScopedFastNativeObjectAccess soa(env);
+ if (UNLIKELY(to_copy == nullptr)) {
+ ThrowNullPointerException("toCopy == null");
+ return nullptr;
+ }
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::String> string(hs.NewHandle(soa.Decode<mirror::String*>(to_copy)));
+ gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
+ mirror::String* result = mirror::String::AllocFromString<true>(soa.Self(), string->GetLength(),
+ string, 0, allocator_type);
+ return soa.AddLocalReference<jstring>(result);
+}
+
+static JNINativeMethod gMethods[] = {
+ NATIVE_METHOD(StringFactory, newStringFromBytes, "!([BIII)Ljava/lang/String;"),
+ NATIVE_METHOD(StringFactory, newStringFromChars, "!(II[C)Ljava/lang/String;"),
+ NATIVE_METHOD(StringFactory, newStringFromString, "!(Ljava/lang/String;)Ljava/lang/String;"),
+};
+
+void register_java_lang_StringFactory(JNIEnv* env) {
+ REGISTER_NATIVE_METHODS("java/lang/StringFactory");
+}
+
+} // namespace art
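
StringFactory_newStringFromBytes validates its region with a compact idiom worth spelling out: OR-ing two signed values is negative iff at least one of them is negative, and once both are known non-negative, comparing count against size - offset cannot overflow the way the naive offset + count > size can. A minimal sketch of just that check:

#include <cstdint>

// Region check in the style of StringFactory_newStringFromBytes above.
// (offset | count) < 0 is true iff either value is negative; after that,
// count <= size - offset is an overflow-safe form of offset + count <= size.
bool RegionInBounds(int32_t size, int32_t offset, int32_t count) {
  return (offset | count) >= 0 && count <= size - offset;
}

The subtraction form matters when offset and count are both large: offset + count can wrap past INT32_MAX and appear "in bounds", while size - offset stays well-defined once offset is known non-negative and is simply exceeded by count.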
diff --git a/runtime/native/java_lang_StringFactory.h b/runtime/native/java_lang_StringFactory.h
new file mode 100644
index 0000000..c476ad3
--- /dev/null
+++ b/runtime/native/java_lang_StringFactory.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_STRINGFACTORY_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_STRINGFACTORY_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_java_lang_StringFactory(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_STRINGFACTORY_H_
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index d3b52ba..6569d83 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -43,7 +43,7 @@
ScopedFastNativeObjectAccess soa(env);
MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Thread* thread = Thread::FromManagedThread(soa, java_thread);
- return (thread != NULL) ? thread->IsInterrupted() : JNI_FALSE;
+ return (thread != nullptr) ? thread->IsInterrupted() : JNI_FALSE;
}
static void Thread_nativeCreate(JNIEnv* env, jclass, jobject java_thread, jlong stack_size,
@@ -64,7 +64,7 @@
ThreadState internal_thread_state = (has_been_started ? kTerminated : kStarting);
MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Thread* thread = Thread::FromManagedThread(soa, java_thread);
- if (thread != NULL) {
+ if (thread != nullptr) {
internal_thread_state = thread->GetState();
}
switch (internal_thread_state) {
@@ -84,6 +84,7 @@
case kWaitingInMainDebuggerLoop: return kJavaWaiting;
case kWaitingForDebuggerSuspension: return kJavaWaiting;
case kWaitingForDeoptimization: return kJavaWaiting;
+ case kWaitingForGetObjectsAllocated: return kJavaWaiting;
case kWaitingForJniOnLoad: return kJavaWaiting;
case kWaitingForSignalCatcherOutput: return kJavaWaiting;
case kWaitingInMainSignalCatcherLoop: return kJavaWaiting;
@@ -99,7 +100,7 @@
static jboolean Thread_nativeHoldsLock(JNIEnv* env, jobject java_thread, jobject java_object) {
ScopedObjectAccess soa(env);
mirror::Object* object = soa.Decode<mirror::Object*>(java_object);
- if (object == NULL) {
+ if (object == nullptr) {
ThrowNullPointerException("object == null");
return JNI_FALSE;
}
@@ -112,7 +113,7 @@
ScopedFastNativeObjectAccess soa(env);
MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Thread* thread = Thread::FromManagedThread(soa, java_thread);
- if (thread != NULL) {
+ if (thread != nullptr) {
thread->Interrupt(soa.Self());
}
}
@@ -133,7 +134,7 @@
bool timed_out;
// Take suspend thread lock to avoid races with threads trying to suspend this one.
Thread* thread = thread_list->SuspendThreadByPeer(peer, true, false, &timed_out);
- if (thread != NULL) {
+ if (thread != nullptr) {
{
ScopedObjectAccess soa(env);
thread->SetThreadName(name.c_str());
@@ -154,7 +155,7 @@
ScopedObjectAccess soa(env);
MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
Thread* thread = Thread::FromManagedThread(soa, java_thread);
- if (thread != NULL) {
+ if (thread != nullptr) {
thread->SetNativePriority(new_priority);
}
}
diff --git a/runtime/native/java_lang_reflect_Array.cc b/runtime/native/java_lang_reflect_Array.cc
index eddd7de..beb953b 100644
--- a/runtime/native/java_lang_reflect_Array.cc
+++ b/runtime/native/java_lang_reflect_Array.cc
@@ -27,13 +27,14 @@
namespace art {
-static jobject Array_createMultiArray(JNIEnv* env, jclass, jclass javaElementClass, jobject javaDimArray) {
+static jobject Array_createMultiArray(
+ JNIEnv* env, jclass, jclass javaElementClass, jobject javaDimArray) {
ScopedFastNativeObjectAccess soa(env);
- DCHECK(javaElementClass != NULL);
+ DCHECK(javaElementClass != nullptr);
StackHandleScope<2> hs(soa.Self());
Handle<mirror::Class> element_class(hs.NewHandle(soa.Decode<mirror::Class*>(javaElementClass)));
DCHECK(element_class->IsClass());
- DCHECK(javaDimArray != NULL);
+ DCHECK(javaDimArray != nullptr);
mirror::Object* dimensions_obj = soa.Decode<mirror::Object*>(javaDimArray);
DCHECK(dimensions_obj->IsArrayInstance());
DCHECK_EQ(dimensions_obj->GetClass()->GetComponentType()->GetPrimitiveType(),
@@ -47,18 +48,18 @@
static jobject Array_createObjectArray(JNIEnv* env, jclass, jclass javaElementClass, jint length) {
ScopedFastNativeObjectAccess soa(env);
- DCHECK(javaElementClass != NULL);
+ DCHECK(javaElementClass != nullptr);
if (UNLIKELY(length < 0)) {
ThrowNegativeArraySizeException(length);
- return NULL;
+ return nullptr;
}
mirror::Class* element_class = soa.Decode<mirror::Class*>(javaElementClass);
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
mirror::Class* array_class = class_linker->FindArrayClass(soa.Self(), &element_class);
- if (UNLIKELY(array_class == NULL)) {
+ if (UNLIKELY(array_class == nullptr)) {
CHECK(soa.Self()->IsExceptionPending());
- return NULL;
+ return nullptr;
}
DCHECK(array_class->IsObjectArrayClass());
mirror::Array* new_array = mirror::ObjectArray<mirror::Object*>::Alloc(
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index 5e1a4c5..0fd6759 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -21,6 +21,7 @@
#include "mirror/art_method.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
+#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "reflection.h"
#include "scoped_fast_native_object_access.h"
@@ -29,52 +30,60 @@
namespace art {
/*
- * We get here through Constructor.newInstance(). The Constructor object
- * would not be available if the constructor weren't public (per the
- * definition of Class.getConstructor), so we can skip the method access
- * check. We can also safely assume the constructor isn't associated
- * with an interface, array, or primitive class.
+ * We can also safely assume the constructor isn't associated
+ * with an interface, array, or primitive class. If this is coming from
+ * native, it is OK to avoid access checks since JNI does not enforce them.
*/
-static jobject Constructor_newInstance(JNIEnv* env, jobject javaMethod, jobjectArray javaArgs,
- jboolean accessible) {
+static jobject Constructor_newInstance(JNIEnv* env, jobject javaMethod, jobjectArray javaArgs) {
ScopedFastNativeObjectAccess soa(env);
- mirror::ArtMethod* m = mirror::ArtMethod::FromReflectedMethod(soa, javaMethod);
+ mirror::Constructor* m = soa.Decode<mirror::Constructor*>(javaMethod);
StackHandleScope<1> hs(soa.Self());
Handle<mirror::Class> c(hs.NewHandle(m->GetDeclaringClass()));
if (UNLIKELY(c->IsAbstract())) {
- soa.Self()->ThrowNewExceptionF("Ljava/lang/InstantiationException;",
- "Can't instantiate %s %s",
+ soa.Self()->ThrowNewExceptionF("Ljava/lang/InstantiationException;", "Can't instantiate %s %s",
c->IsInterface() ? "interface" : "abstract class",
PrettyDescriptor(c.Get()).c_str());
return nullptr;
}
-
+ // Verify that we can access the class.
+ if (!m->IsAccessible() && !c->IsPublic()) {
+ auto* caller = GetCallingClass(soa.Self(), 1);
+ // If caller is null, then we were called from JNI; just skip the check, since JNI avoids
+ // most access checks anyway. TODO: Investigate if this is the correct behavior.
+ if (caller != nullptr && !caller->CanAccess(c.Get())) {
+ soa.Self()->ThrowNewExceptionF(
+ "Ljava/lang/IllegalAccessException;", "%s is not accessible from %s",
+ PrettyClass(c.Get()).c_str(), PrettyClass(caller).c_str());
+ return nullptr;
+ }
+ }
if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(soa.Self(), c, true, true)) {
DCHECK(soa.Self()->IsExceptionPending());
return nullptr;
}
-
bool movable = true;
- if (!kMovingMethods && c->IsArtMethodClass()) {
- movable = false;
- } else if (!kMovingClasses && c->IsClassClass()) {
+ if (!kMovingClasses && c->IsClassClass()) {
movable = false;
}
+
+ // String constructor is replaced by a StringFactory method in InvokeMethod.
+ if (c->IsStringClass()) {
+ return InvokeMethod(soa, javaMethod, nullptr, javaArgs, 1);
+ }
+
mirror::Object* receiver =
movable ? c->AllocObject(soa.Self()) : c->AllocNonMovableObject(soa.Self());
if (receiver == nullptr) {
return nullptr;
}
-
jobject javaReceiver = soa.AddLocalReference<jobject>(receiver);
- InvokeMethod(soa, javaMethod, javaReceiver, javaArgs, (accessible == JNI_TRUE));
-
+ InvokeMethod(soa, javaMethod, javaReceiver, javaArgs, 1);
// Constructors are ()V methods, so we shouldn't touch the result of InvokeMethod.
return javaReceiver;
}
static JNINativeMethod gMethods[] = {
- NATIVE_METHOD(Constructor, newInstance, "!([Ljava/lang/Object;Z)Ljava/lang/Object;"),
+ NATIVE_METHOD(Constructor, newInstance, "!([Ljava/lang/Object;)Ljava/lang/Object;"),
};
void register_java_lang_reflect_Constructor(JNIEnv* env) {
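
The visibility check added to Constructor_newInstance has a deliberate carve-out: a null calling class means the request arrived through JNI, which does not enforce access checks, so the check is skipped (the TODO above flags this for review). A heavily simplified standalone sketch of that decision; GetCallingClass and CanAccess here are hypothetical stand-ins for ART's stack walk and real access rules:

struct Class {
  bool is_public;
};

// Hypothetical stand-ins; a real implementation walks the stack for the
// calling class and applies the full JLS access rules.
static const Class* GetCallingClass() { return nullptr; }  // nullptr = JNI entry.
static bool CanAccess(const Class& caller, const Class& target) {
  return caller.is_public || target.is_public;  // Deliberately simplified rule.
}

// Mirrors the shape of the newInstance check above: a public class needs no
// check, and a null caller (JNI) skips the check entirely.
static bool MayInstantiate(const Class& target) {
  if (target.is_public) {
    return true;
  }
  const Class* caller = GetCallingClass();
  if (caller == nullptr) {
    return true;  // JNI caller: no access check, matching the TODO-flagged behavior.
  }
  return CanAccess(*caller, target);
}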
diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc
index 9859746..c20d832 100644
--- a/runtime/native/java_lang_reflect_Method.cc
+++ b/runtime/native/java_lang_reflect_Method.cc
@@ -30,9 +30,9 @@
namespace art {
static jobject Method_invoke(JNIEnv* env, jobject javaMethod, jobject javaReceiver,
- jobject javaArgs, jboolean accessible) {
+ jobject javaArgs) {
ScopedFastNativeObjectAccess soa(env);
- return InvokeMethod(soa, javaMethod, javaReceiver, javaArgs, (accessible == JNI_TRUE));
+ return InvokeMethod(soa, javaMethod, javaReceiver, javaArgs);
}
static jobject Method_getExceptionTypesNative(JNIEnv* env, jobject javaMethod) {
@@ -55,7 +55,7 @@
}
static JNINativeMethod gMethods[] = {
- NATIVE_METHOD(Method, invoke, "!(Ljava/lang/Object;[Ljava/lang/Object;Z)Ljava/lang/Object;"),
+ NATIVE_METHOD(Method, invoke, "!(Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object;"),
NATIVE_METHOD(Method, getExceptionTypesNative, "!()[Ljava/lang/Class;"),
};
diff --git a/runtime/native/java_lang_reflect_Proxy.cc b/runtime/native/java_lang_reflect_Proxy.cc
index baf8b24..4a6ab40 100644
--- a/runtime/native/java_lang_reflect_Proxy.cc
+++ b/runtime/native/java_lang_reflect_Proxy.cc
@@ -30,13 +30,12 @@
jobject loader, jobjectArray methods, jobjectArray throws) {
ScopedFastNativeObjectAccess soa(env);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- mirror::Class* result = class_linker->CreateProxyClass(soa, name, interfaces, loader, methods,
- throws);
- return soa.AddLocalReference<jclass>(result);
+ return soa.AddLocalReference<jclass>(class_linker->CreateProxyClass(
+ soa, name, interfaces, loader, methods, throws));
}
static JNINativeMethod gMethods[] = {
- NATIVE_METHOD(Proxy, generateProxy, "!(Ljava/lang/String;[Ljava/lang/Class;Ljava/lang/ClassLoader;[Ljava/lang/reflect/ArtMethod;[[Ljava/lang/Class;)Ljava/lang/Class;"),
+ NATIVE_METHOD(Proxy, generateProxy, "!(Ljava/lang/String;[Ljava/lang/Class;Ljava/lang/ClassLoader;[Ljava/lang/reflect/Method;[[Ljava/lang/Class;)Ljava/lang/Class;"),
};
void register_java_lang_reflect_Proxy(JNIEnv* env) {
diff --git a/runtime/native/libcore_util_CharsetUtils.cc b/runtime/native/libcore_util_CharsetUtils.cc
new file mode 100644
index 0000000..1216824
--- /dev/null
+++ b/runtime/native/libcore_util_CharsetUtils.cc
@@ -0,0 +1,264 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_internal.h"
+#include "mirror/string.h"
+#include "mirror/string-inl.h"
+#include "native/libcore_util_CharsetUtils.h"
+#include "scoped_fast_native_object_access.h"
+#include "ScopedPrimitiveArray.h"
+#include "unicode/utf16.h"
+
+#include <string.h>
+
+namespace art {
+
+/**
+ * Approximates java.lang.UnsafeByteSequence so we don't have to pay the cost of calling back into
+ * Java when converting a char[] to a UTF-8 byte[]. This lets us have UTF-8 conversions slightly
+ * faster than ICU for large char[]s without paying for the NIO overhead with small char[]s.
+ *
+ * We could avoid this by keeping the UTF-8 bytes on the native heap until we're done and only
+ * creating a byte[] on the Java heap when we know how big it needs to be, but one shouldn't lie
+ * to the garbage collector (nor hide potentially large allocations from it).
+ *
+ * Because a call to append might require an allocation, it might fail. Callers should always
+ * check the return value of append.
+ */
+class NativeUnsafeByteSequence {
+ public:
+ explicit NativeUnsafeByteSequence(JNIEnv* env)
+ : mEnv(env), mJavaArray(nullptr), mRawArray(nullptr), mSize(-1), mOffset(0) {
+ }
+
+ ~NativeUnsafeByteSequence() {
+ // Release our pointer to the raw array, copying changes back to the Java heap.
+ if (mRawArray != nullptr) {
+ mEnv->ReleaseByteArrayElements(mJavaArray, mRawArray, 0);
+ }
+ }
+
+ bool append(jbyte b) {
+ if (mOffset == mSize && !resize(mSize * 2)) {
+ return false;
+ }
+ mRawArray[mOffset++] = b;
+ return true;
+ }
+
+ bool resize(int newSize) {
+ if (newSize == mSize) {
+ return true;
+ }
+
+ // Allocate a new array.
+ jbyteArray newJavaArray = mEnv->NewByteArray(newSize);
+ if (newJavaArray == nullptr) {
+ return false;
+ }
+ jbyte* newRawArray = mEnv->GetByteArrayElements(newJavaArray, nullptr);
+ if (newRawArray == nullptr) {
+ return false;
+ }
+
+ // Copy data out of the old array and then let go of it.
+ // Note that we may be trimming the array.
+ if (mRawArray != nullptr) {
+ memcpy(newRawArray, mRawArray, mOffset);
+ mEnv->ReleaseByteArrayElements(mJavaArray, mRawArray, JNI_ABORT);
+ mEnv->DeleteLocalRef(mJavaArray);
+ }
+
+ // Point ourselves at the new array.
+ mJavaArray = newJavaArray;
+ mRawArray = newRawArray;
+ mSize = newSize;
+ return true;
+ }
+
+ jbyteArray toByteArray() {
+ // Trim any unused space, if necessary.
+ bool okay = resize(mOffset);
+ return okay ? mJavaArray : nullptr;
+ }
+
+ private:
+ JNIEnv* mEnv;
+ jbyteArray mJavaArray;
+ jbyte* mRawArray;
+ jint mSize;
+ jint mOffset;
+
+ // Disallow copy and assignment.
+ NativeUnsafeByteSequence(const NativeUnsafeByteSequence&);
+ void operator=(const NativeUnsafeByteSequence&);
+};
+
+static void CharsetUtils_asciiBytesToChars(JNIEnv* env, jclass, jbyteArray javaBytes, jint offset,
+ jint length, jcharArray javaChars) {
+ ScopedByteArrayRO bytes(env, javaBytes);
+ if (bytes.get() == nullptr) {
+ return;
+ }
+ ScopedCharArrayRW chars(env, javaChars);
+ if (chars.get() == nullptr) {
+ return;
+ }
+
+ const jbyte* src = &bytes[offset];
+ jchar* dst = &chars[0];
+ static const jchar REPLACEMENT_CHAR = 0xfffd;
+ for (int i = length - 1; i >= 0; --i) {
+ jchar ch = static_cast<jchar>(*src++ & 0xff);
+ *dst++ = (ch <= 0x7f) ? ch : REPLACEMENT_CHAR;
+ }
+}
+
+static void CharsetUtils_isoLatin1BytesToChars(JNIEnv* env, jclass, jbyteArray javaBytes,
+ jint offset, jint length, jcharArray javaChars) {
+ ScopedByteArrayRO bytes(env, javaBytes);
+ if (bytes.get() == nullptr) {
+ return;
+ }
+ ScopedCharArrayRW chars(env, javaChars);
+ if (chars.get() == nullptr) {
+ return;
+ }
+
+ const jbyte* src = &bytes[offset];
+ jchar* dst = &chars[0];
+ for (int i = length - 1; i >= 0; --i) {
+ *dst++ = static_cast<jchar>(*src++ & 0xff);
+ }
+}
+
+/**
+ * Translates the given characters to US-ASCII or ISO-8859-1 bytes, using the fact that
+ * Unicode code points between U+0000 and U+007f inclusive are identical to US-ASCII, while
+ * U+0000 to U+00ff inclusive are identical to ISO-8859-1.
+ */
+static jbyteArray charsToBytes(JNIEnv* env, jstring java_string, jint offset, jint length,
+ jchar maxValidChar) {
+ ScopedObjectAccess soa(env);
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::String> string(hs.NewHandle(soa.Decode<mirror::String*>(java_string)));
+ if (string.Get() == nullptr) {
+ return nullptr;
+ }
+
+ jbyteArray javaBytes = env->NewByteArray(length);
+ ScopedByteArrayRW bytes(env, javaBytes);
+ if (bytes.get() == nullptr) {
+ return nullptr;
+ }
+
+ const jchar* src = &(string->GetValue()[offset]);
+ jbyte* dst = &bytes[0];
+ for (int i = length - 1; i >= 0; --i) {
+ jchar ch = *src++;
+ if (ch > maxValidChar) {
+ ch = '?';
+ }
+ *dst++ = static_cast<jbyte>(ch);
+ }
+
+ return javaBytes;
+}
+
+static jbyteArray CharsetUtils_toAsciiBytes(JNIEnv* env, jclass, jstring java_string, jint offset,
+ jint length) {
+ return charsToBytes(env, java_string, offset, length, 0x7f);
+}
+
+static jbyteArray CharsetUtils_toIsoLatin1Bytes(JNIEnv* env, jclass, jstring java_string,
+ jint offset, jint length) {
+ return charsToBytes(env, java_string, offset, length, 0xff);
+}
+
+static jbyteArray CharsetUtils_toUtf8Bytes(JNIEnv* env, jclass, jstring java_string, jint offset,
+ jint length) {
+ ScopedObjectAccess soa(env);
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::String> string(hs.NewHandle(soa.Decode<mirror::String*>(java_string)));
+ if (string.Get() == nullptr) {
+ return nullptr;
+ }
+
+ NativeUnsafeByteSequence out(env);
+ if (!out.resize(length)) {
+ return nullptr;
+ }
+
+ const int end = offset + length;
+ for (int i = offset; i < end; ++i) {
+ jint ch = string->CharAt(i);
+ if (ch < 0x80) {
+ // One byte.
+ if (!out.append(ch)) {
+ return nullptr;
+ }
+ } else if (ch < 0x800) {
+ // Two bytes.
+ if (!out.append((ch >> 6) | 0xc0) || !out.append((ch & 0x3f) | 0x80)) {
+ return nullptr;
+ }
+ } else if (U16_IS_SURROGATE(ch)) {
+ // A supplementary character.
+ jchar high = static_cast<jchar>(ch);
+ jchar low = (i + 1 != end) ? string->CharAt(i + 1) : 0;
+ if (!U16_IS_SURROGATE_LEAD(high) || !U16_IS_SURROGATE_TRAIL(low)) {
+ if (!out.append('?')) {
+ return nullptr;
+ }
+ continue;
+ }
+ // Now we know we have a *valid* surrogate pair, we can consume the low surrogate.
+ ++i;
+ ch = U16_GET_SUPPLEMENTARY(high, low);
+ // Four bytes.
+ jbyte b1 = (ch >> 18) | 0xf0;
+ jbyte b2 = ((ch >> 12) & 0x3f) | 0x80;
+ jbyte b3 = ((ch >> 6) & 0x3f) | 0x80;
+ jbyte b4 = (ch & 0x3f) | 0x80;
+ if (!out.append(b1) || !out.append(b2) || !out.append(b3) || !out.append(b4)) {
+ return nullptr;
+ }
+ } else {
+ // Three bytes.
+ jbyte b1 = (ch >> 12) | 0xe0;
+ jbyte b2 = ((ch >> 6) & 0x3f) | 0x80;
+ jbyte b3 = (ch & 0x3f) | 0x80;
+ if (!out.append(b1) || !out.append(b2) || !out.append(b3)) {
+ return nullptr;
+ }
+ }
+ }
+ return out.toByteArray();
+}
+
+static JNINativeMethod gMethods[] = {
+ NATIVE_METHOD(CharsetUtils, asciiBytesToChars, "!([BII[C)V"),
+ NATIVE_METHOD(CharsetUtils, isoLatin1BytesToChars, "!([BII[C)V"),
+ NATIVE_METHOD(CharsetUtils, toAsciiBytes, "!(Ljava/lang/String;II)[B"),
+ NATIVE_METHOD(CharsetUtils, toIsoLatin1Bytes, "!(Ljava/lang/String;II)[B"),
+ NATIVE_METHOD(CharsetUtils, toUtf8Bytes, "!(Ljava/lang/String;II)[B"),
+};
+
+void register_libcore_util_CharsetUtils(JNIEnv* env) {
+ REGISTER_NATIVE_METHODS("libcore/util/CharsetUtils");
+}
+
+} // namespace art
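
CharsetUtils_toUtf8Bytes picks its encoding branch by code-point range: one byte below U+0080, two below U+0800, four for a validated surrogate pair, and three otherwise, with an unpaired surrogate replaced by '?'. A compact standalone sketch of the same byte math for a single code point, assuming any surrogate pair has already been validated and combined:

#include <cstdint>
#include <vector>

// Append the UTF-8 encoding of one code point (U+0000..U+10FFFF), mirroring
// the shift-and-mask arithmetic in CharsetUtils_toUtf8Bytes above.
void AppendUtf8(std::vector<uint8_t>& out, uint32_t ch) {
  if (ch < 0x80) {            // One byte: 0xxxxxxx
    out.push_back(static_cast<uint8_t>(ch));
  } else if (ch < 0x800) {    // Two bytes: 110xxxxx 10xxxxxx
    out.push_back(static_cast<uint8_t>((ch >> 6) | 0xc0));
    out.push_back(static_cast<uint8_t>((ch & 0x3f) | 0x80));
  } else if (ch < 0x10000) {  // Three bytes: 1110xxxx 10xxxxxx 10xxxxxx
    out.push_back(static_cast<uint8_t>((ch >> 12) | 0xe0));
    out.push_back(static_cast<uint8_t>(((ch >> 6) & 0x3f) | 0x80));
    out.push_back(static_cast<uint8_t>((ch & 0x3f) | 0x80));
  } else {                    // Four bytes: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
    out.push_back(static_cast<uint8_t>((ch >> 18) | 0xf0));
    out.push_back(static_cast<uint8_t>(((ch >> 12) & 0x3f) | 0x80));
    out.push_back(static_cast<uint8_t>(((ch >> 6) & 0x3f) | 0x80));
    out.push_back(static_cast<uint8_t>((ch & 0x3f) | 0x80));
  }
}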
diff --git a/runtime/native/libcore_util_CharsetUtils.h b/runtime/native/libcore_util_CharsetUtils.h
new file mode 100644
index 0000000..3518bdb
--- /dev/null
+++ b/runtime/native/libcore_util_CharsetUtils.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_LIBCORE_UTIL_CHARSETUTILS_H_
+#define ART_RUNTIME_NATIVE_LIBCORE_UTIL_CHARSETUTILS_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_libcore_util_CharsetUtils(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_LIBCORE_UTIL_CHARSETUTILS_H_
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
index 987427e..b96ddc8 100644
--- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
@@ -43,7 +43,7 @@
/*
* Get a stack trace as an array of StackTraceElement objects. Returns
- * NULL on failure, e.g. if the threadId couldn't be found.
+ * nullptr on failure, e.g. if the threadId couldn't be found.
*/
static jobjectArray DdmVmInternal_getStackTraceById(JNIEnv* env, jclass, jint thin_lock_id) {
jobjectArray trace = nullptr;
@@ -145,7 +145,7 @@
}
jbyteArray result = env->NewByteArray(bytes.size());
- if (result != NULL) {
+ if (result != nullptr) {
env->SetByteArrayRegion(result, 0, bytes.size(), reinterpret_cast<const jbyte*>(&bytes[0]));
}
return result;
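
The DDM reply path above uses the standard JNI recipe for returning a native buffer: NewByteArray may return null on allocation failure (leaving an OutOfMemoryError pending), and SetByteArrayRegion is only attempted when it did not. A minimal sketch of that recipe:

#include <jni.h>
#include <vector>

// Copy a native byte vector into a fresh Java byte[]; returns nullptr (with
// an exception pending) if the allocation fails, as in the code above.
jbyteArray ToJavaByteArray(JNIEnv* env, const std::vector<uint8_t>& bytes) {
  jbyteArray result = env->NewByteArray(static_cast<jsize>(bytes.size()));
  if (result != nullptr && !bytes.empty()) {
    env->SetByteArrayRegion(result, 0, static_cast<jsize>(bytes.size()),
                            reinterpret_cast<const jbyte*>(bytes.data()));
  }
  return result;
}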
diff --git a/runtime/nth_caller_visitor.h b/runtime/nth_caller_visitor.h
index a851f21..d2d7fa8 100644
--- a/runtime/nth_caller_visitor.h
+++ b/runtime/nth_caller_visitor.h
@@ -27,20 +27,23 @@
// Walks up the stack 'n' callers, when used with Thread::WalkStack.
struct NthCallerVisitor : public StackVisitor {
NthCallerVisitor(Thread* thread, size_t n_in, bool include_runtime_and_upcalls = false)
- : StackVisitor(thread, NULL), n(n_in),
- include_runtime_and_upcalls_(include_runtime_and_upcalls), count(0), caller(NULL) {}
+ : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ n(n_in),
+ include_runtime_and_upcalls_(include_runtime_and_upcalls),
+ count(0),
+ caller(nullptr) {}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* m = GetMethod();
bool do_count = false;
- if (m == NULL || m->IsRuntimeMethod()) {
+ if (m == nullptr || m->IsRuntimeMethod()) {
// Upcall.
do_count = include_runtime_and_upcalls_;
} else {
do_count = true;
}
if (do_count) {
- DCHECK(caller == NULL);
+ DCHECK(caller == nullptr);
if (count == n) {
caller = m;
return false;
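
The updated NthCallerVisitor keeps the same contract: visit frames from the top of the stack, count runtime frames and upcalls only when asked to, and stop once the n-th counted frame is found. A toy model of that contract with a plain frame list, not ART's StackVisitor API (all names here are made up):

#include <cstddef>
#include <string>
#include <vector>

struct Frame {
  std::string method;
  bool is_runtime;  // Models m == nullptr || m->IsRuntimeMethod() above.
};

// Returns the n-th counted frame, or nullptr if the stack is too shallow.
const Frame* FindNthCaller(const std::vector<Frame>& stack, size_t n,
                           bool include_runtime_and_upcalls) {
  size_t count = 0;
  for (const Frame& f : stack) {
    bool do_count = f.is_runtime ? include_runtime_and_upcalls : true;
    if (!do_count) {
      continue;
    }
    if (count == n) {
      return &f;  // Equivalent to setting `caller` and returning false.
    }
    ++count;
  }
  return nullptr;
}
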
diff --git a/runtime/oat.cc b/runtime/oat.cc
index c223e2e..4f6aabc 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -27,6 +27,8 @@
constexpr uint8_t OatHeader::kOatMagic[4];
constexpr uint8_t OatHeader::kOatVersion[4];
+constexpr const char OatHeader::kTrueValue[];
+constexpr const char OatHeader::kFalseValue[];
static size_t ComputeOatHeaderSize(const SafeMap<std::string, std::string>* variable_data) {
size_t estimate = 0U;
@@ -443,9 +445,16 @@
}
bool OatHeader::IsPic() const {
- const char* pic_string = GetStoreValueByKey(OatHeader::kPicKey);
- static const char kTrue[] = "true";
- return (pic_string != nullptr && strncmp(pic_string, kTrue, sizeof(kTrue)) == 0);
+ return IsKeyEnabled(OatHeader::kPicKey);
+}
+
+bool OatHeader::IsDebuggable() const {
+ return IsKeyEnabled(OatHeader::kDebuggableKey);
+}
+
+bool OatHeader::IsKeyEnabled(const char* key) const {
+ const char* key_value = GetStoreValueByKey(key);
+ return (key_value != nullptr && strncmp(key_value, kTrueValue, sizeof(kTrueValue)) == 0);
}
void OatHeader::Flatten(const SafeMap<std::string, std::string>* key_value_store) {
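
IsKeyEnabled reduces both IsPic() and IsDebuggable() to one shared lookup against the header's key-value store. A self-contained sketch of the same convention, assuming a plain std::map in place of the flattened SafeMap data:

#include <cstring>
#include <map>
#include <string>

static constexpr const char kTrueValue[] = "true";

bool IsKeyEnabled(const std::map<std::string, std::string>& store,
                  const std::string& key) {
  auto it = store.find(key);
  if (it == store.end()) {
    return false;  // A missing key reads as disabled, like the null check above.
  }
  // sizeof(kTrueValue) includes the terminator, so this is an exact match.
  return strncmp(it->second.c_str(), kTrueValue, sizeof(kTrueValue)) == 0;
}
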
diff --git a/runtime/oat.h b/runtime/oat.h
index a31e09a..604e161 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,14 +32,18 @@
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '0', '6', '1', '\0' };
+ static constexpr uint8_t kOatVersion[] = { '0', '6', '3', '\0' };
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
static constexpr const char* kDex2OatHostKey = "dex2oat-host";
static constexpr const char* kPicKey = "pic";
+ static constexpr const char* kDebuggableKey = "debuggable";
static constexpr const char* kClassPathKey = "classpath";
+ static constexpr const char kTrueValue[] = "true";
+ static constexpr const char kFalseValue[] = "false";
+
static OatHeader* Create(InstructionSet instruction_set,
const InstructionSetFeatures* instruction_set_features,
const std::vector<const DexFile*>* dex_files,
@@ -99,6 +103,7 @@
size_t GetHeaderSize() const;
bool IsPic() const;
+ bool IsDebuggable() const;
private:
OatHeader(InstructionSet instruction_set,
@@ -108,6 +113,9 @@
uint32_t image_file_location_oat_data_begin,
const SafeMap<std::string, std::string>* variable_data);
+ // Returns true if the value of the given key is "true", false otherwise.
+ bool IsKeyEnabled(const char* key) const;
+
void Flatten(const SafeMap<std::string, std::string>* variable_data);
uint8_t magic_[4];
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index eddbd8a..63ee4b1 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -497,7 +497,7 @@
MutexLock mu(Thread::Current(), secondary_lookup_lock_);
auto secondary_lb = secondary_oat_dex_files_.lower_bound(key);
if (secondary_lb != secondary_oat_dex_files_.end() && key == secondary_lb->first) {
- oat_dex_file = secondary_lb->second; // May be nullptr.
+ oat_dex_file = secondary_lb->second; // May be null.
} else {
// We haven't seen this dex_location before, we must check the canonical location.
std::string dex_canonical_location = DexFile::GetDexCanonicalLocation(dex_location);
@@ -506,8 +506,8 @@
auto canonical_it = oat_dex_files_.find(canonical_key);
if (canonical_it != oat_dex_files_.end()) {
oat_dex_file = canonical_it->second;
- } // else keep nullptr.
- } // else keep nullptr.
+ } // else keep null.
+ } // else keep null.
// Copy the key to the string_cache_ and store the result in secondary map.
string_cache_.emplace_back(key.data(), key.length());
@@ -703,6 +703,10 @@
// TODO: Check against oat_patches. b/18144996
}
+bool OatFile::IsDebuggable() const {
+ return GetOatHeader().IsDebuggable();
+}
+
static constexpr char kDexClassPathEncodingSeparator = '*';
std::string OatFile::EncodeDexFileDependencies(const std::vector<const DexFile*>& dex_files) {
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 42c60dc..12e9f6c 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -48,7 +48,7 @@
static OatFile* OpenWithElfFile(ElfFile* elf_file, const std::string& location,
const char* abs_dex_location,
std::string* error_msg);
- // Open an oat file. Returns NULL on failure. Requested base can
+ // Open an oat file. Returns null on failure. Requested base can
// optionally be used to request where the file should be loaded.
// See the ResolveRelativeEncodedDexLocation for a description of how the
// abs_dex_location argument is used.
@@ -81,6 +81,9 @@
bool IsPic() const;
+ // Indicates whether the oat file was compiled with full debugging capability.
+ bool IsDebuggable() const;
+
ElfFile* GetElfFile() const {
CHECK_NE(reinterpret_cast<uintptr_t>(elf_file_.get()), reinterpret_cast<uintptr_t>(nullptr))
<< "Cannot get an elf file from " << GetLocation();
@@ -149,7 +152,7 @@
template<class T>
T GetOatPointer(uint32_t offset) const {
if (offset == 0) {
- return NULL;
+ return nullptr;
}
return reinterpret_cast<T>(begin_ + offset);
}
@@ -177,7 +180,7 @@
const OatMethod GetOatMethod(uint32_t method_index) const;
// Return a pointer to the OatMethodOffsets for the requested
- // method_index, or nullptr if none is present. Note that most
+ // method_index, or null if none is present. Note that most
// callers should use GetOatMethod.
const OatMethodOffsets* GetOatMethodOffsets(uint32_t method_index) const;
@@ -238,7 +241,7 @@
// Returns the absolute dex location for the encoded relative dex location.
//
- // If not nullptr, abs_dex_location is used to resolve the absolute dex
+ // If not null, abs_dex_location is used to resolve the absolute dex
// location of relative dex locations encoded in the oat file.
// For example, given absolute location "/data/app/foo/base.apk", encoded
// dex locations "base.apk", "base.apk:classes2.dex", etc. would be resolved
@@ -300,10 +303,10 @@
// Pointer to end of oat region for bounds checking.
const uint8_t* end_;
- // Pointer to the .bss section, if present, otherwise nullptr.
+ // Pointer to the .bss section, if present, otherwise null.
const uint8_t* bss_begin_;
- // Pointer to the end of the .bss section, if present, otherwise nullptr.
+ // Pointer to the end of the .bss section, if present, otherwise null.
const uint8_t* bss_end_;
// Was this oat_file loaded executable?
@@ -331,7 +334,7 @@
// Map each location and canonical location (if different) retrieved from the
// oat file to its OatDexFile. This map doesn't change after it's constructed in Setup()
// and therefore doesn't need any locking and provides the cheapest dex file lookup
- // for GetOatDexFile() for a very frequent use case. Never contains a nullptr value.
+ // for GetOatDexFile() for a very frequent use case. Never contains a null value.
Table oat_dex_files_;
// Lock guarding all members needed for secondary lookup in GetOatDexFile().
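
The GetOatDexFile hunks above preserve a two-level lookup: a primary map filled at setup time, and a secondary map that caches results for non-canonical location strings, including misses. A simplified sketch of that strategy, with a placeholder value type rather than ART's OatDexFile:

#include <map>
#include <string>

struct DexEntry {};  // Placeholder for the real OatDexFile.

DexEntry* Lookup(const std::map<std::string, DexEntry*>& primary,
                 std::map<std::string, DexEntry*>& secondary,
                 const std::string& location,
                 const std::string& canonical_location) {
  auto it = primary.find(location);
  if (it != primary.end()) {
    return it->second;
  }
  auto sec = secondary.find(location);
  if (sec != secondary.end()) {
    return sec->second;  // May be null: misses are cached too.
  }
  DexEntry* result = nullptr;
  if (canonical_location != location) {
    auto canon = primary.find(canonical_location);
    if (canon != primary.end()) {
      result = canon->second;
    }
  }  // else keep null.
  secondary.emplace(location, result);  // Remember the hit or the miss.
  return result;
}
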
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index e5c27b2..094d8b7 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -96,9 +96,8 @@
OatFileAssistant::~OatFileAssistant() {
// Clean up the lock file.
- if (lock_file_.get() != nullptr) {
- lock_file_->Erase();
- TEMP_FAILURE_RETRY(unlink(lock_file_->GetPath().c_str()));
+ if (flock_.HasFile()) {
+ TEMP_FAILURE_RETRY(unlink(flock_.GetFile()->GetPath().c_str()));
}
}
@@ -121,7 +120,7 @@
bool OatFileAssistant::Lock(std::string* error_msg) {
CHECK(error_msg != nullptr);
- CHECK(lock_file_.get() == nullptr) << "OatFileAssistant::Lock already acquired";
+ CHECK(!flock_.HasFile()) << "OatFileAssistant::Lock already acquired";
if (OatFileName() == nullptr) {
*error_msg = "Failed to determine lock file";
@@ -129,13 +128,7 @@
}
std::string lock_file_name = *OatFileName() + ".flock";
- lock_file_.reset(OS::CreateEmptyFile(lock_file_name.c_str()));
- if (lock_file_.get() == nullptr) {
- *error_msg = "Failed to create lock file " + lock_file_name;
- return false;
- }
-
- if (!flock_.Init(lock_file_.get(), error_msg)) {
+ if (!flock_.Init(lock_file_name.c_str(), error_msg)) {
TEMP_FAILURE_RETRY(unlink(lock_file_name.c_str()));
return false;
}
@@ -230,10 +223,10 @@
dex_files.push_back(std::move(dex_file));
// Load secondary multidex files
- for (int i = 1; ; i++) {
- std::string secondary_dex_location = DexFile::GetMultiDexClassesDexName(i, dex_location);
+ for (size_t i = 1; ; i++) {
+ std::string secondary_dex_location = DexFile::GetMultiDexLocation(i, dex_location);
oat_dex_file = oat_file.GetOatDexFile(secondary_dex_location.c_str(), nullptr, false);
- if (oat_dex_file == NULL) {
+ if (oat_dex_file == nullptr) {
// There are no more secondary dex files to load.
break;
}
@@ -393,22 +386,22 @@
bool OatFileAssistant::GivenOatFileIsOutOfDate(const OatFile& file) {
// Verify the dex checksum.
- // Note: GetOatDexFile will return NULL if the dex checksum doesn't match
+ // Note: GetOatDexFile will return null if the dex checksum doesn't match
// what we provide, which verifies the primary dex checksum for us.
const uint32_t* dex_checksum_pointer = GetRequiredDexChecksum();
const OatFile::OatDexFile* oat_dex_file = file.GetOatDexFile(
dex_location_, dex_checksum_pointer, false);
- if (oat_dex_file == NULL) {
+ if (oat_dex_file == nullptr) {
return true;
}
// Verify the dex checksums for any secondary multidex files
- for (int i = 1; ; i++) {
+ for (size_t i = 1; ; i++) {
std::string secondary_dex_location
- = DexFile::GetMultiDexClassesDexName(i, dex_location_);
+ = DexFile::GetMultiDexLocation(i, dex_location_);
const OatFile::OatDexFile* secondary_oat_dex_file
= file.GetOatDexFile(secondary_dex_location.c_str(), nullptr, false);
- if (secondary_oat_dex_file == NULL) {
+ if (secondary_oat_dex_file == nullptr) {
// There are no more secondary dex files to check.
break;
}
@@ -424,7 +417,7 @@
<< secondary_dex_location
<< ". Expected: " << expected_secondary_checksum
<< ", Actual: " << actual_secondary_checksum;
- return false;
+ return true;
}
} else {
// If we can't get the checksum for the secondary location, we assume
@@ -465,7 +458,7 @@
const ImageInfo* image_info = GetImageInfo();
if (image_info == nullptr) {
- VLOG(oat) << "No image for to check oat relocation against.";
+ VLOG(oat) << "No image to check oat relocation against.";
return false;
}
@@ -697,12 +690,20 @@
return false;
}
+ ClassLinker* linker = runtime->GetClassLinker();
+ CHECK(linker != nullptr) << "ClassLinker is not created yet";
+ const OatFile* primary_oat_file = linker->GetPrimaryOatFile();
+ const bool debuggable = primary_oat_file != nullptr && primary_oat_file->IsDebuggable();
+
std::vector<std::string> argv;
argv.push_back(runtime->GetCompilerExecutable());
argv.push_back("--runtime-arg");
argv.push_back("-classpath");
argv.push_back("--runtime-arg");
argv.push_back(runtime->GetClassPathString());
+ if (debuggable) {
+ argv.push_back("--debuggable");
+ }
runtime->AddCurrentRuntimeFeaturesAsDex2OatArguments(&argv);
if (!runtime->IsVerificationEnabled()) {
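
With the one-line fix above, GivenOatFileIsOutOfDate reports out-of-date as soon as a secondary multidex checksum disagrees, instead of mistakenly returning false. A sketch of that walk, with callbacks standing in for the oat and dex checksum queries and an approximation of the multidex location naming:

#include <cstdint>
#include <functional>
#include <string>

// Approximates DexFile::GetMultiDexLocation: index 1 -> "...:classes2.dex".
std::string MultiDexLocation(size_t i, const std::string& base) {
  return base + ":classes" + std::to_string(i + 1) + ".dex";
}

bool SecondaryDexOutOfDate(
    const std::string& dex_location,
    const std::function<bool(const std::string&, uint32_t*)>& oat_checksum,
    const std::function<bool(const std::string&, uint32_t*)>& dex_checksum) {
  for (size_t i = 1; ; i++) {
    std::string loc = MultiDexLocation(i, dex_location);
    uint32_t recorded = 0;
    if (!oat_checksum(loc, &recorded)) {
      break;  // No more secondary dex files recorded in the oat file.
    }
    uint32_t actual = 0;
    if (dex_checksum(loc, &actual) && recorded != actual) {
      return true;  // A mismatch now means out of date, per the fix above.
    }
  }
  return false;
}
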
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index 9e7c2ef..4c0b0e2 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -85,7 +85,7 @@
// Constructs an OatFileAssistant object to assist the oat file
// corresponding to the given dex location with the target instruction set.
//
- // The dex_location must not be NULL and should remain available and
+ // The dex_location must not be null and should remain available and
// unchanged for the duration of the lifetime of the OatFileAssistant object.
// Typically the dex_location is the absolute path to the original,
// un-optimized dex file.
@@ -152,11 +152,11 @@
// Returns true on success.
//
// If there is a failure, the value of error_msg will be set to a string
- // describing why there was failure. error_msg must not be nullptr.
+ // describing why there was a failure. error_msg must not be null.
bool MakeUpToDate(std::string* error_msg);
// Returns an oat file that can be used for loading dex files.
- // Returns nullptr if no suitable oat file was found.
+ // Returns null if no suitable oat file was found.
//
// After this call, no other methods of the OatFileAssistant should be
// called, because access to the loaded oat file has been taken away from
@@ -244,7 +244,7 @@
// This will fail if dex2oat is not enabled in the current runtime.
//
// If there is a failure, the value of error_msg will be set to a string
- // describing why there was failure. error_msg must not be nullptr.
+ // describing why there was a failure. error_msg must not be null.
bool RelocateOatFile(const std::string* input_file, std::string* error_msg);
// Generate the oat file from the dex file.
@@ -254,7 +254,7 @@
// This will fail if dex2oat is not enabled in the current runtime.
//
// If there is a failure, the value of error_msg will be set to a string
- // describing why there was failure. error_msg must not be nullptr.
+ // describing why there was a failure. error_msg must not be null.
bool GenerateOatFile(std::string* error_msg);
// Executes dex2oat using the current runtime configuration overridden with
@@ -263,7 +263,7 @@
// Returns true on success.
//
// If there is a failure, the value of error_msg will be set to a string
- // describing why there was failure. error_msg must not be nullptr.
+ // describing why there was a failure. error_msg must not be null.
//
// TODO: The OatFileAssistant probably isn't the right place to have this
// function.
@@ -310,12 +310,12 @@
// Gets the dex checksum required for an up-to-date oat file.
// Returns dex_checksum if a required checksum was located. Returns
- // nullptr if the required checksum was not found.
+ // null if the required checksum was not found.
// The caller shouldn't clean up or free the returned pointer.
const uint32_t* GetRequiredDexChecksum();
// Returns the loaded odex file.
- // Loads the file if needed. Returns nullptr if the file failed to load.
+ // Loads the file if needed. Returns null if the file failed to load.
// The caller shouldn't clean up or free the returned pointer.
const OatFile* GetOdexFile();
@@ -324,7 +324,7 @@
void ClearOdexFileCache();
// Returns the loaded oat file.
- // Loads the file if needed. Returns nullptr if the file failed to load.
+ // Loads the file if needed. Returns null if the file failed to load.
// The caller shouldn't clean up or free the returned pointer.
const OatFile* GetOatFile();
@@ -333,19 +333,19 @@
void ClearOatFileCache();
// Returns the loaded image info.
- // Loads the image info if needed. Returns nullptr if the image info failed
+ // Loads the image info if needed. Returns null if the image info failed
// to load.
// The caller shouldn't clean up or free the returned pointer.
const ImageInfo* GetImageInfo();
// Returns the loaded profile.
- // Loads the profile if needed. Returns nullptr if the profile failed
+ // Loads the profile if needed. Returns null if the profile failed
// to load.
// The caller shouldn't clean up or free the returned pointer.
ProfileFile* GetProfile();
// Returns the loaded old profile.
- // Loads the old profile if needed. Returns nullptr if the old profile
+ // Loads the old profile if needed. Returns null if the old profile
// failed to load.
// The caller shouldn't clean up or free the returned pointer.
ProfileFile* GetOldProfile();
@@ -353,11 +353,10 @@
// To implement Lock(), we lock a dummy file where the oat file would go
// (adding ".flock" to the target file name) and retain the lock for the
// remaining lifetime of the OatFileAssistant object.
- std::unique_ptr<File> lock_file_;
ScopedFlock flock_;
// In a properly constructed OatFileAssistant object, dex_location_ should
- // never be nullptr.
+ // never be null.
const char* dex_location_ = nullptr;
// In a properly constructed OatFileAssistant object, isa_ should be either
@@ -365,7 +364,7 @@
const InstructionSet isa_ = kNone;
// The package name, used solely to find the profile file.
- // This may be nullptr in a properly constructed object. In this case,
+ // This may be null in a properly constructed object. In this case,
// profile_load_attempted_ and old_profile_load_attempted_ will be true, and
// profile_load_succeeded_ and old_profile_load_succeeded_ will be false.
const char* package_name_ = nullptr;
@@ -405,9 +404,9 @@
bool cached_oat_file_name_found_;
std::string cached_oat_file_name_;
- // Cached value of the loaded odex file.
+ // Cached value of the loaded oat file.
// Use the GetOatFile method rather than accessing this directly, unless you
- // know the odex file isn't out of date.
+ // know the oat file isn't out of date.
bool oat_file_load_attempted_ = false;
std::unique_ptr<OatFile> cached_oat_file_;
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index 0c942d2..d8e3797 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -29,6 +29,7 @@
#include "class_linker-inl.h"
#include "common_runtime_test.h"
#include "compiler_callbacks.h"
+#include "gc/space/image_space.h"
#include "mem_map.h"
#include "os.h"
#include "scoped_thread_state_change.h"
@@ -66,10 +67,23 @@
<< "Expected stripped dex file to be at: " << GetStrippedDexSrc1();
ASSERT_FALSE(DexFile::GetChecksum(GetStrippedDexSrc1().c_str(), &checksum, &error_msg))
<< "Expected stripped dex file to be stripped: " << GetStrippedDexSrc1();
- ASSERT_TRUE(OS::FileExists(GetMultiDexSrc1().c_str()))
- << "Expected multidex file to be at: " << GetMultiDexSrc1();
ASSERT_TRUE(OS::FileExists(GetDexSrc2().c_str()))
<< "Expected dex file to be at: " << GetDexSrc2();
+
+ // GetMultiDexSrc2 should have the same primary dex checksum as
+ // GetMultiDexSrc1, but a different secondary dex checksum.
+ std::vector<std::unique_ptr<const DexFile>> multi1;
+ ASSERT_TRUE(DexFile::Open(GetMultiDexSrc1().c_str(),
+ GetMultiDexSrc1().c_str(), &error_msg, &multi1)) << error_msg;
+ ASSERT_GT(multi1.size(), 1u);
+
+ std::vector<std::unique_ptr<const DexFile>> multi2;
+ ASSERT_TRUE(DexFile::Open(GetMultiDexSrc2().c_str(),
+ GetMultiDexSrc2().c_str(), &error_msg, &multi2)) << error_msg;
+ ASSERT_GT(multi2.size(), 1u);
+
+ ASSERT_EQ(multi1[0]->GetLocationChecksum(), multi2[0]->GetLocationChecksum());
+ ASSERT_NE(multi1[1]->GetLocationChecksum(), multi2[1]->GetLocationChecksum());
}
virtual void SetUpRuntimeOptions(RuntimeOptions* options) {
@@ -118,7 +132,7 @@
std::string GetImageDirectory() {
if (IsHost()) {
const char* host_dir = getenv("ANDROID_HOST_OUT");
- CHECK(host_dir != NULL);
+ CHECK(host_dir != nullptr);
return std::string(host_dir) + "/framework";
} else {
return std::string("/data/art-test");
@@ -148,6 +162,12 @@
return GetTestDexFileName("MultiDex");
}
+ // Returns the path to a multidex file equivalent to GetMultiDexSrc1, but
+ // with the contents of the secondary dex file changed.
+ std::string GetMultiDexSrc2() {
+ return GetTestDexFileName("MultiDexModifiedSecondary");
+ }
+
std::string GetDexSrc2() {
return GetTestDexFileName("Nested");
}
@@ -343,6 +363,23 @@
EXPECT_EQ(2u, dex_files.size());
}
+// Case: We have a MultiDEX file where the secondary dex file is out of date.
+// Expect: The status is kDex2OatNeeded.
+TEST_F(OatFileAssistantTest, MultiDexSecondaryOutOfDate) {
+ std::string dex_location = GetScratchDir() + "/MultiDexSecondaryOutOfDate.jar";
+
+ // Compile code for GetMultiDexSrc1.
+ Copy(GetMultiDexSrc1(), dex_location);
+ GenerateOatForTest(dex_location.c_str());
+
+ // Now overwrite the dex file with GetMultiDexSrc2 so the secondary checksum
+ // is out of date.
+ Copy(GetMultiDexSrc2(), dex_location);
+
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
+ EXPECT_EQ(OatFileAssistant::kDex2OatNeeded, oat_file_assistant.GetDexOptNeeded());
+}
+
// Case: We have a MultiDEX file and up-to-date OAT file for it with relative
// encoded dex locations.
// Expect: The oat file status is kNoDexOptNeeded.
@@ -610,10 +647,23 @@
// Things aren't relocated, so it should fall back to interpreted.
std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
ASSERT_TRUE(oat_file.get() != nullptr);
+
EXPECT_FALSE(oat_file->IsExecutable());
std::vector<std::unique_ptr<const DexFile>> dex_files;
dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
EXPECT_EQ(1u, dex_files.size());
+
+ // Add some extra checks to help diagnose apparently flaky test failures.
+ Runtime* runtime = Runtime::Current();
+ const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
+ ASSERT_TRUE(image_space != nullptr);
+ const ImageHeader& image_header = image_space->GetImageHeader();
+ const OatHeader& oat_header = oat_file->GetOatHeader();
+ EXPECT_FALSE(oat_file->IsPic());
+ EXPECT_EQ(image_header.GetOatChecksum(), oat_header.GetImageFileLocationOatChecksum());
+ EXPECT_NE(reinterpret_cast<uintptr_t>(image_header.GetOatDataBegin()),
+ oat_header.GetImageFileLocationOatDataBegin());
+ EXPECT_NE(image_header.GetPatchDelta(), oat_header.GetImagePatchDelta());
}
// Case: We have a DEX file and a PIC ODEX file, but no OAT file.
@@ -987,9 +1037,6 @@
// TODO: More Tests:
// * Test class linker falls back to unquickened dex for DexNoOat
// * Test class linker falls back to unquickened dex for MultiDexNoOat
-// * Test multidex files:
-// - Multidex with only classes2.dex out of date should have status
-// kOutOfDate
// * Test using secondary isa
// * Test with profiling info?
// * Test for status of oat while oat is being generated (how?)
diff --git a/runtime/object_callbacks.h b/runtime/object_callbacks.h
index cf81cc5..8e99dbb 100644
--- a/runtime/object_callbacks.h
+++ b/runtime/object_callbacks.h
@@ -41,9 +41,10 @@
typedef mirror::Object* (MarkObjectCallback)(mirror::Object* obj, void* arg) WARN_UNUSED;
typedef void (MarkHeapReferenceCallback)(mirror::HeapReference<mirror::Object>* ref, void* arg);
-typedef void (DelayReferenceReferentCallback)(mirror::Class* klass, mirror::Reference* ref, void* arg);
+typedef void (DelayReferenceReferentCallback)(mirror::Class* klass, mirror::Reference* ref,
+ void* arg);
-// A callback for testing if an object is marked, returns nullptr if not marked, otherwise the new
+// A callback for testing if an object is marked, returns null if not marked, otherwise the new
// address the object (if the object didn't move, returns the object input parameter).
typedef mirror::Object* (IsMarkedCallback)(mirror::Object* object, void* arg) WARN_UNUSED;
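
The IsMarkedCallback contract documented above (null when unmarked, otherwise the possibly-moved address) can be modeled with a forwarding table. A minimal sketch under that assumption, with a placeholder Object type:

#include <unordered_map>

struct Object {};  // Placeholder for mirror::Object.

using ForwardingTable = std::unordered_map<Object*, Object*>;

// Matches the IsMarkedCallback shape: arg carries collector state.
Object* IsMarkedViaForwardingTable(Object* object, void* arg) {
  auto* table = static_cast<ForwardingTable*>(arg);
  auto it = table->find(object);
  return it == table->end() ? nullptr : it->second;  // Null means unmarked.
}
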
diff --git a/runtime/os_linux.cc b/runtime/os_linux.cc
index e4403d7..2282789 100644
--- a/runtime/os_linux.cc
+++ b/runtime/os_linux.cc
@@ -40,10 +40,10 @@
}
File* OS::OpenFileWithFlags(const char* name, int flags) {
- CHECK(name != NULL);
+ CHECK(name != nullptr);
std::unique_ptr<File> file(new File);
if (!file->Open(name, flags, 0666)) {
- return NULL;
+ return nullptr;
}
return file.release();
}
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 620a4bd..0bc834f 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -324,7 +324,7 @@
} else if (option == "vfprintf") {
const void* hook = options[i].second;
if (hook == nullptr) {
- Usage("vfprintf argument was NULL");
+ Usage("vfprintf argument was nullptr");
return false;
}
int (*hook_vfprintf)(FILE *, const char*, va_list) =
@@ -337,7 +337,7 @@
} else if (option == "exit") {
const void* hook = options[i].second;
if (hook == nullptr) {
- Usage("exit argument was NULL");
+ Usage("exit argument was nullptr");
return false;
}
void(*hook_exit)(jint) = reinterpret_cast<void(*)(jint)>(const_cast<void*>(hook));
@@ -348,7 +348,7 @@
} else if (option == "abort") {
const void* hook = options[i].second;
if (hook == nullptr) {
- Usage("abort was NULL\n");
+ Usage("abort was nullptr\n");
return false;
}
void(*hook_abort)() = reinterpret_cast<void(*)()>(const_cast<void*>(hook));
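
The vfprintf/exit/abort options above arrive as opaque void* payloads and are cast back to function-pointer types before use; the parser rejects null hooks with a Usage() error. A self-contained sketch of that plumbing (the hook itself is invented, and the function-pointer/void* round trip is only conditionally supported by the C++ standard, though it is what this code relies on):

#include <cstdio>

static void MyExitHook(int status) {
  std::printf("exit hook called with %d\n", status);
}

int main() {
  // An option value as the runtime would receive it: an opaque pointer.
  const void* hook = reinterpret_cast<const void*>(&MyExitHook);
  if (hook == nullptr) {
    return 1;  // Corresponds to the Usage("... was nullptr") rejection above.
  }
  void (*hook_exit)(int) =
      reinterpret_cast<void (*)(int)>(const_cast<void*>(hook));
  hook_exit(42);
  return 0;
}
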
diff --git a/runtime/parsed_options_test.cc b/runtime/parsed_options_test.cc
index 658b656..a8575de 100644
--- a/runtime/parsed_options_test.cc
+++ b/runtime/parsed_options_test.cc
@@ -33,7 +33,6 @@
void* test_vfprintf = reinterpret_cast<void*>(0xa);
void* test_abort = reinterpret_cast<void*>(0xb);
void* test_exit = reinterpret_cast<void*>(0xc);
- void* null = reinterpret_cast<void*>(NULL);
std::string lib_core(CommonRuntimeTest::GetLibCoreDexFileName());
@@ -42,27 +41,27 @@
boot_class_path += lib_core;
RuntimeOptions options;
- options.push_back(std::make_pair(boot_class_path.c_str(), null));
- options.push_back(std::make_pair("-classpath", null));
- options.push_back(std::make_pair(lib_core.c_str(), null));
- options.push_back(std::make_pair("-cp", null));
- options.push_back(std::make_pair(lib_core.c_str(), null));
- options.push_back(std::make_pair("-Ximage:boot_image", null));
- options.push_back(std::make_pair("-Xcheck:jni", null));
- options.push_back(std::make_pair("-Xms2048", null));
- options.push_back(std::make_pair("-Xmx4k", null));
- options.push_back(std::make_pair("-Xss1m", null));
- options.push_back(std::make_pair("-XX:HeapTargetUtilization=0.75", null));
- options.push_back(std::make_pair("-Dfoo=bar", null));
- options.push_back(std::make_pair("-Dbaz=qux", null));
- options.push_back(std::make_pair("-verbose:gc,class,jni", null));
+ options.push_back(std::make_pair(boot_class_path.c_str(), nullptr));
+ options.push_back(std::make_pair("-classpath", nullptr));
+ options.push_back(std::make_pair(lib_core.c_str(), nullptr));
+ options.push_back(std::make_pair("-cp", nullptr));
+ options.push_back(std::make_pair(lib_core.c_str(), nullptr));
+ options.push_back(std::make_pair("-Ximage:boot_image", nullptr));
+ options.push_back(std::make_pair("-Xcheck:jni", nullptr));
+ options.push_back(std::make_pair("-Xms2048", nullptr));
+ options.push_back(std::make_pair("-Xmx4k", nullptr));
+ options.push_back(std::make_pair("-Xss1m", nullptr));
+ options.push_back(std::make_pair("-XX:HeapTargetUtilization=0.75", nullptr));
+ options.push_back(std::make_pair("-Dfoo=bar", nullptr));
+ options.push_back(std::make_pair("-Dbaz=qux", nullptr));
+ options.push_back(std::make_pair("-verbose:gc,class,jni", nullptr));
options.push_back(std::make_pair("vfprintf", test_vfprintf));
options.push_back(std::make_pair("abort", test_abort));
options.push_back(std::make_pair("exit", test_exit));
RuntimeArgumentMap map;
std::unique_ptr<ParsedOptions> parsed(ParsedOptions::Create(options, false, &map));
- ASSERT_TRUE(parsed.get() != NULL);
+ ASSERT_TRUE(parsed.get() != nullptr);
ASSERT_NE(0u, map.Size());
using Opt = RuntimeArgumentMap;
@@ -104,7 +103,7 @@
RuntimeArgumentMap map;
std::unique_ptr<ParsedOptions> parsed(ParsedOptions::Create(options, false, &map));
- ASSERT_TRUE(parsed.get() != NULL);
+ ASSERT_TRUE(parsed.get() != nullptr);
ASSERT_NE(0u, map.Size());
using Opt = RuntimeArgumentMap;
diff --git a/runtime/primitive.h b/runtime/primitive.h
index 3818487..0ac5f40 100644
--- a/runtime/primitive.h
+++ b/runtime/primitive.h
@@ -132,7 +132,7 @@
return "V";
default:
LOG(FATAL) << "Primitive char conversion on invalid type " << static_cast<int>(type);
- return NULL;
+ return nullptr;
}
}
diff --git a/runtime/profiler.cc b/runtime/profiler.cc
index db372c3..3b0e6c1 100644
--- a/runtime/profiler.cc
+++ b/runtime/profiler.cc
@@ -58,8 +58,10 @@
BoundedStackVisitor(std::vector<std::pair<mirror::ArtMethod*, uint32_t>>* stack,
Thread* thread, uint32_t max_depth)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, NULL), stack_(stack), max_depth_(max_depth), depth_(0) {
- }
+ : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ stack_(stack),
+ max_depth_(max_depth),
+ depth_(0) {}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* m = GetMethod();
@@ -300,7 +302,9 @@
} while (length > 0);
// Truncate the file to the new length.
- ftruncate(fd, full_length);
+ if (ftruncate(fd, full_length) == -1) {
+ LOG(ERROR) << "Failed to truncate profile file " << full_name;
+ }
// Now unlock the file, allowing another process in.
err = flock(fd, LOCK_UN);
@@ -399,7 +403,7 @@
bool BackgroundMethodSamplingProfiler::ProcessMethod(mirror::ArtMethod* method) {
if (method == nullptr) {
profile_table_.NullMethod();
- // Don't record a nullptr method.
+ // Don't record a null method.
return false;
}
@@ -820,7 +824,7 @@
// Bad summary info. It should be total/null/boot.
return false;
}
- // This is the number of hits in all profiled methods (without nullptr or boot methods)
+ // This is the number of hits in all profiled methods (without null or boot methods)
uint32_t total_count = strtoul(summary_info[0].c_str(), nullptr, 10);
// Now read each line until the end of file. Each line consists of 3 fields separated by '/'.
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index 6061f73..93d1f66 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -20,6 +20,8 @@
#include "art_field-inl.h"
#include "class_linker-inl.h"
#include "common_compiler_test.h"
+#include "mirror/field-inl.h"
+#include "mirror/method.h"
#include "scoped_thread_state_change.h"
namespace art {
@@ -53,41 +55,34 @@
mirror::ObjectArray<mirror::ArtMethod>* virtual_methods = interface->GetVirtualMethods();
methods_count += (virtual_methods == nullptr) ? 0 : virtual_methods->GetLength();
}
- jclass javaLangReflectArtMethod =
- soa.AddLocalReference<jclass>(mirror::ArtMethod::GetJavaLangReflectArtMethod());
- jobjectArray proxyClassMethods = soa.Env()->NewObjectArray(methods_count,
- javaLangReflectArtMethod, nullptr);
+ jobjectArray proxyClassMethods = soa.Env()->NewObjectArray(
+ methods_count, soa.AddLocalReference<jclass>(mirror::Method::StaticClass()), nullptr);
soa.Self()->AssertNoPendingException();
- // Fill the method array
- mirror::ArtMethod* equalsMethod = javaLangObject->FindDeclaredVirtualMethod("equals",
- "(Ljava/lang/Object;)Z");
- mirror::ArtMethod* hashCodeMethod = javaLangObject->FindDeclaredVirtualMethod("hashCode",
- "()I");
- mirror::ArtMethod* toStringMethod = javaLangObject->FindDeclaredVirtualMethod("toString",
- "()Ljava/lang/String;");
- CHECK(equalsMethod != nullptr);
- CHECK(hashCodeMethod != nullptr);
- CHECK(toStringMethod != nullptr);
-
jsize array_index = 0;
- // Adds Object methods.
- soa.Env()->SetObjectArrayElement(proxyClassMethods, array_index++,
- soa.AddLocalReference<jobject>(equalsMethod));
- soa.Env()->SetObjectArrayElement(proxyClassMethods, array_index++,
- soa.AddLocalReference<jobject>(hashCodeMethod));
- soa.Env()->SetObjectArrayElement(proxyClassMethods, array_index++,
- soa.AddLocalReference<jobject>(toStringMethod));
-
+ // Fill the method array
+ mirror::ArtMethod* method = javaLangObject->FindDeclaredVirtualMethod(
+ "equals", "(Ljava/lang/Object;)Z");
+ CHECK(method != nullptr);
+ soa.Env()->SetObjectArrayElement(
+ proxyClassMethods, array_index++, soa.AddLocalReference<jobject>(
+ mirror::Method::CreateFromArtMethod(soa.Self(), method)));
+ method = javaLangObject->FindDeclaredVirtualMethod("hashCode", "()I");
+ CHECK(method != nullptr);
+ soa.Env()->SetObjectArrayElement(
+ proxyClassMethods, array_index++, soa.AddLocalReference<jobject>(
+ mirror::Method::CreateFromArtMethod(soa.Self(), method)));
+ method = javaLangObject->FindDeclaredVirtualMethod("toString", "()Ljava/lang/String;");
+ CHECK(method != nullptr);
+ soa.Env()->SetObjectArrayElement(
+ proxyClassMethods, array_index++, soa.AddLocalReference<jobject>(
+ mirror::Method::CreateFromArtMethod(soa.Self(), method)));
// Now adds all interfaces virtual methods.
for (mirror::Class* interface : interfaces) {
- mirror::ObjectArray<mirror::ArtMethod>* virtual_methods = interface->GetVirtualMethods();
- if (virtual_methods != nullptr) {
- for (int32_t mth_index = 0; mth_index < virtual_methods->GetLength(); ++mth_index) {
- mirror::ArtMethod* method = virtual_methods->Get(mth_index);
- soa.Env()->SetObjectArrayElement(proxyClassMethods, array_index++,
- soa.AddLocalReference<jobject>(method));
- }
+ for (int32_t i = 0, count = interface->NumVirtualMethods(); i < count; ++i) {
+ soa.Env()->SetObjectArrayElement(
+ proxyClassMethods, array_index++, soa.AddLocalReference<jobject>(
+ mirror::Method::CreateFromArtMethod(soa.Self(), interface->GetVirtualMethod(i))));
}
}
CHECK_EQ(array_index, methods_count);
@@ -96,10 +91,9 @@
jobjectArray proxyClassThrows = soa.Env()->NewObjectArray(0, javaLangClass, nullptr);
soa.Self()->AssertNoPendingException();
- mirror::Class* proxyClass = class_linker_->CreateProxyClass(soa,
- soa.Env()->NewStringUTF(className),
- proxyClassInterfaces, jclass_loader,
- proxyClassMethods, proxyClassThrows);
+ mirror::Class* proxyClass = class_linker_->CreateProxyClass(
+ soa, soa.Env()->NewStringUTF(className), proxyClassInterfaces, jclass_loader,
+ proxyClassMethods, proxyClassThrows);
soa.Self()->AssertNoPendingException();
return proxyClass;
}
@@ -198,4 +192,53 @@
EXPECT_FALSE(field->IsPrimitiveType());
}
+// Creates two proxy classes and checks the art/mirror fields of their static fields.
+TEST_F(ProxyTest, CheckArtMirrorFieldsOfProxyStaticFields) {
+ ScopedObjectAccess soa(Thread::Current());
+ jobject jclass_loader = LoadDex("Interfaces");
+ StackHandleScope<7> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+
+ Handle<mirror::Class> proxyClass0;
+ Handle<mirror::Class> proxyClass1;
+ {
+ std::vector<mirror::Class*> interfaces;
+ proxyClass0 = hs.NewHandle(GenerateProxyClass(soa, jclass_loader, "$Proxy0", interfaces));
+ proxyClass1 = hs.NewHandle(GenerateProxyClass(soa, jclass_loader, "$Proxy1", interfaces));
+ }
+
+ ASSERT_TRUE(proxyClass0.Get() != nullptr);
+ ASSERT_TRUE(proxyClass0->IsProxyClass());
+ ASSERT_TRUE(proxyClass0->IsInitialized());
+ ASSERT_TRUE(proxyClass1.Get() != nullptr);
+ ASSERT_TRUE(proxyClass1->IsProxyClass());
+ ASSERT_TRUE(proxyClass1->IsInitialized());
+
+ ArtField* static_fields0 = proxyClass0->GetSFields();
+ ASSERT_TRUE(static_fields0 != nullptr);
+ ASSERT_EQ(2u, proxyClass0->NumStaticFields());
+ ArtField* static_fields1 = proxyClass1->GetSFields();
+ ASSERT_TRUE(static_fields1 != nullptr);
+ ASSERT_EQ(2u, proxyClass1->NumStaticFields());
+
+ EXPECT_EQ(static_fields0[0].GetDeclaringClass(), proxyClass0.Get());
+ EXPECT_EQ(static_fields0[1].GetDeclaringClass(), proxyClass0.Get());
+ EXPECT_EQ(static_fields1[0].GetDeclaringClass(), proxyClass1.Get());
+ EXPECT_EQ(static_fields1[1].GetDeclaringClass(), proxyClass1.Get());
+
+ Handle<mirror::Field> field00 =
+ hs.NewHandle(mirror::Field::CreateFromArtField(soa.Self(), &static_fields0[0], true));
+ Handle<mirror::Field> field01 =
+ hs.NewHandle(mirror::Field::CreateFromArtField(soa.Self(), &static_fields0[1], true));
+ Handle<mirror::Field> field10 =
+ hs.NewHandle(mirror::Field::CreateFromArtField(soa.Self(), &static_fields1[0], true));
+ Handle<mirror::Field> field11 =
+ hs.NewHandle(mirror::Field::CreateFromArtField(soa.Self(), &static_fields1[1], true));
+ EXPECT_EQ(field00->GetArtField(), &static_fields0[0]);
+ EXPECT_EQ(field01->GetArtField(), &static_fields0[1]);
+ EXPECT_EQ(field10->GetArtField(), &static_fields1[0]);
+ EXPECT_EQ(field11->GetArtField(), &static_fields1[1]);
+}
+
} // namespace art
diff --git a/runtime/quick/inline_method_analyser.cc b/runtime/quick/inline_method_analyser.cc
index 9cf4b16..1c404ff 100644
--- a/runtime/quick/inline_method_analyser.cc
+++ b/runtime/quick/inline_method_analyser.cc
@@ -134,7 +134,10 @@
bool InlineMethodAnalyser::IsSyntheticAccessor(MethodReference ref) {
const DexFile::MethodId& method_id = ref.dex_file->GetMethodId(ref.dex_method_index);
const char* method_name = ref.dex_file->GetMethodName(method_id);
- return strncmp(method_name, "access$", strlen("access$")) == 0;
+ // javac names synthetic accessors "access$nnn",
+ // jack names them "-getN", "-putN", "-wrapN".
+ return strncmp(method_name, "access$", strlen("access$")) == 0 ||
+ strncmp(method_name, "-", strlen("-")) == 0;
}
bool InlineMethodAnalyser::AnalyseReturnMethod(const DexFile::CodeItem* code_item,
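
A short sketch of the widened predicate above, with sample names shown in comments (the names are illustrative, not taken from any dex file):

#include <cstring>

bool IsSyntheticAccessorName(const char* method_name) {
  // javac: "access$000", "access$100", ...; jack: "-get0", "-put0", "-wrap0".
  return strncmp(method_name, "access$", strlen("access$")) == 0 ||
         method_name[0] == '-';
}

// IsSyntheticAccessorName("access$000") and IsSyntheticAccessorName("-get0")
// are true; IsSyntheticAccessorName("toString") is false.
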
diff --git a/runtime/quick/inline_method_analyser.h b/runtime/quick/inline_method_analyser.h
index 3463025..0d39e22 100644
--- a/runtime/quick/inline_method_analyser.h
+++ b/runtime/quick/inline_method_analyser.h
@@ -56,8 +56,12 @@
kIntrinsicReferenceGetReferent,
kIntrinsicCharAt,
kIntrinsicCompareTo,
+ kIntrinsicGetCharsNoCheck,
kIntrinsicIsEmptyOrLength,
kIntrinsicIndexOf,
+ kIntrinsicNewStringFromBytes,
+ kIntrinsicNewStringFromChars,
+ kIntrinsicNewStringFromString,
kIntrinsicCurrentThread,
kIntrinsicPeek,
kIntrinsicPoke,
@@ -71,6 +75,7 @@
kInlineOpNonWideConst,
kInlineOpIGet,
kInlineOpIPut,
+ kInlineStringInit,
};
std::ostream& operator<<(std::ostream& os, const InlineMethodOpcode& rhs);
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 2432603..730759a 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -46,7 +46,9 @@
CatchBlockStackVisitor(Thread* self, Context* context, Handle<mirror::Throwable>* exception,
QuickExceptionHandler* exception_handler)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(self, context), self_(self), exception_(exception),
+ : StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ self_(self),
+ exception_(exception),
exception_handler_(exception_handler) {
}
@@ -160,7 +162,9 @@
public:
DeoptimizeStackVisitor(Thread* self, Context* context, QuickExceptionHandler* exception_handler)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(self, context), self_(self), exception_handler_(exception_handler),
+ : StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ self_(self),
+ exception_handler_(exception_handler),
prev_shadow_frame_(nullptr) {
CHECK(!self_->HasDeoptimizationShadowFrame());
}
@@ -202,7 +206,8 @@
h_method, m->GetAccessFlags(), true, true, true, true);
bool verifier_success = verifier.Verify();
CHECK(verifier_success) << PrettyMethod(h_method.Get());
- ShadowFrame* new_frame = ShadowFrame::Create(num_regs, nullptr, h_method.Get(), dex_pc);
+ ShadowFrame* new_frame = ShadowFrame::CreateDeoptimizedFrame(
+ num_regs, nullptr, h_method.Get(), dex_pc);
self_->SetShadowFrameUnderConstruction(new_frame);
const std::vector<int32_t> kinds(verifier.DescribeVRegs(dex_pc));
@@ -222,7 +227,10 @@
break;
case kReferenceVReg: {
uint32_t value = 0;
- if (GetVReg(h_method.Get(), reg, kind, &value)) {
+ // Check IsReferenceVReg in case the compiled GC map doesn't agree with the verifier.
+ // We don't want to copy a stale reference into the shadow frame as a reference.
+ // b/20736048
+ if (GetVReg(h_method.Get(), reg, kind, &value) && IsReferenceVReg(h_method.Get(), reg)) {
new_frame->SetVRegReference(reg, reinterpret_cast<mirror::Object*>(value));
} else {
new_frame->SetVReg(reg, kDeadValue);
@@ -334,7 +342,7 @@
public:
InstrumentationStackVisitor(Thread* self, size_t frame_depth)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(self, nullptr),
+ : StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
frame_depth_(frame_depth),
instrumentation_frames_to_pop_(0) {
CHECK_NE(frame_depth_, kInvalidFrameDepth);
@@ -345,7 +353,12 @@
if (current_frame_depth < frame_depth_) {
CHECK(GetMethod() != nullptr);
if (UNLIKELY(reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) == GetReturnPc())) {
- ++instrumentation_frames_to_pop_;
+ if (!IsInInlinedFrame()) {
+ // We do not count inlined frames, because we do not instrument them. The reason we
+ // include them in the stack walking is the check against `frame_depth_`, which is
+ // given to us by a visitor that visits inlined frames.
+ ++instrumentation_frames_to_pop_;
+ }
}
return true;
} else {
diff --git a/runtime/read_barrier_c.h b/runtime/read_barrier_c.h
index a2c4c36..88bda3a 100644
--- a/runtime/read_barrier_c.h
+++ b/runtime/read_barrier_c.h
@@ -26,9 +26,9 @@
// table-lookup read barriers.
#ifdef ART_USE_READ_BARRIER
-// #define USE_BAKER_READ_BARRIER
+#define USE_BAKER_READ_BARRIER
// #define USE_BROOKS_READ_BARRIER
-#define USE_TABLE_LOOKUP_READ_BARRIER
+// #define USE_TABLE_LOOKUP_READ_BARRIER
#endif
#if defined(USE_BAKER_READ_BARRIER) || defined(USE_BROOKS_READ_BARRIER)
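
The three flavors above are mutually exclusive choices under ART_USE_READ_BARRIER; this commit flips the default from table-lookup to Baker. A guard along these lines (not present in the file, shown only as a sketch) would enforce the exclusivity at compile time:

#if defined(USE_BAKER_READ_BARRIER) + defined(USE_BROOKS_READ_BARRIER) + \
    defined(USE_TABLE_LOOKUP_READ_BARRIER) > 1
#error "At most one read barrier implementation may be selected."
#endif
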
diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc
index beba64f..a31d8ac 100644
--- a/runtime/reference_table.cc
+++ b/runtime/reference_table.cc
@@ -40,7 +40,7 @@
}
void ReferenceTable::Add(mirror::Object* obj) {
- DCHECK(obj != NULL);
+ DCHECK(obj != nullptr);
VerifyObject(obj);
if (entries_.size() >= max_size_) {
LOG(FATAL) << "ReferenceTable '" << name_ << "' "
@@ -79,8 +79,8 @@
static void DumpSummaryLine(std::ostream& os, mirror::Object* obj, size_t element_count,
int identical, int equiv)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (obj == NULL) {
- os << " NULL reference (count=" << equiv << ")\n";
+ if (obj == nullptr) {
+ os << " null reference (count=" << equiv << ")\n";
return;
}
if (Runtime::Current()->IsClearedJniWeakGlobal(obj)) {
diff --git a/runtime/reference_table_test.cc b/runtime/reference_table_test.cc
index db98e1f..4ffebf2 100644
--- a/runtime/reference_table_test.cc
+++ b/runtime/reference_table_test.cc
@@ -40,8 +40,8 @@
EXPECT_EQ(0U, rt.Size());
}
- // Check removal of all NULLs in a empty table is a no-op.
- rt.Remove(NULL);
+ // Check that removal of all nulls in an empty table is a no-op.
+ rt.Remove(nullptr);
EXPECT_EQ(0U, rt.Size());
// Check removal of all o1 in a empty table is a no-op.
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 3e1315c..d321d27 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -21,7 +21,9 @@
#include "common_throws.h"
#include "dex_file-inl.h"
#include "entrypoints/entrypoint_utils.h"
+#include "indirect_reference_table-inl.h"
#include "jni_internal.h"
+#include "mirror/abstract_method.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object_array-inl.h"
@@ -448,6 +450,11 @@
}
mirror::ArtMethod* method = soa.DecodeMethod(mid);
+ bool is_string_init = method->GetDeclaringClass()->IsStringClass() && method->IsConstructor();
+ if (is_string_init) {
+ // Replace calls to String.<init> with equivalent StringFactory call.
+ method = soa.DecodeMethod(WellKnownClasses::StringInitToStringFactoryMethodID(mid));
+ }
mirror::Object* receiver = method->IsStatic() ? nullptr : soa.Decode<mirror::Object*>(obj);
uint32_t shorty_len = 0;
const char* shorty = method->GetShorty(&shorty_len);
@@ -455,11 +462,15 @@
ArgArray arg_array(shorty, shorty_len);
arg_array.BuildArgArrayFromVarArgs(soa, receiver, args);
InvokeWithArgArray(soa, method, &arg_array, &result, shorty);
+ if (is_string_init) {
+ // For string init, remap original receiver to StringFactory result.
+ soa.Self()->GetJniEnv()->locals.Update(obj, result.GetL());
+ }
return result;
}
-JValue InvokeWithJValues(const ScopedObjectAccessAlreadyRunnable& soa, mirror::Object* receiver,
- jmethodID mid, jvalue* args) {
+JValue InvokeWithJValues(const ScopedObjectAccessAlreadyRunnable& soa, jobject obj, jmethodID mid,
+ jvalue* args) {
// We want to make sure that the stack is not within a small distance from the
// protected region in case we are calling into a leaf function whose stack
// check has been elided.
@@ -469,17 +480,27 @@
}
mirror::ArtMethod* method = soa.DecodeMethod(mid);
+ bool is_string_init = method->GetDeclaringClass()->IsStringClass() && method->IsConstructor();
+ if (is_string_init) {
+ // Replace calls to String.<init> with equivalent StringFactory call.
+ method = soa.DecodeMethod(WellKnownClasses::StringInitToStringFactoryMethodID(mid));
+ }
+ mirror::Object* receiver = method->IsStatic() ? nullptr : soa.Decode<mirror::Object*>(obj);
uint32_t shorty_len = 0;
const char* shorty = method->GetShorty(&shorty_len);
JValue result;
ArgArray arg_array(shorty, shorty_len);
arg_array.BuildArgArrayFromJValues(soa, receiver, args);
InvokeWithArgArray(soa, method, &arg_array, &result, shorty);
+ if (is_string_init) {
+ // For string init, remap original receiver to StringFactory result.
+ soa.Self()->GetJniEnv()->locals.Update(obj, result.GetL());
+ }
return result;
}
JValue InvokeVirtualOrInterfaceWithJValues(const ScopedObjectAccessAlreadyRunnable& soa,
- mirror::Object* receiver, jmethodID mid, jvalue* args) {
+ jobject obj, jmethodID mid, jvalue* args) {
// We want to make sure that the stack is not within a small distance from the
// protected region in case we are calling into a leaf function whose stack
// check has been elided.
@@ -488,13 +509,24 @@
return JValue();
}
+ mirror::Object* receiver = soa.Decode<mirror::Object*>(obj);
mirror::ArtMethod* method = FindVirtualMethod(receiver, soa.DecodeMethod(mid));
+ bool is_string_init = method->GetDeclaringClass()->IsStringClass() && method->IsConstructor();
+ if (is_string_init) {
+ // Replace calls to String.<init> with equivalent StringFactory call.
+ method = soa.DecodeMethod(WellKnownClasses::StringInitToStringFactoryMethodID(mid));
+ receiver = nullptr;
+ }
uint32_t shorty_len = 0;
const char* shorty = method->GetShorty(&shorty_len);
JValue result;
ArgArray arg_array(shorty, shorty_len);
arg_array.BuildArgArrayFromJValues(soa, receiver, args);
InvokeWithArgArray(soa, method, &arg_array, &result, shorty);
+ if (is_string_init) {
+ // For string init, remap original receiver to StringFactory result.
+ soa.Self()->GetJniEnv()->locals.Update(obj, result.GetL());
+ }
return result;
}
@@ -510,34 +542,27 @@
mirror::Object* receiver = soa.Decode<mirror::Object*>(obj);
mirror::ArtMethod* method = FindVirtualMethod(receiver, soa.DecodeMethod(mid));
+ bool is_string_init = method->GetDeclaringClass()->IsStringClass() && method->IsConstructor();
+ if (is_string_init) {
+ // Replace calls to String.<init> with equivalent StringFactory call.
+ method = soa.DecodeMethod(WellKnownClasses::StringInitToStringFactoryMethodID(mid));
+ receiver = nullptr;
+ }
uint32_t shorty_len = 0;
const char* shorty = method->GetShorty(&shorty_len);
JValue result;
ArgArray arg_array(shorty, shorty_len);
arg_array.BuildArgArrayFromVarArgs(soa, receiver, args);
InvokeWithArgArray(soa, method, &arg_array, &result, shorty);
+ if (is_string_init) {
+ // For string init, remap original receiver to StringFactory result.
+ soa.Self()->GetJniEnv()->locals.Update(obj, result.GetL());
+ }
return result;
}
-void InvokeWithShadowFrame(Thread* self, ShadowFrame* shadow_frame, uint16_t arg_offset,
- JValue* result) {
- // We want to make sure that the stack is not within a small distance from the
- // protected region in case we are calling into a leaf function whose stack
- // check has been elided.
- if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEnd())) {
- ThrowStackOverflowError(self);
- return;
- }
- uint32_t shorty_len;
- const char* shorty = shadow_frame->GetMethod()->GetShorty(&shorty_len);
- ArgArray arg_array(shorty, shorty_len);
- arg_array.BuildArgArrayFromFrame(shadow_frame, arg_offset);
- shadow_frame->GetMethod()->Invoke(self, arg_array.GetArray(), arg_array.GetNumBytes(), result,
- shorty);
-}
-
jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject javaMethod,
- jobject javaReceiver, jobject javaArgs, bool accessible) {
+ jobject javaReceiver, jobject javaArgs, size_t num_frames) {
// We want to make sure that the stack is not within a small distance from the
// protected region in case we are calling into a leaf function whose stack
// check has been elided.
@@ -547,7 +572,9 @@
return nullptr;
}
- mirror::ArtMethod* m = mirror::ArtMethod::FromReflectedMethod(soa, javaMethod);
+ auto* abstract_method = soa.Decode<mirror::AbstractMethod*>(javaMethod);
+ const bool accessible = abstract_method->IsAccessible();
+ mirror::ArtMethod* m = abstract_method->GetArtMethod();
mirror::Class* declaring_class = m->GetDeclaringClass();
if (UNLIKELY(!declaring_class->IsInitialized())) {
@@ -561,19 +588,25 @@
mirror::Object* receiver = nullptr;
if (!m->IsStatic()) {
- // Check that the receiver is non-null and an instance of the field's declaring class.
- receiver = soa.Decode<mirror::Object*>(javaReceiver);
- if (!VerifyObjectIsClass(receiver, declaring_class)) {
- return nullptr;
- }
+ // Replace calls to String.<init> with equivalent StringFactory call.
+ if (declaring_class->IsStringClass() && m->IsConstructor()) {
+ jmethodID mid = soa.EncodeMethod(m);
+ m = soa.DecodeMethod(WellKnownClasses::StringInitToStringFactoryMethodID(mid));
+ CHECK(javaReceiver == nullptr);
+ } else {
+ // Check that the receiver is non-null and an instance of the field's declaring class.
+ receiver = soa.Decode<mirror::Object*>(javaReceiver);
+ if (!VerifyObjectIsClass(receiver, declaring_class)) {
+ return nullptr;
+ }
- // Find the actual implementation of the virtual method.
- m = receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(m);
+ // Find the actual implementation of the virtual method.
+ m = receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(m);
+ }
}
// Get our arrays of arguments and their types, and check they're the same size.
- mirror::ObjectArray<mirror::Object>* objects =
- soa.Decode<mirror::ObjectArray<mirror::Object>*>(javaArgs);
+ auto* objects = soa.Decode<mirror::ObjectArray<mirror::Object>*>(javaArgs);
const DexFile::TypeList* classes = m->GetParameterTypeList();
uint32_t classes_size = (classes == nullptr) ? 0 : classes->Size();
uint32_t arg_count = (objects != nullptr) ? objects->GetLength() : 0;
@@ -586,7 +619,7 @@
// If method is not set to be accessible, verify it can be accessed by the caller.
mirror::Class* calling_class = nullptr;
if (!accessible && !VerifyAccess(soa.Self(), receiver, declaring_class, m->GetAccessFlags(),
- &calling_class, 2)) {
+ &calling_class, num_frames)) {
ThrowIllegalAccessException(
StringPrintf("Class %s cannot access %s method %s of class %s",
calling_class == nullptr ? "null" : PrettyClass(calling_class).c_str(),
@@ -613,11 +646,21 @@
// Wrap any exception with "Ljava/lang/reflect/InvocationTargetException;" and return early.
if (soa.Self()->IsExceptionPending()) {
+ // If we get another exception when we are trying to wrap, then just use that instead.
jthrowable th = soa.Env()->ExceptionOccurred();
- soa.Env()->ExceptionClear();
+ soa.Self()->ClearException();
jclass exception_class = soa.Env()->FindClass("java/lang/reflect/InvocationTargetException");
+ if (exception_class == nullptr) {
+ soa.Self()->AssertPendingOOMException();
+ return nullptr;
+ }
jmethodID mid = soa.Env()->GetMethodID(exception_class, "<init>", "(Ljava/lang/Throwable;)V");
+ CHECK(mid != nullptr);
jobject exception_instance = soa.Env()->NewObject(exception_class, mid, th);
+ if (exception_instance == nullptr) {
+ soa.Self()->AssertPendingOOMException();
+ return nullptr;
+ }
soa.Env()->Throw(reinterpret_cast<jthrowable>(exception_instance));
return nullptr;
}
@@ -787,40 +830,48 @@
return UnboxPrimitive(o, dst_class, f, unboxed_value);
}
-bool UnboxPrimitiveForResult(mirror::Object* o,
- mirror::Class* dst_class, JValue* unboxed_value) {
+bool UnboxPrimitiveForResult(mirror::Object* o, mirror::Class* dst_class, JValue* unboxed_value) {
return UnboxPrimitive(o, dst_class, nullptr, unboxed_value);
}
+mirror::Class* GetCallingClass(Thread* self, size_t num_frames) {
+ NthCallerVisitor visitor(self, num_frames);
+ visitor.WalkStack();
+ return visitor.caller != nullptr ? visitor.caller->GetDeclaringClass() : nullptr;
+}
+
bool VerifyAccess(Thread* self, mirror::Object* obj, mirror::Class* declaring_class,
uint32_t access_flags, mirror::Class** calling_class, size_t num_frames) {
if ((access_flags & kAccPublic) != 0) {
return true;
}
- NthCallerVisitor visitor(self, num_frames);
- visitor.WalkStack();
- if (UNLIKELY(visitor.caller == nullptr)) {
+ auto* klass = GetCallingClass(self, num_frames);
+ if (UNLIKELY(klass == nullptr)) {
// The caller is an attached native thread.
return false;
}
- mirror::Class* caller_class = visitor.caller->GetDeclaringClass();
- if (caller_class == declaring_class) {
+ *calling_class = klass;
+ return VerifyAccess(self, obj, declaring_class, access_flags, klass);
+}
+
+bool VerifyAccess(Thread* self, mirror::Object* obj, mirror::Class* declaring_class,
+ uint32_t access_flags, mirror::Class* calling_class) {
+ if (calling_class == declaring_class) {
return true;
}
ScopedAssertNoThreadSuspension sants(self, "verify-access");
- *calling_class = caller_class;
if ((access_flags & kAccPrivate) != 0) {
return false;
}
if ((access_flags & kAccProtected) != 0) {
- if (obj != nullptr && !obj->InstanceOf(caller_class) &&
- !declaring_class->IsInSamePackage(caller_class)) {
+ if (obj != nullptr && !obj->InstanceOf(calling_class) &&
+ !declaring_class->IsInSamePackage(calling_class)) {
return false;
- } else if (declaring_class->IsAssignableFrom(caller_class)) {
+ } else if (declaring_class->IsAssignableFrom(calling_class)) {
return true;
}
}
- return declaring_class->IsInSamePackage(caller_class);
+ return declaring_class->IsInSamePackage(calling_class);
}
void InvalidReceiverError(mirror::Object* o, mirror::Class* c) {
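
The recurring pattern in this file (detect a call to String.<init>, reroute it to the matching StringFactory method, then rewrite the caller's reference so it sees the factory result) can be modeled with plain structs. A toy sketch, with invented names; in ART the superseded receiver is simply left for the GC rather than managed by hand:

#include <string>

struct Obj {
  std::string value;
};

// Stands in for the StringFactory method resolved via
// StringInitToStringFactoryMethodID above.
Obj* StringFactoryFromChars(const char* chars) {
  return new Obj{chars};
}

void InvokeConstructorLike(Obj** receiver_slot, const char* chars,
                           bool is_string_init) {
  if (is_string_init) {
    // The factory result replaces the original receiver, mirroring the
    // locals.Update(obj, result.GetL()) calls above. The old object is
    // intentionally dropped here (a GC would reclaim it in ART).
    *receiver_slot = StringFactoryFromChars(chars);
  } else {
    (*receiver_slot)->value = chars;  // Ordinary in-place initialization.
  }
}
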
diff --git a/runtime/reflection.h b/runtime/reflection.h
index c2d406a..6b5ffc7 100644
--- a/runtime/reflection.h
+++ b/runtime/reflection.h
@@ -49,24 +49,21 @@
va_list args)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-JValue InvokeWithJValues(const ScopedObjectAccessAlreadyRunnable& soa, mirror::Object* receiver,
- jmethodID mid, jvalue* args)
+JValue InvokeWithJValues(const ScopedObjectAccessAlreadyRunnable& soa, jobject obj, jmethodID mid,
+ jvalue* args)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
JValue InvokeVirtualOrInterfaceWithJValues(const ScopedObjectAccessAlreadyRunnable& soa,
- mirror::Object* receiver, jmethodID mid, jvalue* args)
+ jobject obj, jmethodID mid, jvalue* args)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
JValue InvokeVirtualOrInterfaceWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa,
jobject obj, jmethodID mid, va_list args)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-void InvokeWithShadowFrame(Thread* self, ShadowFrame* shadow_frame, uint16_t arg_offset,
- JValue* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
+// num_frames is the number of frames to walk up the stack for the access check.
jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject method, jobject receiver,
- jobject args, bool accessible)
+ jobject args, size_t num_frames = 1)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
ALWAYS_INLINE bool VerifyObjectIsClass(mirror::Object* o, mirror::Class* c)
@@ -76,6 +73,15 @@
uint32_t access_flags, mirror::Class** calling_class, size_t num_frames)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+// This version takes a known calling class.
+bool VerifyAccess(Thread* self, mirror::Object* obj, mirror::Class* declaring_class,
+ uint32_t access_flags, mirror::Class* calling_class)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+// Gets the calling class by using a stack visitor; may return null for unattached native threads.
+mirror::Class* GetCallingClass(Thread* self, size_t num_frames)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
void InvalidReceiverError(mirror::Object* o, mirror::Class* c)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
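
The VerifyAccess split above separates caller discovery (GetCallingClass) from the access rules themselves. A sketch of just the rules, using a flattened class model with a package string and a superclass chain; the flag values are illustrative, not ART's kAcc* constants:

#include <cstdint>
#include <string>

constexpr uint32_t kPublic = 1, kPrivate = 2, kProtected = 4;

struct Klass {
  std::string package;
  const Klass* super = nullptr;
  bool IsSubclassOf(const Klass* other) const {
    for (const Klass* k = this; k != nullptr; k = k->super) {
      if (k == other) return true;
    }
    return false;
  }
};

// receiver_ok models "obj is null or an instance of the calling class".
bool CheckAccess(const Klass* declaring, const Klass* calling,
                 uint32_t access_flags, bool receiver_ok) {
  if ((access_flags & kPublic) != 0) return true;
  if (calling == declaring) return true;
  if ((access_flags & kPrivate) != 0) return false;
  if ((access_flags & kProtected) != 0) {
    if (!receiver_ok && declaring->package != calling->package) {
      return false;
    }
    if (calling->IsSubclassOf(declaring)) return true;
  }
  // Package-private (and unresolved protected) access: same package only.
  return declaring->package == calling->package;
}
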
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index 7aefdaa..36e444a 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -37,35 +37,35 @@
// Turn on -verbose:jni for the JNI tests.
// gLogVerbosity.jni = true;
- vm_->AttachCurrentThread(&env_, NULL);
+ vm_->AttachCurrentThread(&env_, nullptr);
ScopedLocalRef<jclass> aioobe(env_,
env_->FindClass("java/lang/ArrayIndexOutOfBoundsException"));
- CHECK(aioobe.get() != NULL);
+ CHECK(aioobe.get() != nullptr);
aioobe_ = reinterpret_cast<jclass>(env_->NewGlobalRef(aioobe.get()));
ScopedLocalRef<jclass> ase(env_, env_->FindClass("java/lang/ArrayStoreException"));
- CHECK(ase.get() != NULL);
+ CHECK(ase.get() != nullptr);
ase_ = reinterpret_cast<jclass>(env_->NewGlobalRef(ase.get()));
ScopedLocalRef<jclass> sioobe(env_,
env_->FindClass("java/lang/StringIndexOutOfBoundsException"));
- CHECK(sioobe.get() != NULL);
+ CHECK(sioobe.get() != nullptr);
sioobe_ = reinterpret_cast<jclass>(env_->NewGlobalRef(sioobe.get()));
}
void CleanUpJniEnv() {
- if (aioobe_ != NULL) {
+ if (aioobe_ != nullptr) {
env_->DeleteGlobalRef(aioobe_);
- aioobe_ = NULL;
+ aioobe_ = nullptr;
}
- if (ase_ != NULL) {
+ if (ase_ != nullptr) {
env_->DeleteGlobalRef(ase_);
- ase_ = NULL;
+ ase_ = nullptr;
}
- if (sioobe_ != NULL) {
+ if (sioobe_ != nullptr) {
env_->DeleteGlobalRef(sioobe_);
- sioobe_ = NULL;
+ sioobe_ = nullptr;
}
}
@@ -105,7 +105,7 @@
mirror::Class* c = class_linker_->FindClass(self, DotToDescriptor(class_name).c_str(),
class_loader);
- CHECK(c != NULL);
+ CHECK(c != nullptr);
*method = is_static ? c->FindDirectMethod(method_name, method_signature)
: c->FindVirtualMethod(method_name, method_signature);
@@ -133,7 +133,8 @@
mirror::ArtMethod* method;
mirror::Object* receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "nop", "()V");
- InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), nullptr);
+ ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
+ InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), nullptr);
}
void InvokeIdentityByteMethod(bool is_static) {
@@ -141,22 +142,23 @@
mirror::ArtMethod* method;
mirror::Object* receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "identity", "(B)B");
+ ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[1];
args[0].b = 0;
- JValue result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_EQ(0, result.GetB());
args[0].b = -1;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_EQ(-1, result.GetB());
args[0].b = SCHAR_MAX;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_EQ(SCHAR_MAX, result.GetB());
args[0].b = (SCHAR_MIN << 24) >> 24;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_EQ(SCHAR_MIN, result.GetB());
}
@@ -165,22 +167,23 @@
mirror::ArtMethod* method;
mirror::Object* receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "identity", "(I)I");
+ ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[1];
args[0].i = 0;
- JValue result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_EQ(0, result.GetI());
args[0].i = -1;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_EQ(-1, result.GetI());
args[0].i = INT_MAX;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_EQ(INT_MAX, result.GetI());
args[0].i = INT_MIN;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_EQ(INT_MIN, result.GetI());
}
@@ -189,22 +192,23 @@
mirror::ArtMethod* method;
mirror::Object* receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "identity", "(D)D");
+ ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[1];
args[0].d = 0.0;
- JValue result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_DOUBLE_EQ(0.0, result.GetD());
args[0].d = -1.0;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_DOUBLE_EQ(-1.0, result.GetD());
args[0].d = DBL_MAX;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_DOUBLE_EQ(DBL_MAX, result.GetD());
args[0].d = DBL_MIN;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_DOUBLE_EQ(DBL_MIN, result.GetD());
}
@@ -213,26 +217,27 @@
mirror::ArtMethod* method;
mirror::Object* receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(II)I");
+ ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[2];
args[0].i = 1;
args[1].i = 2;
- JValue result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_EQ(3, result.GetI());
args[0].i = -2;
args[1].i = 5;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_EQ(3, result.GetI());
args[0].i = INT_MAX;
args[1].i = INT_MIN;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_EQ(-1, result.GetI());
args[0].i = INT_MAX;
args[1].i = INT_MAX;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_EQ(-2, result.GetI());
}
@@ -241,36 +246,37 @@
mirror::ArtMethod* method;
mirror::Object* receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(III)I");
+ ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[3];
args[0].i = 0;
args[1].i = 0;
args[2].i = 0;
- JValue result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_EQ(0, result.GetI());
args[0].i = 1;
args[1].i = 2;
args[2].i = 3;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_EQ(6, result.GetI());
args[0].i = -1;
args[1].i = 2;
args[2].i = -3;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_EQ(-2, result.GetI());
args[0].i = INT_MAX;
args[1].i = INT_MIN;
args[2].i = INT_MAX;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_EQ(2147483646, result.GetI());
args[0].i = INT_MAX;
args[1].i = INT_MAX;
args[2].i = INT_MAX;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_EQ(2147483645, result.GetI());
}
@@ -279,41 +285,42 @@
mirror::ArtMethod* method;
mirror::Object* receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(IIII)I");
+ ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[4];
args[0].i = 0;
args[1].i = 0;
args[2].i = 0;
args[3].i = 0;
- JValue result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_EQ(0, result.GetI());
args[0].i = 1;
args[1].i = 2;
args[2].i = 3;
args[3].i = 4;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_EQ(10, result.GetI());
args[0].i = -1;
args[1].i = 2;
args[2].i = -3;
args[3].i = 4;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_EQ(2, result.GetI());
args[0].i = INT_MAX;
args[1].i = INT_MIN;
args[2].i = INT_MAX;
args[3].i = INT_MIN;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_EQ(-2, result.GetI());
args[0].i = INT_MAX;
args[1].i = INT_MAX;
args[2].i = INT_MAX;
args[3].i = INT_MAX;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_EQ(-4, result.GetI());
}
@@ -322,6 +329,7 @@
mirror::ArtMethod* method;
mirror::Object* receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(IIIII)I");
+ ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[5];
args[0].i = 0;
@@ -329,7 +337,7 @@
args[2].i = 0;
args[3].i = 0;
args[4].i = 0;
- JValue result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_EQ(0, result.GetI());
args[0].i = 1;
@@ -337,7 +345,7 @@
args[2].i = 3;
args[3].i = 4;
args[4].i = 5;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_EQ(15, result.GetI());
args[0].i = -1;
@@ -345,7 +353,7 @@
args[2].i = -3;
args[3].i = 4;
args[4].i = -5;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_EQ(-3, result.GetI());
args[0].i = INT_MAX;
@@ -353,7 +361,7 @@
args[2].i = INT_MAX;
args[3].i = INT_MIN;
args[4].i = INT_MAX;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_EQ(2147483645, result.GetI());
args[0].i = INT_MAX;
@@ -361,7 +369,7 @@
args[2].i = INT_MAX;
args[3].i = INT_MAX;
args[4].i = INT_MAX;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_EQ(2147483643, result.GetI());
}
@@ -370,31 +378,32 @@
mirror::ArtMethod* method;
mirror::Object* receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(DD)D");
+ ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[2];
args[0].d = 0.0;
args[1].d = 0.0;
- JValue result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_DOUBLE_EQ(0.0, result.GetD());
args[0].d = 1.0;
args[1].d = 2.0;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_DOUBLE_EQ(3.0, result.GetD());
args[0].d = 1.0;
args[1].d = -2.0;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_DOUBLE_EQ(-1.0, result.GetD());
args[0].d = DBL_MAX;
args[1].d = DBL_MIN;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_DOUBLE_EQ(1.7976931348623157e308, result.GetD());
args[0].d = DBL_MAX;
args[1].d = DBL_MAX;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_DOUBLE_EQ(INFINITY, result.GetD());
}
@@ -403,24 +412,25 @@
mirror::ArtMethod* method;
mirror::Object* receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(DDD)D");
+ ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[3];
args[0].d = 0.0;
args[1].d = 0.0;
args[2].d = 0.0;
- JValue result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_DOUBLE_EQ(0.0, result.GetD());
args[0].d = 1.0;
args[1].d = 2.0;
args[2].d = 3.0;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_DOUBLE_EQ(6.0, result.GetD());
args[0].d = 1.0;
args[1].d = -2.0;
args[2].d = 3.0;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_DOUBLE_EQ(2.0, result.GetD());
}
@@ -429,27 +439,28 @@
mirror::ArtMethod* method;
mirror::Object* receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(DDDD)D");
+ ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[4];
args[0].d = 0.0;
args[1].d = 0.0;
args[2].d = 0.0;
args[3].d = 0.0;
- JValue result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_DOUBLE_EQ(0.0, result.GetD());
args[0].d = 1.0;
args[1].d = 2.0;
args[2].d = 3.0;
args[3].d = 4.0;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_DOUBLE_EQ(10.0, result.GetD());
args[0].d = 1.0;
args[1].d = -2.0;
args[2].d = 3.0;
args[3].d = -4.0;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_DOUBLE_EQ(-2.0, result.GetD());
}
@@ -458,6 +469,7 @@
mirror::ArtMethod* method;
mirror::Object* receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "sum", "(DDDDD)D");
+ ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
jvalue args[5];
args[0].d = 0.0;
@@ -465,7 +477,7 @@
args[2].d = 0.0;
args[3].d = 0.0;
args[4].d = 0.0;
- JValue result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_DOUBLE_EQ(0.0, result.GetD());
args[0].d = 1.0;
@@ -473,7 +485,7 @@
args[2].d = 3.0;
args[3].d = 4.0;
args[4].d = 5.0;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_DOUBLE_EQ(15.0, result.GetD());
args[0].d = 1.0;
@@ -481,7 +493,7 @@
args[2].d = 3.0;
args[3].d = -4.0;
args[4].d = 5.0;
- result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_DOUBLE_EQ(3.0, result.GetD());
}
@@ -501,10 +513,10 @@
CompileDirectMethod(class_loader, "Main", "main", "([Ljava/lang/String;)V");
mirror::Class* klass = class_linker_->FindClass(soa.Self(), "LMain;", class_loader);
- ASSERT_TRUE(klass != NULL);
+ ASSERT_TRUE(klass != nullptr);
mirror::ArtMethod* method = klass->FindDirectMethod("main", "([Ljava/lang/String;)V");
- ASSERT_TRUE(method != NULL);
+ ASSERT_TRUE(method != nullptr);
// Start runtime.
bool started = runtime_->Start();
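Every call site above gains the same two-line wrap because InvokeWithJValues now takes a jobject rather than a raw mirror::Object*. A plausible motivation, stated here as an assumption rather than something this diff spells out, is that a raw Object* can be invalidated by a moving collector during the call, while a JNI local reference is tracked and updated by the runtime. The conversion itself is mechanical:

    // Wrap the raw mirror pointer in a scoped JNI local reference first.
    ScopedLocalRef<jobject> receiver_ref(soa.Env(),
                                         soa.AddLocalReference<jobject>(receiver));
    JValue result = InvokeWithJValues(soa, receiver_ref.get(),
                                      soa.EncodeMethod(method), args);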
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 7bebb96..2618661 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -77,6 +77,7 @@
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/field.h"
+#include "mirror/method.h"
#include "mirror/stack_trace_element.h"
#include "mirror/throwable.h"
#include "monitor.h"
@@ -90,6 +91,7 @@
#include "native/java_lang_Object.h"
#include "native/java_lang_Runtime.h"
#include "native/java_lang_String.h"
+#include "native/java_lang_StringFactory.h"
#include "native/java_lang_System.h"
#include "native/java_lang_Thread.h"
#include "native/java_lang_Throwable.h"
@@ -102,6 +104,7 @@
#include "native/java_lang_reflect_Method.h"
#include "native/java_lang_reflect_Proxy.h"
#include "native/java_util_concurrent_atomic_AtomicLong.h"
+#include "native/libcore_util_CharsetUtils.h"
#include "native/org_apache_harmony_dalvik_ddmc_DdmServer.h"
#include "native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.h"
#include "native/sun_misc_Unsafe.h"
@@ -288,7 +291,7 @@
}
gAborting++;
os << "Runtime aborting...\n";
- if (Runtime::Current() == NULL) {
+ if (Runtime::Current() == nullptr) {
os << "(Runtime does not yet exist!)\n";
return;
}
@@ -349,7 +352,7 @@
MutexLock mu(Thread::Current(), *Locks::abort_lock_);
// Get any pending output out of the way.
- fflush(NULL);
+ fflush(nullptr);
// Many people have difficulty distinguishing aborts from crashes,
// so be explicit.
@@ -357,7 +360,7 @@
LOG(INTERNAL_FATAL) << Dumpable<AbortState>(state);
// Call the abort hook if we have one.
- if (Runtime::Current() != NULL && Runtime::Current()->abort_ != NULL) {
+ if (Runtime::Current() != nullptr && Runtime::Current()->abort_ != nullptr) {
LOG(INTERNAL_FATAL) << "Calling abort hook...";
Runtime::Current()->abort_();
// notreached
@@ -385,7 +388,7 @@
}
void Runtime::CallExitHook(jint status) {
- if (exit_ != NULL) {
+ if (exit_ != nullptr) {
ScopedThreadStateChange tsc(Thread::Current(), kNative);
exit_(status);
LOG(WARNING) << "Exit hook returned instead of exiting!";
@@ -400,16 +403,16 @@
bool Runtime::Create(const RuntimeOptions& options, bool ignore_unrecognized) {
// TODO: acquire a static mutex on Runtime to avoid racing.
- if (Runtime::instance_ != NULL) {
+ if (Runtime::instance_ != nullptr) {
return false;
}
- InitLogging(NULL); // Calls Locks::Init() as a side effect.
+ InitLogging(nullptr); // Calls Locks::Init() as a side effect.
instance_ = new Runtime;
if (!instance_->Init(options, ignore_unrecognized)) {
// TODO: Currently deleting the instance will abort the runtime on destruction. For now this
// will leak memory instead. Fix the destructor. b/19100793.
// delete instance_;
- instance_ = NULL;
+ instance_ = nullptr;
return false;
}
return true;
@@ -430,7 +433,7 @@
mirror::ArtMethod* getSystemClassLoader =
class_loader_class->FindDirectMethod("getSystemClassLoader", "()Ljava/lang/ClassLoader;");
- CHECK(getSystemClassLoader != NULL);
+ CHECK(getSystemClassLoader != nullptr);
JValue result = InvokeWithJValues(soa, nullptr, soa.EncodeMethod(getSystemClassLoader), nullptr);
JNIEnv* env = soa.Self()->GetJniEnv();
@@ -446,7 +449,7 @@
ArtField* contextClassLoader =
thread_class->FindDeclaredInstanceField("contextClassLoader", "Ljava/lang/ClassLoader;");
- CHECK(contextClassLoader != NULL);
+ CHECK(contextClassLoader != nullptr);
// We can't run in a transaction yet.
contextClassLoader->SetObject<false>(soa.Self()->GetPeer(),
@@ -589,7 +592,7 @@
// Mark rootfs as being a slave so that changes from default
// namespace only flow into our children.
- if (mount("rootfs", "/", NULL, (MS_SLAVE | MS_REC), NULL) == -1) {
+ if (mount("rootfs", "/", nullptr, (MS_SLAVE | MS_REC), nullptr) == -1) {
PLOG(WARNING) << "Failed to mount() rootfs as MS_SLAVE";
return false;
}
@@ -598,7 +601,7 @@
// bind mount storage into their respective private namespaces, which
// are isolated from each other.
const char* target_base = getenv("EMULATED_STORAGE_TARGET");
- if (target_base != NULL) {
+ if (target_base != nullptr) {
if (mount("tmpfs", target_base, "tmpfs", MS_NOSUID | MS_NODEV,
"uid=0,gid=1028,mode=0751") == -1) {
LOG(WARNING) << "Failed to mount tmpfs to " << target_base;
@@ -676,7 +679,7 @@
static bool OpenDexFilesFromImage(const std::string& image_location,
std::vector<std::unique_ptr<const DexFile>>* dex_files,
size_t* failures) {
- DCHECK(dex_files != nullptr) << "OpenDexFilesFromImage: out-param is NULL";
+ DCHECK(dex_files != nullptr) << "OpenDexFilesFromImage: out-param is nullptr";
std::string system_filename;
bool has_system = false;
std::string cache_filename_unused;
@@ -736,7 +739,7 @@
const std::vector<std::string>& dex_locations,
const std::string& image_location,
std::vector<std::unique_ptr<const DexFile>>* dex_files) {
- DCHECK(dex_files != nullptr) << "OpenDexFiles: out-param is NULL";
+ DCHECK(dex_files != nullptr) << "OpenDexFiles: out-param is nullptr";
size_t failure_count = 0;
if (!image_location.empty() && OpenDexFilesFromImage(image_location, dex_files, &failure_count)) {
return failure_count;
@@ -869,7 +872,7 @@
// If we are already the compiler at this point, we must be dex2oat. Don't create the jit in
// this case.
// If runtime_options doesn't have UseJIT set to true then CreateFromRuntimeArguments returns
- // nullptr and we don't create the jit.
+ // null and we don't create the jit.
use_jit = false;
}
@@ -1128,26 +1131,26 @@
env->NewGlobalRef(env->GetStaticObjectField(
WellKnownClasses::java_lang_ThreadGroup,
WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup));
- CHECK(main_thread_group_ != NULL || IsAotCompiler());
+ CHECK(main_thread_group_ != nullptr || IsAotCompiler());
system_thread_group_ =
env->NewGlobalRef(env->GetStaticObjectField(
WellKnownClasses::java_lang_ThreadGroup,
WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup));
- CHECK(system_thread_group_ != NULL || IsAotCompiler());
+ CHECK(system_thread_group_ != nullptr || IsAotCompiler());
}
jobject Runtime::GetMainThreadGroup() const {
- CHECK(main_thread_group_ != NULL || IsAotCompiler());
+ CHECK(main_thread_group_ != nullptr || IsAotCompiler());
return main_thread_group_;
}
jobject Runtime::GetSystemThreadGroup() const {
- CHECK(system_thread_group_ != NULL || IsAotCompiler());
+ CHECK(system_thread_group_ != nullptr || IsAotCompiler());
return system_thread_group_;
}
jobject Runtime::GetSystemClassLoader() const {
- CHECK(system_class_loader_ != NULL || IsAotCompiler());
+ CHECK(system_class_loader_ != nullptr || IsAotCompiler());
return system_class_loader_;
}
@@ -1169,11 +1172,13 @@
register_java_lang_ref_Reference(env);
register_java_lang_Runtime(env);
register_java_lang_String(env);
+ register_java_lang_StringFactory(env);
register_java_lang_System(env);
register_java_lang_Thread(env);
register_java_lang_Throwable(env);
register_java_lang_VMClassLoader(env);
register_java_util_concurrent_atomic_AtomicLong(env);
+ register_libcore_util_CharsetUtils(env);
register_org_apache_harmony_dalvik_ddmc_DdmServer(env);
register_org_apache_harmony_dalvik_ddmc_DdmVmInternal(env);
register_sun_misc_Unsafe(env);
@@ -1273,12 +1278,12 @@
bool Runtime::AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
bool create_peer) {
- return Thread::Attach(thread_name, as_daemon, thread_group, create_peer) != NULL;
+ return Thread::Attach(thread_name, as_daemon, thread_group, create_peer) != nullptr;
}
void Runtime::DetachCurrentThread() {
Thread* self = Thread::Current();
- if (self == NULL) {
+ if (self == nullptr) {
LOG(FATAL) << "attempting to detach thread that is not attached";
}
if (self->HasManagedStack()) {
@@ -1308,7 +1313,9 @@
// need to be visited once per GC since they never change.
mirror::ArtMethod::VisitRoots(visitor);
mirror::Class::VisitRoots(visitor);
+ mirror::Constructor::VisitRoots(visitor);
mirror::Reference::VisitRoots(visitor);
+ mirror::Method::VisitRoots(visitor);
mirror::StackTraceElement::VisitRoots(visitor);
mirror::String::VisitRoots(visitor);
mirror::Throwable::VisitRoots(visitor);
@@ -1559,14 +1566,15 @@
// Throwing an exception may cause its class initialization. If we mark the transaction
// aborted before that, we may warn with a false alarm. Throwing the exception before
// marking the transaction aborted avoids that.
- preinitialization_transaction_->ThrowAbortError(self, false);
+ preinitialization_transaction_->ThrowAbortError(self, &abort_message);
preinitialization_transaction_->Abort(abort_message);
}
void Runtime::ThrowTransactionAbortError(Thread* self) {
DCHECK(IsAotCompiler());
DCHECK(IsActiveTransaction());
- preinitialization_transaction_->ThrowAbortError(self, true);
+ // Passing nullptr means we rethrow an exception with the earlier transaction abort message.
+ preinitialization_transaction_->ThrowAbortError(self, nullptr);
}
void Runtime::RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset,
@@ -1670,10 +1678,6 @@
std::string feature_string("--instruction-set-features=");
feature_string += features->GetFeatureString();
argv->push_back(feature_string);
-
- if (Dbg::IsJdwpConfigured()) {
- argv->push_back("--debuggable");
- }
}
void Runtime::UpdateProfilerState(int state) {
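The two ThrowAbortError call sites above encode a small convention: pass &abort_message when first aborting the transaction, and pass nullptr to rethrow using the message recorded at abort time. A standalone illustration of that nullable-message convention (not ART code; stored_abort_message is an assumed stand-in for the transaction's saved message):

    #include <string>

    static std::string stored_abort_message;  // set when the transaction aborts

    std::string ResolveAbortMessage(const std::string* abort_message) {
      // nullptr means: reuse the message recorded at abort time.
      return abort_message != nullptr ? *abort_message : stored_abort_message;
    }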
diff --git a/runtime/runtime.h b/runtime/runtime.h
index d95640d..348d5c6 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -31,6 +31,7 @@
#include "gc_root.h"
#include "instrumentation.h"
#include "jobject_comparator.h"
+#include "method_reference.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "profiler_options.h"
@@ -86,6 +87,8 @@
class Transaction;
typedef std::vector<std::pair<std::string, const void*>> RuntimeOptions;
+typedef SafeMap<MethodReference, SafeMap<uint32_t, std::set<uint32_t>>,
+ MethodReferenceComparator> MethodRefToStringInitRegMap;
// Not all combinations of flags are valid. You may not visit all roots as well as the new roots
// (no logical reason to do this). You also may not start logging new roots and stop logging new
@@ -248,7 +251,7 @@
}
InternTable* GetInternTable() const {
- DCHECK(intern_table_ != NULL);
+ DCHECK(intern_table_ != nullptr);
return intern_table_;
}
@@ -328,7 +331,7 @@
void VisitNonConcurrentRoots(RootVisitor* visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Sweep system weaks, the system weak is deleted if the visitor return nullptr. Otherwise, the
+ // Sweep system weaks: a system weak is deleted if the visitor returns null. Otherwise, the
// system weak is updated to be the visitor's returned value.
void SweepSystemWeaks(IsMarkedCallback* visitor, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -558,6 +561,10 @@
return jit_options_.get();
}
+ MethodRefToStringInitRegMap& GetStringInitMap() {
+ return method_ref_string_init_reg_map_;
+ }
+
private:
static void InitPlatformSignalHandlers();
@@ -574,7 +581,7 @@
void StartDaemonThreads();
void StartSignalCatcher();
- // A pointer to the active runtime or NULL.
+ // A pointer to the active runtime or null.
static Runtime* instance_;
// NOTE: these must match the gc::ProcessState values as they come directly from the framework.
@@ -737,6 +744,8 @@
// zygote.
uint32_t zygote_max_failed_boots_;
+ MethodRefToStringInitRegMap method_ref_string_init_reg_map_;
+
DISALLOW_COPY_AND_ASSIGN(Runtime);
};
std::ostream& operator<<(std::ostream& os, const Runtime::CalleeSaveType& rhs);
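The nesting of the new MethodRefToStringInitRegMap typedef is easiest to read from the access side. A sketch of a lookup, where method_ref is an assumed MethodReference and the meaning of the inner uint32_t keys and register sets is not spelled out in this diff:

    MethodRefToStringInitRegMap& string_init_map = Runtime::Current()->GetStringInitMap();
    auto it = string_init_map.find(method_ref);
    if (it != string_init_map.end()) {
      // Per method: uint32_t key -> set of dex registers recorded for that key.
      const SafeMap<uint32_t, std::set<uint32_t>>& per_method = it->second;
      DCHECK(!per_method.empty());
    }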
diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc
index 35d944f..d65e18e 100644
--- a/runtime/runtime_linux.cc
+++ b/runtime/runtime_linux.cc
@@ -321,7 +321,7 @@
OsInfo os_info;
const char* cmd_line = GetCmdLine();
- if (cmd_line == NULL) {
+ if (cmd_line == nullptr) {
cmd_line = "<unset>"; // Because no-one called InitLogging.
}
pid_t tid = GetTid();
@@ -353,9 +353,10 @@
heap->DumpObject(LOG(INTERNAL_FATAL), reinterpret_cast<mirror::Object*>(info->si_addr));
}
}
- if (getenv("debug_db_uid") != NULL || getenv("art_wait_for_gdb_on_crash") != NULL) {
+ if (getenv("debug_db_uid") != nullptr || getenv("art_wait_for_gdb_on_crash") != nullptr) {
LOG(INTERNAL_FATAL) << "********************************************************\n"
- << "* Process " << getpid() << " thread " << tid << " \"" << thread_name << "\""
+ << "* Process " << getpid() << " thread " << tid << " \"" << thread_name
+ << "\""
<< " has been suspended while crashing.\n"
<< "* Attach gdb:\n"
<< "* gdb -p " << tid << "\n"
@@ -370,7 +371,7 @@
memset(&action, 0, sizeof(action));
sigemptyset(&action.sa_mask);
action.sa_handler = SIG_DFL;
- sigaction(signal_number, &action, NULL);
+ sigaction(signal_number, &action, nullptr);
// ...and re-raise so we die with the appropriate status.
kill(getpid(), signal_number);
#else
@@ -390,19 +391,19 @@
action.sa_flags |= SA_ONSTACK;
int rc = 0;
- rc += sigaction(SIGABRT, &action, NULL);
- rc += sigaction(SIGBUS, &action, NULL);
- rc += sigaction(SIGFPE, &action, NULL);
- rc += sigaction(SIGILL, &action, NULL);
- rc += sigaction(SIGPIPE, &action, NULL);
- rc += sigaction(SIGSEGV, &action, NULL);
+ rc += sigaction(SIGABRT, &action, nullptr);
+ rc += sigaction(SIGBUS, &action, nullptr);
+ rc += sigaction(SIGFPE, &action, nullptr);
+ rc += sigaction(SIGILL, &action, nullptr);
+ rc += sigaction(SIGPIPE, &action, nullptr);
+ rc += sigaction(SIGSEGV, &action, nullptr);
#if defined(SIGSTKFLT)
- rc += sigaction(SIGSTKFLT, &action, NULL);
+ rc += sigaction(SIGSTKFLT, &action, nullptr);
#endif
- rc += sigaction(SIGTRAP, &action, NULL);
+ rc += sigaction(SIGTRAP, &action, nullptr);
// Special dump-all timeout.
if (GetTimeoutSignal() != -1) {
- rc += sigaction(GetTimeoutSignal(), &action, NULL);
+ rc += sigaction(GetTimeoutSignal(), &action, nullptr);
}
CHECK_EQ(rc, 0);
}
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 8b504c1..922334e 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -62,7 +62,7 @@
RUNTIME_OPTIONS_KEY (Unit, DumpJITInfoOnShutdown)
RUNTIME_OPTIONS_KEY (Unit, IgnoreMaxFootprint)
RUNTIME_OPTIONS_KEY (Unit, LowMemoryMode)
-RUNTIME_OPTIONS_KEY (bool, UseTLAB, kUseTlab)
+RUNTIME_OPTIONS_KEY (bool, UseTLAB, (kUseTlab || kUseReadBarrier))
RUNTIME_OPTIONS_KEY (bool, EnableHSpaceCompactForOOM, true)
RUNTIME_OPTIONS_KEY (bool, UseJIT, false)
RUNTIME_OPTIONS_KEY (unsigned int, JITCompileThreshold, jit::Jit::kDefaultCompileThreshold)
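The third argument to RUNTIME_OPTIONS_KEY is the compile-time default, so this change turns TLAB allocation on by default on any read-barrier build regardless of kUseTlab; presumably (the diff does not say so) the read-barrier configuration requires TLAB-based allocation. Written out as a plain expression:

    // The new compile-time default for UseTLAB:
    static constexpr bool kUseTlabDefault = kUseTlab || kUseReadBarrier;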
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index 11b7df6..60ed55a 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -36,11 +36,11 @@
ScopedThreadStateChange(Thread* self, ThreadState new_thread_state)
LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE
: self_(self), thread_state_(new_thread_state), expected_has_no_thread_(false) {
- if (UNLIKELY(self_ == NULL)) {
- // Value chosen arbitrarily and won't be used in the destructor since thread_ == NULL.
+ if (UNLIKELY(self_ == nullptr)) {
+ // Value chosen arbitrarily and won't be used in the destructor since thread_ == null.
old_thread_state_ = kTerminated;
Runtime* runtime = Runtime::Current();
- CHECK(runtime == NULL || !runtime->IsStarted() || runtime->IsShuttingDown(self_));
+ CHECK(runtime == nullptr || !runtime->IsStarted() || runtime->IsShuttingDown(self_));
} else {
DCHECK_EQ(self, Thread::Current());
// Read state without locks, ok as state is effectively thread local and we're not interested
@@ -60,10 +60,10 @@
}
~ScopedThreadStateChange() LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE {
- if (UNLIKELY(self_ == NULL)) {
+ if (UNLIKELY(self_ == nullptr)) {
if (!expected_has_no_thread_) {
Runtime* runtime = Runtime::Current();
- bool shutting_down = (runtime == NULL) || runtime->IsShuttingDown(nullptr);
+ bool shutting_down = (runtime == nullptr) || runtime->IsShuttingDown(nullptr);
CHECK(shutting_down);
}
} else {
@@ -87,7 +87,7 @@
protected:
// Constructor used by ScopedJniThreadState for an unattached thread that has access to the VM*.
ScopedThreadStateChange()
- : self_(NULL), thread_state_(kTerminated), old_thread_state_(kTerminated),
+ : self_(nullptr), thread_state_(kTerminated), old_thread_state_(kTerminated),
expected_has_no_thread_(true) {}
Thread* const self_;
@@ -124,7 +124,7 @@
* Add a local reference for an object to the indirect reference table associated with the
* current stack frame. When the native function returns, the reference will be discarded.
*
- * We need to allow the same reference to be added multiple times, and cope with NULL.
+ * We need to allow the same reference to be added multiple times, and cope with nullptr.
*
* This will be called on otherwise unreferenced objects. We cannot do GC allocations here, and
* it's best if we don't grab a mutex.
@@ -133,11 +133,8 @@
T AddLocalReference(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
- if (obj == NULL) {
- return NULL;
- }
- DCHECK_NE((reinterpret_cast<uintptr_t>(obj) & 0xffff0000), 0xebad0000);
- return Env()->AddLocalReference<T>(obj);
+ DCHECK_NE(obj, Runtime::Current()->GetClearedJniWeakGlobal());
+ return obj == nullptr ? nullptr : Env()->AddLocalReference<T>(obj);
}
template<typename T>
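The rewritten AddLocalReference keeps the caller-visible contract (null maps to null, anything else goes through Env()->AddLocalReference<T>()) and replaces the old 0xebad poison-pattern check with a debug check that the object is not the sentinel returned by GetClearedJniWeakGlobal(). Call sites are unchanged:

    // Null-tolerant conversion; in debug builds, also rejects the
    // cleared-weak-global sentinel object.
    jobject local = soa.AddLocalReference<jobject>(obj);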
diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc
index 26bf655..863d59b 100644
--- a/runtime/signal_catcher.cc
+++ b/runtime/signal_catcher.cc
@@ -53,7 +53,7 @@
os << "Cmd line: " << current_cmd_line << "\n";
const char* stashed_cmd_line = GetCmdLine();
- if (stashed_cmd_line != NULL && current_cmd_line != stashed_cmd_line
+ if (stashed_cmd_line != nullptr && current_cmd_line != stashed_cmd_line
&& strcmp(stashed_cmd_line, "<unset>") != 0) {
os << "Original command line: " << stashed_cmd_line << "\n";
}
@@ -67,15 +67,15 @@
: stack_trace_file_(stack_trace_file),
lock_("SignalCatcher lock"),
cond_("SignalCatcher::cond_", lock_),
- thread_(NULL) {
+ thread_(nullptr) {
SetHaltFlag(false);
// Create a raw pthread; its start routine will attach to the runtime.
- CHECK_PTHREAD_CALL(pthread_create, (&pthread_, NULL, &Run, this), "signal catcher thread");
+ CHECK_PTHREAD_CALL(pthread_create, (&pthread_, nullptr, &Run, this), "signal catcher thread");
Thread* self = Thread::Current();
MutexLock mu(self, lock_);
- while (thread_ == NULL) {
+ while (thread_ == nullptr) {
cond_.Wait(self);
}
}
@@ -85,7 +85,7 @@
// to arrive, send it one.
SetHaltFlag(true);
CHECK_PTHREAD_CALL(pthread_kill, (pthread_, SIGQUIT), "signal catcher shutdown");
- CHECK_PTHREAD_CALL(pthread_join, (pthread_, NULL), "signal catcher shutdown");
+ CHECK_PTHREAD_CALL(pthread_join, (pthread_, nullptr), "signal catcher shutdown");
}
void SignalCatcher::SetHaltFlag(bool new_value) {
@@ -176,7 +176,7 @@
void* SignalCatcher::Run(void* arg) {
SignalCatcher* signal_catcher = reinterpret_cast<SignalCatcher*>(arg);
- CHECK(signal_catcher != NULL);
+ CHECK(signal_catcher != nullptr);
Runtime* runtime = Runtime::Current();
CHECK(runtime->AttachCurrentThread("Signal Catcher", true, runtime->GetSystemThreadGroup(),
@@ -199,7 +199,7 @@
int signal_number = signal_catcher->WaitForSignal(self, signals);
if (signal_catcher->ShouldHalt()) {
runtime->DetachCurrentThread();
- return NULL;
+ return nullptr;
}
switch (signal_number) {
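The constructor above is a small rendezvous: it spawns a raw pthread, then blocks on cond_ until Run() has attached to the runtime and published thread_, so the object never escapes half-initialized. The same shape in a self-contained sketch, using standard C++ primitives in place of ART's Mutex and ConditionVariable:

    #include <condition_variable>
    #include <mutex>
    #include <thread>

    struct Catcher {
      std::mutex lock;
      std::condition_variable cond;
      bool started = false;  // stands in for thread_ != nullptr

      Catcher() {
        std::thread([this] {
          std::lock_guard<std::mutex> lg(lock);
          started = true;     // the worker publishes that it is attached...
          cond.notify_one();  // ...and wakes the waiting constructor
        }).detach();
        std::unique_lock<std::mutex> ul(lock);
        cond.wait(ul, [this] { return started; });  // wait until published
      }
    };

    int main() {
      Catcher catcher;  // returns only once the worker thread is up
    }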
diff --git a/runtime/signal_set.h b/runtime/signal_set.h
index 3b89e6e..c272514 100644
--- a/runtime/signal_set.h
+++ b/runtime/signal_set.h
@@ -38,7 +38,7 @@
}
void Block() {
- if (sigprocmask(SIG_BLOCK, &set_, NULL) == -1) {
+ if (sigprocmask(SIG_BLOCK, &set_, nullptr) == -1) {
PLOG(FATAL) << "sigprocmask failed";
}
}
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 4ae49dd..6795516 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -19,6 +19,7 @@
#include "arch/context.h"
#include "base/hex_dump.h"
#include "entrypoints/runtime_asm_entrypoints.h"
+#include "gc_map.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
@@ -36,12 +37,12 @@
mirror::Object* ShadowFrame::GetThisObject() const {
mirror::ArtMethod* m = GetMethod();
if (m->IsStatic()) {
- return NULL;
+ return nullptr;
} else if (m->IsNative()) {
return GetVRegReference(0);
} else {
const DexFile::CodeItem* code_item = m->GetCodeItem();
- CHECK(code_item != NULL) << PrettyMethod(m);
+ CHECK(code_item != nullptr) << PrettyMethod(m);
uint16_t reg = code_item->registers_size_ - code_item->ins_size_;
return GetVRegReference(reg);
}
@@ -50,7 +51,7 @@
mirror::Object* ShadowFrame::GetThisObject(uint16_t num_ins) const {
mirror::ArtMethod* m = GetMethod();
if (m->IsStatic()) {
- return NULL;
+ return nullptr;
} else {
return GetVRegReference(NumberOfVRegs() - num_ins);
}
@@ -58,9 +59,9 @@
size_t ManagedStack::NumJniShadowFrameReferences() const {
size_t count = 0;
- for (const ManagedStack* current_fragment = this; current_fragment != NULL;
+ for (const ManagedStack* current_fragment = this; current_fragment != nullptr;
current_fragment = current_fragment->GetLink()) {
- for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_; current_frame != NULL;
+ for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_; current_frame != nullptr;
current_frame = current_frame->GetLink()) {
if (current_frame->GetMethod()->IsNative()) {
// The JNI ShadowFrame only contains references. (For indirect reference.)
@@ -72,9 +73,9 @@
}
bool ManagedStack::ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const {
- for (const ManagedStack* current_fragment = this; current_fragment != NULL;
+ for (const ManagedStack* current_fragment = this; current_fragment != nullptr;
current_fragment = current_fragment->GetLink()) {
- for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_; current_frame != NULL;
+ for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_; current_frame != nullptr;
current_frame = current_frame->GetLink()) {
if (current_frame->Contains(shadow_frame_entry)) {
return true;
@@ -84,25 +85,59 @@
return false;
}
-StackVisitor::StackVisitor(Thread* thread, Context* context)
- : thread_(thread), cur_shadow_frame_(NULL),
- cur_quick_frame_(NULL), cur_quick_frame_pc_(0), num_frames_(0), cur_depth_(0),
+StackVisitor::StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind)
+ : StackVisitor(thread, context, walk_kind, 0) {}
+
+StackVisitor::StackVisitor(Thread* thread,
+ Context* context,
+ StackWalkKind walk_kind,
+ size_t num_frames)
+ : thread_(thread),
+ walk_kind_(walk_kind),
+ cur_shadow_frame_(nullptr),
+ cur_quick_frame_(nullptr),
+ cur_quick_frame_pc_(0),
+ num_frames_(num_frames),
+ cur_depth_(0),
+ current_inlining_depth_(0),
context_(context) {
DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread;
}
-StackVisitor::StackVisitor(Thread* thread, Context* context, size_t num_frames)
- : thread_(thread), cur_shadow_frame_(NULL),
- cur_quick_frame_(NULL), cur_quick_frame_pc_(0), num_frames_(num_frames), cur_depth_(0),
- context_(context) {
- DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread;
+InlineInfo StackVisitor::GetCurrentInlineInfo() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* outer_method = GetCurrentQuickFrame()->AsMirrorPtr();
+ uint32_t native_pc_offset = outer_method->NativeQuickPcOffset(cur_quick_frame_pc_);
+ CodeInfo code_info = outer_method->GetOptimizedCodeInfo();
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
+ return code_info.GetInlineInfoOf(stack_map);
+}
+
+mirror::ArtMethod* StackVisitor::GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (cur_shadow_frame_ != nullptr) {
+ return cur_shadow_frame_->GetMethod();
+ } else if (cur_quick_frame_ != nullptr) {
+ if (IsInInlinedFrame()) {
+ size_t depth_in_stack_map = current_inlining_depth_ - 1;
+ return GetCurrentQuickFrame()->AsMirrorPtr()->GetDexCacheResolvedMethod(
+ GetCurrentInlineInfo().GetMethodIndexAtDepth(depth_in_stack_map));
+ } else {
+ return cur_quick_frame_->AsMirrorPtr();
+ }
+ } else {
+ return nullptr;
+ }
}
uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
- if (cur_shadow_frame_ != NULL) {
+ if (cur_shadow_frame_ != nullptr) {
return cur_shadow_frame_->GetDexPC();
- } else if (cur_quick_frame_ != NULL) {
- return GetMethod()->ToDexPc(cur_quick_frame_pc_, abort_on_failure);
+ } else if (cur_quick_frame_ != nullptr) {
+ if (IsInInlinedFrame()) {
+ size_t depth_in_stack_map = current_inlining_depth_ - 1;
+ return GetCurrentInlineInfo().GetDexPcAtDepth(depth_in_stack_map);
+ } else {
+ return GetMethod()->ToDexPc(cur_quick_frame_pc_, abort_on_failure);
+ }
} else {
return 0;
}
@@ -151,6 +186,33 @@
return GetMethod()->NativeQuickPcOffset(cur_quick_frame_pc_);
}
+bool StackVisitor::IsReferenceVReg(mirror::ArtMethod* m, uint16_t vreg) {
+ // Process register map (which native and runtime methods don't have)
+ if (m->IsNative() || m->IsRuntimeMethod() || m->IsProxyMethod()) {
+ return false;
+ }
+ if (m->IsOptimized(sizeof(void*))) {
+ return true; // TODO: Implement.
+ }
+ const uint8_t* native_gc_map = m->GetNativeGcMap(sizeof(void*));
+ CHECK(native_gc_map != nullptr) << PrettyMethod(m);
+ const DexFile::CodeItem* code_item = m->GetCodeItem();
+ // Can't be null or how would we compile its instructions?
+ DCHECK(code_item != nullptr) << PrettyMethod(m);
+ NativePcOffsetToReferenceMap map(native_gc_map);
+ size_t num_regs = std::min(map.RegWidth() * 8, static_cast<size_t>(code_item->registers_size_));
+ const uint8_t* reg_bitmap = nullptr;
+ if (num_regs > 0) {
+ Runtime* runtime = Runtime::Current();
+ const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m, sizeof(void*));
+ uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point);
+ reg_bitmap = map.FindBitMap(native_pc_offset);
+ DCHECK(reg_bitmap != nullptr);
+ }
+ // Does this register hold a reference?
+ return vreg < num_regs && TestBitmap(vreg, reg_bitmap);
+}
+
bool StackVisitor::GetVReg(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t* val) const {
if (cur_quick_frame_ != nullptr) {
@@ -183,7 +245,7 @@
return GetRegisterIfAccessible(reg, kind, val);
} else {
const DexFile::CodeItem* code_item = m->GetCodeItem();
- DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be null or how would we compile
// its instructions?
*val = *GetVRegAddrFromQuickCode(cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
@@ -193,18 +255,27 @@
bool StackVisitor::GetVRegFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t* val) const {
- const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
- DCHECK(code_pointer != nullptr);
- uint32_t native_pc_offset = m->NativeQuickPcOffset(cur_quick_frame_pc_);
- CodeInfo code_info = m->GetOptimizedCodeInfo();
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
+ DCHECK_EQ(m, GetMethod());
const DexFile::CodeItem* code_item = m->GetCodeItem();
- DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be null or how would we compile
// its instructions?
- DCHECK_LT(vreg, code_item->registers_size_);
uint16_t number_of_dex_registers = code_item->registers_size_;
- DexRegisterMap dex_register_map =
- code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+ DCHECK_LT(vreg, code_item->registers_size_);
+
+ mirror::ArtMethod* outer_method = GetCurrentQuickFrame()->AsMirrorPtr();
+ const void* code_pointer = outer_method->GetQuickOatCodePointer(sizeof(void*));
+ DCHECK(code_pointer != nullptr);
+ CodeInfo code_info = outer_method->GetOptimizedCodeInfo();
+
+ uint32_t native_pc_offset = outer_method->NativeQuickPcOffset(cur_quick_frame_pc_);
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
+ size_t depth_in_stack_map = current_inlining_depth_ - 1;
+
+ DexRegisterMap dex_register_map = IsInInlinedFrame()
+ ? code_info.GetDexRegisterMapAtDepth(
+ depth_in_stack_map, code_info.GetInlineInfoOf(stack_map), number_of_dex_registers)
+ : code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+
DexRegisterLocation::Kind location_kind =
dex_register_map.GetLocationKind(vreg, number_of_dex_registers, code_info);
switch (location_kind) {
@@ -297,7 +368,7 @@
return GetRegisterPairIfAccessible(reg_lo, reg_hi, kind_lo, val);
} else {
const DexFile::CodeItem* code_item = m->GetCodeItem();
- DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be null or how would we compile
// its instructions?
uint32_t* addr = GetVRegAddrFromQuickCode(
cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
@@ -345,7 +416,7 @@
DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
DCHECK(m == GetMethod());
if (m->IsOptimized(sizeof(void*))) {
- return SetVRegFromOptimizedCode(m, vreg, new_value, kind);
+ return false;
} else {
return SetVRegFromQuickCode(m, vreg, new_value, kind);
}
@@ -372,7 +443,7 @@
return SetRegisterIfAccessible(reg, new_value, kind);
} else {
const DexFile::CodeItem* code_item = m->GetCodeItem();
- DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be null or how would we compile
// its instructions?
uint32_t* addr = GetVRegAddrFromQuickCode(
cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
@@ -382,57 +453,6 @@
}
}
-bool StackVisitor::SetVRegFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_value,
- VRegKind kind) {
- const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
- DCHECK(code_pointer != nullptr);
- uint32_t native_pc_offset = m->NativeQuickPcOffset(cur_quick_frame_pc_);
- CodeInfo code_info = m->GetOptimizedCodeInfo();
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
- const DexFile::CodeItem* code_item = m->GetCodeItem();
- DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
- // its instructions?
- uint16_t number_of_dex_registers = code_item->registers_size_;
- DCHECK_LT(vreg, number_of_dex_registers);
- DexRegisterMap dex_register_map =
- code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
- DexRegisterLocation::Kind location_kind =
- dex_register_map.GetLocationKind(vreg, number_of_dex_registers, code_info);
- uint32_t dex_pc = m->ToDexPc(cur_quick_frame_pc_, false);
- switch (location_kind) {
- case DexRegisterLocation::Kind::kInStack: {
- const int32_t offset =
- dex_register_map.GetStackOffsetInBytes(vreg, number_of_dex_registers, code_info);
- uint8_t* addr = reinterpret_cast<uint8_t*>(cur_quick_frame_) + offset;
- *reinterpret_cast<uint32_t*>(addr) = new_value;
- return true;
- }
- case DexRegisterLocation::Kind::kInRegister:
- case DexRegisterLocation::Kind::kInFpuRegister: {
- uint32_t reg = dex_register_map.GetMachineRegister(vreg, number_of_dex_registers, code_info);
- return SetRegisterIfAccessible(reg, new_value, kind);
- }
- case DexRegisterLocation::Kind::kConstant:
- LOG(ERROR) << StringPrintf("Cannot change value of DEX register v%u used as a constant at "
- "DEX pc 0x%x (native pc 0x%x) of method %s",
- vreg, dex_pc, native_pc_offset,
- PrettyMethod(cur_quick_frame_->AsMirrorPtr()).c_str());
- return false;
- case DexRegisterLocation::Kind::kNone:
- LOG(ERROR) << StringPrintf("No location for DEX register v%u at DEX pc 0x%x "
- "(native pc 0x%x) of method %s",
- vreg, dex_pc, native_pc_offset,
- PrettyMethod(cur_quick_frame_->AsMirrorPtr()).c_str());
- return false;
- default:
- LOG(FATAL) << StringPrintf("Unknown location for DEX register v%u at DEX pc 0x%x "
- "(native pc 0x%x) of method %s",
- vreg, dex_pc, native_pc_offset,
- PrettyMethod(cur_quick_frame_->AsMirrorPtr()).c_str());
- UNREACHABLE();
- }
-}
-
bool StackVisitor::SetRegisterIfAccessible(uint32_t reg, uint32_t new_value, VRegKind kind) {
const bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
if (!IsAccessibleRegister(reg, is_float)) {
@@ -477,7 +497,7 @@
DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
DCHECK(m == GetMethod());
if (m->IsOptimized(sizeof(void*))) {
- return SetVRegPairFromOptimizedCode(m, vreg, new_value, kind_lo, kind_hi);
+ return false;
} else {
return SetVRegPairFromQuickCode(m, vreg, new_value, kind_lo, kind_hi);
}
@@ -488,8 +508,8 @@
}
}
-bool StackVisitor::SetVRegPairFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value,
- VRegKind kind_lo, VRegKind kind_hi) {
+bool StackVisitor::SetVRegPairFromQuickCode(
+ mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value, VRegKind kind_lo, VRegKind kind_hi) {
const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
DCHECK(code_pointer != nullptr);
const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
@@ -505,7 +525,7 @@
return SetRegisterPairIfAccessible(reg_lo, reg_hi, new_value, is_float);
} else {
const DexFile::CodeItem* code_item = m->GetCodeItem();
- DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be null or how would we compile
// its instructions?
uint32_t* addr = GetVRegAddrFromQuickCode(
cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
@@ -515,15 +535,6 @@
}
}
-bool StackVisitor::SetVRegPairFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value,
- VRegKind kind_lo, VRegKind kind_hi) {
- uint32_t low_32bits = Low32Bits(new_value);
- uint32_t high_32bits = High32Bits(new_value);
- bool success = SetVRegFromOptimizedCode(m, vreg, low_32bits, kind_lo);
- success &= SetVRegFromOptimizedCode(m, vreg + 1, high_32bits, kind_hi);
- return success;
-}
-
bool StackVisitor::SetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi,
uint64_t new_value, bool is_float) {
if (!IsAccessibleRegister(reg_lo, is_float) || !IsAccessibleRegister(reg_hi, is_float)) {
@@ -585,22 +596,22 @@
uintptr_t StackVisitor::GetReturnPc() const {
uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
- DCHECK(sp != NULL);
+ DCHECK(sp != nullptr);
uint8_t* pc_addr = sp + GetMethod()->GetReturnPcOffset().SizeValue();
return *reinterpret_cast<uintptr_t*>(pc_addr);
}
void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
- CHECK(sp != NULL);
+ CHECK(sp != nullptr);
uint8_t* pc_addr = sp + GetMethod()->GetReturnPcOffset().SizeValue();
*reinterpret_cast<uintptr_t*>(pc_addr) = new_ret_pc;
}
-size_t StackVisitor::ComputeNumFrames(Thread* thread) {
+size_t StackVisitor::ComputeNumFrames(Thread* thread, StackWalkKind walk_kind) {
struct NumFramesVisitor : public StackVisitor {
- explicit NumFramesVisitor(Thread* thread_in)
- : StackVisitor(thread_in, NULL), frames(0) {}
+ NumFramesVisitor(Thread* thread_in, StackWalkKind walk_kind_in)
+ : StackVisitor(thread_in, nullptr, walk_kind_in), frames(0) {}
bool VisitFrame() OVERRIDE {
frames++;
@@ -609,16 +620,23 @@
size_t frames;
};
- NumFramesVisitor visitor(thread);
+ NumFramesVisitor visitor(thread, walk_kind);
visitor.WalkStack(true);
return visitor.frames;
}
bool StackVisitor::GetNextMethodAndDexPc(mirror::ArtMethod** next_method, uint32_t* next_dex_pc) {
struct HasMoreFramesVisitor : public StackVisitor {
- explicit HasMoreFramesVisitor(Thread* thread, size_t num_frames, size_t frame_height)
- : StackVisitor(thread, nullptr, num_frames), frame_height_(frame_height),
- found_frame_(false), has_more_frames_(false), next_method_(nullptr), next_dex_pc_(0) {
+ HasMoreFramesVisitor(Thread* thread,
+ StackWalkKind walk_kind,
+ size_t num_frames,
+ size_t frame_height)
+ : StackVisitor(thread, nullptr, walk_kind, num_frames),
+ frame_height_(frame_height),
+ found_frame_(false),
+ has_more_frames_(false),
+ next_method_(nullptr),
+ next_dex_pc_(0) {
}
bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -642,7 +660,7 @@
mirror::ArtMethod* next_method_;
uint32_t next_dex_pc_;
};
- HasMoreFramesVisitor visitor(thread_, GetNumFrames(), GetFrameHeight());
+ HasMoreFramesVisitor visitor(thread_, walk_kind_, GetNumFrames(), GetFrameHeight());
visitor.WalkStack(true);
*next_method = visitor.next_method_;
*next_dex_pc = visitor.next_dex_pc_;
@@ -652,7 +670,7 @@
void StackVisitor::DescribeStack(Thread* thread) {
struct DescribeStackVisitor : public StackVisitor {
explicit DescribeStackVisitor(Thread* thread_in)
- : StackVisitor(thread_in, NULL) {}
+ : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
LOG(INFO) << "Frame Id=" << GetFrameId() << " " << DescribeLocation();
@@ -666,7 +684,7 @@
std::string StackVisitor::DescribeLocation() const {
std::string result("Visiting method '");
mirror::ArtMethod* m = GetMethod();
- if (m == NULL) {
+ if (m == nullptr) {
return "upcall";
}
result += PrettyMethod(m);
@@ -713,24 +731,44 @@
bool exit_stubs_installed = Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled();
uint32_t instrumentation_stack_depth = 0;
- for (const ManagedStack* current_fragment = thread_->GetManagedStack(); current_fragment != NULL;
- current_fragment = current_fragment->GetLink()) {
+ for (const ManagedStack* current_fragment = thread_->GetManagedStack();
+ current_fragment != nullptr; current_fragment = current_fragment->GetLink()) {
cur_shadow_frame_ = current_fragment->GetTopShadowFrame();
cur_quick_frame_ = current_fragment->GetTopQuickFrame();
cur_quick_frame_pc_ = 0;
- if (cur_quick_frame_ != NULL) { // Handle quick stack frames.
+ if (cur_quick_frame_ != nullptr) { // Handle quick stack frames.
// Can't be both a shadow and a quick fragment.
- DCHECK(current_fragment->GetTopShadowFrame() == NULL);
+ DCHECK(current_fragment->GetTopShadowFrame() == nullptr);
mirror::ArtMethod* method = cur_quick_frame_->AsMirrorPtr();
- while (method != NULL) {
+ while (method != nullptr) {
SanityCheckFrame();
+
+ if ((walk_kind_ == StackWalkKind::kIncludeInlinedFrames)
+ && method->IsOptimized(sizeof(void*))) {
+ CodeInfo code_info = method->GetOptimizedCodeInfo();
+ uint32_t native_pc_offset = method->NativeQuickPcOffset(cur_quick_frame_pc_);
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
+ if (stack_map.HasInlineInfo(code_info)) {
+ InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
+ DCHECK_EQ(current_inlining_depth_, 0u);
+ for (current_inlining_depth_ = inline_info.GetDepth();
+ current_inlining_depth_ != 0;
+ --current_inlining_depth_) {
+ bool should_continue = VisitFrame();
+ if (UNLIKELY(!should_continue)) {
+ return;
+ }
+ }
+ }
+ }
+
bool should_continue = VisitFrame();
if (UNLIKELY(!should_continue)) {
return;
}
- if (context_ != NULL) {
+ if (context_ != nullptr) {
context_->FillCalleeSaves(*this);
}
size_t frame_size = method->GetFrameSizeInBytes();
@@ -748,7 +786,8 @@
if (GetMethod() == Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveAll)) {
// Skip runtime save all callee frames which are used to deliver exceptions.
} else if (instrumentation_frame.interpreter_entry_) {
- mirror::ArtMethod* callee = Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
+ mirror::ArtMethod* callee =
+ Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
CHECK_EQ(GetMethod(), callee) << "Expected: " << PrettyMethod(callee) << " Found: "
<< PrettyMethod(GetMethod());
} else if (instrumentation_frame.method_ != GetMethod()) {
@@ -771,7 +810,7 @@
cur_depth_++;
method = cur_quick_frame_->AsMirrorPtr();
}
- } else if (cur_shadow_frame_ != NULL) {
+ } else if (cur_shadow_frame_ != nullptr) {
do {
SanityCheckFrame();
bool should_continue = VisitFrame();
@@ -780,7 +819,7 @@
}
cur_depth_++;
cur_shadow_frame_ = cur_shadow_frame_->GetLink();
- } while (cur_shadow_frame_ != NULL);
+ } while (cur_shadow_frame_ != nullptr);
}
if (include_transitions) {
bool should_continue = VisitFrame();
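With the constructor change, every StackVisitor subclass now names a StackWalkKind up front; DescribeStackVisitor above opts in to inlined frames. A minimal subclass in the same style, as a sketch:

    // Sketch: count frames, visiting each inlined frame individually.
    struct CountingVisitor : public StackVisitor {
      explicit CountingVisitor(Thread* thread_in)
          : StackVisitor(thread_in, nullptr, StackWalkKind::kIncludeInlinedFrames),
            frames(0) {}

      bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
        frames++;
        return true;  // keep walking
      }

      size_t frames;
    };

As with NumFramesVisitor above, one would run it with visitor.WalkStack(true) and then read visitor.frames.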
diff --git a/runtime/stack.h b/runtime/stack.h
index fbb0aa4..5b43848 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -36,9 +36,10 @@
} // namespace mirror
class Context;
-class ShadowFrame;
class HandleScope;
+class InlineInfo;
class ScopedObjectAccess;
+class ShadowFrame;
class StackVisitor;
class Thread;
@@ -74,12 +75,18 @@
}
// Create ShadowFrame in heap for deoptimization.
- static ShadowFrame* Create(uint32_t num_vregs, ShadowFrame* link,
- mirror::ArtMethod* method, uint32_t dex_pc) {
+ static ShadowFrame* CreateDeoptimizedFrame(uint32_t num_vregs, ShadowFrame* link,
+ mirror::ArtMethod* method, uint32_t dex_pc) {
uint8_t* memory = new uint8_t[ComputeSize(num_vregs)];
return Create(num_vregs, link, method, dex_pc, memory);
}
+ // Delete a ShadowFrame allocated on the heap for deoptimization.
+ static void DeleteDeoptimizedFrame(ShadowFrame* sf) {
+ uint8_t* memory = reinterpret_cast<uint8_t*>(sf);
+ delete[] memory;
+ }
+
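
Renaming Create to CreateDeoptimizedFrame and adding DeleteDeoptimizedFrame makes ownership of the uint8_t[] backing store explicit at the call sites. A hedged sketch of the intended pairing (num_vregs, method and dex_pc stand in for values a real deoptimization path would supply):

    ShadowFrame* frame = ShadowFrame::CreateDeoptimizedFrame(
        num_vregs, /* link */ nullptr, method, dex_pc);
    // ... interpret the deoptimized frame ...
    ShadowFrame::DeleteDeoptimizedFrame(frame);  // Frees the uint8_t[] storage.
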
// Create ShadowFrame for interpreter using provided memory.
static ShadowFrame* Create(uint32_t num_vregs, ShadowFrame* link,
mirror::ArtMethod* method, uint32_t dex_pc, void* memory) {
@@ -295,11 +302,12 @@
}
StackReference<mirror::Object>* References() {
- return const_cast<StackReference<mirror::Object>*>(const_cast<const ShadowFrame*>(this)->References());
+ return const_cast<StackReference<mirror::Object>*>(
+ const_cast<const ShadowFrame*>(this)->References());
}
const uint32_t number_of_vregs_;
- // Link to previous shadow frame or NULL.
+ // Link to previous shadow frame or null.
ShadowFrame* link_;
mirror::ArtMethod* method_;
uint32_t dex_pc_;
@@ -402,8 +410,17 @@
};
class StackVisitor {
+ public:
+  // This enum controls whether inlined frames are included when walking the
+  // stack.
+ enum class StackWalkKind {
+ kIncludeInlinedFrames,
+ kSkipInlinedFrames,
+ };
+
protected:
- StackVisitor(Thread* thread, Context* context) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
public:
virtual ~StackVisitor() {}
@@ -414,15 +431,7 @@
void WalkStack(bool include_transitions = false)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (cur_shadow_frame_ != nullptr) {
- return cur_shadow_frame_->GetMethod();
- } else if (cur_quick_frame_ != nullptr) {
- return cur_quick_frame_->AsMirrorPtr();
- } else {
- return nullptr;
- }
- }
+ mirror::ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsShadowFrame() const {
return cur_shadow_frame_ != nullptr;
@@ -458,7 +467,7 @@
size_t GetNumFrames() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (num_frames_ == 0) {
- num_frames_ = ComputeNumFrames(thread_);
+ num_frames_ = ComputeNumFrames(thread_, walk_kind_);
}
return num_frames_;
}
@@ -471,6 +480,9 @@
bool GetNextMethodAndDexPc(mirror::ArtMethod** next_method, uint32_t* next_dex_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsReferenceVReg(mirror::ArtMethod* m, uint16_t vreg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
bool GetVReg(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -571,7 +583,8 @@
* Special temporaries may have custom locations and the logic above deals with that.
* However, non-special temporaries are placed relative to the outs.
*/
- int temps_start = sizeof(StackReference<mirror::ArtMethod>) + code_item->outs_size_ * sizeof(uint32_t);
+ int temps_start = sizeof(StackReference<mirror::ArtMethod>) +
+ code_item->outs_size_ * sizeof(uint32_t);
int relative_offset = (reg - (temp_threshold + max_num_special_temps)) * sizeof(uint32_t);
return temps_start + relative_offset;
} else if (reg < num_regs) {
@@ -590,6 +603,10 @@
return sizeof(StackReference<mirror::ArtMethod>) + (out_num * sizeof(uint32_t));
}
+ bool IsInInlinedFrame() const {
+ return current_inlining_depth_ != 0;
+ }
+
uintptr_t GetCurrentQuickFramePc() const {
return cur_quick_frame_pc_;
}
@@ -610,13 +627,14 @@
std::string DescribeLocation() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static size_t ComputeNumFrames(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static size_t ComputeNumFrames(Thread* thread, StackWalkKind walk_kind)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void DescribeStack(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
// Private constructor known in the case that num_frames_ has already been computed.
- StackVisitor(Thread* thread, Context* context, size_t num_frames)
+ StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind, size_t num_frames)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsAccessibleRegister(uint32_t reg, bool is_float) const {
@@ -666,25 +684,22 @@
bool SetVRegFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_value,
VRegKind kind)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool SetVRegFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_value,
- VRegKind kind)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool SetRegisterIfAccessible(uint32_t reg, uint32_t new_value, VRegKind kind)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool SetVRegPairFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value,
VRegKind kind_lo, VRegKind kind_hi)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool SetVRegPairFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value,
- VRegKind kind_lo, VRegKind kind_hi)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool SetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi, uint64_t new_value,
bool is_float)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SanityCheckFrame() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ InlineInfo GetCurrentInlineInfo() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
Thread* const thread_;
+ const StackWalkKind walk_kind_;
ShadowFrame* cur_shadow_frame_;
StackReference<mirror::ArtMethod>* cur_quick_frame_;
uintptr_t cur_quick_frame_pc_;
@@ -692,6 +707,9 @@
size_t num_frames_;
// Depth of the frame we're currently at.
size_t cur_depth_;
+  // Inlining depth of the frame we are currently at.
+  // 0 if there is no inlined frame.
+ size_t current_inlining_depth_;
protected:
Context* const context_;
diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc
index 11e7e44..6a0c07d 100644
--- a/runtime/stack_map.cc
+++ b/runtime/stack_map.cc
@@ -257,21 +257,48 @@
DumpStackMapHeader(os, i);
if (stack_map.HasDexRegisterMap(*this)) {
DexRegisterMap dex_register_map = GetDexRegisterMapOf(stack_map, number_of_dex_registers);
- // TODO: Display the bit mask of live Dex registers.
- for (size_t j = 0; j < number_of_dex_registers; ++j) {
- if (dex_register_map.IsDexRegisterLive(j)) {
- size_t location_catalog_entry_index = dex_register_map.GetLocationCatalogEntryIndex(
- j, number_of_dex_registers, number_of_location_catalog_entries);
- DexRegisterLocation location =
- dex_register_map.GetDexRegisterLocation(j, number_of_dex_registers, *this);
- DumpRegisterMapping(
- os, j, location, "v",
- "\t[entry " + std::to_string(static_cast<int>(location_catalog_entry_index)) + "]");
- }
- }
+ dex_register_map.Dump(os, *this, number_of_dex_registers);
}
}
- // TODO: Dump the stack map's inline information.
+  // TODO: Dump the stack map's inline information? That requires more from the
+  // caller: the number of dex registers for each inlined method.
+}
+
+void DexRegisterMap::Dump(std::ostream& os,
+ const CodeInfo& code_info,
+ uint16_t number_of_dex_registers) const {
+ size_t number_of_location_catalog_entries =
+ code_info.GetNumberOfDexRegisterLocationCatalogEntries();
+ // TODO: Display the bit mask of live Dex registers.
+ for (size_t j = 0; j < number_of_dex_registers; ++j) {
+ if (IsDexRegisterLive(j)) {
+ size_t location_catalog_entry_index = GetLocationCatalogEntryIndex(
+ j, number_of_dex_registers, number_of_location_catalog_entries);
+ DexRegisterLocation location = GetDexRegisterLocation(j, number_of_dex_registers, code_info);
+ DumpRegisterMapping(
+ os, j, location, "v",
+ "\t[entry " + std::to_string(static_cast<int>(location_catalog_entry_index)) + "]");
+ }
+ }
+}
+
+void InlineInfo::Dump(std::ostream& os,
+ const CodeInfo& code_info,
+ uint16_t number_of_dex_registers[]) const {
+ os << "InlineInfo with depth " << static_cast<uint32_t>(GetDepth()) << "\n";
+
+ for (size_t i = 0; i < GetDepth(); ++i) {
+ os << " At depth " << i
+ << std::hex
+ << " (dex_pc=0x" << GetDexPcAtDepth(i)
+ << ", method_index=0x" << GetMethodIndexAtDepth(i)
+ << ")\n";
+ if (HasDexRegisterMapAtDepth(i)) {
+ DexRegisterMap dex_register_map =
+ code_info.GetDexRegisterMapAtDepth(i, *this, number_of_dex_registers[i]);
+ dex_register_map.Dump(os, code_info, number_of_dex_registers[i]);
+ }
+ }
}
} // namespace art
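
With the per-register loop moved into DexRegisterMap::Dump, InlineInfo::Dump can reuse it at every inlined depth. A sketch of a call site (the register counts are placeholders; in practice they come from each inlined method's code item, and any std::ostream works in place of std::cerr):

    uint16_t number_of_dex_registers[] = { 3, 5 };  // One entry per depth.
    InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
    inline_info.Dump(std::cerr, code_info, number_of_dex_registers);
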
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index f68cafe..f07fb74 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -39,47 +39,6 @@
* their own fields.
*/
-/**
- * Inline information for a specific PC. The information is of the form:
- * [inlining_depth, [method_dex reference]+]
- */
-class InlineInfo {
- public:
- explicit InlineInfo(MemoryRegion region) : region_(region) {}
-
- uint8_t GetDepth() const {
- return region_.LoadUnaligned<uint8_t>(kDepthOffset);
- }
-
- void SetDepth(uint8_t depth) {
- region_.StoreUnaligned<uint8_t>(kDepthOffset, depth);
- }
-
- uint32_t GetMethodReferenceIndexAtDepth(uint8_t depth) const {
- return region_.LoadUnaligned<uint32_t>(kFixedSize + depth * SingleEntrySize());
- }
-
- void SetMethodReferenceIndexAtDepth(uint8_t depth, uint32_t index) {
- region_.StoreUnaligned<uint32_t>(kFixedSize + depth * SingleEntrySize(), index);
- }
-
- static size_t SingleEntrySize() {
- return sizeof(uint32_t);
- }
-
- private:
- // TODO: Instead of plain types such as "uint8_t", introduce
- // typedefs (and document the memory layout of InlineInfo).
- static constexpr int kDepthOffset = 0;
- static constexpr int kFixedSize = kDepthOffset + sizeof(uint8_t);
-
- MemoryRegion region_;
-
- friend class CodeInfo;
- friend class StackMap;
- friend class StackMapStream;
-};
-
// Dex register location container used by DexRegisterMap and StackMapStream.
class DexRegisterLocation {
public:
@@ -506,7 +465,8 @@
const CodeInfo& code_info) const {
DexRegisterLocation location =
GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info);
- DCHECK(location.GetKind() == DexRegisterLocation::Kind::kConstant);
+ DCHECK(location.GetKind() == DexRegisterLocation::Kind::kConstant)
+ << DexRegisterLocation::PrettyDescriptor(location.GetKind());
return location.GetValue();
}
@@ -641,6 +601,8 @@
return region_.size();
}
+ void Dump(std::ostream& o, const CodeInfo& code_info, uint16_t number_of_dex_registers) const;
+
private:
// Return the index in the Dex register map corresponding to the Dex
// register number `dex_register_number`.
@@ -675,9 +637,6 @@
* The information is of the form:
* [dex_pc, native_pc_offset, dex_register_map_offset, inlining_info_offset, register_mask,
* stack_mask].
- *
- * Note that register_mask is fixed size, but stack_mask is variable size, depending on the
- * stack size of a method.
*/
class StackMap {
public:
@@ -759,6 +718,90 @@
friend class StackMapStream;
};
+/**
+ * Inline information for a specific PC. The information is of the form:
+ * [inlining_depth, [dex_pc, method_index, dex_register_map_offset]+]
+ */
+class InlineInfo {
+ public:
+ explicit InlineInfo(MemoryRegion region) : region_(region) {}
+
+ uint8_t GetDepth() const {
+ return region_.LoadUnaligned<uint8_t>(kDepthOffset);
+ }
+
+ void SetDepth(uint8_t depth) {
+ region_.StoreUnaligned<uint8_t>(kDepthOffset, depth);
+ }
+
+ uint32_t GetMethodIndexAtDepth(uint8_t depth) const {
+ return region_.LoadUnaligned<uint32_t>(
+ kFixedSize + depth * SingleEntrySize() + kMethodIndexOffset);
+ }
+
+ void SetMethodIndexAtDepth(uint8_t depth, uint32_t index) {
+ region_.StoreUnaligned<uint32_t>(
+ kFixedSize + depth * SingleEntrySize() + kMethodIndexOffset, index);
+ }
+
+ uint32_t GetDexPcAtDepth(uint8_t depth) const {
+ return region_.LoadUnaligned<uint32_t>(
+ kFixedSize + depth * SingleEntrySize() + kDexPcOffset);
+ }
+
+ void SetDexPcAtDepth(uint8_t depth, uint32_t dex_pc) {
+ region_.StoreUnaligned<uint32_t>(
+ kFixedSize + depth * SingleEntrySize() + kDexPcOffset, dex_pc);
+ }
+
+ uint8_t GetInvokeTypeAtDepth(uint8_t depth) const {
+ return region_.LoadUnaligned<uint8_t>(
+ kFixedSize + depth * SingleEntrySize() + kInvokeTypeOffset);
+ }
+
+ void SetInvokeTypeAtDepth(uint8_t depth, uint8_t invoke_type) {
+ region_.StoreUnaligned<uint8_t>(
+ kFixedSize + depth * SingleEntrySize() + kInvokeTypeOffset, invoke_type);
+ }
+
+ uint32_t GetDexRegisterMapOffsetAtDepth(uint8_t depth) const {
+ return region_.LoadUnaligned<uint32_t>(
+ kFixedSize + depth * SingleEntrySize() + kDexRegisterMapOffset);
+ }
+
+ void SetDexRegisterMapOffsetAtDepth(uint8_t depth, uint32_t offset) {
+ region_.StoreUnaligned<uint32_t>(
+ kFixedSize + depth * SingleEntrySize() + kDexRegisterMapOffset, offset);
+ }
+
+ bool HasDexRegisterMapAtDepth(uint8_t depth) const {
+ return GetDexRegisterMapOffsetAtDepth(depth) != StackMap::kNoDexRegisterMap;
+ }
+
+ static size_t SingleEntrySize() {
+ return kFixedEntrySize;
+ }
+
+ void Dump(std::ostream& os, const CodeInfo& info, uint16_t* number_of_dex_registers) const;
+
+ private:
+ // TODO: Instead of plain types such as "uint8_t", introduce
+ // typedefs (and document the memory layout of InlineInfo).
+ static constexpr int kDepthOffset = 0;
+ static constexpr int kFixedSize = kDepthOffset + sizeof(uint8_t);
+
+ static constexpr int kMethodIndexOffset = 0;
+ static constexpr int kDexPcOffset = kMethodIndexOffset + sizeof(uint32_t);
+ static constexpr int kInvokeTypeOffset = kDexPcOffset + sizeof(uint32_t);
+ static constexpr int kDexRegisterMapOffset = kInvokeTypeOffset + sizeof(uint8_t);
+ static constexpr int kFixedEntrySize = kDexRegisterMapOffset + sizeof(uint32_t);
+
+ MemoryRegion region_;
+
+ friend class CodeInfo;
+ friend class StackMap;
+ friend class StackMapStream;
+};
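
The offsets above imply the following per-entry layout; the arithmetic is spelled out here since the TODO notes the memory layout is otherwise undocumented:

    // kMethodIndexOffset    = 0            (uint32_t, 4 bytes)
    // kDexPcOffset          = 0 + 4 = 4    (uint32_t, 4 bytes)
    // kInvokeTypeOffset     = 4 + 4 = 8    (uint8_t,  1 byte)
    // kDexRegisterMapOffset = 8 + 1 = 9    (uint32_t, 4 bytes)
    // kFixedEntrySize       = 9 + 4 = 13 bytes per inlined frame,
    // all preceded by the one-byte depth header (kFixedSize = 1).
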
/**
* Wrapper around all compiler information collected for a method.
@@ -960,6 +1003,17 @@
return DexRegisterMap(region_.Subregion(offset, size));
}
+  // Return the `DexRegisterMap` pointed to by `inline_info` at depth `depth`.
+ DexRegisterMap GetDexRegisterMapAtDepth(uint8_t depth,
+ InlineInfo inline_info,
+ uint32_t number_of_dex_registers) const {
+ DCHECK(inline_info.HasDexRegisterMapAtDepth(depth));
+ uint32_t offset =
+ GetDexRegisterMapsOffset() + inline_info.GetDexRegisterMapOffsetAtDepth(depth);
+ size_t size = ComputeDexRegisterMapSizeOf(offset, number_of_dex_registers);
+ return DexRegisterMap(region_.Subregion(offset, size));
+ }
+
InlineInfo GetInlineInfoOf(StackMap stack_map) const {
DCHECK(stack_map.HasInlineInfo(*this));
uint32_t offset = stack_map.GetInlineDescriptorOffset(*this) + GetDexRegisterMapsOffset();
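
GetDexRegisterMapAtDepth complements GetInlineInfoOf; a hedged sketch of resolving a Dex register of an inlined frame (depth, vreg and num_dex_registers are placeholders, error handling elided):

    InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
    if (inline_info.HasDexRegisterMapAtDepth(depth)) {
      DexRegisterMap map =
          code_info.GetDexRegisterMapAtDepth(depth, inline_info, num_dex_registers);
      DexRegisterLocation location =
          map.GetDexRegisterLocation(vreg, num_dex_registers, code_info);
    }
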
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 16add79..f7ef894 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -35,10 +35,10 @@
}
inline Thread* Thread::Current() {
- // We rely on Thread::Current returning NULL for a detached thread, so it's not obvious
+ // We rely on Thread::Current returning null for a detached thread, so it's not obvious
// that we can replace this with a direct %fs access on x86.
if (!is_started_) {
- return NULL;
+ return nullptr;
} else {
void* thread = pthread_getspecific(Thread::pthread_key_self_);
return reinterpret_cast<Thread*>(thread);
@@ -92,7 +92,7 @@
// We expect no locks except the mutator_lock_ or thread list suspend thread lock.
if (i != kMutatorLock) {
BaseMutex* held_mutex = GetHeldMutex(static_cast<LockLevel>(i));
- if (held_mutex != NULL) {
+ if (held_mutex != nullptr) {
LOG(ERROR) << "holding \"" << held_mutex->GetName()
<< "\" at point where thread suspension is expected";
bad_mutexes_held = true;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 5ca51fb..2145c9c 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -105,6 +105,43 @@
&tlsPtr_.quick_entrypoints);
}
+void Thread::InitStringEntryPoints() {
+ ScopedObjectAccess soa(this);
+ QuickEntryPoints* qpoints = &tlsPtr_.quick_entrypoints;
+ qpoints->pNewEmptyString = reinterpret_cast<void(*)()>(
+ soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newEmptyString));
+ qpoints->pNewStringFromBytes_B = reinterpret_cast<void(*)()>(
+ soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_B));
+ qpoints->pNewStringFromBytes_BI = reinterpret_cast<void(*)()>(
+ soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BI));
+ qpoints->pNewStringFromBytes_BII = reinterpret_cast<void(*)()>(
+ soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BII));
+ qpoints->pNewStringFromBytes_BIII = reinterpret_cast<void(*)()>(
+ soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BIII));
+ qpoints->pNewStringFromBytes_BIIString = reinterpret_cast<void(*)()>(
+ soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BIIString));
+ qpoints->pNewStringFromBytes_BString = reinterpret_cast<void(*)()>(
+ soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BString));
+ qpoints->pNewStringFromBytes_BIICharset = reinterpret_cast<void(*)()>(
+ soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BIICharset));
+ qpoints->pNewStringFromBytes_BCharset = reinterpret_cast<void(*)()>(
+ soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BCharset));
+ qpoints->pNewStringFromChars_C = reinterpret_cast<void(*)()>(
+ soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromChars_C));
+ qpoints->pNewStringFromChars_CII = reinterpret_cast<void(*)()>(
+ soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromChars_CII));
+ qpoints->pNewStringFromChars_IIC = reinterpret_cast<void(*)()>(
+ soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromChars_IIC));
+ qpoints->pNewStringFromCodePoints = reinterpret_cast<void(*)()>(
+ soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromCodePoints));
+ qpoints->pNewStringFromString = reinterpret_cast<void(*)()>(
+ soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromString));
+ qpoints->pNewStringFromStringBuffer = reinterpret_cast<void(*)()>(
+ soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromStringBuffer));
+ qpoints->pNewStringFromStringBuilder = reinterpret_cast<void(*)()>(
+ soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromStringBuilder));
+}
+
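
The sixteen assignments above are structurally identical; a hypothetical local macro (not in this patch) shows the shape they all share:

    #define SET_STRING_ENTRYPOINT(field, method)        \
        qpoints->field = reinterpret_cast<void(*)()>(   \
            soa.DecodeMethod(WellKnownClasses::method))
    SET_STRING_ENTRYPOINT(pNewEmptyString,
                          java_lang_StringFactory_newEmptyString);
    // ... likewise for the remaining StringFactory entry points ...
    #undef SET_STRING_ENTRYPOINT
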
void Thread::ResetQuickAllocEntryPointsForThread() {
ResetQuickAllocEntryPoints(&tlsPtr_.quick_entrypoints);
}
@@ -163,6 +200,7 @@
}
{
ScopedObjectAccess soa(self);
+ self->InitStringEntryPoints();
// Copy peer into self, deleting global reference when done.
CHECK(self->tlsPtr_.jpeer != nullptr);
@@ -178,7 +216,8 @@
// Invoke the 'run' method of our java.lang.Thread.
mirror::Object* receiver = self->tlsPtr_.opeer;
jmethodID mid = WellKnownClasses::java_lang_Thread_run;
- InvokeVirtualOrInterfaceWithJValues(soa, receiver, mid, nullptr);
+ ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
+ InvokeVirtualOrInterfaceWithJValues(soa, ref.get(), mid, nullptr);
}
// Detach and delete self.
Runtime::Current()->GetThreadList()->Unregister(self);
@@ -409,6 +448,8 @@
}
}
+ self->InitStringEntryPoints();
+
CHECK_NE(self->GetState(), kRunnable);
self->SetState(kNative);
@@ -572,13 +613,13 @@
if (GetThreadId() != 0) {
// If we're in kStarting, we won't have a thin lock id or tid yet.
os << GetThreadId()
- << ",tid=" << GetTid() << ',';
+ << ",tid=" << GetTid() << ',';
}
os << GetState()
- << ",Thread*=" << this
- << ",peer=" << tlsPtr_.opeer
- << ",\"" << *tlsPtr_.name << "\""
- << "]";
+ << ",Thread*=" << this
+ << ",peer=" << tlsPtr_.opeer
+ << ",\"" << (tlsPtr_.name != nullptr ? *tlsPtr_.name : "null") << "\""
+ << "]";
}
void Thread::Dump(std::ostream& os) const {
@@ -588,7 +629,8 @@
mirror::String* Thread::GetThreadName(const ScopedObjectAccessAlreadyRunnable& soa) const {
ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
- return (tlsPtr_.opeer != nullptr) ? reinterpret_cast<mirror::String*>(f->GetObject(tlsPtr_.opeer)) : nullptr;
+ return (tlsPtr_.opeer != nullptr) ?
+ reinterpret_cast<mirror::String*>(f->GetObject(tlsPtr_.opeer)) : nullptr;
}
void Thread::GetThreadName(std::string& name) const {
@@ -713,9 +755,8 @@
union StateAndFlags new_state_and_flags;
new_state_and_flags.as_int = old_state_and_flags.as_int;
new_state_and_flags.as_struct.flags |= kCheckpointRequest;
- bool success =
- tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(old_state_and_flags.as_int,
- new_state_and_flags.as_int);
+  bool success = tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(
+ old_state_and_flags.as_int, new_state_and_flags.as_int);
if (UNLIKELY(!success)) {
// The thread changed state before the checkpoint was installed.
CHECK_EQ(tlsPtr_.checkpoint_functions[available_checkpoint], function);
@@ -900,10 +941,14 @@
struct StackDumpVisitor : public StackVisitor {
StackDumpVisitor(std::ostream& os_in, Thread* thread_in, Context* context, bool can_allocate_in)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread_in, context), os(os_in), thread(thread_in),
- can_allocate(can_allocate_in), last_method(nullptr), last_line_number(0),
- repetition_count(0), frame_count(0) {
- }
+ : StackVisitor(thread_in, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ os(os_in),
+ thread(thread_in),
+ can_allocate(can_allocate_in),
+ last_method(nullptr),
+ last_line_number(0),
+ repetition_count(0),
+ frame_count(0) {}
virtual ~StackDumpVisitor() {
if (frame_count == 0) {
@@ -1005,8 +1050,8 @@
// Threads with no managed stack frames should be shown.
const ManagedStack* managed_stack = thread->GetManagedStack();
- if (managed_stack == NULL || (managed_stack->GetTopQuickFrame() == NULL &&
- managed_stack->GetTopShadowFrame() == NULL)) {
+ if (managed_stack == nullptr || (managed_stack->GetTopQuickFrame() == nullptr &&
+ managed_stack->GetTopShadowFrame() == nullptr)) {
return true;
}
@@ -1097,7 +1142,7 @@
{
// MutexLock to keep annotalysis happy.
//
- // Note we use nullptr for the thread because Thread::Current can
+ // Note we use null for the thread because Thread::Current can
// return garbage since (is_started_ == true) and
// Thread::pthread_key_self_ is not yet initialized.
// This was seen on glibc.
@@ -1162,7 +1207,7 @@
bool Thread::IsStillStarting() const {
// You might think you can check whether the state is kStarting, but for much of thread startup,
// the thread is in kNative; it might also be in kVmWait.
- // You might think you can check whether the peer is nullptr, but the peer is actually created and
+ // You might think you can check whether the peer is null, but the peer is actually created and
// assigned fairly early on, and needs to be.
// It turns out that the last thing to change is the thread name; that's a good proxy for "has
// this thread _ever_ entered kRunnable".
@@ -1171,9 +1216,14 @@
}
void Thread::AssertPendingException() const {
- if (UNLIKELY(!IsExceptionPending())) {
- LOG(FATAL) << "Pending exception expected.";
- }
+ CHECK(IsExceptionPending()) << "Pending exception expected.";
+}
+
+void Thread::AssertPendingOOMException() const {
+ AssertPendingException();
+ auto* e = GetException();
+ CHECK_EQ(e->GetClass(), DecodeJObject(WellKnownClasses::java_lang_OutOfMemoryError)->AsClass())
+ << e->Dump();
}
void Thread::AssertNoPendingException() const {
@@ -1424,7 +1474,7 @@
DCHECK_EQ(kind, kWeakGlobal);
result = tlsPtr_.jni_env->vm->DecodeWeakGlobal(const_cast<Thread*>(this), ref);
if (Runtime::Current()->IsClearedJniWeakGlobal(result)) {
- // This is a special case where it's okay to return nullptr.
+ // This is a special case where it's okay to return null.
expect_null = true;
result = nullptr;
}
@@ -1483,7 +1533,7 @@
public:
explicit CountStackDepthVisitor(Thread* thread)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, nullptr),
+ : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
depth_(0), skip_depth_(0), skipping_(true) {}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -1523,8 +1573,12 @@
class BuildInternalStackTraceVisitor : public StackVisitor {
public:
explicit BuildInternalStackTraceVisitor(Thread* self, Thread* thread, int skip_depth)
- : StackVisitor(thread, nullptr), self_(self),
- skip_depth_(skip_depth), count_(0), dex_pc_trace_(nullptr), method_trace_(nullptr) {}
+ : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ self_(self),
+ skip_depth_(skip_depth),
+ count_(0),
+ dex_pc_trace_(nullptr),
+ method_trace_(nullptr) {}
bool Init(int depth)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -1833,7 +1887,8 @@
jv_args[i].l = cause.get();
++i;
}
- InvokeWithJValues(soa, exception.Get(), soa.EncodeMethod(exception_init_method), jv_args);
+ ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(exception.Get()));
+ InvokeWithJValues(soa, ref.get(), soa.EncodeMethod(exception_init_method), jv_args);
if (LIKELY(!IsExceptionPending())) {
SetException(exception.Get());
}
@@ -1925,6 +1980,9 @@
QUICK_ENTRY_POINT_INFO(pAllocObjectWithAccessCheck)
QUICK_ENTRY_POINT_INFO(pCheckAndAllocArray)
QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayWithAccessCheck)
+ QUICK_ENTRY_POINT_INFO(pAllocStringFromBytes)
+ QUICK_ENTRY_POINT_INFO(pAllocStringFromChars)
+ QUICK_ENTRY_POINT_INFO(pAllocStringFromString)
QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivial)
QUICK_ENTRY_POINT_INFO(pCheckCast)
QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage)
@@ -2008,6 +2066,23 @@
QUICK_ENTRY_POINT_INFO(pDeoptimize)
QUICK_ENTRY_POINT_INFO(pA64Load)
QUICK_ENTRY_POINT_INFO(pA64Store)
+ QUICK_ENTRY_POINT_INFO(pNewEmptyString)
+ QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_B)
+ QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BI)
+ QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BII)
+ QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIII)
+ QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIIString)
+ QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BString)
+ QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIICharset)
+ QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BCharset)
+ QUICK_ENTRY_POINT_INFO(pNewStringFromChars_C)
+ QUICK_ENTRY_POINT_INFO(pNewStringFromChars_CII)
+ QUICK_ENTRY_POINT_INFO(pNewStringFromChars_IIC)
+ QUICK_ENTRY_POINT_INFO(pNewStringFromCodePoints)
+ QUICK_ENTRY_POINT_INFO(pNewStringFromString)
+ QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuffer)
+ QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuilder)
+ QUICK_ENTRY_POINT_INFO(pReadBarrierJni)
#undef QUICK_ENTRY_POINT_INFO
os << offset;
@@ -2047,7 +2122,10 @@
struct CurrentMethodVisitor FINAL : public StackVisitor {
CurrentMethodVisitor(Thread* thread, Context* context, bool abort_on_error)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, context), this_object_(nullptr), method_(nullptr), dex_pc_(0),
+ : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ this_object_(nullptr),
+ method_(nullptr),
+ dex_pc_(0),
abort_on_error_(abort_on_error) {}
bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* m = GetMethod();
@@ -2090,7 +2168,10 @@
public:
ReferenceMapVisitor(Thread* thread, Context* context, RootVisitor& visitor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, context), visitor_(visitor) {}
+      // We are visiting the references in compiled frames, so we do not need
+      // to know about the inlined frames.
+ : StackVisitor(thread, context, StackVisitor::StackWalkKind::kSkipInlinedFrames),
+ visitor_(visitor) {}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (false) {
@@ -2197,7 +2278,7 @@
const uint8_t* native_gc_map = m->GetNativeGcMap(sizeof(void*));
CHECK(native_gc_map != nullptr) << PrettyMethod(m);
const DexFile::CodeItem* code_item = m->GetCodeItem();
- // Can't be nullptr or how would we compile its instructions?
+ // Can't be null or how would we compile its instructions?
DCHECK(code_item != nullptr) << PrettyMethod(m);
NativePcOffsetToReferenceMap map(native_gc_map);
size_t num_regs = std::min(map.RegWidth() * 8,
@@ -2247,10 +2328,6 @@
}
}
- static bool TestBitmap(size_t reg, const uint8_t* reg_vector) {
- return ((reg_vector[reg / kBitsPerByte] >> (reg % kBitsPerByte)) & 0x01) != 0;
- }
-
// Visitor for when we visit a root.
RootVisitor& visitor_;
};
diff --git a/runtime/thread.h b/runtime/thread.h
index b095e22..9346813 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -25,6 +25,7 @@
#include <setjmp.h>
#include <string>
+#include "arch/context.h"
#include "arch/instruction_set.h"
#include "atomic.h"
#include "base/macros.h"
@@ -185,7 +186,7 @@
LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Dumps the SIGQUIT per-thread header. 'thread' can be NULL for a non-attached thread, in which
+ // Dumps the SIGQUIT per-thread header. 'thread' can be null for a non-attached thread, in which
// case we use 'tid' to identify the thread, and we'll include as much information as we can.
static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
@@ -245,7 +246,7 @@
// Once called thread suspension will cause an assertion failure.
const char* StartAssertNoThreadSuspension(const char* cause) {
if (kIsDebugBuild) {
- CHECK(cause != NULL);
+ CHECK(cause != nullptr);
const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
tls32_.no_thread_suspension++;
tlsPtr_.last_no_thread_suspension_cause = cause;
@@ -297,7 +298,7 @@
return tls32_.tid;
}
- // Returns the java.lang.Thread's name, or NULL if this Thread* doesn't have a peer.
+ // Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer.
mirror::String* GetThreadName(const ScopedObjectAccessAlreadyRunnable& ts) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -335,12 +336,13 @@
}
void AssertPendingException() const;
+ void AssertPendingOOMException() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void AssertNoPendingException() const;
void AssertNoPendingExceptionForNewException(const char* msg) const;
void SetException(mirror::Throwable* new_exception)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CHECK(new_exception != NULL);
+ CHECK(new_exception != nullptr);
// TODO: DCHECK(!IsExceptionPending());
tlsPtr_.exception = new_exception;
}
@@ -354,7 +356,18 @@
Context* GetLongJumpContext();
void ReleaseLongJumpContext(Context* context) {
- DCHECK(tlsPtr_.long_jump_context == nullptr);
+ if (tlsPtr_.long_jump_context != nullptr) {
+      // Each QuickExceptionHandler gets a long jump context and uses it for
+      // the long jump after finding catch blocks/doing deoptimization.
+      // Both finding catch blocks and deoptimization can trigger another
+      // exception, e.g. as a result of class loading. So exception handling
+      // can nest, with multiple contexts in use at once.
+      // ReleaseLongJumpContext saves the context in tlsPtr_.long_jump_context
+      // for reuse, so a new one need not be allocated on every request. Since
+      // we keep only one context for reuse, delete the existing one: the
+      // passed-in context has yet to be used for a long jump.
+ delete tlsPtr_.long_jump_context;
+ }
tlsPtr_.long_jump_context = context;
}
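
The old DCHECK assumed at most one outstanding context; with nested exception handling that no longer holds, so releasing now evicts any cached context. A simplified sketch of the pairing:

    Context* context = self->GetLongJumpContext();  // Cached or freshly made.
    // ... find catch block / deoptimize; this may recurse into this path ...
    self->ReleaseLongJumpContext(context);          // May delete a stale cache.
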
@@ -381,11 +394,11 @@
(tlsPtr_.managed_stack.GetTopShadowFrame() != nullptr);
}
- // If 'msg' is NULL, no detail message is set.
+ // If 'msg' is null, no detail message is set.
void ThrowNewException(const char* exception_class_descriptor, const char* msg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // If 'msg' is NULL, no detail message is set. An exception must be pending, and will be
+ // If 'msg' is null, no detail message is set. An exception must be pending, and will be
// used as the new exception's cause.
void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -477,8 +490,8 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
- // StackTraceElement[]. If output_array is NULL, a new array is created, otherwise as many
- // frames as will fit are written into the given array. If stack_depth is non-NULL, it's updated
+ // StackTraceElement[]. If output_array is null, a new array is created, otherwise as many
+ // frames as will fit are written into the given array. If stack_depth is non-null, it's updated
// with the number of valid frames in the returned array.
static jobjectArray InternalStackTraceToStackTraceElementArray(
const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
@@ -528,6 +541,16 @@
}
public:
+ static uint32_t QuickEntryPointOffsetWithSize(size_t quick_entrypoint_offset,
+ size_t pointer_size) {
+ DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size;
+ if (pointer_size == 4) {
+ return QuickEntryPointOffset<4>(quick_entrypoint_offset).Uint32Value();
+ } else {
+ return QuickEntryPointOffset<8>(quick_entrypoint_offset).Uint32Value();
+ }
+ }
+
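
QuickEntryPointOffsetWithSize lets callers that only learn the pointer size at runtime dispatch to the right template instantiation. A hypothetical call site (OFFSETOF_MEMBER is assumed from base/macros.h; the pointer-size computation is a placeholder):

    size_t pointer_size = Is64BitInstructionSet(isa) ? 8u : 4u;
    uint32_t offset = Thread::QuickEntryPointOffsetWithSize(
        OFFSETOF_MEMBER(QuickEntryPoints, pDeoptimize), pointer_size);
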
template<size_t pointer_size>
static ThreadOffset<pointer_size> QuickEntryPointOffset(size_t quick_entrypoint_offset) {
return ThreadOffsetFromTlsPtr<pointer_size>(
@@ -729,6 +752,18 @@
tls32_.ready_for_debug_invoke = ready;
}
+ bool IsDebugMethodEntry() const {
+ return tls32_.debug_method_entry_;
+ }
+
+ void SetDebugMethodEntry() {
+ tls32_.debug_method_entry_ = true;
+ }
+
+ void ClearDebugMethodEntry() {
+ tls32_.debug_method_entry_ = false;
+ }
+
// Activates single step control for debugging. The thread takes the
// ownership of the given SingleStepControl*. It is deleted by a call
// to DeactivateSingleStepControl or upon thread destruction.
@@ -898,6 +933,8 @@
void PushVerifier(verifier::MethodVerifier* verifier);
void PopVerifier(verifier::MethodVerifier* verifier);
+ void InitStringEntryPoints();
+
private:
explicit Thread(bool daemon);
~Thread() LOCKS_EXCLUDED(Locks::mutator_lock_,
@@ -1003,7 +1040,7 @@
suspend_count(0), debug_suspend_count(0), thin_lock_thread_id(0), tid(0),
daemon(is_daemon), throwing_OutOfMemoryError(false), no_thread_suspension(0),
thread_exit_check_count(0), handling_signal_(false), suspended_at_suspend_check(false),
- ready_for_debug_invoke(false) {
+ ready_for_debug_invoke(false), debug_method_entry_(false) {
}
union StateAndFlags state_and_flags;
@@ -1052,6 +1089,10 @@
// used to invoke method from the debugger which is only allowed when
// the thread is suspended by an event.
bool32_t ready_for_debug_invoke;
+
+  // True if the thread has entered a method. This is used to detect the
+  // method entry event for the debugger.
+ bool32_t debug_method_entry_;
} tls32_;
struct PACKED(8) tls_64bit_sized_values {
@@ -1085,7 +1126,7 @@
// The biased card table, see CardTable for details.
uint8_t* card_table;
- // The pending exception or NULL.
+ // The pending exception or null.
mirror::Throwable* exception;
// The end of this thread's stack. This is the lowest safely-addressable address on the stack.
@@ -1121,13 +1162,13 @@
// Pointer to previous stack trace captured by sampling profiler.
std::vector<mirror::ArtMethod*>* stack_trace_sample;
- // The next thread in the wait set this thread is part of or NULL if not waiting.
+ // The next thread in the wait set this thread is part of or null if not waiting.
Thread* wait_next;
// If we're blocked in MonitorEnter, this is the object we're trying to lock.
mirror::Object* monitor_enter_object;
- // Top of linked list of handle scopes or nullptr for none.
+ // Top of linked list of handle scopes or null for none.
HandleScope* top_handle_scope;
// Needed to get the right ClassLoader in JNI_OnLoad, but also
@@ -1162,7 +1203,7 @@
// If no_thread_suspension_ is > 0, what is causing that assertion.
const char* last_no_thread_suspension_cause;
- // Pending checkpoint function or NULL if non-pending. Installation guarding by
+  // Pending checkpoint function or null if non-pending. Installation guarded by
// Locks::thread_suspend_count_lock_.
Closure* checkpoint_functions[kMaxCheckpoints];
@@ -1203,7 +1244,7 @@
// Condition variable waited upon during a wait.
ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
- // Pointer to the monitor lock we're currently waiting on or NULL if not waiting.
+ // Pointer to the monitor lock we're currently waiting on or null if not waiting.
Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);
// Thread "interrupted" status; stays raised until queried or thrown.
diff --git a/runtime/thread_linux.cc b/runtime/thread_linux.cc
index 0284364..0526f49 100644
--- a/runtime/thread_linux.cc
+++ b/runtime/thread_linux.cc
@@ -50,26 +50,26 @@
ss.ss_sp = new uint8_t[kHostAltSigStackSize];
ss.ss_size = kHostAltSigStackSize;
ss.ss_flags = 0;
- CHECK(ss.ss_sp != NULL);
- SigAltStack(&ss, NULL);
+ CHECK(ss.ss_sp != nullptr);
+ SigAltStack(&ss, nullptr);
// Double-check that it worked.
- ss.ss_sp = NULL;
- SigAltStack(NULL, &ss);
+ ss.ss_sp = nullptr;
+ SigAltStack(nullptr, &ss);
VLOG(threads) << "Alternate signal stack is " << PrettySize(ss.ss_size) << " at " << ss.ss_sp;
}
void Thread::TearDownAlternateSignalStack() {
// Get the pointer so we can free the memory.
stack_t ss;
- SigAltStack(NULL, &ss);
+ SigAltStack(nullptr, &ss);
uint8_t* allocated_signal_stack = reinterpret_cast<uint8_t*>(ss.ss_sp);
// Tell the kernel to stop using it.
- ss.ss_sp = NULL;
+ ss.ss_sp = nullptr;
ss.ss_flags = SS_DISABLE;
ss.ss_size = kHostAltSigStackSize; // Avoid ENOMEM failure with Mac OS' buggy libc.
- SigAltStack(&ss, NULL);
+ SigAltStack(&ss, nullptr);
// Free it.
delete[] allocated_signal_stack;
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 560bcc1..cc54bbd 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -116,9 +116,9 @@
}
static void DumpUnattachedThread(std::ostream& os, pid_t tid) NO_THREAD_SAFETY_ANALYSIS {
- // TODO: No thread safety analysis as DumpState with a NULL thread won't access fields, should
+ // TODO: No thread safety analysis as DumpState with a null thread won't access fields, should
// refactor DumpState to avoid skipping analysis.
- Thread::DumpState(os, NULL, tid);
+ Thread::DumpState(os, nullptr, tid);
DumpKernelStack(os, tid, " kernel: ", false);
// TODO: Reenable this when the native code in system_server can handle it.
// Currently "adb shell kill -3 `pid system_server`" will cause it to exit.
@@ -136,7 +136,7 @@
Thread* self = Thread::Current();
dirent* e;
- while ((e = readdir(d)) != NULL) {
+ while ((e = readdir(d)) != nullptr) {
char* end;
pid_t tid = strtol(e->d_name, &end, 10);
if (!*end) {
@@ -602,7 +602,7 @@
scoped_name_string(env, (jstring)env->GetObjectField(peer,
WellKnownClasses::java_lang_Thread_name));
ScopedUtfChars scoped_name_chars(env, scoped_name_string.get());
- if (scoped_name_chars.c_str() == NULL) {
+ if (scoped_name_chars.c_str() == nullptr) {
LOG(severity) << message << ": " << peer;
env->ExceptionClear();
} else {
@@ -813,7 +813,7 @@
return thread;
}
}
- return NULL;
+ return nullptr;
}
void ThreadList::SuspendAllForDebugger() {
@@ -865,7 +865,7 @@
// The debugger thread must not suspend itself due to debugger activity!
Thread* debug_thread = Dbg::GetDebugThread();
- CHECK(debug_thread != NULL);
+ CHECK(debug_thread != nullptr);
CHECK(self != debug_thread);
CHECK_NE(self->GetState(), kRunnable);
Locks::mutator_lock_->AssertNotHeld(self);
@@ -1142,7 +1142,7 @@
// Clear the TLS data, so that the underlying native thread is recognizably detached.
// (It may wish to reattach later.)
- CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, NULL), "detach self");
+ CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, nullptr), "detach self");
// Signal that a thread just detached.
MutexLock mu(nullptr, *Locks::thread_list_lock_);
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index fa747b8..0f094cc 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -68,7 +68,7 @@
// Suspend a thread using a peer, typically used by the debugger. Returns the thread on success,
- // else NULL. The peer is used to identify the thread to avoid races with the thread terminating.
+ // else null. The peer is used to identify the thread to avoid races with the thread terminating.
// If the thread should be suspended then value of request_suspension should be true otherwise
// the routine will wait for a previous suspend request. If the suspension times out then *timeout
// is set to true.
@@ -79,7 +79,7 @@
Locks::thread_suspend_count_lock_);
// Suspend a thread using its thread id, typically used by lock/monitor inflation. Returns the
- // thread on success else NULL. The thread id is used to identify the thread to avoid races with
+ // thread on success else null. The thread id is used to identify the thread to avoid races with
// the thread terminating. Note that as thread ids are recycled this may not suspend the expected
// thread, that may be terminating. If the suspension times out then *timeout is set to true.
Thread* SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspension, bool* timed_out)
@@ -164,7 +164,7 @@
LOCKS_EXCLUDED(Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_);
- void AssertThreadsAreSuspended(Thread* self, Thread* ignore1, Thread* ignore2 = NULL)
+ void AssertThreadsAreSuspended(Thread* self, Thread* ignore1, Thread* ignore2 = nullptr)
LOCKS_EXCLUDED(Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_);
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index 2a82285..ce76eae 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -158,7 +158,7 @@
--waiting_count_;
}
- // We are shutting down, return nullptr to tell the worker thread to stop looping.
+ // We are shutting down, return null to tell the worker thread to stop looping.
return nullptr;
}
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index 79b57af..0557708 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -112,7 +112,7 @@
// get a task to run, blocks if there are no tasks left
virtual Task* GetTask(Thread* self);
- // Try to get a task, returning NULL if there is none available.
+ // Try to get a task, returning null if there is none available.
Task* TryGetTask(Thread* self);
Task* TryGetTaskLocked() EXCLUSIVE_LOCKS_REQUIRED(task_queue_lock_);
@@ -166,7 +166,7 @@
virtual ~WorkStealingWorker();
bool IsRunningTask() const {
- return task_ != NULL;
+ return task_ != nullptr;
}
protected:
diff --git a/runtime/thread_state.h b/runtime/thread_state.h
index b5479ed..c7ea7f4 100644
--- a/runtime/thread_state.h
+++ b/runtime/thread_state.h
@@ -42,6 +42,7 @@
kWaitingForDeoptimization, // WAITING TS_WAIT waiting for deoptimization suspend all
kWaitingForMethodTracingStart, // WAITING TS_WAIT waiting for method tracing to start
kWaitingForVisitObjects, // WAITING TS_WAIT waiting for visiting objects
+  kWaitingForGetObjectsAllocated,   // WAITING        TS_WAIT      waiting to get the number of allocated objects
kStarting, // NEW TS_WAIT native thread started, not yet ready to run managed code
kNative, // RUNNABLE TS_RUNNING running in a JNI native method
kSuspended, // RUNNABLE TS_RUNNING suspended by GC or debugger
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 5322f9f..7636792 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -22,6 +22,7 @@
#define ATRACE_TAG ATRACE_TAG_DALVIK
#include "cutils/trace.h"
+#include "base/casts.h"
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
@@ -90,8 +91,9 @@
class BuildStackTraceVisitor : public StackVisitor {
public:
- explicit BuildStackTraceVisitor(Thread* thread) : StackVisitor(thread, nullptr),
- method_trace_(Trace::AllocStackTrace()) {}
+ explicit BuildStackTraceVisitor(Thread* thread)
+ : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ method_trace_(Trace::AllocStackTrace()) {}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* m = GetMethod();
@@ -125,6 +127,9 @@
pthread_t Trace::sampling_pthread_ = 0U;
std::unique_ptr<std::vector<mirror::ArtMethod*>> Trace::temp_stack_trace_;
+// The instrumentation key used to identify the tracer when enabling and
+// disabling method tracing.
+
static mirror::ArtMethod* DecodeTraceMethodId(uint32_t tmid) {
return reinterpret_cast<mirror::ArtMethod*>(tmid & ~kTraceMethodActionMask);
}
@@ -329,7 +334,7 @@
return nullptr;
}
-void Trace::Start(const char* trace_filename, int trace_fd, int buffer_size, int flags,
+void Trace::Start(const char* trace_filename, int trace_fd, size_t buffer_size, int flags,
TraceOutputMode output_mode, TraceMode trace_mode, int interval_us) {
Thread* self = Thread::Current();
{
@@ -392,7 +397,7 @@
instrumentation::Instrumentation::kMethodExited |
instrumentation::Instrumentation::kMethodUnwind);
// TODO: In full-PIC mode, we don't need to fully deopt.
- runtime->GetInstrumentation()->EnableMethodTracing();
+ runtime->GetInstrumentation()->EnableMethodTracing(kTracerInstrumentationKey);
}
}
}
@@ -439,7 +444,7 @@
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
runtime->GetThreadList()->ForEach(ClearThreadStackTraceAndClockBase, nullptr);
} else {
- runtime->GetInstrumentation()->DisableMethodTracing();
+ runtime->GetInstrumentation()->DisableMethodTracing(kTracerInstrumentationKey);
runtime->GetInstrumentation()->RemoveListener(
the_trace, instrumentation::Instrumentation::kMethodEntered |
instrumentation::Instrumentation::kMethodExited |
@@ -521,7 +526,7 @@
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
runtime->GetThreadList()->ForEach(ClearThreadStackTraceAndClockBase, nullptr);
} else {
- runtime->GetInstrumentation()->DisableMethodTracing();
+ runtime->GetInstrumentation()->DisableMethodTracing(kTracerInstrumentationKey);
runtime->GetInstrumentation()->RemoveListener(the_trace,
instrumentation::Instrumentation::kMethodEntered |
instrumentation::Instrumentation::kMethodExited |
@@ -565,7 +570,7 @@
instrumentation::Instrumentation::kMethodExited |
instrumentation::Instrumentation::kMethodUnwind);
// TODO: In full-PIC mode, we don't need to fully deopt.
- runtime->GetInstrumentation()->EnableMethodTracing();
+ runtime->GetInstrumentation()->EnableMethodTracing(kTracerInstrumentationKey);
}
runtime->GetThreadList()->ResumeAll();
@@ -592,19 +597,15 @@
}
}
-static constexpr size_t kStreamingBufferSize = 16 * KB;
+static constexpr size_t kMinBufSize = 18U; // Trace header is up to 18B.
-Trace::Trace(File* trace_file, const char* trace_name, int buffer_size, int flags,
+Trace::Trace(File* trace_file, const char* trace_name, size_t buffer_size, int flags,
TraceOutputMode output_mode, TraceMode trace_mode)
: trace_file_(trace_file),
- buf_(new uint8_t[output_mode == TraceOutputMode::kStreaming ?
- kStreamingBufferSize :
- buffer_size]()),
+ buf_(new uint8_t[std::max(kMinBufSize, buffer_size)]()),
flags_(flags), trace_output_mode_(output_mode), trace_mode_(trace_mode),
clock_source_(default_clock_source_),
- buffer_size_(output_mode == TraceOutputMode::kStreaming ?
- kStreamingBufferSize :
- buffer_size),
+ buffer_size_(std::max(kMinBufSize, buffer_size)),
start_time_(MicroTime()), clock_overhead_ns_(GetClockOverheadNanoSeconds()), cur_offset_(0),
overflow_(false), interval_us_(0), streaming_lock_(nullptr) {
uint16_t trace_version = GetTraceVersion(clock_source_);
@@ -621,6 +622,7 @@
uint16_t record_size = GetRecordSize(clock_source_);
Append2LE(buf_.get() + 16, record_size);
}
+ static_assert(18 <= kMinBufSize, "Minimum buffer size not large enough for trace header");
// Update current offset.
cur_offset_.StoreRelaxed(kTraceHeaderLength);
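
Replacing the streaming-only kStreamingBufferSize with a clamp means both output modes share one sizing rule, and the static_assert ties kMinBufSize to the 18-byte trace header. The effect, sketched:

    // Even a pathologically small request still fits the trace header:
    size_t requested = 1u;
    size_t actual = std::max(kMinBufSize, requested);  // == 18
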
@@ -875,11 +877,21 @@
void Trace::WriteToBuf(const uint8_t* src, size_t src_size) {
int32_t old_offset = cur_offset_.LoadRelaxed();
int32_t new_offset = old_offset + static_cast<int32_t>(src_size);
- if (new_offset > buffer_size_) {
+ if (dchecked_integral_cast<size_t>(new_offset) > buffer_size_) {
// Flush buffer.
if (!trace_file_->WriteFully(buf_.get(), old_offset)) {
PLOG(WARNING) << "Failed streaming a tracing event.";
}
+
+    // If the data itself is too large for the buffer, write it to the file directly.
+ if (src_size >= buffer_size_) {
+ if (!trace_file_->WriteFully(src, src_size)) {
+ PLOG(WARNING) << "Failed streaming a tracing event.";
+ }
+ cur_offset_.StoreRelease(0); // Buffer is empty now.
+ return;
+ }
+
old_offset = 0;
new_offset = static_cast<int32_t>(src_size);
}
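
The new branch closes a hole where a single record larger than the whole buffer could never be flushed. The resulting control flow of the streaming write, restated:

    // 1. new_offset > buffer_size_  -> flush the buffered bytes [0, old_offset).
    // 2. src_size >= buffer_size_   -> bypass buf_, WriteFully(src, src_size),
    //                                  publish an empty buffer, return.
    // 3. otherwise                  -> restart at offset 0 and fall through to
    //                                  the usual copy into buf_.
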
@@ -900,7 +912,7 @@
do {
old_offset = cur_offset_.LoadRelaxed();
new_offset = old_offset + GetRecordSize(clock_source_);
- if (new_offset > buffer_size_) {
+ if (static_cast<size_t>(new_offset) > buffer_size_) {
overflow_ = true;
return;
}
@@ -1034,4 +1046,10 @@
return the_trace_->trace_mode_;
}
+size_t Trace::GetBufferSize() {
+ MutexLock mu(Thread::Current(), *Locks::trace_lock_);
+  CHECK(the_trace_ != nullptr) << "Trace buffer size requested, but no trace currently running";
+ return the_trace_->buffer_size_;
+}
+
} // namespace art
diff --git a/runtime/trace.h b/runtime/trace.h
index b8329ff..df6d5e7 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -72,7 +72,7 @@
static void SetDefaultClockSource(TraceClockSource clock_source);
- static void Start(const char* trace_filename, int trace_fd, int buffer_size, int flags,
+ static void Start(const char* trace_filename, int trace_fd, size_t buffer_size, int flags,
TraceOutputMode output_mode, TraceMode trace_mode, int interval_us)
LOCKS_EXCLUDED(Locks::mutator_lock_,
Locks::thread_list_lock_,
@@ -136,9 +136,10 @@
static TraceOutputMode GetOutputMode() LOCKS_EXCLUDED(Locks::trace_lock_);
static TraceMode GetMode() LOCKS_EXCLUDED(Locks::trace_lock_);
+ static size_t GetBufferSize() LOCKS_EXCLUDED(Locks::trace_lock_);
private:
- Trace(File* trace_file, const char* trace_name, int buffer_size, int flags,
+ Trace(File* trace_file, const char* trace_name, size_t buffer_size, int flags,
TraceOutputMode output_mode, TraceMode trace_mode);
// The sampling interval in microseconds is passed as an argument.
@@ -172,7 +173,7 @@
void WriteToBuf(const uint8_t* src, size_t src_size)
EXCLUSIVE_LOCKS_REQUIRED(streaming_lock_);
- // Singleton instance of the Trace or NULL when no method tracing is active.
+ // Singleton instance of the Trace or null when no method tracing is active.
static Trace* volatile the_trace_ GUARDED_BY(Locks::trace_lock_);
// The default profiler clock source.
@@ -184,11 +185,11 @@
// Used to remember an unused stack trace to avoid re-allocation during sampling.
static std::unique_ptr<std::vector<mirror::ArtMethod*>> temp_stack_trace_;
- // File to write trace data out to, NULL if direct to ddms.
+ // File to write trace data out to, null if direct to ddms.
std::unique_ptr<File> trace_file_;
// Buffer to store trace data.
- std::unique_ptr<uint8_t> buf_;
+ std::unique_ptr<uint8_t[]> buf_;
// Flags enabling extra tracing of things such as alloc counts.
const int flags_;
@@ -202,7 +203,7 @@
const TraceClockSource clock_source_;
// Size of buf_.
- const int buffer_size_;
+ const size_t buffer_size_;
// Time trace was created.
const uint64_t start_time_;
diff --git a/runtime/transaction.cc b/runtime/transaction.cc
index cc0f15f..ab821d7 100644
--- a/runtime/transaction.cc
+++ b/runtime/transaction.cc
@@ -70,13 +70,21 @@
}
}
-void Transaction::ThrowAbortError(Thread* self, bool rethrow) {
+void Transaction::ThrowAbortError(Thread* self, const std::string* abort_message) {
+ const bool rethrow = (abort_message == nullptr);
if (kIsDebugBuild && rethrow) {
CHECK(IsAborted()) << "Rethrow " << Transaction::kAbortExceptionDescriptor
<< " while transaction is not aborted";
}
- std::string abort_msg(GetAbortMessage());
- self->ThrowNewWrappedException(Transaction::kAbortExceptionSignature, abort_msg.c_str());
+ if (rethrow) {
+ // Rethrow an exception with the earlier abort message stored in the transaction.
+ self->ThrowNewWrappedException(Transaction::kAbortExceptionSignature,
+ GetAbortMessage().c_str());
+ } else {
+ // Throw an exception with the given abort message.
+ self->ThrowNewWrappedException(Transaction::kAbortExceptionSignature,
+ abort_message->c_str());
+ }
}
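
The signature change folds the old rethrow flag into the message pointer: null means "rethrow the message already stored in the transaction". A hedged call-site sketch (transaction and abort_message are placeholders):

    transaction->ThrowAbortError(self, &abort_message);  // First abort: wrap the message.
    transaction->ThrowAbortError(self, nullptr);         // Rethrow the stored message.
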
bool Transaction::IsAborted() {
diff --git a/runtime/transaction.h b/runtime/transaction.h
index 4d85662..030478c 100644
--- a/runtime/transaction.h
+++ b/runtime/transaction.h
@@ -48,7 +48,7 @@
void Abort(const std::string& abort_message)
LOCKS_EXCLUDED(log_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void ThrowAbortError(Thread* self, bool rethrow)
+ void ThrowAbortError(Thread* self, const std::string* abort_message)
LOCKS_EXCLUDED(log_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsAborted() LOCKS_EXCLUDED(log_lock_);
diff --git a/runtime/utf.cc b/runtime/utf.cc
index 3d13c3e..10600e2 100644
--- a/runtime/utf.cc
+++ b/runtime/utf.cc
@@ -107,15 +107,6 @@
}
}
-int32_t ComputeUtf16Hash(mirror::CharArray* chars, int32_t offset,
- size_t char_count) {
- uint32_t hash = 0;
- for (size_t i = 0; i < char_count; i++) {
- hash = hash * 31 + chars->Get(offset + i);
- }
- return static_cast<int32_t>(hash);
-}
-
int32_t ComputeUtf16Hash(const uint16_t* chars, size_t char_count) {
uint32_t hash = 0;
while (char_count--) {
diff --git a/runtime/utf.h b/runtime/utf.h
index dd38afa..7f05248 100644
--- a/runtime/utf.h
+++ b/runtime/utf.h
@@ -87,9 +87,9 @@
/*
* Retrieve the next UTF-16 character or surrogate pair from a UTF-8 string.
 * Single-byte, 2-byte and 3-byte UTF-8 sequences result in a single UTF-16
- * character whereas 4-byte UTF-8 sequences result in a surrogate pair. Use
- * GetLeadingUtf16Char and GetTrailingUtf16Char to process the return value
- * of this function.
+ * character (possibly one half of a surrogate) whereas 4-byte UTF-8 sequences
+ * result in a surrogate pair. Use GetLeadingUtf16Char and GetTrailingUtf16Char
+ * to process the return value of this function.
*
* Advances "*utf8_data_in" to the start of the next character.
*
diff --git a/runtime/utils.cc b/runtime/utils.cc
index a303aa4..7986cdc 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -60,7 +60,7 @@
pid_t GetTid() {
#if defined(__APPLE__)
uint64_t owner;
- CHECK_PTHREAD_CALL(pthread_threadid_np, (NULL, &owner), __FUNCTION__); // Requires Mac OS 10.6
+ CHECK_PTHREAD_CALL(pthread_threadid_np, (nullptr, &owner), __FUNCTION__); // Requires Mac OS 10.6
return owner;
#elif defined(__BIONIC__)
return gettid();
@@ -205,7 +205,7 @@
}
std::string GetIsoDate() {
- time_t now = time(NULL);
+ time_t now = time(nullptr);
tm tmbuf;
tm* ptm = localtime_r(&now, &tmbuf);
return StringPrintf("%04d-%02d-%02d %02d:%02d:%02d",
@@ -220,7 +220,7 @@
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000) + now.tv_nsec / UINT64_C(1000000);
#else // __APPLE__
timeval now;
- gettimeofday(&now, NULL);
+ gettimeofday(&now, nullptr);
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000) + now.tv_usec / UINT64_C(1000);
#endif
}
@@ -232,7 +232,7 @@
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) + now.tv_nsec / UINT64_C(1000);
#else // __APPLE__
timeval now;
- gettimeofday(&now, NULL);
+ gettimeofday(&now, nullptr);
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) + now.tv_usec;
#endif
}
@@ -244,7 +244,7 @@
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000000) + now.tv_nsec;
#else // __APPLE__
timeval now;
- gettimeofday(&now, NULL);
+ gettimeofday(&now, nullptr);
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000000) + now.tv_usec * UINT64_C(1000);
#endif
}
@@ -262,9 +262,9 @@
void NanoSleep(uint64_t ns) {
timespec tm;
- tm.tv_sec = 0;
- tm.tv_nsec = ns;
- nanosleep(&tm, NULL);
+ tm.tv_sec = ns / MsToNs(1000);
+ tm.tv_nsec = ns - static_cast<uint64_t>(tm.tv_sec) * MsToNs(1000);
+ nanosleep(&tm, nullptr);
}
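
The old code stored the full count in tv_nsec, which nanosleep(2) rejects with EINVAL once ns reaches one second. A worked example of the new split (MsToNs(1000) is 1,000,000,000):

    // For ns = 2,500,000,000 (2.5 s):
    //   tv_sec  = 2500000000 / 1000000000      = 2
    //   tv_nsec = 2500000000 - 2 * 1000000000  = 500000000
    // tv_nsec now always lands in [0, 1e9), as nanosleep(2) requires.
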
void InitTimeSpec(bool absolute, int clock, int64_t ms, int32_t ns, timespec* ts) {
@@ -276,7 +276,7 @@
#else
UNUSED(clock);
timeval tv;
- gettimeofday(&tv, NULL);
+ gettimeofday(&tv, nullptr);
ts->tv_sec = tv.tv_sec;
ts->tv_nsec = tv.tv_usec * 1000;
#endif
@@ -301,14 +301,14 @@
}
std::string PrettyDescriptor(mirror::String* java_descriptor) {
- if (java_descriptor == NULL) {
+ if (java_descriptor == nullptr) {
return "null";
}
return PrettyDescriptor(java_descriptor->ToModifiedUtf8().c_str());
}
std::string PrettyDescriptor(mirror::Class* klass) {
- if (klass == NULL) {
+ if (klass == nullptr) {
return "null";
}
std::string temp;
@@ -365,7 +365,7 @@
}
std::string PrettyField(ArtField* f, bool with_type) {
- if (f == NULL) {
+ if (f == nullptr) {
return "null";
}
std::string result;
@@ -436,7 +436,7 @@
std::string PrettyReturnType(const char* signature) {
const char* return_type = strchr(signature, ')');
- CHECK(return_type != NULL);
+ CHECK(return_type != nullptr);
++return_type; // Skip ')'.
return PrettyDescriptor(return_type);
}
@@ -484,10 +484,10 @@
}
std::string PrettyTypeOf(mirror::Object* obj) {
- if (obj == NULL) {
+ if (obj == nullptr) {
return "null";
}
- if (obj->GetClass() == NULL) {
+ if (obj->GetClass() == nullptr) {
return "(raw)";
}
std::string temp;
@@ -499,7 +499,7 @@
}
std::string PrettyClass(mirror::Class* c) {
- if (c == NULL) {
+ if (c == nullptr) {
return "null";
}
std::string result;
@@ -510,7 +510,7 @@
}
std::string PrettyClassAndClassLoader(mirror::Class* c) {
- if (c == NULL) {
+ if (c == nullptr) {
return "null";
}
std::string result;
@@ -827,14 +827,21 @@
*/
const uint32_t pair = GetUtf16FromUtf8(pUtf8Ptr);
-
const uint16_t leading = GetLeadingUtf16Char(pair);
- const uint32_t trailing = GetTrailingUtf16Char(pair);
- if (trailing == 0) {
- // Perform follow-up tests based on the high 8 bits of the
- // lower surrogate.
- switch (leading >> 8) {
+ // We have a surrogate pair resulting from a valid 4 byte UTF-8 sequence.
+ // No further checks are necessary because 4 byte sequences span code
+ // points [U+10000, U+1FFFFF], which are valid code points in a dex
+ // identifier. Furthermore, GetUtf16FromUtf8 guarantees that each of
+ // the surrogate halves is valid and well formed in this instance.
+ if (GetTrailingUtf16Char(pair) != 0) {
+ return true;
+ }
+
+ // We've encountered a one, two or three byte UTF-8 sequence. The
+ // three byte UTF-8 sequence could be one half of a surrogate pair.
+ switch (leading >> 8) {
case 0x00:
// It's only valid if it's above the ISO-8859-1 high space (0xa0).
return (leading > 0x00a0);
@@ -842,9 +849,14 @@
case 0xd9:
case 0xda:
case 0xdb:
- // It looks like a leading surrogate but we didn't find a trailing
- // surrogate if we're here.
- return false;
+ {
+ // We found a three byte sequence encoding one half of a surrogate.
+ // Look for the other half.
+ const uint32_t pair2 = GetUtf16FromUtf8(pUtf8Ptr);
+ const uint16_t trailing = GetLeadingUtf16Char(pair2);
+
+ return (GetTrailingUtf16Char(pair2) == 0) && (0xdc00 <= trailing && trailing <= 0xdfff);
+ }
case 0xdc:
case 0xdd:
case 0xde:
@@ -855,21 +867,19 @@
case 0xff:
// It's in the range that has spaces, controls, and specials.
switch (leading & 0xfff8) {
- case 0x2000:
- case 0x2008:
- case 0x2028:
- case 0xfff0:
- case 0xfff8:
- return false;
+ case 0x2000:
+ case 0x2008:
+ case 0x2028:
+ case 0xfff0:
+ case 0xfff8:
+ return false;
}
- break;
- }
-
- return true;
+ return true;
+ default:
+ return true;
}
- // We have a surrogate pair. Check that trailing surrogate is well formed.
- return (trailing >= 0xdc00 && trailing <= 0xdfff);
+ UNREACHABLE();
}
/* Return whether the pointed-at modified-UTF-8 encoded character is
@@ -1158,9 +1168,9 @@
std::vector<std::string> fields;
Split(stats, ' ', &fields);
*state = fields[0][0];
- *utime = strtoull(fields[11].c_str(), NULL, 10);
- *stime = strtoull(fields[12].c_str(), NULL, 10);
- *task_cpu = strtoull(fields[36].c_str(), NULL, 10);
+ *utime = strtoull(fields[11].c_str(), nullptr, 10);
+ *stime = strtoull(fields[12].c_str(), nullptr, 10);
+ *task_cpu = strtoull(fields[36].c_str(), nullptr, 10);
}
std::string GetSchedulerGroupName(pid_t tid) {
@@ -1298,7 +1308,7 @@
if (!BacktraceMap::IsValid(it->map)) {
os << StringPrintf("%08" PRIxPTR " ???", it->pc);
} else {
- os << StringPrintf("%08" PRIxPTR " ", it->pc - it->map.start);
+ os << StringPrintf("%08" PRIxPTR " ", BacktraceMap::GetRelativePc(it->map, it->pc));
os << it->map.name;
os << " (";
if (!it->func_name.empty()) {
@@ -1358,7 +1368,7 @@
// into "futex_wait_queue_me+0xcd/0x110".
const char* text = kernel_stack_frames[i].c_str();
const char* close_bracket = strchr(text, ']');
- if (close_bracket != NULL) {
+ if (close_bracket != nullptr) {
text = close_bracket + 2;
}
os << prefix;
@@ -1373,7 +1383,7 @@
const char* GetAndroidRoot() {
const char* android_root = getenv("ANDROID_ROOT");
- if (android_root == NULL) {
+ if (android_root == nullptr) {
if (OS::DirectoryExists("/system")) {
android_root = "/system";
} else {
@@ -1401,7 +1411,7 @@
const char* GetAndroidDataSafe(std::string* error_msg) {
const char* android_data = getenv("ANDROID_DATA");
- if (android_data == NULL) {
+ if (android_data == nullptr) {
if (OS::DirectoryExists("/data")) {
android_data = "/data";
} else {
@@ -1563,7 +1573,7 @@
CHECK(arg_str != nullptr) << i;
args.push_back(arg_str);
}
- args.push_back(NULL);
+ args.push_back(nullptr);
// fork and exec
pid_t pid = fork();
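The NanoSleep hunk above fixes a real bug: the old code stored the entire duration in tv_nsec, but POSIX nanosleep(2) rejects tv_nsec values outside [0, 999999999] with EINVAL, so any request of a second or more failed without sleeping. A standalone sketch of the corrected decomposition, using plain constants in place of ART's MsToNs helper:

#include <cassert>
#include <cstdint>
#include <ctime>

int main() {
  const uint64_t ns = UINT64_C(1500000000);   // A 1.5 second request.
  timespec tm;
  tm.tv_sec = ns / UINT64_C(1000000000);      // Whole seconds.
  tm.tv_nsec = ns % UINT64_C(1000000000);     // Remainder, always below 1e9.
  assert(tm.tv_sec == 1);
  assert(tm.tv_nsec == 500000000);            // In the range nanosleep() accepts.
  nanosleep(&tm, nullptr);
  return 0;
}

The new TestSleep test in utils_test.cc below exercises exactly this case with a 1500 ms request.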
diff --git a/runtime/utils.h b/runtime/utils.h
index 6708c67..71ccf85 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -300,6 +300,18 @@
return CTZ(x);
}
+// Return whether x / divisor == x * (1.0f / divisor), for every float x.
+static constexpr bool CanDivideByReciprocalMultiplyFloat(int32_t divisor) {
+ // True, if the 23 least significant bits of divisor are 0.
+ return ((divisor & 0x7fffff) == 0);
+}
+
+// Return whether x / divisor == x * (1.0 / divisor), for every double x.
+static constexpr bool CanDivideByReciprocalMultiplyDouble(int64_t divisor) {
+ // True, if the 52 least significant bits of divisor are 0.
+ return ((divisor & ((UINT64_C(1) << 52) - 1)) == 0);
+}
+
template<typename T>
static constexpr int POPCOUNT(T x) {
return (sizeof(T) == sizeof(uint32_t))
@@ -508,7 +520,7 @@
// Find $ANDROID_DATA, /data, or abort.
const char* GetAndroidData();
-// Find $ANDROID_DATA, /data, or return nullptr.
+// Find $ANDROID_DATA, /data, or return null.
const char* GetAndroidDataSafe(std::string* error_msg);
// Returns the dalvik-cache location, with subdir appended. Returns the empty string if the cache
@@ -592,6 +604,11 @@
return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
+inline bool TestBitmap(size_t idx, const uint8_t* bitmap) {
+ return ((bitmap[idx / kBitsPerByte] >> (idx % kBitsPerByte)) & 0x01) != 0;
+}
+
} // namespace art
#endif // ART_RUNTIME_UTILS_H_
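The CanDivideByReciprocalMultiply predicates above read naturally if the integer argument carries the raw IEEE-754 bit pattern of the constant divisor; that is an assumption here, since the callers are outside this diff. Under it, a zero significand field (the low 23 or 52 bits) means the divisor is a power of two of either sign, whose reciprocal is exactly representable, so the multiplication loses nothing. A hedged sketch:

#include <cstdint>
#include <cstring>

// Hypothetical caller-side helper producing the raw bits the predicate
// is assumed to expect.
static int32_t FloatBits(float f) {
  int32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));
  return bits;
}

// 8.0f has bit pattern 0x41000000: the significand field is zero, so
// CanDivideByReciprocalMultiplyFloat(FloatBits(8.0f)) is true.
// 3.0f has bit pattern 0x40400000: significand bits are set, so the
// predicate is false; 1.0f / 3.0f is not exactly representable.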
diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc
index 6ccbd13..869d305 100644
--- a/runtime/utils_test.cc
+++ b/runtime/utils_test.cc
@@ -106,7 +106,7 @@
TEST_F(UtilsTest, PrettyTypeOf) {
ScopedObjectAccess soa(Thread::Current());
- EXPECT_EQ("null", PrettyTypeOf(NULL));
+ EXPECT_EQ("null", PrettyTypeOf(nullptr));
StackHandleScope<2> hs(soa.Self());
Handle<mirror::String> s(hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "")));
@@ -116,7 +116,7 @@
EXPECT_EQ("short[]", PrettyTypeOf(a.Get()));
mirror::Class* c = class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/String;");
- ASSERT_TRUE(c != NULL);
+ ASSERT_TRUE(c != nullptr);
mirror::Object* o = mirror::ObjectArray<mirror::String>::Alloc(soa.Self(), c, 0);
EXPECT_EQ("java.lang.String[]", PrettyTypeOf(o));
EXPECT_EQ("java.lang.Class<java.lang.String[]>", PrettyTypeOf(o->GetClass()));
@@ -124,25 +124,25 @@
TEST_F(UtilsTest, PrettyClass) {
ScopedObjectAccess soa(Thread::Current());
- EXPECT_EQ("null", PrettyClass(NULL));
+ EXPECT_EQ("null", PrettyClass(nullptr));
mirror::Class* c = class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/String;");
- ASSERT_TRUE(c != NULL);
+ ASSERT_TRUE(c != nullptr);
mirror::Object* o = mirror::ObjectArray<mirror::String>::Alloc(soa.Self(), c, 0);
EXPECT_EQ("java.lang.Class<java.lang.String[]>", PrettyClass(o->GetClass()));
}
TEST_F(UtilsTest, PrettyClassAndClassLoader) {
ScopedObjectAccess soa(Thread::Current());
- EXPECT_EQ("null", PrettyClassAndClassLoader(NULL));
+ EXPECT_EQ("null", PrettyClassAndClassLoader(nullptr));
mirror::Class* c = class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/String;");
- ASSERT_TRUE(c != NULL);
+ ASSERT_TRUE(c != nullptr);
mirror::Object* o = mirror::ObjectArray<mirror::String>::Alloc(soa.Self(), c, 0);
EXPECT_EQ("java.lang.Class<java.lang.String[],null>", PrettyClassAndClassLoader(o->GetClass()));
}
TEST_F(UtilsTest, PrettyField) {
ScopedObjectAccess soa(Thread::Current());
- EXPECT_EQ("null", PrettyField(NULL));
+ EXPECT_EQ("null", PrettyField(nullptr));
mirror::Class* java_lang_String = class_linker_->FindSystemClass(soa.Self(),
"Ljava/lang/String;");
@@ -151,9 +151,6 @@
f = java_lang_String->FindDeclaredInstanceField("count", "I");
EXPECT_EQ("int java.lang.String.count", PrettyField(f));
EXPECT_EQ("java.lang.String.count", PrettyField(f, false));
- f = java_lang_String->FindDeclaredInstanceField("value", "[C");
- EXPECT_EQ("char[] java.lang.String.value", PrettyField(f));
- EXPECT_EQ("java.lang.String.value", PrettyField(f, false));
}
TEST_F(UtilsTest, PrettySize) {
@@ -216,21 +213,21 @@
TEST_F(UtilsTest, JniShortName_JniLongName) {
ScopedObjectAccess soa(Thread::Current());
mirror::Class* c = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/String;");
- ASSERT_TRUE(c != NULL);
+ ASSERT_TRUE(c != nullptr);
mirror::ArtMethod* m;
m = c->FindVirtualMethod("charAt", "(I)C");
- ASSERT_TRUE(m != NULL);
+ ASSERT_TRUE(m != nullptr);
EXPECT_EQ("Java_java_lang_String_charAt", JniShortName(m));
EXPECT_EQ("Java_java_lang_String_charAt__I", JniLongName(m));
m = c->FindVirtualMethod("indexOf", "(Ljava/lang/String;I)I");
- ASSERT_TRUE(m != NULL);
+ ASSERT_TRUE(m != nullptr);
EXPECT_EQ("Java_java_lang_String_indexOf", JniShortName(m));
EXPECT_EQ("Java_java_lang_String_indexOf__Ljava_lang_String_2I", JniLongName(m));
m = c->FindDirectMethod("copyValueOf", "([CII)Ljava/lang/String;");
- ASSERT_TRUE(m != NULL);
+ ASSERT_TRUE(m != nullptr);
EXPECT_EQ("Java_java_lang_String_copyValueOf", JniShortName(m));
EXPECT_EQ("Java_java_lang_String_copyValueOf___3CII", JniLongName(m));
}
@@ -384,7 +381,8 @@
TEST_F(UtilsTest, ExecSuccess) {
std::vector<std::string> command;
if (kIsTargetBuild) {
- command.push_back("/system/bin/id");
+ std::string android_root(GetAndroidRoot());
+ command.push_back(android_root + "/bin/id");
} else {
command.push_back("/usr/bin/id");
}
@@ -517,4 +515,33 @@
EXPECT_FALSE(IsAbsoluteUint<32>(UINT_MAX_plus1));
}
+TEST_F(UtilsTest, TestSleep) {
+ auto start = NanoTime();
+ NanoSleep(MsToNs(1500));
+ EXPECT_GT(NanoTime() - start, MsToNs(1000));
+}
+
+TEST_F(UtilsTest, IsValidDescriptor) {
+ std::vector<uint8_t> descriptor(
+ { 'L', 'a', '/', 'b', '$', 0xed, 0xa0, 0x80, 0xed, 0xb0, 0x80, ';', 0x00 });
+ EXPECT_TRUE(IsValidDescriptor(reinterpret_cast<char*>(&descriptor[0])));
+
+ std::vector<uint8_t> unpaired_surrogate(
+ { 'L', 'a', '/', 'b', '$', 0xed, 0xa0, 0x80, ';', 0x00 });
+ EXPECT_FALSE(IsValidDescriptor(reinterpret_cast<char*>(&unpaired_surrogate[0])));
+
+ std::vector<uint8_t> unpaired_surrogate_at_end(
+ { 'L', 'a', '/', 'b', '$', 0xed, 0xa0, 0x80, 0x00 });
+ EXPECT_FALSE(IsValidDescriptor(reinterpret_cast<char*>(&unpaired_surrogate_at_end[0])));
+
+ std::vector<uint8_t> invalid_surrogate(
+ { 'L', 'a', '/', 'b', '$', 0xed, 0xb0, 0x80, ';', 0x00 });
+ EXPECT_FALSE(IsValidDescriptor(reinterpret_cast<char*>(&invalid_surrogate[0])));
+
+ std::vector<uint8_t> unpaired_surrogate_with_multibyte_sequence(
+ { 'L', 'a', '/', 'b', '$', 0xed, 0xb0, 0x80, 0xf0, 0x9f, 0x8f, 0xa0, ';', 0x00 });
+ EXPECT_FALSE(
+ IsValidDescriptor(reinterpret_cast<char*>(&unpaired_surrogate_with_multibyte_sequence[0])));
+}
+
} // namespace art
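The raw byte vectors in the new IsValidDescriptor tests above hand-encode UTF-16 surrogate halves in the 3-byte form that modified UTF-8 permits. Decoding them by hand, as a checked sketch (the helper is not part of ART):

#include <cstdint>

// Decode a 3-byte modified-UTF-8 sequence into its UTF-16 code unit.
constexpr uint16_t DecodeThreeByte(uint8_t b0, uint8_t b1, uint8_t b2) {
  return static_cast<uint16_t>(((b0 & 0x0f) << 12) | ((b1 & 0x3f) << 6) | (b2 & 0x3f));
}

static_assert(DecodeThreeByte(0xed, 0xa0, 0x80) == 0xd800, "leading surrogate");
static_assert(DecodeThreeByte(0xed, 0xb0, 0x80) == 0xdc00, "trailing surrogate");

So the first test supplies the well-formed pair D800 DC00 and must be accepted, while the remaining tests each present an unpaired half that IsValidDescriptor must reject.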
diff --git a/runtime/verifier/dex_gc_map.cc b/runtime/verifier/dex_gc_map.cc
index cd0b137..c435f9f 100644
--- a/runtime/verifier/dex_gc_map.cc
+++ b/runtime/verifier/dex_gc_map.cc
@@ -49,7 +49,7 @@
if (error_if_not_present) {
LOG(ERROR) << "Didn't find reference bit map for dex_pc " << dex_pc;
}
- return NULL;
+ return nullptr;
}
} // namespace verifier
diff --git a/runtime/verifier/dex_gc_map.h b/runtime/verifier/dex_gc_map.h
index d77ea65..03a7821 100644
--- a/runtime/verifier/dex_gc_map.h
+++ b/runtime/verifier/dex_gc_map.h
@@ -39,7 +39,7 @@
class DexPcToReferenceMap {
public:
explicit DexPcToReferenceMap(const uint8_t* data) : data_(data) {
- CHECK(data_ != NULL);
+ CHECK(data_ != nullptr);
}
// The total size of the reference bit map including header.
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 065df05..1b1bc54 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -286,6 +286,13 @@
}
}
+static bool IsLargeMethod(const DexFile::CodeItem* const code_item) {
+ uint16_t registers_size = code_item->registers_size_;
+ uint32_t insns_size = code_item->insns_size_in_code_units_;
+
+ return registers_size * insns_size > 4*1024*1024;
+}
+
MethodVerifier::FailureKind MethodVerifier::VerifyMethod(Thread* self, uint32_t method_idx,
const DexFile* dex_file,
Handle<mirror::DexCache> dex_cache,
@@ -329,7 +336,8 @@
uint64_t duration_ns = NanoTime() - start_ns;
if (duration_ns > MsToNs(100)) {
LOG(WARNING) << "Verification of " << PrettyMethod(method_idx, *dex_file)
- << " took " << PrettyDuration(duration_ns);
+ << " took " << PrettyDuration(duration_ns)
+ << (IsLargeMethod(code_item) ? " (large method)" : "");
}
}
return result;
@@ -516,6 +524,23 @@
return GetQuickInvokedMethod(inst, register_line, is_range, false);
}
+SafeMap<uint32_t, std::set<uint32_t>> MethodVerifier::FindStringInitMap(mirror::ArtMethod* m) {
+ Thread* self = Thread::Current();
+ StackHandleScope<3> hs(self);
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(m->GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(m->GetClassLoader()));
+ Handle<mirror::ArtMethod> method(hs.NewHandle(m));
+ MethodVerifier verifier(self, m->GetDexFile(), dex_cache, class_loader, &m->GetClassDef(),
+ m->GetCodeItem(), m->GetDexMethodIndex(), method, m->GetAccessFlags(),
+ true, true, false, true);
+ return verifier.FindStringInitMap();
+}
+
+SafeMap<uint32_t, std::set<uint32_t>>& MethodVerifier::FindStringInitMap() {
+ Verify();
+ return GetStringInitPcRegMap();
+}
+
bool MethodVerifier::Verify() {
// If there aren't any instructions, make sure that's expected, then exit successfully.
if (code_item_ == nullptr) {
@@ -1194,10 +1219,6 @@
uint16_t registers_size = code_item_->registers_size_;
uint32_t insns_size = code_item_->insns_size_in_code_units_;
- if (registers_size * insns_size > 4*1024*1024) {
- LOG(WARNING) << "warning: method is huge (regs=" << registers_size
- << " insns_size=" << insns_size << ")";
- }
/* Create and initialize table holding register status */
reg_table_.Init(kTrackCompilerInterestPoints,
insn_flags_.get(),
@@ -2445,7 +2466,8 @@
* Replace the uninitialized reference with an initialized one. We need to do this for all
* registers that have the same object instance in them, not just the "this" register.
*/
- work_line_->MarkRefsAsInitialized(this, this_type);
+ const uint32_t this_reg = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
+ work_line_->MarkRefsAsInitialized(this, this_type, this_reg, work_insn_idx_);
}
if (return_type == nullptr) {
return_type = &reg_types_.FromDescriptor(GetClassLoader(), return_type_descriptor,
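The IsLargeMethod predicate above replaces the old inline "method is huge" warning: a method now simply gets a "(large method)" tag on the slow-verification message when registers × code units, the product that sizes the verifier's register table, exceeds 4M. Two compile-time sanity checks on that threshold (a sketch, not ART code):

// 1,000 registers over 5,000 code units: 5,000,000 cells, flagged as large.
static_assert(1000u * 5000u > 4u * 1024u * 1024u, "large");
// 100 registers over 10,000 code units: 1,000,000 cells, not flagged.
static_assert(!(100u * 10000u > 4u * 1024u * 1024u), "not large");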
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index cd414c2..452d1dd 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -199,6 +199,9 @@
static mirror::ArtMethod* FindInvokedMethodAtDexPc(mirror::ArtMethod* m, uint32_t dex_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static SafeMap<uint32_t, std::set<uint32_t>> FindStringInitMap(mirror::ArtMethod* m)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
static void Init() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void Shutdown();
@@ -244,11 +247,11 @@
bool HasFailures() const;
const RegType& ResolveCheckedClass(uint32_t class_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Returns the method of a quick invoke or nullptr if it cannot be found.
+ // Returns the method of a quick invoke or null if it cannot be found.
mirror::ArtMethod* GetQuickInvokedMethod(const Instruction* inst, RegisterLine* reg_line,
bool is_range, bool allow_failure)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Returns the access field of a quick field access (iget/iput-quick) or nullptr
+ // Returns the access field of a quick field access (iget/iput-quick) or null
// if it cannot be found.
ArtField* GetQuickFieldAccess(const Instruction* inst, RegisterLine* reg_line)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -263,6 +266,10 @@
return (method_access_flags_ & kAccStatic) != 0;
}
+ SafeMap<uint32_t, std::set<uint32_t>>& GetStringInitPcRegMap() {
+ return string_init_pc_reg_map_;
+ }
+
private:
// Private constructor for dumping.
MethodVerifier(Thread* self, const DexFile* dex_file, Handle<mirror::DexCache> dex_cache,
@@ -307,6 +314,9 @@
mirror::ArtMethod* FindInvokedMethodAtDexPc(uint32_t dex_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SafeMap<uint32_t, std::set<uint32_t>>& FindStringInitMap()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
/*
* Compute the width of the instruction at each address in the instruction stream, and store it in
* insn_flags_. Addresses that are in the middle of an instruction, or that are part of switch
@@ -585,7 +595,7 @@
* Widening conversions on integers and references are allowed, but
* narrowing conversions are not.
*
- * Returns the resolved method on success, nullptr on failure (with *failure
+ * Returns the resolved method on success, null on failure (with *failure
* set appropriately).
*/
mirror::ArtMethod* VerifyInvocationArgs(const Instruction* inst,
@@ -686,7 +696,7 @@
// The dex PC of a FindLocksAtDexPc request, -1 otherwise.
uint32_t interesting_dex_pc_;
// The container into which FindLocksAtDexPc should write the registers containing held locks,
- // nullptr if we're not doing FindLocksAtDexPc.
+ // null if we're not doing FindLocksAtDexPc.
std::vector<uint32_t>* monitor_enter_dex_pcs_;
// The types of any error that occurs.
@@ -743,6 +753,12 @@
MethodVerifier* link_;
friend class art::Thread;
+
+ // Map of dex pcs of invocations of java.lang.String.<init> to the set of other registers that
+ // contain the uninitialized this pointer to that invoke. Will contain no entry if there are
+ // no other registers.
+ SafeMap<uint32_t, std::set<uint32_t>> string_init_pc_reg_map_;
+
DISALLOW_COPY_AND_ASSIGN(MethodVerifier);
};
std::ostream& operator<<(std::ostream& os, const MethodVerifier::FailureKind& rhs);
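The new string_init_pc_reg_map_ member records, for each dex pc of a java.lang.String.<init> invoke, which other registers alias the uninitialized receiver; the register_line.cc hunk below populates it. A minimal sketch of walking the map that FindStringInitMap returns, assuming only the container shapes declared above:

// Hypothetical diagnostic dump; not part of the verifier API.
// Assumes safe_map.h and base/logging.h.
void DumpStringInitAliases(const SafeMap<uint32_t, std::set<uint32_t>>& map) {
  for (const auto& entry : map) {
    LOG(INFO) << "String.<init> at dex pc " << entry.first << " also initializes:";
    for (uint32_t reg : entry.second) {
      LOG(INFO) << "  v" << reg;
    }
  }
}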
diff --git a/runtime/verifier/method_verifier_test.cc b/runtime/verifier/method_verifier_test.cc
index f67adc1..3994536 100644
--- a/runtime/verifier/method_verifier_test.cc
+++ b/runtime/verifier/method_verifier_test.cc
@@ -31,7 +31,7 @@
protected:
void VerifyClass(const std::string& descriptor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ASSERT_TRUE(descriptor != NULL);
+ ASSERT_TRUE(descriptor != nullptr);
Thread* self = Thread::Current();
mirror::Class* klass = class_linker_->FindSystemClass(self, descriptor.c_str());
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index e4d2c3e..d08c937 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -707,7 +707,7 @@
UnresolvedUninitializedRefType(const std::string& descriptor,
uint32_t allocation_pc, uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : UninitializedType(NULL, descriptor, allocation_pc, cache_id) {
+ : UninitializedType(nullptr, descriptor, allocation_pc, cache_id) {
if (kIsDebugBuild) {
CheckInvariants();
}
@@ -752,7 +752,7 @@
UnresolvedUninitializedThisRefType(const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : UninitializedType(NULL, descriptor, 0, cache_id) {
+ : UninitializedType(nullptr, descriptor, 0, cache_id) {
if (kIsDebugBuild) {
CheckInvariants();
}
@@ -808,7 +808,7 @@
public:
UnresolvedType(const std::string& descriptor, uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : RegType(NULL, descriptor, cache_id) {}
+ : RegType(nullptr, descriptor, cache_id) {}
bool IsNonZeroReferenceTypes() const OVERRIDE;
};
diff --git a/runtime/verifier/reg_type_cache-inl.h b/runtime/verifier/reg_type_cache-inl.h
index 9024a7d..b6f253b 100644
--- a/runtime/verifier/reg_type_cache-inl.h
+++ b/runtime/verifier/reg_type_cache-inl.h
@@ -30,7 +30,7 @@
inline const art::verifier::RegType& RegTypeCache::GetFromId(uint16_t id) const {
DCHECK_LT(id, entries_.size());
const RegType* result = entries_[id];
- DCHECK(result != NULL);
+ DCHECK(result != nullptr);
return *result;
}
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index ed588fc..2838681 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -127,14 +127,25 @@
return true;
}
-void RegisterLine::MarkRefsAsInitialized(MethodVerifier* verifier, const RegType& uninit_type) {
+void RegisterLine::MarkRefsAsInitialized(MethodVerifier* verifier, const RegType& uninit_type,
+ uint32_t this_reg, uint32_t dex_pc) {
DCHECK(uninit_type.IsUninitializedTypes());
+ bool is_string = !uninit_type.IsUnresolvedTypes() && uninit_type.GetClass()->IsStringClass();
const RegType& init_type = verifier->GetRegTypeCache()->FromUninitialized(uninit_type);
size_t changed = 0;
for (uint32_t i = 0; i < num_regs_; i++) {
if (GetRegisterType(verifier, i).Equals(uninit_type)) {
line_[i] = init_type.GetId();
changed++;
+ if (is_string && i != this_reg) {
+ auto it = verifier->GetStringInitPcRegMap().find(dex_pc);
+ if (it != verifier->GetStringInitPcRegMap().end()) {
+ it->second.insert(i);
+ } else {
+ std::set<uint32_t> reg_set = { i };
+ verifier->GetStringInitPcRegMap().Put(dex_pc, reg_set);
+ }
+ }
}
}
DCHECK_GT(changed, 0u);
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index 376dbf1..0de0d9c 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -138,7 +138,8 @@
* reference type. This is called when an appropriate constructor is invoked -- all copies of
* the reference must be marked as initialized.
*/
- void MarkRefsAsInitialized(MethodVerifier* verifier, const RegType& uninit_type)
+ void MarkRefsAsInitialized(MethodVerifier* verifier, const RegType& uninit_type,
+ uint32_t this_reg, uint32_t dex_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index d389244..2843806 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -39,6 +39,7 @@
jclass WellKnownClasses::java_lang_Daemons;
jclass WellKnownClasses::java_lang_Error;
jclass WellKnownClasses::java_lang_Object;
+jclass WellKnownClasses::java_lang_OutOfMemoryError;
jclass WellKnownClasses::java_lang_reflect_AbstractMethod;
jclass WellKnownClasses::java_lang_reflect_ArtMethod;
jclass WellKnownClasses::java_lang_reflect_Constructor;
@@ -48,6 +49,7 @@
jclass WellKnownClasses::java_lang_RuntimeException;
jclass WellKnownClasses::java_lang_StackOverflowError;
jclass WellKnownClasses::java_lang_String;
+jclass WellKnownClasses::java_lang_StringFactory;
jclass WellKnownClasses::java_lang_System;
jclass WellKnownClasses::java_lang_Thread;
jclass WellKnownClasses::java_lang_Thread__UncaughtExceptionHandler;
@@ -78,7 +80,39 @@
jmethodID WellKnownClasses::java_lang_reflect_Proxy_invoke;
jmethodID WellKnownClasses::java_lang_Runtime_nativeLoad;
jmethodID WellKnownClasses::java_lang_Short_valueOf;
-jmethodID WellKnownClasses::java_lang_System_runFinalization = NULL;
+jmethodID WellKnownClasses::java_lang_String_init;
+jmethodID WellKnownClasses::java_lang_String_init_B;
+jmethodID WellKnownClasses::java_lang_String_init_BI;
+jmethodID WellKnownClasses::java_lang_String_init_BII;
+jmethodID WellKnownClasses::java_lang_String_init_BIII;
+jmethodID WellKnownClasses::java_lang_String_init_BIIString;
+jmethodID WellKnownClasses::java_lang_String_init_BString;
+jmethodID WellKnownClasses::java_lang_String_init_BIICharset;
+jmethodID WellKnownClasses::java_lang_String_init_BCharset;
+jmethodID WellKnownClasses::java_lang_String_init_C;
+jmethodID WellKnownClasses::java_lang_String_init_CII;
+jmethodID WellKnownClasses::java_lang_String_init_IIC;
+jmethodID WellKnownClasses::java_lang_String_init_String;
+jmethodID WellKnownClasses::java_lang_String_init_StringBuffer;
+jmethodID WellKnownClasses::java_lang_String_init_III;
+jmethodID WellKnownClasses::java_lang_String_init_StringBuilder;
+jmethodID WellKnownClasses::java_lang_StringFactory_newEmptyString;
+jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromBytes_B;
+jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BI;
+jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BII;
+jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BIII;
+jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BIIString;
+jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BString;
+jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BIICharset;
+jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BCharset;
+jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromChars_C;
+jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromChars_CII;
+jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromChars_IIC;
+jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromString;
+jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromStringBuffer;
+jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromCodePoints;
+jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromStringBuilder;
+jmethodID WellKnownClasses::java_lang_System_runFinalization = nullptr;
jmethodID WellKnownClasses::java_lang_Thread_init;
jmethodID WellKnownClasses::java_lang_Thread_run;
jmethodID WellKnownClasses::java_lang_Thread__UncaughtExceptionHandler_uncaughtException;
@@ -123,7 +157,7 @@
static jclass CacheClass(JNIEnv* env, const char* jni_class_name) {
ScopedLocalRef<jclass> c(env, env->FindClass(jni_class_name));
- if (c.get() == NULL) {
+ if (c.get() == nullptr) {
LOG(FATAL) << "Couldn't find class: " << jni_class_name;
}
return reinterpret_cast<jclass>(env->NewGlobalRef(c.get()));
@@ -134,7 +168,7 @@
jfieldID fid = (is_static ?
env->GetStaticFieldID(c, name, signature) :
env->GetFieldID(c, name, signature));
- if (fid == NULL) {
+ if (fid == nullptr) {
ScopedObjectAccess soa(env);
std::ostringstream os;
WellKnownClasses::ToClass(c)->DumpClass(os, mirror::Class::kDumpClassFullDetail);
@@ -149,7 +183,7 @@
jmethodID mid = (is_static ?
env->GetStaticMethodID(c, name, signature) :
env->GetMethodID(c, name, signature));
- if (mid == NULL) {
+ if (mid == nullptr) {
ScopedObjectAccess soa(env);
std::ostringstream os;
WellKnownClasses::ToClass(c)->DumpClass(os, mirror::Class::kDumpClassFullDetail);
@@ -176,6 +210,7 @@
java_lang_ClassNotFoundException = CacheClass(env, "java/lang/ClassNotFoundException");
java_lang_Daemons = CacheClass(env, "java/lang/Daemons");
java_lang_Object = CacheClass(env, "java/lang/Object");
+ java_lang_OutOfMemoryError = CacheClass(env, "java/lang/OutOfMemoryError");
java_lang_Error = CacheClass(env, "java/lang/Error");
java_lang_reflect_AbstractMethod = CacheClass(env, "java/lang/reflect/AbstractMethod");
java_lang_reflect_ArtMethod = CacheClass(env, "java/lang/reflect/ArtMethod");
@@ -186,6 +221,7 @@
java_lang_RuntimeException = CacheClass(env, "java/lang/RuntimeException");
java_lang_StackOverflowError = CacheClass(env, "java/lang/StackOverflowError");
java_lang_String = CacheClass(env, "java/lang/String");
+ java_lang_StringFactory = CacheClass(env, "java/lang/StringFactory");
java_lang_System = CacheClass(env, "java/lang/System");
java_lang_Thread = CacheClass(env, "java/lang/Thread");
java_lang_Thread__UncaughtExceptionHandler = CacheClass(env,
@@ -212,7 +248,7 @@
ScopedLocalRef<jclass> java_lang_ref_ReferenceQueue(env, env->FindClass("java/lang/ref/ReferenceQueue"));
java_lang_ref_ReferenceQueue_add = CacheMethod(env, java_lang_ref_ReferenceQueue.get(), true, "add", "(Ljava/lang/ref/Reference;)V");
- java_lang_reflect_Proxy_invoke = CacheMethod(env, java_lang_reflect_Proxy, true, "invoke", "(Ljava/lang/reflect/Proxy;Ljava/lang/reflect/ArtMethod;[Ljava/lang/Object;)Ljava/lang/Object;");
+ java_lang_reflect_Proxy_invoke = CacheMethod(env, java_lang_reflect_Proxy, true, "invoke", "(Ljava/lang/reflect/Proxy;Ljava/lang/reflect/Method;[Ljava/lang/Object;)Ljava/lang/Object;");
java_lang_Thread_init = CacheMethod(env, java_lang_Thread, false, "<init>", "(Ljava/lang/ThreadGroup;Ljava/lang/String;IZ)V");
java_lang_Thread_run = CacheMethod(env, java_lang_Thread, false, "run", "()V");
java_lang_Thread__UncaughtExceptionHandler_uncaughtException = CacheMethod(env, java_lang_Thread__UncaughtExceptionHandler, false, "uncaughtException", "(Ljava/lang/Thread;Ljava/lang/Throwable;)V");
@@ -221,6 +257,62 @@
org_apache_harmony_dalvik_ddmc_DdmServer_broadcast = CacheMethod(env, org_apache_harmony_dalvik_ddmc_DdmServer, true, "broadcast", "(I)V");
org_apache_harmony_dalvik_ddmc_DdmServer_dispatch = CacheMethod(env, org_apache_harmony_dalvik_ddmc_DdmServer, true, "dispatch", "(I[BII)Lorg/apache/harmony/dalvik/ddmc/Chunk;");
+ java_lang_String_init = CacheMethod(env, java_lang_String, false, "<init>", "()V");
+ java_lang_String_init_B = CacheMethod(env, java_lang_String, false, "<init>", "([B)V");
+ java_lang_String_init_BI = CacheMethod(env, java_lang_String, false, "<init>", "([BI)V");
+ java_lang_String_init_BII = CacheMethod(env, java_lang_String, false, "<init>", "([BII)V");
+ java_lang_String_init_BIII = CacheMethod(env, java_lang_String, false, "<init>", "([BIII)V");
+ java_lang_String_init_BIIString = CacheMethod(env, java_lang_String, false, "<init>",
+ "([BIILjava/lang/String;)V");
+ java_lang_String_init_BString = CacheMethod(env, java_lang_String, false, "<init>",
+ "([BLjava/lang/String;)V");
+ java_lang_String_init_BIICharset = CacheMethod(env, java_lang_String, false, "<init>",
+ "([BIILjava/nio/charset/Charset;)V");
+ java_lang_String_init_BCharset = CacheMethod(env, java_lang_String, false, "<init>",
+ "([BLjava/nio/charset/Charset;)V");
+ java_lang_String_init_C = CacheMethod(env, java_lang_String, false, "<init>", "([C)V");
+ java_lang_String_init_CII = CacheMethod(env, java_lang_String, false, "<init>", "([CII)V");
+ java_lang_String_init_IIC = CacheMethod(env, java_lang_String, false, "<init>", "(II[C)V");
+ java_lang_String_init_String = CacheMethod(env, java_lang_String, false, "<init>",
+ "(Ljava/lang/String;)V");
+ java_lang_String_init_StringBuffer = CacheMethod(env, java_lang_String, false, "<init>",
+ "(Ljava/lang/StringBuffer;)V");
+ java_lang_String_init_III = CacheMethod(env, java_lang_String, false, "<init>", "([III)V");
+ java_lang_String_init_StringBuilder = CacheMethod(env, java_lang_String, false, "<init>",
+ "(Ljava/lang/StringBuilder;)V");
+ java_lang_StringFactory_newEmptyString = CacheMethod(env, java_lang_StringFactory, true,
+ "newEmptyString", "()Ljava/lang/String;");
+ java_lang_StringFactory_newStringFromBytes_B = CacheMethod(env, java_lang_StringFactory, true,
+ "newStringFromBytes", "([B)Ljava/lang/String;");
+ java_lang_StringFactory_newStringFromBytes_BI = CacheMethod(env, java_lang_StringFactory, true,
+ "newStringFromBytes", "([BI)Ljava/lang/String;");
+ java_lang_StringFactory_newStringFromBytes_BII = CacheMethod(env, java_lang_StringFactory, true,
+ "newStringFromBytes", "([BII)Ljava/lang/String;");
+ java_lang_StringFactory_newStringFromBytes_BIII = CacheMethod(env, java_lang_StringFactory, true,
+ "newStringFromBytes", "([BIII)Ljava/lang/String;");
+ java_lang_StringFactory_newStringFromBytes_BIIString = CacheMethod(env, java_lang_StringFactory,
+ true, "newStringFromBytes", "([BIILjava/lang/String;)Ljava/lang/String;");
+ java_lang_StringFactory_newStringFromBytes_BString = CacheMethod(env, java_lang_StringFactory,
+ true, "newStringFromBytes", "([BLjava/lang/String;)Ljava/lang/String;");
+ java_lang_StringFactory_newStringFromBytes_BIICharset = CacheMethod(env, java_lang_StringFactory,
+ true, "newStringFromBytes", "([BIILjava/nio/charset/Charset;)Ljava/lang/String;");
+ java_lang_StringFactory_newStringFromBytes_BCharset = CacheMethod(env, java_lang_StringFactory,
+ true, "newStringFromBytes", "([BLjava/nio/charset/Charset;)Ljava/lang/String;");
+ java_lang_StringFactory_newStringFromChars_C = CacheMethod(env, java_lang_StringFactory, true,
+ "newStringFromChars", "([C)Ljava/lang/String;");
+ java_lang_StringFactory_newStringFromChars_CII = CacheMethod(env, java_lang_StringFactory, true,
+ "newStringFromChars", "([CII)Ljava/lang/String;");
+ java_lang_StringFactory_newStringFromChars_IIC = CacheMethod(env, java_lang_StringFactory, true,
+ "newStringFromChars", "(II[C)Ljava/lang/String;");
+ java_lang_StringFactory_newStringFromString = CacheMethod(env, java_lang_StringFactory, true,
+ "newStringFromString", "(Ljava/lang/String;)Ljava/lang/String;");
+ java_lang_StringFactory_newStringFromStringBuffer = CacheMethod(env, java_lang_StringFactory,
+ true, "newStringFromStringBuffer", "(Ljava/lang/StringBuffer;)Ljava/lang/String;");
+ java_lang_StringFactory_newStringFromCodePoints = CacheMethod(env, java_lang_StringFactory,
+ true, "newStringFromCodePoints", "([III)Ljava/lang/String;");
+ java_lang_StringFactory_newStringFromStringBuilder = CacheMethod(env, java_lang_StringFactory,
+ true, "newStringFromStringBuilder", "(Ljava/lang/StringBuilder;)Ljava/lang/String;");
+
dalvik_system_DexFile_cookie = CacheField(env, dalvik_system_DexFile, false, "mCookie", "Ljava/lang/Object;");
dalvik_system_PathClassLoader_pathList = CacheField(env, dalvik_system_PathClassLoader, false, "pathList", "Ldalvik/system/DexPathList;");
dalvik_system_DexPathList_dexElements = CacheField(env, dalvik_system_DexPathList, false, "dexElements", "[Ldalvik/system/DexPathList$Element;");
@@ -263,6 +355,8 @@
java_lang_Integer_valueOf = CachePrimitiveBoxingMethod(env, 'I', "java/lang/Integer");
java_lang_Long_valueOf = CachePrimitiveBoxingMethod(env, 'J', "java/lang/Long");
java_lang_Short_valueOf = CachePrimitiveBoxingMethod(env, 'S', "java/lang/Short");
+
+ Thread::Current()->InitStringEntryPoints();
}
void WellKnownClasses::LateInit(JNIEnv* env) {
@@ -274,4 +368,43 @@
return reinterpret_cast<mirror::Class*>(Thread::Current()->DecodeJObject(global_jclass));
}
+jmethodID WellKnownClasses::StringInitToStringFactoryMethodID(jmethodID string_init) {
+ // TODO: Prioritize ordering.
+ if (string_init == java_lang_String_init) {
+ return java_lang_StringFactory_newEmptyString;
+ } else if (string_init == java_lang_String_init_B) {
+ return java_lang_StringFactory_newStringFromBytes_B;
+ } else if (string_init == java_lang_String_init_BI) {
+ return java_lang_StringFactory_newStringFromBytes_BI;
+ } else if (string_init == java_lang_String_init_BII) {
+ return java_lang_StringFactory_newStringFromBytes_BII;
+ } else if (string_init == java_lang_String_init_BIII) {
+ return java_lang_StringFactory_newStringFromBytes_BIII;
+ } else if (string_init == java_lang_String_init_BIIString) {
+ return java_lang_StringFactory_newStringFromBytes_BIIString;
+ } else if (string_init == java_lang_String_init_BString) {
+ return java_lang_StringFactory_newStringFromBytes_BString;
+ } else if (string_init == java_lang_String_init_BIICharset) {
+ return java_lang_StringFactory_newStringFromBytes_BIICharset;
+ } else if (string_init == java_lang_String_init_BCharset) {
+ return java_lang_StringFactory_newStringFromBytes_BCharset;
+ } else if (string_init == java_lang_String_init_C) {
+ return java_lang_StringFactory_newStringFromChars_C;
+ } else if (string_init == java_lang_String_init_CII) {
+ return java_lang_StringFactory_newStringFromChars_CII;
+ } else if (string_init == java_lang_String_init_IIC) {
+ return java_lang_StringFactory_newStringFromChars_IIC;
+ } else if (string_init == java_lang_String_init_String) {
+ return java_lang_StringFactory_newStringFromString;
+ } else if (string_init == java_lang_String_init_StringBuffer) {
+ return java_lang_StringFactory_newStringFromStringBuffer;
+ } else if (string_init == java_lang_String_init_III) {
+ return java_lang_StringFactory_newStringFromCodePoints;
+ } else if (string_init == java_lang_String_init_StringBuilder) {
+ return java_lang_StringFactory_newStringFromStringBuilder;
+ }
+ LOG(FATAL) << "Could not find StringFactory method for String.<init>";
+ return nullptr;
+}
+
} // namespace art
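StringInitToStringFactoryMethodID above performs up to sixteen pointer comparisons per call, and its TODO hints at ordering them by frequency. A sketch of a table-driven alternative (hypothetical, not what ART does), viable because the jmethodIDs are stable once Init() has cached them:

#include <unordered_map>

// Hypothetical: the table is built on first use, which must be after
// WellKnownClasses::Init() has run.
static jmethodID LookupStringFactoryMethod(jmethodID string_init) {
  static const std::unordered_map<jmethodID, jmethodID> kMap = {
      {WellKnownClasses::java_lang_String_init,
       WellKnownClasses::java_lang_StringFactory_newEmptyString},
      {WellKnownClasses::java_lang_String_init_B,
       WellKnownClasses::java_lang_StringFactory_newStringFromBytes_B},
      // ... the remaining fourteen pairs follow the same pattern ...
  };
  auto it = kMap.find(string_init);
  return (it != kMap.end()) ? it->second : nullptr;
}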
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index 2df1c0e..acb2656 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -35,6 +35,7 @@
public:
static void Init(JNIEnv* env); // Run before native methods are registered.
static void LateInit(JNIEnv* env); // Run after native methods are registered.
+ static jmethodID StringInitToStringFactoryMethodID(jmethodID string_init);
static mirror::Class* ToClass(jclass global_jclass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -50,6 +51,7 @@
static jclass java_lang_Daemons;
static jclass java_lang_Error;
static jclass java_lang_Object;
+ static jclass java_lang_OutOfMemoryError;
static jclass java_lang_reflect_AbstractMethod;
static jclass java_lang_reflect_ArtMethod;
static jclass java_lang_reflect_Constructor;
@@ -59,6 +61,7 @@
static jclass java_lang_RuntimeException;
static jclass java_lang_StackOverflowError;
static jclass java_lang_String;
+ static jclass java_lang_StringFactory;
static jclass java_lang_System;
static jclass java_lang_Thread;
static jclass java_lang_ThreadGroup;
@@ -89,6 +92,38 @@
static jmethodID java_lang_reflect_Proxy_invoke;
static jmethodID java_lang_Runtime_nativeLoad;
static jmethodID java_lang_Short_valueOf;
+ static jmethodID java_lang_String_init;
+ static jmethodID java_lang_String_init_B;
+ static jmethodID java_lang_String_init_BI;
+ static jmethodID java_lang_String_init_BII;
+ static jmethodID java_lang_String_init_BIII;
+ static jmethodID java_lang_String_init_BIIString;
+ static jmethodID java_lang_String_init_BString;
+ static jmethodID java_lang_String_init_BIICharset;
+ static jmethodID java_lang_String_init_BCharset;
+ static jmethodID java_lang_String_init_C;
+ static jmethodID java_lang_String_init_CII;
+ static jmethodID java_lang_String_init_IIC;
+ static jmethodID java_lang_String_init_String;
+ static jmethodID java_lang_String_init_StringBuffer;
+ static jmethodID java_lang_String_init_III;
+ static jmethodID java_lang_String_init_StringBuilder;
+ static jmethodID java_lang_StringFactory_newEmptyString;
+ static jmethodID java_lang_StringFactory_newStringFromBytes_B;
+ static jmethodID java_lang_StringFactory_newStringFromBytes_BI;
+ static jmethodID java_lang_StringFactory_newStringFromBytes_BII;
+ static jmethodID java_lang_StringFactory_newStringFromBytes_BIII;
+ static jmethodID java_lang_StringFactory_newStringFromBytes_BIIString;
+ static jmethodID java_lang_StringFactory_newStringFromBytes_BString;
+ static jmethodID java_lang_StringFactory_newStringFromBytes_BIICharset;
+ static jmethodID java_lang_StringFactory_newStringFromBytes_BCharset;
+ static jmethodID java_lang_StringFactory_newStringFromChars_C;
+ static jmethodID java_lang_StringFactory_newStringFromChars_CII;
+ static jmethodID java_lang_StringFactory_newStringFromChars_IIC;
+ static jmethodID java_lang_StringFactory_newStringFromString;
+ static jmethodID java_lang_StringFactory_newStringFromStringBuffer;
+ static jmethodID java_lang_StringFactory_newStringFromCodePoints;
+ static jmethodID java_lang_StringFactory_newStringFromStringBuilder;
static jmethodID java_lang_System_runFinalization;
static jmethodID java_lang_Thread_init;
static jmethodID java_lang_Thread_run;
diff --git a/runtime/zip_archive.cc b/runtime/zip_archive.cc
index ffab674..88c1f69 100644
--- a/runtime/zip_archive.cc
+++ b/runtime/zip_archive.cc
@@ -56,7 +56,7 @@
name += " extracted in memory from ";
name += zip_filename;
std::unique_ptr<MemMap> map(MemMap::MapAnonymous(name.c_str(),
- NULL, GetUncompressedLength(),
+ nullptr, GetUncompressedLength(),
PROT_READ | PROT_WRITE, false, false,
error_msg));
if (map.get() == nullptr) {
diff --git a/runtime/zip_archive.h b/runtime/zip_archive.h
index 865af51..717eb8c 100644
--- a/runtime/zip_archive.h
+++ b/runtime/zip_archive.h
@@ -57,7 +57,7 @@
class ZipArchive {
public:
- // return new ZipArchive instance on success, NULL on error.
+ // return new ZipArchive instance on success, null on error.
static ZipArchive* Open(const char* filename, std::string* error_msg);
static ZipArchive* OpenFromFd(int fd, const char* filename, std::string* error_msg);
diff --git a/runtime/zip_archive_test.cc b/runtime/zip_archive_test.cc
index 70a4dda..aded30c 100644
--- a/runtime/zip_archive_test.cc
+++ b/runtime/zip_archive_test.cc
@@ -42,11 +42,11 @@
ScratchFile tmp;
ASSERT_NE(-1, tmp.GetFd());
std::unique_ptr<File> file(new File(tmp.GetFd(), tmp.GetFilename(), false));
- ASSERT_TRUE(file.get() != NULL);
+ ASSERT_TRUE(file.get() != nullptr);
bool success = zip_entry->ExtractToFile(*file, &error_msg);
ASSERT_TRUE(success) << error_msg;
ASSERT_TRUE(error_msg.empty());
- file.reset(NULL);
+ file.reset(nullptr);
uint32_t computed_crc = crc32(0L, Z_NULL, 0);
int fd = open(tmp.GetFilename().c_str(), O_RDONLY);
diff --git a/sigchainlib/sigchain.cc b/sigchainlib/sigchain.cc
index e61fcd8..0359ed3 100644
--- a/sigchainlib/sigchain.cc
+++ b/sigchainlib/sigchain.cc
@@ -51,7 +51,7 @@
// Unclaim the signal and restore the old action.
void Unclaim(int signal) {
claimed_ = false;
- sigaction(signal, &action_, NULL); // Restore old action.
+ sigaction(signal, &action_, nullptr); // Restore old action.
}
// Get the action associated with this signal.
@@ -133,14 +133,14 @@
const struct sigaction& action = user_sigactions[sig].GetAction();
if (user_sigactions[sig].OldStyle()) {
- if (action.sa_handler != NULL) {
+ if (action.sa_handler != nullptr) {
action.sa_handler(sig);
} else {
signal(sig, SIG_DFL);
raise(sig);
}
} else {
- if (action.sa_sigaction != NULL) {
+ if (action.sa_sigaction != nullptr) {
action.sa_sigaction(sig, info, context);
} else {
signal(sig, SIG_DFL);
@@ -172,10 +172,10 @@
if (signal > 0 && signal < _NSIG && user_sigactions[signal].IsClaimed() &&
(new_action == nullptr || new_action->sa_handler != SIG_DFL)) {
struct sigaction saved_action = user_sigactions[signal].GetAction();
- if (new_action != NULL) {
+ if (new_action != nullptr) {
user_sigactions[signal].SetAction(*new_action, false);
}
- if (old_action != NULL) {
+ if (old_action != nullptr) {
*old_action = saved_action;
}
return 0;
@@ -242,7 +242,7 @@
extern "C" int sigprocmask(int how, const sigset_t* bionic_new_set, sigset_t* bionic_old_set) {
const sigset_t* new_set_ptr = bionic_new_set;
sigset_t tmpset;
- if (bionic_new_set != NULL) {
+ if (bionic_new_set != nullptr) {
tmpset = *bionic_new_set;
if (how == SIG_BLOCK) {
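The InvokeUserSignalHandler dispatch above mirrors the two POSIX registration styles: a plain one-argument sa_handler versus the three-argument sa_sigaction selected by SA_SIGINFO. For reference, a standalone sketch of registering each style (ordinary POSIX code, not part of the sigchainlib API):

#include <csignal>

static void OldStyle(int sig) { /* one-argument handler */ }
static void NewStyle(int sig, siginfo_t* info, void* ucontext) { /* three-argument handler */ }

void RegisterBothStyles() {
  struct sigaction old_style = {};
  old_style.sa_handler = OldStyle;    // Chained via action.sa_handler above.
  sigaction(SIGUSR1, &old_style, nullptr);

  struct sigaction new_style = {};
  new_style.sa_sigaction = NewStyle;  // Chained via action.sa_sigaction above.
  new_style.sa_flags = SA_SIGINFO;    // Requests the three-argument form.
  sigaction(SIGUSR2, &new_style, nullptr);
}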
diff --git a/test/004-JniTest/jni_test.cc b/test/004-JniTest/jni_test.cc
index 544cbc5..1ec0cf2 100644
--- a/test/004-JniTest/jni_test.cc
+++ b/test/004-JniTest/jni_test.cc
@@ -25,7 +25,7 @@
#error test code compiled without NDEBUG
#endif
-static JavaVM* jvm = NULL;
+static JavaVM* jvm = nullptr;
extern "C" JNIEXPORT jint JNI_OnLoad(JavaVM *vm, void *) {
assert(vm != nullptr);
@@ -38,7 +38,7 @@
assert(jvm != nullptr);
JNIEnv* env = nullptr;
- JavaVMAttachArgs args = { JNI_VERSION_1_6, __FUNCTION__, NULL };
+ JavaVMAttachArgs args = { JNI_VERSION_1_6, __FUNCTION__, nullptr };
int attach_result = jvm->AttachCurrentThread(&env, &args);
assert(attach_result == 0);
@@ -548,3 +548,60 @@
extern "C" void JNICALL Java_Main_testCallNonvirtual(JNIEnv* env, jclass) {
JniCallNonvirtualVoidMethodTest(env).Test();
}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_testNewStringObject(JNIEnv* env, jclass) {
+ jclass c = env->FindClass("java/lang/String");
+ assert(c != nullptr);
+
+ jmethodID mid1 = env->GetMethodID(c, "<init>", "()V");
+ assert(mid1 != nullptr);
+ assert(!env->ExceptionCheck());
+ jmethodID mid2 = env->GetMethodID(c, "<init>", "([B)V");
+ assert(mid2 != nullptr);
+ assert(!env->ExceptionCheck());
+ jmethodID mid3 = env->GetMethodID(c, "<init>", "([C)V");
+ assert(mid3 != nullptr);
+ assert(!env->ExceptionCheck());
+ jmethodID mid4 = env->GetMethodID(c, "<init>", "(Ljava/lang/String;)V");
+ assert(mid4 != nullptr);
+ assert(!env->ExceptionCheck());
+
+ const char* test_array = "Test";
+ int byte_array_length = strlen(test_array);
+ jbyteArray byte_array = env->NewByteArray(byte_array_length);
+ env->SetByteArrayRegion(byte_array, 0, byte_array_length, reinterpret_cast<const jbyte*>(test_array));
+
+ // Test NewObject
+ jstring s = reinterpret_cast<jstring>(env->NewObject(c, mid2, byte_array));
+ assert(s != nullptr);
+ assert(env->GetStringLength(s) == byte_array_length);
+ assert(env->GetStringUTFLength(s) == byte_array_length);
+ const char* chars = env->GetStringUTFChars(s, nullptr);
+ assert(strcmp(test_array, chars) == 0);
+ env->ReleaseStringUTFChars(s, chars);
+
+ // Test AllocObject and Call(Nonvirtual)VoidMethod
+ jstring s1 = reinterpret_cast<jstring>(env->AllocObject(c));
+ assert(s1 != nullptr);
+ jstring s2 = reinterpret_cast<jstring>(env->AllocObject(c));
+ assert(s2 != nullptr);
+ jstring s3 = reinterpret_cast<jstring>(env->AllocObject(c));
+ assert(s3 != nullptr);
+ jstring s4 = reinterpret_cast<jstring>(env->AllocObject(c));
+ assert(s4 != nullptr);
+
+ jcharArray char_array = env->NewCharArray(5);
+ jstring string_arg = env->NewStringUTF("helloworld");
+
+ // With Var Args
+ env->CallVoidMethod(s1, mid1);
+ env->CallNonvirtualVoidMethod(s2, c, mid2, byte_array);
+
+ // With JValues
+ jvalue args3[1];
+ args3[0].l = char_array;
+ jvalue args4[1];
+ args4[0].l = string_arg;
+ env->CallVoidMethodA(s3, mid3, args3);
+ env->CallNonvirtualVoidMethodA(s4, c, mid4, args4);
+}
diff --git a/test/004-JniTest/src/Main.java b/test/004-JniTest/src/Main.java
index 8e92010..584fae3 100644
--- a/test/004-JniTest/src/Main.java
+++ b/test/004-JniTest/src/Main.java
@@ -33,6 +33,7 @@
testShallowGetCallingClassLoader();
testShallowGetStackClass2();
testCallNonvirtual();
+ testNewStringObject();
}
private static native void testFindClassOnAttachedNativeThread();
@@ -184,6 +185,8 @@
private static native void nativeTestShallowGetStackClass2();
private static native void testCallNonvirtual();
+
+ private static native void testNewStringObject();
}
class JniCallNonvirtualTest {
diff --git a/test/004-SignalTest/signaltest.cc b/test/004-SignalTest/signaltest.cc
index 876d27e..1414715 100644
--- a/test/004-SignalTest/signaltest.cc
+++ b/test/004-SignalTest/signaltest.cc
@@ -89,7 +89,7 @@
}
// Prevent the compiler being a smart-alec and optimizing out the assignment
-// to nullptr.
+// to null.
char *go_away_compiler = nullptr;
extern "C" JNIEXPORT jint JNICALL Java_Main_testSignal(JNIEnv*, jclass) {
diff --git a/test/008-exceptions/expected.txt b/test/008-exceptions/expected.txt
index ef6eaff..92c79dc 100644
--- a/test/008-exceptions/expected.txt
+++ b/test/008-exceptions/expected.txt
@@ -1,9 +1,12 @@
Got an NPE: second throw
java.lang.NullPointerException: second throw
- at Main.catchAndRethrow(Main.java:39)
- at Main.exceptions_007(Main.java:23)
- at Main.main(Main.java:31)
+ at Main.catchAndRethrow(Main.java:58)
+ at Main.exceptions_007(Main.java:41)
+ at Main.main(Main.java:49)
Caused by: java.lang.NullPointerException: first throw
- at Main.throwNullPointerException(Main.java:46)
- at Main.catchAndRethrow(Main.java:36)
+ at Main.throwNullPointerException(Main.java:65)
+ at Main.catchAndRethrow(Main.java:55)
... 2 more
+Static Init
+BadError: This is bad by convention
+BadError: This is bad by convention
diff --git a/test/008-exceptions/src/Main.java b/test/008-exceptions/src/Main.java
index 1f76f12..7f6d0c5 100644
--- a/test/008-exceptions/src/Main.java
+++ b/test/008-exceptions/src/Main.java
@@ -14,6 +14,24 @@
* limitations under the License.
*/
+// An exception that doesn't have a <init>(String) method.
+class BadError extends Error {
+ public BadError() {
+ super("This is bad by convention");
+ }
+}
+
+// A class that throws BadError during static initialization.
+class BadInit {
+ static int dummy;
+ static {
+ System.out.println("Static Init");
+ if (true) {
+ throw new BadError();
+ }
+ }
+}
+
/**
* Exceptions across method calls
*/
@@ -29,6 +47,7 @@
}
public static void main (String args[]) {
exceptions_007();
+ exceptionsRethrowClassInitFailure();
}
private static void catchAndRethrow() {
@@ -45,4 +64,26 @@
private static void throwNullPointerException() {
throw new NullPointerException("first throw");
}
+
+ private static void exceptionsRethrowClassInitFailure() {
+ try {
+ try {
+ BadInit.dummy = 1;
+ throw new IllegalStateException("Should not reach here.");
+ } catch (BadError e) {
+ System.out.println(e);
+ }
+
+ // Check if it works a second time.
+
+ try {
+ BadInit.dummy = 1;
+ throw new IllegalStateException("Should not reach here.");
+ } catch (BadError e) {
+ System.out.println(e);
+ }
+ } catch (Exception error) {
+ error.printStackTrace();
+ }
+ }
}
diff --git a/test/020-string/src/Main.java b/test/020-string/src/Main.java
index bb8ce1f..b876e6a 100644
--- a/test/020-string/src/Main.java
+++ b/test/020-string/src/Main.java
@@ -14,6 +14,9 @@
* limitations under the License.
*/
+import java.nio.charset.Charset;
+import java.io.UnsupportedEncodingException;
+
/**
* Simple string test.
*/
@@ -21,6 +24,7 @@
public static void main(String args[]) {
basicTest();
indexTest();
+ constructorTest();
}
public static void basicTest() {
@@ -81,4 +85,36 @@
subStr.indexOf('&') + ":" +
baseStr.indexOf(0x12341234));
}
+
+ public static void constructorTest() {
+ byte[] byteArray = "byteArray".getBytes();
+ char[] charArray = new char[] { 'c', 'h', 'a', 'r', 'A', 'r', 'r', 'a', 'y' };
+ String charsetName = "US-ASCII";
+ Charset charset = Charset.forName("UTF-8");
+ String string = "string";
+ StringBuffer stringBuffer = new StringBuffer("stringBuffer");
+ int [] codePoints = new int[] { 65, 66, 67, 68, 69 };
+ StringBuilder stringBuilder = new StringBuilder("stringBuilder");
+
+ String s1 = new String();
+ String s2 = new String(byteArray);
+ String s3 = new String(byteArray, 1);
+ String s4 = new String(byteArray, 0, 4);
+ String s5 = new String(byteArray, 2, 4, 5);
+
+ try {
+ String s6 = new String(byteArray, 2, 4, charsetName);
+ String s7 = new String(byteArray, charsetName);
+ } catch (UnsupportedEncodingException e) {
+ System.out.println("Got unexpected UnsupportedEncodingException");
+ }
+ String s8 = new String(byteArray, 3, 3, charset);
+ String s9 = new String(byteArray, charset);
+ String s10 = new String(charArray);
+ String s11 = new String(charArray, 0, 4);
+ String s12 = new String(string);
+ String s13 = new String(stringBuffer);
+ String s14 = new String(codePoints, 1, 3);
+ String s15 = new String(stringBuilder);
+ }
}
diff --git a/test/021-string2/expected.txt b/test/021-string2/expected.txt
index bd7f049..a9c6eb8 100644
--- a/test/021-string2/expected.txt
+++ b/test/021-string2/expected.txt
@@ -1 +1,2 @@
Got expected npe
+OK
diff --git a/test/021-string2/src/Main.java b/test/021-string2/src/Main.java
index 0239a3c..0226614 100644
--- a/test/021-string2/src/Main.java
+++ b/test/021-string2/src/Main.java
@@ -15,12 +15,13 @@
*/
import junit.framework.Assert;
+import java.lang.reflect.Method;
/**
* more string tests
*/
public class Main {
- public static void main(String args[]) {
+ public static void main(String args[]) throws Exception {
String test = "0123456789";
String test1 = new String("0123456789"); // different object
String test2 = new String("0123456780"); // different value
@@ -83,5 +84,10 @@
Assert.assertEquals("this is a path", test.replaceAll("/", " "));
Assert.assertEquals("this is a path", test.replace("/", " "));
+
+ Class Strings = Class.forName("com.android.org.bouncycastle.util.Strings");
+ Method fromUTF8ByteArray = Strings.getDeclaredMethod("fromUTF8ByteArray", byte[].class);
+ String result = (String) fromUTF8ByteArray.invoke(null, new byte[] {'O', 'K'});
+ System.out.println(result);
}
}
diff --git a/test/046-reflect/src/Main.java b/test/046-reflect/src/Main.java
index 59f7001..0d8e576 100644
--- a/test/046-reflect/src/Main.java
+++ b/test/046-reflect/src/Main.java
@@ -233,6 +233,20 @@
field.set(instance, null);
/*
+ * Try getDeclaredField on a non-existent field.
+ */
+ try {
+ field = target.getDeclaredField("nonExistant");
+ System.out.println("ERROR: Expected NoSuchFieldException");
+ } catch (NoSuchFieldException nsfe) {
+ String msg = nsfe.getMessage();
+ if (!msg.contains("Target;")) {
+ System.out.println(" NoSuchFieldException '" + msg +
+ "' didn't contain class");
+ }
+ }
+
+ /*
* Do some stuff with long.
*/
long longVal;
@@ -868,4 +882,4 @@
System.out.println(e);
}
}
-}
\ No newline at end of file
+}
diff --git a/test/068-classloader/expected.txt b/test/068-classloader/expected.txt
index bf131ee..8725799 100644
--- a/test/068-classloader/expected.txt
+++ b/test/068-classloader/expected.txt
@@ -11,3 +11,5 @@
DoubledImplement one
Got LinkageError on DI (early)
Got LinkageError on IDI (early)
+class Main
+Got expected ClassNotFoundException
diff --git a/test/068-classloader/src-ex/MutationTarget.java b/test/068-classloader/src-ex/MutationTarget.java
new file mode 100644
index 0000000..b02a236
--- /dev/null
+++ b/test/068-classloader/src-ex/MutationTarget.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Mutator target, see Mutator.java.
+ */
+public class MutationTarget {
+ public static int value = 0;
+}
\ No newline at end of file
diff --git a/test/068-classloader/src-ex/Mutator.java b/test/068-classloader/src-ex/Mutator.java
new file mode 100644
index 0000000..6bcd5b8
--- /dev/null
+++ b/test/068-classloader/src-ex/Mutator.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Simple mutator that changes a static field of the mutator target. This requires a dex-cache
+ * access, so this setup lets the test check correct disambiguation between multiple class-loaders.
+ */
+public class Mutator {
+ public static void mutate(int v) {
+ MutationTarget.value = v;
+ }
+}
\ No newline at end of file
diff --git a/test/068-classloader/src/Main.java b/test/068-classloader/src/Main.java
index 7dfb6f5..361e293 100644
--- a/test/068-classloader/src/Main.java
+++ b/test/068-classloader/src/Main.java
@@ -21,7 +21,7 @@
/**
* Main entry point.
*/
- public static void main(String[] args) {
+ public static void main(String[] args) throws Exception {
FancyLoader loader;
loader = new FancyLoader(ClassLoader.getSystemClassLoader());
@@ -58,6 +58,65 @@
testAbstract(loader);
testImplement(loader);
testIfaceImplement(loader);
+
+ testSeparation();
+
+ testClassForName();
+ }
+
+ static void testSeparation() {
+ FancyLoader loader1 = new FancyLoader(ClassLoader.getSystemClassLoader());
+ FancyLoader loader2 = new FancyLoader(ClassLoader.getSystemClassLoader());
+
+ try {
+ Class target1 = loader1.loadClass("MutationTarget");
+ Class target2 = loader2.loadClass("MutationTarget");
+
+ if (target1 == target2) {
+ throw new RuntimeException("target1 should not be equal to target2");
+ }
+
+ Class mutator1 = loader1.loadClass("Mutator");
+ Class mutator2 = loader2.loadClass("Mutator");
+
+ if (mutator1 == mutator2) {
+ throw new RuntimeException("mutator1 should not be equal to mutator2");
+ }
+
+ runMutator(mutator1, 1);
+
+ int value = getMutationTargetValue(target1);
+ if (value != 1) {
+ throw new RuntimeException("target 1 has unexpected value " + value);
+ }
+ value = getMutationTargetValue(target2);
+ if (value != 0) {
+ throw new RuntimeException("target 2 has unexpected value " + value);
+ }
+
+ runMutator(mutator2, 2);
+
+ value = getMutationTargetValue(target1);
+ if (value != 1) {
+ throw new RuntimeException("target 1 has unexpected value " + value);
+ }
+ value = getMutationTargetValue(target2);
+ if (value != 2) {
+ throw new RuntimeException("target 2 has unexpected value " + value);
+ }
+ } catch (Exception ex) {
+ ex.printStackTrace();
+ }
+ }
+
+ private static void runMutator(Class c, int v) throws Exception {
+ java.lang.reflect.Method m = c.getDeclaredMethod("mutate", int.class);
+ m.invoke(null, v);
+ }
+
+ private static int getMutationTargetValue(Class c) throws Exception {
+ java.lang.reflect.Field f = c.getDeclaredField("value");
+ return f.getInt(null);
}
/**
@@ -422,4 +481,13 @@
DoubledImplement2 di2 = ifaceSuper.getDoubledInstance2();
di2.one();
}
+
+ static void testClassForName() throws Exception {
+ System.out.println(Class.forName("Main").toString());
+ try {
+ System.out.println(Class.forName("Main", false, null).toString());
+ } catch (ClassNotFoundException expected) {
+ System.out.println("Got expected ClassNotFoundException");
+ }
+ }
}
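
Note on testClassForName above: passing a null loader to Class.forName(String, boolean, ClassLoader) means the bootstrap class loader, which only sees the core libraries, so an application class such as Main is not found. A tiny sketch of the distinction (not part of this change; it assumes an application class named Main on the classpath, as in the test):

    public class ForNameDemo {
        public static void main(String[] args) throws Exception {
            Class.forName("Main");               // caller's loader: finds the app class
            Class.forName("Main", false, null);  // bootstrap loader only: throws ClassNotFoundException
        }
    }
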
diff --git a/test/080-oom-throw/expected.txt b/test/080-oom-throw/expected.txt
index 73cc0d8..904393b 100644
--- a/test/080-oom-throw/expected.txt
+++ b/test/080-oom-throw/expected.txt
@@ -1,2 +1,3 @@
+Test reflection correctly threw
NEW_ARRAY correctly threw OOME
NEW_INSTANCE correctly threw OOME
diff --git a/test/080-oom-throw/src/Main.java b/test/080-oom-throw/src/Main.java
index c93f8bb..f007b25 100644
--- a/test/080-oom-throw/src/Main.java
+++ b/test/080-oom-throw/src/Main.java
@@ -14,6 +14,9 @@
* limitations under the License.
*/
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+
public class Main {
static class ArrayMemEater {
static boolean sawOome;
@@ -68,6 +71,10 @@
}
public static void main(String[] args) {
+ if (triggerReflectionOOM()) {
+ System.out.println("Test reflection correctly threw");
+ }
+
if (triggerArrayOOM()) {
System.out.println("NEW_ARRAY correctly threw OOME");
}
@@ -76,4 +83,46 @@
System.out.println("NEW_INSTANCE correctly threw OOME");
}
}
+
+ static Object[] holder;
+
+ public static void blowup() throws Exception {
+ int size = 32 * 1024 * 1024;
+ for (int i = 0; i < holder.length; ) {
+ try {
+ holder[i] = new char[size];
+ i++;
+ } catch (OutOfMemoryError oome) {
+ size = size / 2;
+ if (size == 0) {
+ break;
+ }
+ }
+ }
+ holder[0] = new char[100000];
+ }
+
+ static boolean triggerReflectionOOM() {
+ try {
+ Class<?> c = Main.class;
+ Method m = c.getMethod("blowup", (Class[]) null);
+ holder = new Object[1000000];
+ m.invoke(null);
+ holder = null;
+ System.out.println("Didn't throw from blowup");
+ } catch (OutOfMemoryError e) {
+ holder = null;
+ } catch (InvocationTargetException e) {
+ holder = null;
+ if (!(e.getCause() instanceof OutOfMemoryError)) {
+ System.out.println("InvocationTargetException cause not OOME " + e.getCause());
+ return false;
+ }
+ } catch (Exception e) {
+ holder = null;
+ System.out.println("Unexpected exception " + e);
+ return false;
+ }
+ return true;
+ }
}
diff --git a/test/082-inline-execute/src/Main.java b/test/082-inline-execute/src/Main.java
index 0e90c4d..4dfa73c 100644
--- a/test/082-inline-execute/src/Main.java
+++ b/test/082-inline-execute/src/Main.java
@@ -236,15 +236,6 @@
String str10 = "abcdefghij";
String str40 = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabc";
- int supplementaryChar = 0x20b9f;
- String surrogatePair = "\ud842\udf9f";
- String stringWithSurrogates = "hello " + surrogatePair + " world";
-
- Assert.assertEquals(stringWithSurrogates.indexOf(supplementaryChar), "hello ".length());
- Assert.assertEquals(stringWithSurrogates.indexOf(supplementaryChar, 2), "hello ".length());
- Assert.assertEquals(stringWithSurrogates.indexOf(supplementaryChar, 6), 6);
- Assert.assertEquals(stringWithSurrogates.indexOf(supplementaryChar, 7), -1);
-
Assert.assertEquals(str0.indexOf('a'), -1);
Assert.assertEquals(str3.indexOf('a'), 0);
Assert.assertEquals(str3.indexOf('b'), 1);
@@ -269,24 +260,123 @@
Assert.assertEquals(str40.indexOf('a',10), 10);
Assert.assertEquals(str40.indexOf('b',40), -1);
+ testIndexOfNull();
+
+ // Same data as above, but stored so it's not a literal in the next test. -2 stands for
+ // indexOf(I) instead of indexOf(II).
+ start--;
+ int[][] searchData = {
+ { 'a', -2, -1 },
+ { 'a', -2, 0 },
+ { 'b', -2, 1 },
+ { 'c', -2, 2 },
+ { 'j', -2, 9 },
+ { 'a', -2, 0 },
+ { 'b', -2, 38 },
+ { 'c', -2, 39 },
+ { 'a', 20, -1 },
+ { 'a', 0, -1 },
+ { 'a', -1, -1 },
+ { '/', ++start, -1 },
+ { 'a', negIndex[0], -1 },
+ { 'a', 0, 0 },
+ { 'a', 1, -1 },
+ { 'a', 1234, -1 },
+ { 'b', 0, 1 },
+ { 'b', 1, 1 },
+ { 'c', 2, 2 },
+ { 'j', 5, 9 },
+ { 'j', 9, 9 },
+ { 'a', 10, 10 },
+ { 'b', 40, -1 },
+ };
+ testStringIndexOfChars(searchData);
+
+ testSurrogateIndexOf();
+ }
+
+ private static void testStringIndexOfChars(int[][] searchData) {
+ // Use a try-catch to avoid inlining.
+ try {
+ testStringIndexOfCharsImpl(searchData);
+ } catch (Exception e) {
+ System.out.println("Unexpected exception");
+ }
+ }
+
+ private static void testStringIndexOfCharsImpl(int[][] searchData) {
+ String str0 = "";
+ String str1 = "/";
+ String str3 = "abc";
+ String str10 = "abcdefghij";
+ String str40 = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabc";
+
+ Assert.assertEquals(str0.indexOf(searchData[0][0]), searchData[0][2]);
+ Assert.assertEquals(str3.indexOf(searchData[1][0]), searchData[1][2]);
+ Assert.assertEquals(str3.indexOf(searchData[2][0]), searchData[2][2]);
+ Assert.assertEquals(str3.indexOf(searchData[3][0]), searchData[3][2]);
+ Assert.assertEquals(str10.indexOf(searchData[4][0]), searchData[4][2]);
+ Assert.assertEquals(str40.indexOf(searchData[5][0]), searchData[5][2]);
+ Assert.assertEquals(str40.indexOf(searchData[6][0]), searchData[6][2]);
+ Assert.assertEquals(str40.indexOf(searchData[7][0]), searchData[7][2]);
+ Assert.assertEquals(str0.indexOf(searchData[8][0], searchData[8][1]), searchData[8][2]);
+ Assert.assertEquals(str0.indexOf(searchData[9][0], searchData[9][1]), searchData[9][2]);
+ Assert.assertEquals(str0.indexOf(searchData[10][0], searchData[10][1]), searchData[10][2]);
+ Assert.assertEquals(str1.indexOf(searchData[11][0], searchData[11][1]), searchData[11][2]);
+ Assert.assertEquals(str1.indexOf(searchData[12][0], searchData[12][1]), searchData[12][2]);
+ Assert.assertEquals(str3.indexOf(searchData[13][0], searchData[13][1]), searchData[13][2]);
+ Assert.assertEquals(str3.indexOf(searchData[14][0], searchData[14][1]), searchData[14][2]);
+ Assert.assertEquals(str3.indexOf(searchData[15][0], searchData[15][1]), searchData[15][2]);
+ Assert.assertEquals(str3.indexOf(searchData[16][0], searchData[16][1]), searchData[16][2]);
+ Assert.assertEquals(str3.indexOf(searchData[17][0], searchData[17][1]), searchData[17][2]);
+ Assert.assertEquals(str3.indexOf(searchData[18][0], searchData[18][1]), searchData[18][2]);
+ Assert.assertEquals(str10.indexOf(searchData[19][0], searchData[19][1]), searchData[19][2]);
+ Assert.assertEquals(str10.indexOf(searchData[20][0], searchData[20][1]), searchData[20][2]);
+ Assert.assertEquals(str40.indexOf(searchData[21][0], searchData[21][1]), searchData[21][2]);
+ Assert.assertEquals(str40.indexOf(searchData[22][0], searchData[22][1]), searchData[22][2]);
+ }
+
+ private static void testSurrogateIndexOf() {
+ int supplementaryChar = 0x20b9f;
+ String surrogatePair = "\ud842\udf9f";
+ String stringWithSurrogates = "hello " + surrogatePair + " world";
+
+ Assert.assertEquals(stringWithSurrogates.indexOf(supplementaryChar), "hello ".length());
+ Assert.assertEquals(stringWithSurrogates.indexOf(supplementaryChar, 2), "hello ".length());
+ Assert.assertEquals(stringWithSurrogates.indexOf(supplementaryChar, 6), 6);
+ Assert.assertEquals(stringWithSurrogates.indexOf(supplementaryChar, 7), -1);
+
+ Assert.assertEquals(stringWithSurrogates.indexOf(supplementaryChar - 0x10000), -1);
+ Assert.assertEquals(stringWithSurrogates.indexOf(supplementaryChar | 0x80000000), -1);
+ }
+
+ private static void testIndexOfNull() {
String strNull = null;
try {
- strNull.indexOf('a');
+ testNullIndex(strNull, 'a');
Assert.fail();
} catch (NullPointerException expected) {
}
try {
- strNull.indexOf('a', 0);
+ testNullIndex(strNull, 'a', 0);
Assert.fail();
} catch (NullPointerException expected) {
}
try {
- strNull.indexOf('a', -1);
+ testNullIndex(strNull, 'a', -1);
Assert.fail();
} catch (NullPointerException expected) {
}
}
+ private static int testNullIndex(String strNull, int c) {
+ return strNull.indexOf(c);
+ }
+
+ private static int testNullIndex(String strNull, int c, int startIndex) {
+ return strNull.indexOf(c, startIndex);
+ }
+
public static void test_String_compareTo() {
String test = "0123456789";
String test1 = new String("0123456789"); // different object
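
For context on testSurrogateIndexOf above: a supplementary code point such as U+20B9F occupies two chars (a surrogate pair) in a String, and String.indexOf(int) matches it as a single code point, while a numerically related but different code point is simply not found. A small sketch using only the standard library (not part of this change):

    public class SurrogateDemo {
        public static void main(String[] args) {
            int cp = 0x20b9f;  // supplementary code point, stored as a surrogate pair
            String s = "ab" + new String(Character.toChars(cp)) + "cd";
            System.out.println(s.length());               // 6: the pair takes two chars
            System.out.println(s.indexOf(cp));            // 2: matched as one code point
            System.out.println(s.indexOf(cp - 0x10000));  // -1: different code point, absent
        }
    }
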
diff --git a/test/090-loop-formation/expected.txt b/test/090-loop-formation/expected.txt
index b7e0bb3..b945c30 100644
--- a/test/090-loop-formation/expected.txt
+++ b/test/090-loop-formation/expected.txt
@@ -3,3 +3,4 @@
counter3 is 32767
counter4 is 0
counter5 is 65534
+256
diff --git a/test/090-loop-formation/src/Main.java b/test/090-loop-formation/src/Main.java
index 7c16667..16ff3b2 100644
--- a/test/090-loop-formation/src/Main.java
+++ b/test/090-loop-formation/src/Main.java
@@ -52,5 +52,31 @@
System.out.println("counter3 is " + counter3);
System.out.println("counter4 is " + counter4);
System.out.println("counter5 is " + counter5);
+
+ deeplyNested();
+ }
+
+ // GVN is limited to a maximum loop depth of 6. To check that dependent passes are
+ // correctly turned off beyond that depth, test some very simple but deeply nested loops.
+ private static void deeplyNested() {
+ int sum = 0;
+ for (int i = 0; i < 2; i++) {
+ for (int j = 0; j < 2; j++) {
+ for (int k = 0; k < 2; k++) {
+ for (int l = 0; l < 2; l++) {
+ for (int m = 0; m < 2; m++) {
+ for (int n = 0; n < 2; n++) {
+ for (int o = 0; o < 2; o++) {
+ for (int p = 0; p < 2; p++) {
+ sum++;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ System.out.println(sum);
}
}
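
The new expected output follows directly from the loop structure: eight nested loops of two iterations each run the innermost increment 2^8 = 256 times, hence the "256" line added to expected.txt. Equivalently, as a one-liner:

    // Eight doublings: 2 * 2 * 2 * 2 * 2 * 2 * 2 * 2 = 2^8 = 256
    System.out.println(1 << 8);  // prints 256
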
diff --git a/test/098-ddmc/src/Main.java b/test/098-ddmc/src/Main.java
index 962bd7f..f41ff2a 100644
--- a/test/098-ddmc/src/Main.java
+++ b/test/098-ddmc/src/Main.java
@@ -44,7 +44,7 @@
System.out.println("Confirm when we overflow, we don't roll over to zero. b/17392248");
final int overflowAllocations = 64 * 1024; // Won't fit in unsigned 16-bit value.
for (int i = 0; i < overflowAllocations; i++) {
- new String("fnord");
+ new Object();
}
Allocations after = new Allocations(DdmVmInternal.getRecentAllocations());
System.out.println("before < overflowAllocations=" + (before.numberOfEntries < overflowAllocations));
diff --git a/test/100-reflect2/expected.txt b/test/100-reflect2/expected.txt
index 1f8df1d..7db61a1 100644
--- a/test/100-reflect2/expected.txt
+++ b/test/100-reflect2/expected.txt
@@ -32,8 +32,8 @@
62 (class java.lang.Long)
14 (class java.lang.Short)
[public java.lang.String(), java.lang.String(int,int,char[]), public java.lang.String(java.lang.String), public java.lang.String(java.lang.StringBuffer), public java.lang.String(java.lang.StringBuilder), public java.lang.String(byte[]), public java.lang.String(byte[],int), public java.lang.String(byte[],int,int), public java.lang.String(byte[],int,int,int), public java.lang.String(byte[],int,int,java.lang.String) throws java.io.UnsupportedEncodingException, public java.lang.String(byte[],int,int,java.nio.charset.Charset), public java.lang.String(byte[],java.lang.String) throws java.io.UnsupportedEncodingException, public java.lang.String(byte[],java.nio.charset.Charset), public java.lang.String(char[]), public java.lang.String(char[],int,int), public java.lang.String(int[],int,int)]
-[private final int java.lang.String.count, private int java.lang.String.hashCode, private final int java.lang.String.offset, private final char[] java.lang.String.value, private static final char[] java.lang.String.ASCII, public static final java.util.Comparator java.lang.String.CASE_INSENSITIVE_ORDER, private static final char java.lang.String.REPLACEMENT_CHAR, private static final long java.lang.String.serialVersionUID]
-[void java.lang.String._getChars(int,int,char[],int), public char java.lang.String.charAt(int), public int java.lang.String.codePointAt(int), public int java.lang.String.codePointBefore(int), public int java.lang.String.codePointCount(int,int), public int java.lang.String.compareTo(java.lang.Object), public native int java.lang.String.compareTo(java.lang.String), public int java.lang.String.compareToIgnoreCase(java.lang.String), public java.lang.String java.lang.String.concat(java.lang.String), public boolean java.lang.String.contains(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.StringBuffer), public boolean java.lang.String.endsWith(java.lang.String), public boolean java.lang.String.equals(java.lang.Object), public boolean java.lang.String.equalsIgnoreCase(java.lang.String), public void java.lang.String.getBytes(int,int,byte[],int), public [B java.lang.String.getBytes(), public [B java.lang.String.getBytes(java.lang.String) throws java.io.UnsupportedEncodingException, public [B java.lang.String.getBytes(java.nio.charset.Charset), public void java.lang.String.getChars(int,int,char[],int), public int java.lang.String.hashCode(), public int java.lang.String.indexOf(int), public int java.lang.String.indexOf(int,int), public int java.lang.String.indexOf(java.lang.String), public int java.lang.String.indexOf(java.lang.String,int), public native java.lang.String java.lang.String.intern(), public boolean java.lang.String.isEmpty(), public int java.lang.String.lastIndexOf(int), public int java.lang.String.lastIndexOf(int,int), public int java.lang.String.lastIndexOf(java.lang.String), public int java.lang.String.lastIndexOf(java.lang.String,int), public int java.lang.String.length(), public boolean java.lang.String.matches(java.lang.String), public int java.lang.String.offsetByCodePoints(int,int), public boolean java.lang.String.regionMatches(int,java.lang.String,int,int), public boolean java.lang.String.regionMatches(boolean,int,java.lang.String,int,int), public java.lang.String java.lang.String.replace(char,char), public java.lang.String java.lang.String.replace(java.lang.CharSequence,java.lang.CharSequence), public java.lang.String java.lang.String.replaceAll(java.lang.String,java.lang.String), public java.lang.String java.lang.String.replaceFirst(java.lang.String,java.lang.String), public [Ljava.lang.String; java.lang.String.split(java.lang.String), public [Ljava.lang.String; java.lang.String.split(java.lang.String,int), public boolean java.lang.String.startsWith(java.lang.String), public boolean java.lang.String.startsWith(java.lang.String,int), public java.lang.CharSequence java.lang.String.subSequence(int,int), public java.lang.String java.lang.String.substring(int), public java.lang.String java.lang.String.substring(int,int), public [C java.lang.String.toCharArray(), public java.lang.String java.lang.String.toLowerCase(), public java.lang.String java.lang.String.toLowerCase(java.util.Locale), public java.lang.String java.lang.String.toString(), public java.lang.String java.lang.String.toUpperCase(), public java.lang.String java.lang.String.toUpperCase(java.util.Locale), public java.lang.String java.lang.String.trim(), public static java.lang.String java.lang.String.copyValueOf(char[]), public static java.lang.String java.lang.String.copyValueOf(char[],int,int), private java.lang.StringIndexOutOfBoundsException java.lang.String.failedBoundsCheck(int,int,int), private native int java.lang.String.fastIndexOf(int,int), private char java.lang.String.foldCase(char), public static java.lang.String java.lang.String.format(java.lang.String,java.lang.Object[]), public static java.lang.String java.lang.String.format(java.util.Locale,java.lang.String,java.lang.Object[]), private java.lang.StringIndexOutOfBoundsException java.lang.String.indexAndLength(int), private static int java.lang.String.indexOf(java.lang.String,java.lang.String,int,int,char), private int java.lang.String.indexOfSupplementary(int,int), private int java.lang.String.lastIndexOfSupplementary(int,int), private java.lang.StringIndexOutOfBoundsException java.lang.String.startEndAndLength(int,int), public static java.lang.String java.lang.String.valueOf(char), public static java.lang.String java.lang.String.valueOf(double), public static java.lang.String java.lang.String.valueOf(float), public static java.lang.String java.lang.String.valueOf(int), public static java.lang.String java.lang.String.valueOf(long), public static java.lang.String java.lang.String.valueOf(java.lang.Object), public static java.lang.String java.lang.String.valueOf(boolean), public static java.lang.String java.lang.String.valueOf(char[]), public static java.lang.String java.lang.String.valueOf(char[],int,int)]
+[private final int java.lang.String.count, private int java.lang.String.hashCode, private static final char[] java.lang.String.ASCII, public static final java.util.Comparator java.lang.String.CASE_INSENSITIVE_ORDER, private static final char java.lang.String.REPLACEMENT_CHAR, private static final long java.lang.String.serialVersionUID]
+[public native char java.lang.String.charAt(int), public int java.lang.String.codePointAt(int), public int java.lang.String.codePointBefore(int), public int java.lang.String.codePointCount(int,int), public int java.lang.String.compareTo(java.lang.Object), public native int java.lang.String.compareTo(java.lang.String), public int java.lang.String.compareToIgnoreCase(java.lang.String), public native java.lang.String java.lang.String.concat(java.lang.String), public boolean java.lang.String.contains(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.StringBuffer), public boolean java.lang.String.endsWith(java.lang.String), public boolean java.lang.String.equals(java.lang.Object), public boolean java.lang.String.equalsIgnoreCase(java.lang.String), public void java.lang.String.getBytes(int,int,byte[],int), public [B java.lang.String.getBytes(), public [B java.lang.String.getBytes(java.lang.String) throws java.io.UnsupportedEncodingException, public [B java.lang.String.getBytes(java.nio.charset.Charset), public void java.lang.String.getChars(int,int,char[],int), native void java.lang.String.getCharsNoCheck(int,int,char[],int), public int java.lang.String.hashCode(), public int java.lang.String.indexOf(int), public int java.lang.String.indexOf(int,int), public int java.lang.String.indexOf(java.lang.String), public int java.lang.String.indexOf(java.lang.String,int), public native java.lang.String java.lang.String.intern(), public boolean java.lang.String.isEmpty(), public int java.lang.String.lastIndexOf(int), public int java.lang.String.lastIndexOf(int,int), public int java.lang.String.lastIndexOf(java.lang.String), public int java.lang.String.lastIndexOf(java.lang.String,int), public int java.lang.String.length(), public boolean java.lang.String.matches(java.lang.String), public int java.lang.String.offsetByCodePoints(int,int), public boolean java.lang.String.regionMatches(int,java.lang.String,int,int), public boolean java.lang.String.regionMatches(boolean,int,java.lang.String,int,int), public java.lang.String java.lang.String.replace(char,char), public java.lang.String java.lang.String.replace(java.lang.CharSequence,java.lang.CharSequence), public java.lang.String java.lang.String.replaceAll(java.lang.String,java.lang.String), public java.lang.String java.lang.String.replaceFirst(java.lang.String,java.lang.String), native void java.lang.String.setCharAt(int,char), public [Ljava.lang.String; java.lang.String.split(java.lang.String), public [Ljava.lang.String; java.lang.String.split(java.lang.String,int), public boolean java.lang.String.startsWith(java.lang.String), public boolean java.lang.String.startsWith(java.lang.String,int), public java.lang.CharSequence java.lang.String.subSequence(int,int), public java.lang.String java.lang.String.substring(int), public java.lang.String java.lang.String.substring(int,int), public native [C java.lang.String.toCharArray(), public java.lang.String java.lang.String.toLowerCase(), public java.lang.String java.lang.String.toLowerCase(java.util.Locale), public java.lang.String java.lang.String.toString(), public java.lang.String java.lang.String.toUpperCase(), public java.lang.String java.lang.String.toUpperCase(java.util.Locale), public java.lang.String java.lang.String.trim(), public static java.lang.String java.lang.String.copyValueOf(char[]), public static java.lang.String java.lang.String.copyValueOf(char[],int,int), private java.lang.StringIndexOutOfBoundsException java.lang.String.failedBoundsCheck(int,int,int), private native int java.lang.String.fastIndexOf(int,int), private native java.lang.String java.lang.String.fastSubstring(int,int), private char java.lang.String.foldCase(char), public static java.lang.String java.lang.String.format(java.lang.String,java.lang.Object[]), public static java.lang.String java.lang.String.format(java.util.Locale,java.lang.String,java.lang.Object[]), private java.lang.StringIndexOutOfBoundsException java.lang.String.indexAndLength(int), private static int java.lang.String.indexOf(java.lang.String,java.lang.String,int,int,char), private int java.lang.String.indexOfSupplementary(int,int), private int java.lang.String.lastIndexOfSupplementary(int,int), private java.lang.StringIndexOutOfBoundsException java.lang.String.startEndAndLength(int,int), public static java.lang.String java.lang.String.valueOf(char), public static java.lang.String java.lang.String.valueOf(double), public static java.lang.String java.lang.String.valueOf(float), public static java.lang.String java.lang.String.valueOf(int), public static java.lang.String java.lang.String.valueOf(long), public static java.lang.String java.lang.String.valueOf(java.lang.Object), public static java.lang.String java.lang.String.valueOf(boolean), public static java.lang.String java.lang.String.valueOf(char[]), public static java.lang.String java.lang.String.valueOf(char[],int,int)]
[]
[interface java.io.Serializable, interface java.lang.Comparable, interface java.lang.CharSequence]
0
diff --git a/test/100-reflect2/src/Main.java b/test/100-reflect2/src/Main.java
index 0cc1488..72e14b1 100644
--- a/test/100-reflect2/src/Main.java
+++ b/test/100-reflect2/src/Main.java
@@ -266,9 +266,37 @@
show(ctor.newInstance(new char[] { 'x', 'y', 'z', '!' }, 1, 2));
}
+ private static void testPackagePrivateConstructor() {
+ try {
+ Class<?> c = Class.forName("sub.PPClass");
+ Constructor cons = c.getConstructor();
+ cons.newInstance();
+ throw new RuntimeException("Expected IllegalAccessException.");
+ } catch (IllegalAccessException e) {
+ // Expected.
+ } catch (Exception e) {
+ // Error.
+ e.printStackTrace();
+ }
+ }
+
+ private static void testPackagePrivateAccessibleConstructor() {
+ try {
+ Class<?> c = Class.forName("sub.PPClass");
+ Constructor cons = c.getConstructor();
+ cons.setAccessible(true); // Suppress access checks so we don't get an IllegalAccessException.
+ cons.newInstance();
+ } catch (Exception e) {
+ // Error.
+ e.printStackTrace();
+ }
+ }
+
public static void main(String[] args) throws Exception {
testFieldReflection();
testMethodReflection();
testConstructorReflection();
+ testPackagePrivateConstructor();
+ testPackagePrivateAccessibleConstructor();
}
}
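
Note on the two new tests above: sub.PPClass's constructor is public, but the class itself is package-private, so a caller outside the package fails the access check in newInstance() unless setAccessible(true) suppresses it. A minimal sketch of the general rule (not part of this change; the names mirror the test, the rest is illustrative):

    import java.lang.reflect.Constructor;

    public class AccessDemo {
        public static void main(String[] args) throws Exception {
            // PPClass is package-private; its no-arg constructor is public.
            Constructor<?> cons = Class.forName("sub.PPClass").getConstructor();
            try {
                cons.newInstance();  // throws: the class is not accessible from here
            } catch (IllegalAccessException expected) {
                cons.setAccessible(true);                 // suppress the language-level check
                System.out.println(cons.newInstance());   // now succeeds
            }
        }
    }
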
diff --git a/test/100-reflect2/src/sub/PPClass.java b/test/100-reflect2/src/sub/PPClass.java
new file mode 100644
index 0000000..d972287
--- /dev/null
+++ b/test/100-reflect2/src/sub/PPClass.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package sub;
+
+// A package-private class with a public constructor.
+class PPClass {
+ public PPClass() {
+ }
+}
\ No newline at end of file
diff --git a/test/104-growth-limit/src/Main.java b/test/104-growth-limit/src/Main.java
index d666377..d31cbf1 100644
--- a/test/104-growth-limit/src/Main.java
+++ b/test/104-growth-limit/src/Main.java
@@ -29,26 +29,28 @@
final Method get_runtime = vm_runtime.getDeclaredMethod("getRuntime");
final Object runtime = get_runtime.invoke(null);
final Method clear_growth_limit = vm_runtime.getDeclaredMethod("clearGrowthLimit");
+ List<byte[]> l = new ArrayList<byte[]>();
try {
- List<byte[]> l = new ArrayList<byte[]>();
while (true) {
// Allocate a MB at a time
l.add(new byte[1048576]);
alloc1++;
}
} catch (OutOfMemoryError e) {
+ l = null;
}
// Expand the heap to the maximum size.
clear_growth_limit.invoke(runtime);
int alloc2 = 1;
+ l = new ArrayList<byte[]>();
try {
- List<byte[]> l = new ArrayList<byte[]>();
while (true) {
// Allocate a MB at a time
l.add(new byte[1048576]);
alloc2++;
}
} catch (OutOfMemoryError e2) {
+ l = null;
if (alloc1 > alloc2) {
System.out.println("ERROR: Allocated less memory after growth" +
"limit cleared (" + alloc1 + " MBs > " + alloc2 + " MBs");
diff --git a/test/115-native-bridge/expected.txt b/test/115-native-bridge/expected.txt
index 16a71e4..deb70ba 100644
--- a/test/115-native-bridge/expected.txt
+++ b/test/115-native-bridge/expected.txt
@@ -4,7 +4,7 @@
Ready for native bridge tests.
Checking for support.
Getting trampoline for JNI_OnLoad with shorty (null).
-Test ART callbacks: all JNI function number is 9.
+Test ART callbacks: all JNI function number is 10.
name:booleanMethod, signature:(ZZZZZZZZZZ)Z, shorty:ZZZZZZZZZZZ.
name:byteMethod, signature:(BBBBBBBBBB)B, shorty:BBBBBBBBBBB.
name:charMethod, signature:(CCCCCCCCCC)C, shorty:CCCCCCCCCCC.
@@ -13,6 +13,7 @@
name:testFindClassOnAttachedNativeThread, signature:()V, shorty:V.
name:testFindFieldOnAttachedNativeThreadNative, signature:()V, shorty:V.
name:testGetMirandaMethodNative, signature:()Ljava/lang/reflect/Method;, shorty:L.
+ name:testNewStringObject, signature:()V, shorty:V.
name:testZeroLengthByteBuffers, signature:()V, shorty:V.
trampoline_JNI_OnLoad called!
Getting trampoline for Java_Main_testFindClassOnAttachedNativeThread with shorty V.
@@ -55,3 +56,5 @@
trampoline_Java_Main_charMethod called!
trampoline_Java_Main_charMethod called!
trampoline_Java_Main_charMethod called!
+Getting trampoline for Java_Main_testNewStringObject with shorty V.
+trampoline_Java_Main_testNewStringObject called!
diff --git a/test/115-native-bridge/nativebridge.cc b/test/115-native-bridge/nativebridge.cc
index 6bcc1f5..db2fc9b 100644
--- a/test/115-native-bridge/nativebridge.cc
+++ b/test/115-native-bridge/nativebridge.cc
@@ -122,6 +122,14 @@
return fnPtr(env, klass);
}
+static void trampoline_Java_Main_testNewStringObject(JNIEnv* env, jclass klass) {
+ typedef void (*FnPtr_t)(JNIEnv*, jclass);
+ FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>
+ (find_native_bridge_method("testNewStringObject")->fnPtr);
+ printf("%s called!\n", __FUNCTION__);
+ return fnPtr(env, klass);
+}
+
static void trampoline_Java_Main_testZeroLengthByteBuffers(JNIEnv* env, jclass klass) {
typedef void (*FnPtr_t)(JNIEnv*, jclass);
FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>
@@ -190,6 +198,8 @@
reinterpret_cast<void*>(trampoline_Java_Main_testFindFieldOnAttachedNativeThreadNative) },
{ "testGetMirandaMethodNative", "()Ljava/lang/reflect/Method;", true, nullptr,
reinterpret_cast<void*>(trampoline_Java_Main_testGetMirandaMethodNative) },
+ { "testNewStringObject", "()V", true, nullptr,
+ reinterpret_cast<void*>(trampoline_Java_Main_testNewStringObject) },
{ "testZeroLengthByteBuffers", "()V", true, nullptr,
reinterpret_cast<void*>(trampoline_Java_Main_testZeroLengthByteBuffers) },
};
@@ -317,5 +327,7 @@
.loadLibrary = &native_bridge_loadLibrary,
.getTrampoline = &native_bridge_getTrampoline,
.isSupported = &native_bridge_isSupported,
- .getAppEnv = &native_bridge_getAppEnv
+ .getAppEnv = &native_bridge_getAppEnv,
+ .isCompatibleWith = nullptr,
+ .getSignalHandler = nullptr
};
diff --git a/test/115-native-bridge/src/NativeBridgeMain.java b/test/115-native-bridge/src/NativeBridgeMain.java
index 2405627..c843707 100644
--- a/test/115-native-bridge/src/NativeBridgeMain.java
+++ b/test/115-native-bridge/src/NativeBridgeMain.java
@@ -31,6 +31,7 @@
testBooleanMethod();
testCharMethod();
testEnvironment();
+ testNewStringObject();
}
public static native void testFindClassOnAttachedNativeThread();
@@ -167,6 +168,8 @@
// throw new AssertionError("unexpected value for supported_abis");
// }
}
+
+ private static native void testNewStringObject();
}
public class NativeBridgeMain {
diff --git a/test/127-secondarydex/expected.txt b/test/127-secondarydex/expected.txt
index 29a1411..1c8defb 100644
--- a/test/127-secondarydex/expected.txt
+++ b/test/127-secondarydex/expected.txt
@@ -1,3 +1,4 @@
testSlowPathDirectInvoke
Test
Got null pointer exception
+Test
diff --git a/test/127-secondarydex/src/Main.java b/test/127-secondarydex/src/Main.java
index c921c5b..0ede8ed 100644
--- a/test/127-secondarydex/src/Main.java
+++ b/test/127-secondarydex/src/Main.java
@@ -24,6 +24,7 @@
public class Main {
public static void main(String[] args) {
testSlowPathDirectInvoke();
+ testString();
}
public static void testSlowPathDirectInvoke() {
@@ -40,4 +41,11 @@
System.out.println("Got unexpected exception " + e);
}
}
+
+ // For the String change, test that String.<init> is compiled properly in a
+ // secondary dex file. See http://b/20870917
+ public static void testString() {
+ Test t = new Test();
+ System.out.println(t.toString());
+ }
}
diff --git a/test/127-secondarydex/src/Test.java b/test/127-secondarydex/src/Test.java
index 82cb901..8547e79 100644
--- a/test/127-secondarydex/src/Test.java
+++ b/test/127-secondarydex/src/Test.java
@@ -22,4 +22,8 @@
private void print() {
System.out.println("Test");
}
+
+ public String toString() {
+ return new String("Test");
+ }
}
diff --git a/test/137-cfi/cfi.cc b/test/137-cfi/cfi.cc
new file mode 100644
index 0000000..b2d7e55
--- /dev/null
+++ b/test/137-cfi/cfi.cc
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if __linux__
+#include <errno.h>
+#include <signal.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/ptrace.h>
+#include <sys/wait.h>
+#endif
+
+#include "jni.h"
+
+#include <backtrace/Backtrace.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "utils.h"
+
+namespace art {
+
+// For testing debuggerd. We do not have expected-death tests, so we can't test this by default.
+// Code for this is copied from SignalTest.
+static constexpr bool kCauseSegfault = false;
+char* go_away_compiler_cfi = nullptr;
+
+static void CauseSegfault() {
+#if defined(__arm__) || defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
+ // On supported architectures we cause a real SEGV.
+ *go_away_compiler_cfi = 'a';
+#else
+ // On other architectures we simulate SEGV.
+ kill(getpid(), SIGSEGV);
+#endif
+}
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_sleep(JNIEnv*, jobject, jint, jboolean, jdouble) {
+ // Keep pausing.
+ for (;;) {
+ pause();
+ }
+}
+
+// Helper to look for a sequence in the stack trace.
+#if __linux__
+static bool CheckStack(Backtrace* bt, const std::vector<std::string>& seq) {
+ size_t cur_search_index = 0; // The currently active index in seq.
+ CHECK_GT(seq.size(), 0U);
+
+ for (Backtrace::const_iterator it = bt->begin(); it != bt->end(); ++it) {
+ if (BacktraceMap::IsValid(it->map)) {
+ LOG(INFO) << "Got " << it->func_name << ", looking for " << seq[cur_search_index];
+ if (it->func_name == seq[cur_search_index]) {
+ cur_search_index++;
+ if (cur_search_index == seq.size()) {
+ return true;
+ }
+ }
+ }
+ }
+
+ return false;
+}
+#endif
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_unwindInProcess(JNIEnv*, jobject, jint, jboolean) {
+#if __linux__
+ // TODO: What to do on Valgrind?
+
+ std::unique_ptr<Backtrace> bt(Backtrace::Create(BACKTRACE_CURRENT_PROCESS, GetTid()));
+ if (!bt->Unwind(0, nullptr)) {
+ return JNI_FALSE;
+ } else if (bt->NumFrames() == 0) {
+ return JNI_FALSE;
+ }
+
+ // We cannot expect an exact stack, as the optimizing compiler may inline some functions.
+ // Deduplication is another risk, since identical methods may share code, so the test must
+ // only expect functions that are unique.
+ std::vector<std::string> seq = {
+ "Java_Main_unwindInProcess", // This function.
+ "boolean Main.unwindInProcess(int, boolean)", // The corresponding Java native method frame.
+ "void Main.main(java.lang.String[])" // The Java entry method.
+ };
+
+ bool result = CheckStack(bt.get(), seq);
+ if (!kCauseSegfault) {
+ return result ? JNI_TRUE : JNI_FALSE;
+ } else {
+ LOG(INFO) << "Result of check-stack: " << result;
+ }
+#endif
+
+ if (kCauseSegfault) {
+ CauseSegfault();
+ }
+
+ return JNI_FALSE;
+}
+
+#if __linux__
+static constexpr int kSleepTimeMicroseconds = 50000; // 0.05 seconds
+static constexpr int kMaxTotalSleepTimeMicroseconds = 1000000; // 1 second
+
+// Wait for a sigstop. This code is copied from libbacktrace.
+int wait_for_sigstop(pid_t tid, int* total_sleep_time_usec, bool* detach_failed ATTRIBUTE_UNUSED) {
+ for (;;) {
+ int status;
+ pid_t n = TEMP_FAILURE_RETRY(waitpid(tid, &status, __WALL | WNOHANG));
+ if (n == -1) {
+ PLOG(WARNING) << "waitpid failed: tid " << tid;
+ break;
+ } else if (n == tid) {
+ if (WIFSTOPPED(status)) {
+ return WSTOPSIG(status);
+ } else {
+ PLOG(ERROR) << "unexpected waitpid response: n=" << n << ", status=" << std::hex << status;
+ break;
+ }
+ }
+
+ if (*total_sleep_time_usec > kMaxTotalSleepTimeMicroseconds) {
+ PLOG(WARNING) << "timed out waiting for stop signal: tid=" << tid;
+ break;
+ }
+
+ usleep(kSleepTimeMicroseconds);
+ *total_sleep_time_usec += kSleepTimeMicroseconds;
+ }
+
+ return -1;
+}
+#endif
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_unwindOtherProcess(JNIEnv*, jobject, jint pid_int) {
+#if __linux__
+ // TODO: What to do on Valgrind?
+ pid_t pid = static_cast<pid_t>(pid_int);
+
+ // OK, this is painful. debuggerd uses ptrace to unwind other processes.
+
+ if (ptrace(PTRACE_ATTACH, pid, 0, 0)) {
+ // We're not able to attach; bad.
+ PLOG(ERROR) << "Failed to attach.";
+ kill(pid, SIGCONT);
+ return JNI_FALSE;
+ }
+
+ kill(pid, SIGSTOP);
+
+ bool detach_failed = false;
+ int total_sleep_time_usec = 0;
+ int signal = wait_for_sigstop(pid, &total_sleep_time_usec, &detach_failed);
+ if (signal == -1) {
+ LOG(WARNING) << "wait_for_sigstop failed.";
+ }
+
+ std::unique_ptr<Backtrace> bt(Backtrace::Create(pid, BACKTRACE_CURRENT_THREAD));
+ bool result = true;
+ if (!bt->Unwind(0, nullptr)) {
+ result = false;
+ } else if (bt->NumFrames() == 0) {
+ result = false;
+ }
+
+ if (result) {
+ // See comment in unwindInProcess for non-exact stack matching.
+ std::vector<std::string> seq = {
+ // "Java_Main_sleep", // The sleep function being executed in the
+ // other runtime.
+ // Note: For some reason, the name isn't
+ // resolved, so don't look for it right now.
+ "boolean Main.sleep(int, boolean, double)", // The corresponding Java native method frame.
+ "void Main.main(java.lang.String[])" // The Java entry method.
+ };
+
+ result = CheckStack(bt.get(), seq);
+ }
+
+ if (ptrace(PTRACE_DETACH, pid, 0, 0) != 0) {
+ PLOG(ERROR) << "Detach failed";
+ }
+
+ // Continue the process so we can kill it on the Java side.
+ kill(pid, SIGCONT);
+
+ return result ? JNI_TRUE : JNI_FALSE;
+#else
+ return JNI_FALSE;
+#endif
+}
+
+} // namespace art
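
CheckStack above is an in-order subsequence match over the unwound frames: it walks the backtrace once, advances through the expected names only on a match, and succeeds when every expected name has been seen in order (unrelated frames may be interleaved; the map-validity filter is omitted here). The same logic in a hedged Java sketch (not part of this change):

    import java.util.Arrays;
    import java.util.List;

    public class SubseqDemo {
        // True if 'expected' occurs in 'frames' in order, possibly with gaps.
        static boolean containsInOrder(List<String> frames, List<String> expected) {
            int next = 0;
            for (String frame : frames) {
                if (next < expected.size() && frame.equals(expected.get(next))) {
                    next++;
                }
            }
            return next == expected.size();
        }

        public static void main(String[] args) {
            List<String> bt = Arrays.asList("libc", "Java_Main_sleep", "Main.sleep", "Main.main");
            System.out.println(containsInOrder(bt, Arrays.asList("Main.sleep", "Main.main")));  // true
            System.out.println(containsInOrder(bt, Arrays.asList("Main.main", "Main.sleep")));  // false
        }
    }
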
diff --git a/test/137-cfi/expected.txt b/test/137-cfi/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/137-cfi/expected.txt
diff --git a/test/137-cfi/info.txt b/test/137-cfi/info.txt
new file mode 100644
index 0000000..7d59605
--- /dev/null
+++ b/test/137-cfi/info.txt
@@ -0,0 +1 @@
+Test that unwinding with CFI info works.
diff --git a/test/137-cfi/src/Main.java b/test/137-cfi/src/Main.java
new file mode 100644
index 0000000..e184e66
--- /dev/null
+++ b/test/137-cfi/src/Main.java
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.BufferedReader;
+import java.io.FileReader;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+
+public class Main {
+ // Whether to test local unwinding. Libunwind uses linker info to find executables. As we do
+ // not dlopen at the moment, this doesn't work, so keep it off for now.
+ public final static boolean TEST_LOCAL_UNWINDING = false;
+
+ // Unwinding another process, modelling debuggerd. This doesn't use the linker, so it should
+ // work whether or not we're using dlopen.
+ public final static boolean TEST_REMOTE_UNWINDING = true;
+
+ private boolean secondary;
+
+ public Main(boolean secondary) {
+ this.secondary = secondary;
+ }
+
+ public static void main(String[] args) throws Exception {
+ boolean secondary = false;
+ if (args.length > 0 && args[args.length - 1].equals("--secondary")) {
+ secondary = true;
+ }
+ new Main(secondary).run();
+ }
+
+ static {
+ System.loadLibrary("arttest");
+ }
+
+ private void run() {
+ if (secondary) {
+ if (!TEST_REMOTE_UNWINDING) {
+ throw new RuntimeException("Should not be running secondary!");
+ }
+ runSecondary();
+ } else {
+ runPrimary();
+ }
+ }
+
+ private void runSecondary() {
+ foo(true);
+ throw new RuntimeException("Didn't expect to get back...");
+ }
+
+ private void runPrimary() {
+ // First do the in-process unwinding.
+ if (TEST_LOCAL_UNWINDING && !foo(false)) {
+ System.out.println("Unwinding self failed.");
+ }
+
+ if (!TEST_REMOTE_UNWINDING) {
+ // Skip the remote step.
+ return;
+ }
+
+ // Fork the secondary.
+ String[] cmdline = getCmdLine();
+ String[] secCmdLine = new String[cmdline.length + 1];
+ System.arraycopy(cmdline, 0, secCmdLine, 0, cmdline.length);
+ secCmdLine[secCmdLine.length - 1] = "--secondary";
+ Process p = exec(secCmdLine);
+
+ try {
+ int pid = getPid(p);
+ if (pid <= 0) {
+ throw new RuntimeException("Couldn't parse process");
+ }
+
+ // Wait a bit, so the forked process has time to run until its sleep phase.
+ try {
+ Thread.sleep(5000);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+
+ if (!unwindOtherProcess(pid)) {
+ System.out.println("Unwinding other process failed.");
+ }
+ } finally {
+ // Kill the forked process.
+ p.destroy();
+ }
+ }
+
+ private static Process exec(String[] args) {
+ try {
+ return Runtime.getRuntime().exec(args);
+ } catch (Exception exc) {
+ throw new RuntimeException(exc);
+ }
+ }
+
+ private static int getPid(Process p) {
+ // Could do reflection for the private pid field, but String parsing is easier.
+ String s = p.toString();
+ if (s.startsWith("Process[pid=")) {
+ return Integer.parseInt(s.substring("Process[pid=".length(), s.length() - 1));
+ } else {
+ return -1;
+ }
+ }
+
+ // Read /proc/self/cmdline to find the invocation command line (so we can fork another runtime).
+ private static String[] getCmdLine() {
+ try {
+ BufferedReader in = new BufferedReader(new FileReader("/proc/self/cmdline"));
+ String s = in.readLine();
+ in.close();
+ return s.split("\0");
+ } catch (Exception exc) {
+ throw new RuntimeException(exc);
+ }
+ }
+
+ public boolean foo(boolean b) {
+ return bar(b);
+ }
+
+ public boolean bar(boolean b) {
+ if (b) {
+ return sleep(2, b, 1.0);
+ } else {
+ return unwindInProcess(1, b);
+ }
+ }
+
+ // Native functions. Note: to avoid deduping, they must all have different signatures.
+
+ public native boolean sleep(int i, boolean b, double dummy);
+
+ public native boolean unwindInProcess(int i, boolean b);
+ public native boolean unwindOtherProcess(int pid);
+}
diff --git a/test/138-duplicate-classes-check/expected.txt b/test/138-duplicate-classes-check/expected.txt
new file mode 100644
index 0000000..b2f7f08
--- /dev/null
+++ b/test/138-duplicate-classes-check/expected.txt
@@ -0,0 +1,2 @@
+10
+10
diff --git a/test/138-duplicate-classes-check/info.txt b/test/138-duplicate-classes-check/info.txt
new file mode 100644
index 0000000..22a66a2
--- /dev/null
+++ b/test/138-duplicate-classes-check/info.txt
@@ -0,0 +1 @@
+Check whether a duplicate class is detected.
diff --git a/test/138-duplicate-classes-check/src-ex/A.java b/test/138-duplicate-classes-check/src-ex/A.java
new file mode 100644
index 0000000..8e52cb3
--- /dev/null
+++ b/test/138-duplicate-classes-check/src-ex/A.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class A {
+ public volatile int i;
+
+ public A() {
+ i = 10;
+ }
+}
diff --git a/test/138-duplicate-classes-check/src-ex/TestEx.java b/test/138-duplicate-classes-check/src-ex/TestEx.java
new file mode 100644
index 0000000..87558fa
--- /dev/null
+++ b/test/138-duplicate-classes-check/src-ex/TestEx.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class TestEx {
+ public static void test() {
+ System.out.println(new A().i);
+ }
+}
diff --git a/test/138-duplicate-classes-check/src/A.java b/test/138-duplicate-classes-check/src/A.java
new file mode 100644
index 0000000..e1773e5
--- /dev/null
+++ b/test/138-duplicate-classes-check/src/A.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class A {
+ // Object fields add padding in this A class object layout. Therefore the field 'i' should
+ // be at a different offset compared to the A class from the -ex DEX file.
+ public final Object anObject = null;
+ public final Object anotherObject = null;
+ // Use volatile to defeat inlining of the constructor + load-elimination.
+ public volatile int i;
+
+ public A() {
+ i = 10;
+ }
+}
diff --git a/test/138-duplicate-classes-check/src/FancyLoader.java b/test/138-duplicate-classes-check/src/FancyLoader.java
new file mode 100644
index 0000000..03ec948
--- /dev/null
+++ b/test/138-duplicate-classes-check/src/FancyLoader.java
@@ -0,0 +1,229 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+import java.lang.reflect.InvocationTargetException;
+
+/**
+ * A class loader with atypical behavior: we try to load a private
+ * class implementation before asking the system or boot loader. This
+ * is used to create multiple classes with identical names in a single VM.
+ *
+ * If DexFile is available, we use that; if not, we assume we're not in
+ * Dalvik and instantiate the class with defineClass().
+ *
+ * The location of the DEX files and class data is dependent upon the
+ * test framework.
+ */
+public class FancyLoader extends ClassLoader {
+ /* this is where the "alternate" .class files live */
+ static final String CLASS_PATH = "classes-ex/";
+
+ /* this is the "alternate" DEX/Jar file */
+ static final String DEX_FILE = System.getenv("DEX_LOCATION") +
+ "/138-duplicate-classes-check-ex.jar";
+
+ /* on Dalvik, this is a DexFile; otherwise, it's null */
+ private Class mDexClass;
+
+ private Object mDexFile;
+
+ /**
+ * Construct FancyLoader, grabbing a reference to the DexFile class
+ * if we're running under Dalvik.
+ */
+ public FancyLoader(ClassLoader parent) {
+ super(parent);
+
+ try {
+ mDexClass = parent.loadClass("dalvik.system.DexFile");
+ } catch (ClassNotFoundException cnfe) {
+ // ignore -- not running Dalvik
+ }
+ }
+
+ /**
+ * Finds the class with the specified binary name.
+ *
+ * We search for a file in CLASS_PATH or pull an entry from DEX_FILE.
+ * If we don't find a match, we throw an exception.
+ */
+ protected Class<?> findClass(String name) throws ClassNotFoundException
+ {
+ if (mDexClass != null) {
+ return findClassDalvik(name);
+ } else {
+ return findClassNonDalvik(name);
+ }
+ }
+
+ /**
+ * Finds the class with the specified binary name, from a DEX file.
+ */
+ private Class<?> findClassDalvik(String name)
+ throws ClassNotFoundException {
+
+ if (mDexFile == null) {
+ synchronized (FancyLoader.class) {
+ Constructor ctor;
+ /*
+ * Construct a DexFile object through reflection.
+ */
+ try {
+ ctor = mDexClass.getConstructor(new Class[] {String.class});
+ } catch (NoSuchMethodException nsme) {
+ throw new ClassNotFoundException("getConstructor failed",
+ nsme);
+ }
+
+ try {
+ mDexFile = ctor.newInstance(DEX_FILE);
+ } catch (InstantiationException ie) {
+ throw new ClassNotFoundException("newInstance failed", ie);
+ } catch (IllegalAccessException iae) {
+ throw new ClassNotFoundException("newInstance failed", iae);
+ } catch (InvocationTargetException ite) {
+ throw new ClassNotFoundException("newInstance failed", ite);
+ }
+ }
+ }
+
+ /*
+ * Call DexFile.loadClass(String, ClassLoader).
+ */
+ Method meth;
+
+ try {
+ meth = mDexClass.getMethod("loadClass",
+ new Class[] { String.class, ClassLoader.class });
+ } catch (NoSuchMethodException nsme) {
+ throw new ClassNotFoundException("getMethod failed", nsme);
+ }
+
+ try {
+ meth.invoke(mDexFile, name, this);
+ } catch (IllegalAccessException iae) {
+ throw new ClassNotFoundException("loadClass failed", iae);
+ } catch (InvocationTargetException ite) {
+ throw new ClassNotFoundException("loadClass failed",
+ ite.getCause());
+ }
+
+ return null;
+ }
+
+ /**
+ * Finds the class with the specified binary name, from .class files.
+ */
+ private Class<?> findClassNonDalvik(String name)
+ throws ClassNotFoundException {
+
+ String pathName = CLASS_PATH + name + ".class";
+ //System.out.println("--- Fancy: looking for " + pathName);
+
+ File path = new File(pathName);
+ RandomAccessFile raf;
+
+ try {
+ raf = new RandomAccessFile(path, "r");
+ } catch (FileNotFoundException fnfe) {
+ throw new ClassNotFoundException("Not found: " + pathName);
+ }
+
+ /* read the entire file in */
+ byte[] fileData;
+ try {
+ fileData = new byte[(int) raf.length()];
+ raf.readFully(fileData);
+ } catch (IOException ioe) {
+ throw new ClassNotFoundException("Read error: " + pathName);
+ } finally {
+ try {
+ raf.close();
+ } catch (IOException ioe) {
+ // drop
+ }
+ }
+
+ /* create the class */
+ //System.out.println("--- Fancy: defining " + name);
+ try {
+ return defineClass(name, fileData, 0, fileData.length);
+ } catch (Throwable th) {
+ throw new ClassNotFoundException("defineClass failed", th);
+ }
+ }
+
+ /**
+ * Load a class.
+ *
+ * Normally a class loader wouldn't override this, but we want our
+ * version of the class to take precedence over an already-loaded
+ * version.
+ *
+ * We still want the system classes (e.g. java.lang.Object) from the
+ * bootstrap class loader.
+ */
+ protected Class<?> loadClass(String name, boolean resolve)
+ throws ClassNotFoundException
+ {
+ Class res;
+
+ /*
+ * 1. Invoke findLoadedClass(String) to check if the class has
+ * already been loaded.
+ *
+ * This doesn't change.
+ */
+ res = findLoadedClass(name);
+ if (res != null) {
+ System.out.println("FancyLoader.loadClass: "
+ + name + " already loaded");
+ if (resolve)
+ resolveClass(res);
+ return res;
+ }
+
+ /*
+ * 3. Invoke the findClass(String) method to find the class.
+ */
+ try {
+ res = findClass(name);
+ if (resolve)
+ resolveClass(res);
+ }
+ catch (ClassNotFoundException e) {
+ // we couldn't find it, so eat the exception and keep going
+ }
+
+ /*
+ * 2. Invoke the loadClass method on the parent class loader. If
+ * the parent loader is null the class loader built-in to the
+ * virtual machine is used, instead.
+ *
+ * (Since we're not in java.lang, we can't actually invoke the
+ * parent's loadClass() method, but we passed our parent to the
+ * super-class which can take care of it for us.)
+ */
+ res = super.loadClass(name, resolve); // returns class or throws
+ return res;
+ }
+}
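
For contrast with FancyLoader's self-first ordering above: a standard delegating loader consults the parent before its own findClass, which is exactly what FancyLoader inverts. A simplified sketch of the conventional ordering (not part of this change):

    class ParentFirstLoader extends ClassLoader {
        ParentFirstLoader(ClassLoader parent) {
            super(parent);
        }

        @Override
        protected Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
            Class<?> c = findLoadedClass(name);       // 1. already loaded?
            if (c == null) {
                try {
                    c = getParent().loadClass(name);  // 2. delegate to the parent first
                } catch (ClassNotFoundException e) {
                    c = findClass(name);              // 3. only then look locally
                }
            }
            if (resolve) {
                resolveClass(c);
            }
            return c;
        }
    }
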
diff --git a/test/138-duplicate-classes-check/src/Main.java b/test/138-duplicate-classes-check/src/Main.java
new file mode 100644
index 0000000..a9b5bb0
--- /dev/null
+++ b/test/138-duplicate-classes-check/src/Main.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.lang.reflect.Method;
+
+/**
+ * Structural hazard test.
+ */
+public class Main {
+ public static void main(String[] args) {
+ new Main().run();
+ }
+
+ private void run() {
+ System.out.println(new A().i);
+
+ // Now run the class from the -ex file.
+
+ FancyLoader loader = new FancyLoader(getClass().getClassLoader());
+
+ try {
+ Class testEx = loader.loadClass("TestEx");
+ Method test = testEx.getDeclaredMethod("test");
+ test.invoke(null);
+ } catch (Exception exc) {
+ exc.printStackTrace();
+ }
+ }
+}
diff --git a/test/138-duplicate-classes-check2/build b/test/138-duplicate-classes-check2/build
new file mode 100755
index 0000000..abcbbb8
--- /dev/null
+++ b/test/138-duplicate-classes-check2/build
@@ -0,0 +1,32 @@
+#!/bin/bash
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Stop if something fails.
+set -e
+
+mkdir classes
+${JAVAC} -d classes `find src -name '*.java'`
+
+mkdir classes-ex
+${JAVAC} -d classes-ex `find src-ex -name '*.java'`
+rm classes-ex/A.class
+
+if [ ${NEED_DEX} = "true" ]; then
+ ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex --dump-width=1000 classes
+ zip $TEST_NAME.jar classes.dex
+ ${DX} -JXmx256m --debug --dex --dump-to=classes-ex.lst --output=classes.dex --dump-width=1000 classes-ex
+ zip ${TEST_NAME}-ex.jar classes.dex
+fi
diff --git a/test/138-duplicate-classes-check2/expected.txt b/test/138-duplicate-classes-check2/expected.txt
new file mode 100644
index 0000000..b2f7f08
--- /dev/null
+++ b/test/138-duplicate-classes-check2/expected.txt
@@ -0,0 +1,2 @@
+10
+10
diff --git a/test/138-duplicate-classes-check2/info.txt b/test/138-duplicate-classes-check2/info.txt
new file mode 100644
index 0000000..7100122
--- /dev/null
+++ b/test/138-duplicate-classes-check2/info.txt
@@ -0,0 +1,2 @@
+Check that a duplicate class is not detected, even though we compiled against one (but removed
+it before creating the dex file).
diff --git a/test/138-duplicate-classes-check2/run b/test/138-duplicate-classes-check2/run
new file mode 100755
index 0000000..8494ad9
--- /dev/null
+++ b/test/138-duplicate-classes-check2/run
@@ -0,0 +1,19 @@
+#!/bin/bash
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# We run with -Xno-dex-file-fallback to confirm that, even though the -ex file has a symbolic
+# reference to A, it contains no class-def for it, so we don't detect a collision.
+exec ${RUN} --runtime-option -Xno-dex-file-fallback "${@}"
diff --git a/test/138-duplicate-classes-check2/src-ex/A.java b/test/138-duplicate-classes-check2/src-ex/A.java
new file mode 100644
index 0000000..8e52cb3
--- /dev/null
+++ b/test/138-duplicate-classes-check2/src-ex/A.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class A {
+ public volatile int i;
+
+ public A() {
+ i = 10;
+ }
+}
diff --git a/test/138-duplicate-classes-check2/src-ex/TestEx.java b/test/138-duplicate-classes-check2/src-ex/TestEx.java
new file mode 100644
index 0000000..87558fa
--- /dev/null
+++ b/test/138-duplicate-classes-check2/src-ex/TestEx.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class TestEx {
+ public static void test() {
+ System.out.println(new A().i);
+ }
+}
diff --git a/test/138-duplicate-classes-check2/src/A.java b/test/138-duplicate-classes-check2/src/A.java
new file mode 100644
index 0000000..e1773e5
--- /dev/null
+++ b/test/138-duplicate-classes-check2/src/A.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class A {
+  // The object fields add padding in this class's object layout. Therefore the
+  // field 'i' should be at a different offset compared to the A class from the
+  // -ex DEX file.
+ public final Object anObject = null;
+ public final Object anotherObject = null;
+ // Use volatile to defeat inlining of the constructor + load-elimination.
+ public volatile int i;
+
+ public A() {
+ i = 10;
+ }
+}
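+
+// Illustrative layout note: the two leading reference fields push 'i' to a
+// higher offset here than in the src-ex A, which declares only 'i'; an offset
+// compiled against one layout would be wrong for the other. That is the
+// structural hazard this test exercises.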
diff --git a/test/138-duplicate-classes-check2/src/FancyLoader.java b/test/138-duplicate-classes-check2/src/FancyLoader.java
new file mode 100644
index 0000000..7e2bb08
--- /dev/null
+++ b/test/138-duplicate-classes-check2/src/FancyLoader.java
@@ -0,0 +1,229 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+
+/**
+ * A class loader with atypical behavior: we try to load a private
+ * class implementation before asking the system or boot loader. This
+ * is used to create multiple classes with identical names in a single VM.
+ *
+ * If DexFile is available, we use that; if not, we assume we're not in
+ * Dalvik and define the class directly with defineClass().
+ *
+ * The location of the DEX files and class data is dependent upon the
+ * test framework.
+ */
+public class FancyLoader extends ClassLoader {
+ /* this is where the "alternate" .class files live */
+ static final String CLASS_PATH = "classes-ex/";
+
+ /* this is the "alternate" DEX/Jar file */
+ static final String DEX_FILE = System.getenv("DEX_LOCATION") +
+ "/138-duplicate-classes-check2-ex.jar";
+
+    /* on Dalvik, the dalvik.system.DexFile class; otherwise null */
+    private Class mDexClass;
+
+    /* on Dalvik, a DexFile instance opened on DEX_FILE; otherwise null */
+    private Object mDexFile;
+
+ /**
+ * Construct FancyLoader, grabbing a reference to the DexFile class
+ * if we're running under Dalvik.
+ */
+ public FancyLoader(ClassLoader parent) {
+ super(parent);
+
+ try {
+ mDexClass = parent.loadClass("dalvik.system.DexFile");
+ } catch (ClassNotFoundException cnfe) {
+ // ignore -- not running Dalvik
+ }
+ }
+
+ /**
+ * Finds the class with the specified binary name.
+ *
+ * We search for a file in CLASS_PATH or pull an entry from DEX_FILE.
+ * If we don't find a match, we throw an exception.
+ */
+ protected Class<?> findClass(String name) throws ClassNotFoundException
+ {
+ if (mDexClass != null) {
+ return findClassDalvik(name);
+ } else {
+ return findClassNonDalvik(name);
+ }
+ }
+
+ /**
+ * Finds the class with the specified binary name, from a DEX file.
+ */
+ private Class<?> findClassDalvik(String name)
+ throws ClassNotFoundException {
+
+ if (mDexFile == null) {
+ synchronized (FancyLoader.class) {
+ Constructor ctor;
+ /*
+ * Construct a DexFile object through reflection.
+ */
+ try {
+ ctor = mDexClass.getConstructor(new Class[] {String.class});
+ } catch (NoSuchMethodException nsme) {
+ throw new ClassNotFoundException("getConstructor failed",
+ nsme);
+ }
+
+ try {
+ mDexFile = ctor.newInstance(DEX_FILE);
+ } catch (InstantiationException ie) {
+ throw new ClassNotFoundException("newInstance failed", ie);
+ } catch (IllegalAccessException iae) {
+ throw new ClassNotFoundException("newInstance failed", iae);
+ } catch (InvocationTargetException ite) {
+ throw new ClassNotFoundException("newInstance failed", ite);
+ }
+ }
+ }
+
+ /*
+ * Call DexFile.loadClass(String, ClassLoader).
+ */
+ Method meth;
+
+ try {
+ meth = mDexClass.getMethod("loadClass",
+ new Class[] { String.class, ClassLoader.class });
+ } catch (NoSuchMethodException nsme) {
+ throw new ClassNotFoundException("getMethod failed", nsme);
+ }
+
+ try {
+ meth.invoke(mDexFile, name, this);
+ } catch (IllegalAccessException iae) {
+ throw new ClassNotFoundException("loadClass failed", iae);
+ } catch (InvocationTargetException ite) {
+ throw new ClassNotFoundException("loadClass failed",
+ ite.getCause());
+ }
+
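+        // Returning null still works here: the invoke above defined the class
+        // with this loader, so the later super.loadClass() call can find it
+        // via findLoadedClass().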
+ return null;
+ }
+
+ /**
+ * Finds the class with the specified binary name, from .class files.
+ */
+ private Class<?> findClassNonDalvik(String name)
+ throws ClassNotFoundException {
+
+ String pathName = CLASS_PATH + name + ".class";
+ //System.out.println("--- Fancy: looking for " + pathName);
+
+ File path = new File(pathName);
+ RandomAccessFile raf;
+
+ try {
+ raf = new RandomAccessFile(path, "r");
+ } catch (FileNotFoundException fnfe) {
+ throw new ClassNotFoundException("Not found: " + pathName);
+ }
+
+ /* read the entire file in */
+ byte[] fileData;
+ try {
+ fileData = new byte[(int) raf.length()];
+ raf.readFully(fileData);
+ } catch (IOException ioe) {
+ throw new ClassNotFoundException("Read error: " + pathName);
+ } finally {
+ try {
+ raf.close();
+ } catch (IOException ioe) {
+ // drop
+ }
+ }
+
+ /* create the class */
+ //System.out.println("--- Fancy: defining " + name);
+ try {
+ return defineClass(name, fileData, 0, fileData.length);
+ } catch (Throwable th) {
+ throw new ClassNotFoundException("defineClass failed", th);
+ }
+ }
+
+ /**
+ * Load a class.
+ *
+ * Normally a class loader wouldn't override this, but we want our
+ * version of the class to take precedence over an already-loaded
+ * version.
+ *
+ * We still want the system classes (e.g. java.lang.Object) from the
+ * bootstrap class loader.
+ */
+ protected Class<?> loadClass(String name, boolean resolve)
+ throws ClassNotFoundException
+ {
+ Class res;
+
+ /*
+ * 1. Invoke findLoadedClass(String) to check if the class has
+ * already been loaded.
+ *
+ * This doesn't change.
+ */
+ res = findLoadedClass(name);
+ if (res != null) {
+ System.out.println("FancyLoader.loadClass: "
+ + name + " already loaded");
+ if (resolve)
+ resolveClass(res);
+ return res;
+ }
+
+        /*
+         * 3. Invoke the findClass(String) method to find the class.
+         *
+         * (Steps 2 and 3 of the standard algorithm are deliberately swapped
+         * so that our private version of a class wins over the parent's.)
+         */
+ try {
+ res = findClass(name);
+ if (resolve)
+ resolveClass(res);
+ }
+ catch (ClassNotFoundException e) {
+ // we couldn't find it, so eat the exception and keep going
+ }
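+
+        // Note we fall through even when findClass() succeeded: the class is
+        // now registered with this loader, so super.loadClass() returns it
+        // via findLoadedClass() instead of delegating to the parent.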
+
+ /*
+ * 2. Invoke the loadClass method on the parent class loader. If
+ * the parent loader is null the class loader built-in to the
+ * virtual machine is used, instead.
+ *
+ * (Since we're not in java.lang, we can't actually invoke the
+ * parent's loadClass() method, but we passed our parent to the
+ * super-class which can take care of it for us.)
+ */
+ res = super.loadClass(name, resolve); // returns class or throws
+ return res;
+ }
+}
diff --git a/test/138-duplicate-classes-check2/src/Main.java b/test/138-duplicate-classes-check2/src/Main.java
new file mode 100644
index 0000000..a9b5bb0
--- /dev/null
+++ b/test/138-duplicate-classes-check2/src/Main.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.lang.reflect.Method;
+
+/**
+ * Structural hazard test.
+ */
+public class Main {
+ public static void main(String[] args) {
+ new Main().run();
+ }
+
+ private void run() {
+ System.out.println(new A().i);
+
+ // Now run the class from the -ex file.
+
+ FancyLoader loader = new FancyLoader(getClass().getClassLoader());
+
+ try {
+ Class testEx = loader.loadClass("TestEx");
+ Method test = testEx.getDeclaredMethod("test");
+ test.invoke(null);
+ } catch (Exception exc) {
+ exc.printStackTrace();
+ }
+ }
+}
diff --git a/test/139-register-natives/check b/test/139-register-natives/check
new file mode 100755
index 0000000..265ad85
--- /dev/null
+++ b/test/139-register-natives/check
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Strip any JNI registration error messages
+sed -e '/java_vm_ext/d' -e '/jni_internal.cc/d' "$2" > "$2.tmp"
+
+diff --strip-trailing-cr -q "$1" "$2.tmp" >/dev/null
diff --git a/test/139-register-natives/expected.txt b/test/139-register-natives/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/139-register-natives/expected.txt
diff --git a/test/139-register-natives/info.txt b/test/139-register-natives/info.txt
new file mode 100644
index 0000000..48e08a4
--- /dev/null
+++ b/test/139-register-natives/info.txt
@@ -0,0 +1 @@
+Tests that RegisterNatives binds methods in the correct resolution order.
diff --git a/test/139-register-natives/regnative.cc b/test/139-register-natives/regnative.cc
new file mode 100644
index 0000000..d9c8b31
--- /dev/null
+++ b/test/139-register-natives/regnative.cc
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni.h"
+
+namespace art {
+
+// Simple empty method. We will check for correct registration with UnsatisfiedLinkError.
+static void foo(JNIEnv*, jclass) {
+}
+
+static JNINativeMethod gMethods[] = {
+ { "foo", "()V", reinterpret_cast<void*>(foo) }
+};
+
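+// Called from Java with the class to register foo() against. RegisterNatives
+// returns 0 (JNI_OK) on success and a negative value on failure.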
+extern "C" JNIEXPORT jint JNICALL Java_Main_registerNatives(JNIEnv* env, jclass, jclass trg) {
+ return env->RegisterNatives(trg, gMethods, 1);
+}
+
+} // namespace art
diff --git a/test/139-register-natives/src/Main.java b/test/139-register-natives/src/Main.java
new file mode 100644
index 0000000..35b2f9c
--- /dev/null
+++ b/test/139-register-natives/src/Main.java
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ testRegistration1();
+ testRegistration2();
+ testRegistration3();
+ }
+
+ static {
+ System.loadLibrary("arttest");
+ }
+
+ // Test that a subclass' method is registered instead of a superclass' method.
+ private static void testRegistration1() {
+ registerNatives(TestSub.class);
+
+ expectNotThrows(new TestSub());
+ expectThrows(new TestSuper());
+ }
+
+ // Test that a superclass' method is registered if the subclass doesn't have a matching method.
+ private static void testRegistration2() {
+ registerNatives(TestSub2.class);
+
+ expectNotThrows(new TestSub2());
+ expectNotThrows(new TestSuper2());
+ }
+
+ // Test that registration fails if the subclass has a matching non-native method.
+ private static void testRegistration3() {
+ try {
+ registerNatives(TestSub3.class);
+ System.out.println("Expected exception for registerNatives(TestSub3.class)");
+ } catch (NoSuchMethodError ignored) {
+ }
+ }
+
+  private static native int registerNatives(Class c);
+
+ private static void expectThrows(Base b) {
+ try {
+ b.callMyFoo();
+ System.out.println("Expected exception for " + b.getClass().getName());
+ } catch (Throwable ignored) {
+ }
+ }
+
+ private static void expectNotThrows(Base b) {
+ try {
+ b.callMyFoo();
+ } catch (Throwable t) {
+ System.out.println("Did not expect an exception for " + b.getClass().getName());
+ t.printStackTrace(System.out);
+ }
+ }
+}
+
+abstract class Base {
+ public abstract void callMyFoo();
+}
+
+class TestSuper extends Base {
+ private native void foo();
+
+ @Override
+ public void callMyFoo() {
+ foo();
+ }
+}
+
+class TestSub extends TestSuper {
+ public native void foo();
+
+ @Override
+ public void callMyFoo() {
+ foo();
+ }
+}
+
+class TestSuper2 extends Base {
+ public native void foo();
+
+ @Override
+ public void callMyFoo() {
+ foo();
+ }
+}
+
+class TestSub2 extends TestSuper2 {
+}
+
+class TestSuper3 extends Base {
+ public native void foo();
+
+ @Override
+ public void callMyFoo() {
+ foo();
+ }
+}
+
+class TestSub3 extends TestSuper3 {
+ public void foo() {
+ System.out.println("TestSub3.foo()");
+ }
+}
diff --git a/test/201-built-in-exception-detail-messages/src/Main.java b/test/201-built-in-exception-detail-messages/src/Main.java
index 24ee6e0..52d4259 100644
--- a/test/201-built-in-exception-detail-messages/src/Main.java
+++ b/test/201-built-in-exception-detail-messages/src/Main.java
@@ -358,7 +358,8 @@
field.set(new A(), 5);
fail();
} catch (IllegalArgumentException expected) {
- assertEquals("field A.b has type java.lang.String, got java.lang.Integer", expected.getMessage());
+ assertEquals("field A.b has type java.lang.String, got java.lang.Integer",
+ expected.getMessage());
}
// Can't unbox null to a primitive.
@@ -385,7 +386,8 @@
m.invoke(new A(), 2, 2);
fail();
} catch (IllegalArgumentException expected) {
- assertEquals("method A.m argument 2 has type java.lang.String, got java.lang.Integer", expected.getMessage());
+ assertEquals("method A.m argument 2 has type java.lang.String, got java.lang.Integer",
+ expected.getMessage());
}
// Can't pass null as an int.
@@ -409,21 +411,24 @@
m.invoke("hello", "world"); // Wrong type.
fail();
} catch (IllegalArgumentException iae) {
- assertEquals("method java.lang.String.charAt argument 1 has type int, got java.lang.String", iae.getMessage());
+ assertEquals("method java.lang.String.charAt! argument 1 has type int, got java.lang.String",
+ iae.getMessage());
}
try {
Method m = String.class.getMethod("charAt", int.class);
m.invoke("hello", (Object) null); // Null for a primitive argument.
fail();
} catch (IllegalArgumentException iae) {
- assertEquals("method java.lang.String.charAt argument 1 has type int, got null", iae.getMessage());
+ assertEquals("method java.lang.String.charAt! argument 1 has type int, got null",
+ iae.getMessage());
}
try {
Method m = String.class.getMethod("charAt", int.class);
m.invoke(new Integer(5)); // Wrong type for 'this'.
fail();
} catch (IllegalArgumentException iae) {
- assertEquals("Expected receiver of type java.lang.String, but got java.lang.Integer", iae.getMessage());
+ assertEquals("Expected receiver of type java.lang.String, but got java.lang.Integer",
+ iae.getMessage());
}
try {
Method m = String.class.getMethod("charAt", int.class);
diff --git a/test/422-type-conversion/src/Main.java b/test/422-type-conversion/src/Main.java
index 7ce2868..447b9b8 100644
--- a/test/422-type-conversion/src/Main.java
+++ b/test/422-type-conversion/src/Main.java
@@ -625,65 +625,67 @@
assertCharEquals((char)0, $opt$IntToChar(-2147483648)); // -(2^31)
}
+  // A dummy flag to defeat inlining of these routines: the conditional throw
+  // keeps the compiler from inlining them away, so each retains the Dex
+  // type-conversion instruction under test.
+  static boolean doThrow = false;
// These methods produce int-to-long Dex instructions.
- static long $opt$ByteToLong(byte a) { return (long)a; }
- static long $opt$ShortToLong(short a) { return (long)a; }
- static long $opt$IntToLong(int a) { return (long)a; }
- static long $opt$CharToLong(int a) { return (long)a; }
+ static long $opt$ByteToLong(byte a) { if (doThrow) throw new Error(); return (long)a; }
+ static long $opt$ShortToLong(short a) { if (doThrow) throw new Error(); return (long)a; }
+ static long $opt$IntToLong(int a) { if (doThrow) throw new Error(); return (long)a; }
+ static long $opt$CharToLong(int a) { if (doThrow) throw new Error(); return (long)a; }
// These methods produce int-to-float Dex instructions.
- static float $opt$ByteToFloat(byte a) { return (float)a; }
- static float $opt$ShortToFloat(short a) { return (float)a; }
- static float $opt$IntToFloat(int a) { return (float)a; }
- static float $opt$CharToFloat(char a) { return (float)a; }
+ static float $opt$ByteToFloat(byte a) { if (doThrow) throw new Error(); return (float)a; }
+ static float $opt$ShortToFloat(short a) { if (doThrow) throw new Error(); return (float)a; }
+ static float $opt$IntToFloat(int a) { if (doThrow) throw new Error(); return (float)a; }
+ static float $opt$CharToFloat(char a) { if (doThrow) throw new Error(); return (float)a; }
// These methods produce int-to-double Dex instructions.
- static double $opt$ByteToDouble(byte a) { return (double)a; }
- static double $opt$ShortToDouble(short a) { return (double)a; }
- static double $opt$IntToDouble(int a) { return (double)a; }
- static double $opt$CharToDouble(int a) { return (double)a; }
+ static double $opt$ByteToDouble(byte a) { if (doThrow) throw new Error(); return (double)a; }
+ static double $opt$ShortToDouble(short a) { if (doThrow) throw new Error(); return (double)a; }
+ static double $opt$IntToDouble(int a) { if (doThrow) throw new Error(); return (double)a; }
+ static double $opt$CharToDouble(int a) { if (doThrow) throw new Error(); return (double)a; }
// These methods produce long-to-int Dex instructions.
- static int $opt$LongToInt(long a) { return (int)a; }
- static int $opt$LongLiteralToInt() { return (int)42L; }
+ static int $opt$LongToInt(long a) { if (doThrow) throw new Error(); return (int)a; }
+ static int $opt$LongLiteralToInt() { if (doThrow) throw new Error(); return (int)42L; }
// This method produces a long-to-float Dex instruction.
- static float $opt$LongToFloat(long a) { return (float)a; }
+ static float $opt$LongToFloat(long a) { if (doThrow) throw new Error(); return (float)a; }
// This method produces a long-to-double Dex instruction.
- static double $opt$LongToDouble(long a) { return (double)a; }
+ static double $opt$LongToDouble(long a) { if (doThrow) throw new Error(); return (double)a; }
// This method produces a float-to-int Dex instruction.
- static int $opt$FloatToInt(float a) { return (int)a; }
+ static int $opt$FloatToInt(float a) { if (doThrow) throw new Error(); return (int)a; }
// This method produces a float-to-long Dex instruction.
- static long $opt$FloatToLong(float a){ return (long)a; }
+ static long $opt$FloatToLong(float a){ if (doThrow) throw new Error(); return (long)a; }
// This method produces a float-to-double Dex instruction.
- static double $opt$FloatToDouble(float a) { return (double)a; }
+ static double $opt$FloatToDouble(float a) { if (doThrow) throw new Error(); return (double)a; }
// This method produces a double-to-int Dex instruction.
- static int $opt$DoubleToInt(double a){ return (int)a; }
+ static int $opt$DoubleToInt(double a){ if (doThrow) throw new Error(); return (int)a; }
// This method produces a double-to-long Dex instruction.
- static long $opt$DoubleToLong(double a){ return (long)a; }
+ static long $opt$DoubleToLong(double a){ if (doThrow) throw new Error(); return (long)a; }
// This method produces a double-to-float Dex instruction.
- static float $opt$DoubleToFloat(double a) { return (float)a; }
+ static float $opt$DoubleToFloat(double a) { if (doThrow) throw new Error(); return (float)a; }
// These methods produce int-to-byte Dex instructions.
- static byte $opt$ShortToByte(short a) { return (byte)a; }
- static byte $opt$IntToByte(int a) { return (byte)a; }
- static byte $opt$CharToByte(char a) { return (byte)a; }
+ static byte $opt$ShortToByte(short a) { if (doThrow) throw new Error(); return (byte)a; }
+ static byte $opt$IntToByte(int a) { if (doThrow) throw new Error(); return (byte)a; }
+ static byte $opt$CharToByte(char a) { if (doThrow) throw new Error(); return (byte)a; }
// These methods produce int-to-short Dex instructions.
- static short $opt$ByteToShort(byte a) { return (short)a; }
- static short $opt$IntToShort(int a) { return (short)a; }
- static short $opt$CharToShort(char a) { return (short)a; }
+ static short $opt$ByteToShort(byte a) { if (doThrow) throw new Error(); return (short)a; }
+ static short $opt$IntToShort(int a) { if (doThrow) throw new Error(); return (short)a; }
+ static short $opt$CharToShort(char a) { if (doThrow) throw new Error(); return (short)a; }
// These methods produce int-to-char Dex instructions.
- static char $opt$ByteToChar(byte a) { return (char)a; }
- static char $opt$ShortToChar(short a) { return (char)a; }
- static char $opt$IntToChar(int a) { return (char)a; }
+ static char $opt$ByteToChar(byte a) { if (doThrow) throw new Error(); return (char)a; }
+ static char $opt$ShortToChar(short a) { if (doThrow) throw new Error(); return (char)a; }
+ static char $opt$IntToChar(int a) { if (doThrow) throw new Error(); return (char)a; }
}
diff --git a/test/431-optimizing-arith-shifts/src/Main.java b/test/431-optimizing-arith-shifts/src/Main.java
index d8667c6..86422bd 100644
--- a/test/431-optimizing-arith-shifts/src/Main.java
+++ b/test/431-optimizing-arith-shifts/src/Main.java
@@ -52,7 +52,7 @@
expectEquals(Integer.MIN_VALUE, $opt$Shl(1073741824, 1)); // overflow
expectEquals(1073741824, $opt$Shl(268435456, 2));
- // othe nly 5 lower bits should be used for shifting (& 0x1f).
+ // Only the 5 lower bits should be used for shifting (& 0x1f).
expectEquals(7, $opt$Shl(7, 32)); // 32 & 0x1f = 0
expectEquals(14, $opt$Shl(7, 33)); // 33 & 0x1f = 1
expectEquals(32, $opt$Shl(1, 101)); // 101 & 0x1f = 5
@@ -97,6 +97,13 @@
expectEquals(Long.MIN_VALUE, $opt$Shl(7L, Long.MAX_VALUE));
expectEquals(7L, $opt$Shl(7L, Long.MIN_VALUE));
+
+ // Exercise some special cases handled by backends/simplifier.
+ expectEquals(24L, $opt$ShlConst1(12L));
+ expectEquals(0x2345678900000000L, $opt$ShlConst32(0x123456789L));
+ expectEquals(0x2490249000000000L, $opt$ShlConst33(0x12481248L));
+ expectEquals(0x4920492000000000L, $opt$ShlConst34(0x12481248L));
+ expectEquals(0x9240924000000000L, $opt$ShlConst35(0x12481248L));
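+    // E.g. 0x123456789L << 32: the leading '1' (bit 32) shifts past bit 63
+    // and is dropped, leaving 0x2345678900000000L.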
}
private static void shrInt() {
@@ -277,7 +284,7 @@
return a >>> 2L;
}
- static int $opt$ShlConst0(int a) {
+ static int $opt$ShlConst0(int a) {
return a << 0;
}
@@ -301,5 +308,25 @@
return a >>> 0L;
}
+ static long $opt$ShlConst1(long a) {
+ return a << 1L;
+ }
+
+ static long $opt$ShlConst32(long a) {
+ return a << 32L;
+ }
+
+ static long $opt$ShlConst33(long a) {
+ return a << 33L;
+ }
+
+ static long $opt$ShlConst34(long a) {
+ return a << 34L;
+ }
+
+ static long $opt$ShlConst35(long a) {
+ return a << 35L;
+ }
+
}
diff --git a/test/441-checker-inliner/src/Main.java b/test/441-checker-inliner/src/Main.java
index 631b140..8894d4e 100644
--- a/test/441-checker-inliner/src/Main.java
+++ b/test/441-checker-inliner/src/Main.java
@@ -17,9 +17,9 @@
public class Main {
// CHECK-START: void Main.InlineVoid() inliner (before)
- // CHECK-DAG: [[Const42:i\d+]] IntConstant 42
+ // CHECK-DAG: <<Const42:i\d+>> IntConstant 42
// CHECK-DAG: InvokeStaticOrDirect
- // CHECK-DAG: InvokeStaticOrDirect [ [[Const42]] ]
+ // CHECK-DAG: InvokeStaticOrDirect [<<Const42>>]
// CHECK-START: void Main.InlineVoid() inliner (after)
// CHECK-NOT: InvokeStaticOrDirect
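+  // (Checker note: CHECK-DAG lines may match in any order within their
+  // CHECK-START scope, while CHECK-NOT asserts the pattern is absent.)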
@@ -30,94 +30,94 @@
}
// CHECK-START: int Main.InlineParameter(int) inliner (before)
- // CHECK-DAG: [[Param:i\d+]] ParameterValue
- // CHECK-DAG: [[Result:i\d+]] InvokeStaticOrDirect [ [[Param]] ]
- // CHECK-DAG: Return [ [[Result]] ]
+ // CHECK-DAG: <<Param:i\d+>> ParameterValue
+ // CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<Param>>]
+ // CHECK-DAG: Return [<<Result>>]
// CHECK-START: int Main.InlineParameter(int) inliner (after)
- // CHECK-DAG: [[Param:i\d+]] ParameterValue
- // CHECK-DAG: Return [ [[Param]] ]
+ // CHECK-DAG: <<Param:i\d+>> ParameterValue
+ // CHECK-DAG: Return [<<Param>>]
public static int InlineParameter(int a) {
return returnParameter(a);
}
// CHECK-START: long Main.InlineWideParameter(long) inliner (before)
- // CHECK-DAG: [[Param:j\d+]] ParameterValue
- // CHECK-DAG: [[Result:j\d+]] InvokeStaticOrDirect [ [[Param]] ]
- // CHECK-DAG: Return [ [[Result]] ]
+ // CHECK-DAG: <<Param:j\d+>> ParameterValue
+ // CHECK-DAG: <<Result:j\d+>> InvokeStaticOrDirect [<<Param>>]
+ // CHECK-DAG: Return [<<Result>>]
// CHECK-START: long Main.InlineWideParameter(long) inliner (after)
- // CHECK-DAG: [[Param:j\d+]] ParameterValue
- // CHECK-DAG: Return [ [[Param]] ]
+ // CHECK-DAG: <<Param:j\d+>> ParameterValue
+ // CHECK-DAG: Return [<<Param>>]
public static long InlineWideParameter(long a) {
return returnWideParameter(a);
}
// CHECK-START: java.lang.Object Main.InlineReferenceParameter(java.lang.Object) inliner (before)
- // CHECK-DAG: [[Param:l\d+]] ParameterValue
- // CHECK-DAG: [[Result:l\d+]] InvokeStaticOrDirect [ [[Param]] ]
- // CHECK-DAG: Return [ [[Result]] ]
+ // CHECK-DAG: <<Param:l\d+>> ParameterValue
+ // CHECK-DAG: <<Result:l\d+>> InvokeStaticOrDirect [<<Param>>]
+ // CHECK-DAG: Return [<<Result>>]
// CHECK-START: java.lang.Object Main.InlineReferenceParameter(java.lang.Object) inliner (after)
- // CHECK-DAG: [[Param:l\d+]] ParameterValue
- // CHECK-DAG: Return [ [[Param]] ]
+ // CHECK-DAG: <<Param:l\d+>> ParameterValue
+ // CHECK-DAG: Return [<<Param>>]
public static Object InlineReferenceParameter(Object o) {
return returnReferenceParameter(o);
}
// CHECK-START: int Main.InlineInt() inliner (before)
- // CHECK-DAG: [[Result:i\d+]] InvokeStaticOrDirect
- // CHECK-DAG: Return [ [[Result]] ]
+ // CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect
+ // CHECK-DAG: Return [<<Result>>]
// CHECK-START: int Main.InlineInt() inliner (after)
- // CHECK-DAG: [[Const4:i\d+]] IntConstant 4
- // CHECK-DAG: Return [ [[Const4]] ]
+ // CHECK-DAG: <<Const4:i\d+>> IntConstant 4
+ // CHECK-DAG: Return [<<Const4>>]
public static int InlineInt() {
return returnInt();
}
// CHECK-START: long Main.InlineWide() inliner (before)
- // CHECK-DAG: [[Result:j\d+]] InvokeStaticOrDirect
- // CHECK-DAG: Return [ [[Result]] ]
+ // CHECK-DAG: <<Result:j\d+>> InvokeStaticOrDirect
+ // CHECK-DAG: Return [<<Result>>]
// CHECK-START: long Main.InlineWide() inliner (after)
- // CHECK-DAG: [[Const8:j\d+]] LongConstant 8
- // CHECK-DAG: Return [ [[Const8]] ]
+ // CHECK-DAG: <<Const8:j\d+>> LongConstant 8
+ // CHECK-DAG: Return [<<Const8>>]
public static long InlineWide() {
return returnWide();
}
// CHECK-START: int Main.InlineAdd() inliner (before)
- // CHECK-DAG: [[Const3:i\d+]] IntConstant 3
- // CHECK-DAG: [[Const5:i\d+]] IntConstant 5
- // CHECK-DAG: [[Result:i\d+]] InvokeStaticOrDirect
- // CHECK-DAG: Return [ [[Result]] ]
+ // CHECK-DAG: <<Const3:i\d+>> IntConstant 3
+ // CHECK-DAG: <<Const5:i\d+>> IntConstant 5
+ // CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect
+ // CHECK-DAG: Return [<<Result>>]
// CHECK-START: int Main.InlineAdd() inliner (after)
- // CHECK-DAG: [[Const3:i\d+]] IntConstant 3
- // CHECK-DAG: [[Const5:i\d+]] IntConstant 5
- // CHECK-DAG: [[Add:i\d+]] Add [ [[Const3]] [[Const5]] ]
- // CHECK-DAG: Return [ [[Add]] ]
+ // CHECK-DAG: <<Const3:i\d+>> IntConstant 3
+ // CHECK-DAG: <<Const5:i\d+>> IntConstant 5
+ // CHECK-DAG: <<Add:i\d+>> Add [<<Const3>>,<<Const5>>]
+ // CHECK-DAG: Return [<<Add>>]
public static int InlineAdd() {
return returnAdd(3, 5);
}
// CHECK-START: int Main.InlineFieldAccess() inliner (before)
- // CHECK-DAG: [[After:i\d+]] InvokeStaticOrDirect
- // CHECK-DAG: Return [ [[After]] ]
+ // CHECK-DAG: <<After:i\d+>> InvokeStaticOrDirect
+ // CHECK-DAG: Return [<<After>>]
// CHECK-START: int Main.InlineFieldAccess() inliner (after)
- // CHECK-DAG: [[Const1:i\d+]] IntConstant 1
- // CHECK-DAG: [[Before:i\d+]] StaticFieldGet
- // CHECK-DAG: [[After:i\d+]] Add [ [[Before]] [[Const1]] ]
- // CHECK-DAG: StaticFieldSet [ {{l\d+}} [[After]] ]
- // CHECK-DAG: Return [ [[After]] ]
+ // CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ // CHECK-DAG: <<Before:i\d+>> StaticFieldGet
+ // CHECK-DAG: <<After:i\d+>> Add [<<Before>>,<<Const1>>]
+ // CHECK-DAG: StaticFieldSet [{{l\d+}},<<After>>]
+ // CHECK-DAG: Return [<<After>>]
// CHECK-START: int Main.InlineFieldAccess() inliner (after)
// CHECK-NOT: InvokeStaticOrDirect
@@ -127,22 +127,22 @@
}
// CHECK-START: int Main.InlineWithControlFlow(boolean) inliner (before)
- // CHECK-DAG: [[Const1:i\d+]] IntConstant 1
- // CHECK-DAG: [[Const3:i\d+]] IntConstant 3
- // CHECK-DAG: [[Const5:i\d+]] IntConstant 5
- // CHECK-DAG: [[Add:i\d+]] InvokeStaticOrDirect [ [[Const1]] [[Const3]] ]
- // CHECK-DAG: [[Sub:i\d+]] InvokeStaticOrDirect [ [[Const5]] [[Const3]] ]
- // CHECK-DAG: [[Phi:i\d+]] Phi [ [[Add]] [[Sub]] ]
- // CHECK-DAG: Return [ [[Phi]] ]
+ // CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ // CHECK-DAG: <<Const3:i\d+>> IntConstant 3
+ // CHECK-DAG: <<Const5:i\d+>> IntConstant 5
+ // CHECK-DAG: <<Add:i\d+>> InvokeStaticOrDirect [<<Const1>>,<<Const3>>]
+ // CHECK-DAG: <<Sub:i\d+>> InvokeStaticOrDirect [<<Const5>>,<<Const3>>]
+ // CHECK-DAG: <<Phi:i\d+>> Phi [<<Add>>,<<Sub>>]
+ // CHECK-DAG: Return [<<Phi>>]
// CHECK-START: int Main.InlineWithControlFlow(boolean) inliner (after)
- // CHECK-DAG: [[Const1:i\d+]] IntConstant 1
- // CHECK-DAG: [[Const3:i\d+]] IntConstant 3
- // CHECK-DAG: [[Const5:i\d+]] IntConstant 5
- // CHECK-DAG: [[Add:i\d+]] Add [ [[Const1]] [[Const3]] ]
- // CHECK-DAG: [[Sub:i\d+]] Sub [ [[Const5]] [[Const3]] ]
- // CHECK-DAG: [[Phi:i\d+]] Phi [ [[Add]] [[Sub]] ]
- // CHECK-DAG: Return [ [[Phi]] ]
+ // CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ // CHECK-DAG: <<Const3:i\d+>> IntConstant 3
+ // CHECK-DAG: <<Const5:i\d+>> IntConstant 5
+ // CHECK-DAG: <<Add:i\d+>> Add [<<Const1>>,<<Const3>>]
+ // CHECK-DAG: <<Sub:i\d+>> Sub [<<Const5>>,<<Const3>>]
+ // CHECK-DAG: <<Phi:i\d+>> Phi [<<Add>>,<<Sub>>]
+ // CHECK-DAG: Return [<<Phi>>]
public static int InlineWithControlFlow(boolean cond) {
int x, const1, const3, const5;
diff --git a/test/442-checker-constant-folding/src/Main.java b/test/442-checker-constant-folding/src/Main.java
index 6b21fed..c258db9 100644
--- a/test/442-checker-constant-folding/src/Main.java
+++ b/test/442-checker-constant-folding/src/Main.java
@@ -16,6 +16,12 @@
public class Main {
+ public static void assertFalse(boolean condition) {
+ if (condition) {
+ throw new Error();
+ }
+ }
+
public static void assertIntEquals(int expected, int result) {
if (expected != result) {
throw new Error("Expected: " + expected + ", found: " + result);
@@ -28,19 +34,31 @@
}
}
+ public static void assertFloatEquals(float expected, float result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void assertDoubleEquals(double expected, double result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
/**
* Tiny three-register program exercising int constant folding
* on negation.
*/
// CHECK-START: int Main.IntNegation() constant_folding (before)
- // CHECK-DAG: [[Const42:i\d+]] IntConstant 42
- // CHECK-DAG: [[Neg:i\d+]] Neg [ [[Const42]] ]
- // CHECK-DAG: Return [ [[Neg]] ]
+ // CHECK-DAG: <<Const42:i\d+>> IntConstant 42
+ // CHECK-DAG: <<Neg:i\d+>> Neg [<<Const42>>]
+ // CHECK-DAG: Return [<<Neg>>]
// CHECK-START: int Main.IntNegation() constant_folding (after)
- // CHECK-DAG: [[ConstN42:i\d+]] IntConstant -42
- // CHECK-DAG: Return [ [[ConstN42]] ]
+ // CHECK-DAG: <<ConstN42:i\d+>> IntConstant -42
+ // CHECK-DAG: Return [<<ConstN42>>]
public static int IntNegation() {
int x, y;
@@ -55,14 +73,14 @@
*/
// CHECK-START: int Main.IntAddition1() constant_folding (before)
- // CHECK-DAG: [[Const1:i\d+]] IntConstant 1
- // CHECK-DAG: [[Const2:i\d+]] IntConstant 2
- // CHECK-DAG: [[Add:i\d+]] Add [ [[Const1]] [[Const2]] ]
- // CHECK-DAG: Return [ [[Add]] ]
+ // CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ // CHECK-DAG: <<Const2:i\d+>> IntConstant 2
+ // CHECK-DAG: <<Add:i\d+>> Add [<<Const1>>,<<Const2>>]
+ // CHECK-DAG: Return [<<Add>>]
// CHECK-START: int Main.IntAddition1() constant_folding (after)
- // CHECK-DAG: [[Const3:i\d+]] IntConstant 3
- // CHECK-DAG: Return [ [[Const3]] ]
+ // CHECK-DAG: <<Const3:i\d+>> IntConstant 3
+ // CHECK-DAG: Return [<<Const3>>]
public static int IntAddition1() {
int a, b, c;
@@ -78,18 +96,18 @@
*/
// CHECK-START: int Main.IntAddition2() constant_folding (before)
- // CHECK-DAG: [[Const1:i\d+]] IntConstant 1
- // CHECK-DAG: [[Const2:i\d+]] IntConstant 2
- // CHECK-DAG: [[Const5:i\d+]] IntConstant 5
- // CHECK-DAG: [[Const6:i\d+]] IntConstant 6
- // CHECK-DAG: [[Add1:i\d+]] Add [ [[Const1]] [[Const2]] ]
- // CHECK-DAG: [[Add2:i\d+]] Add [ [[Const5]] [[Const6]] ]
- // CHECK-DAG: [[Add3:i\d+]] Add [ [[Add1]] [[Add2]] ]
- // CHECK-DAG: Return [ [[Add3]] ]
+ // CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ // CHECK-DAG: <<Const2:i\d+>> IntConstant 2
+ // CHECK-DAG: <<Const5:i\d+>> IntConstant 5
+ // CHECK-DAG: <<Const6:i\d+>> IntConstant 6
+ // CHECK-DAG: <<Add1:i\d+>> Add [<<Const1>>,<<Const2>>]
+ // CHECK-DAG: <<Add2:i\d+>> Add [<<Const5>>,<<Const6>>]
+ // CHECK-DAG: <<Add3:i\d+>> Add [<<Add1>>,<<Add2>>]
+ // CHECK-DAG: Return [<<Add3>>]
// CHECK-START: int Main.IntAddition2() constant_folding (after)
- // CHECK-DAG: [[Const14:i\d+]] IntConstant 14
- // CHECK-DAG: Return [ [[Const14]] ]
+ // CHECK-DAG: <<Const14:i\d+>> IntConstant 14
+ // CHECK-DAG: Return [<<Const14>>]
public static int IntAddition2() {
int a, b, c;
@@ -109,14 +127,14 @@
*/
// CHECK-START: int Main.IntSubtraction() constant_folding (before)
- // CHECK-DAG: [[Const6:i\d+]] IntConstant 6
- // CHECK-DAG: [[Const2:i\d+]] IntConstant 2
- // CHECK-DAG: [[Sub:i\d+]] Sub [ [[Const6]] [[Const2]] ]
- // CHECK-DAG: Return [ [[Sub]] ]
+ // CHECK-DAG: <<Const6:i\d+>> IntConstant 6
+ // CHECK-DAG: <<Const2:i\d+>> IntConstant 2
+ // CHECK-DAG: <<Sub:i\d+>> Sub [<<Const6>>,<<Const2>>]
+ // CHECK-DAG: Return [<<Sub>>]
// CHECK-START: int Main.IntSubtraction() constant_folding (after)
- // CHECK-DAG: [[Const4:i\d+]] IntConstant 4
- // CHECK-DAG: Return [ [[Const4]] ]
+ // CHECK-DAG: <<Const4:i\d+>> IntConstant 4
+ // CHECK-DAG: Return [<<Const4>>]
public static int IntSubtraction() {
int a, b, c;
@@ -132,14 +150,14 @@
*/
// CHECK-START: long Main.LongAddition() constant_folding (before)
- // CHECK-DAG: [[Const1:j\d+]] LongConstant 1
- // CHECK-DAG: [[Const2:j\d+]] LongConstant 2
- // CHECK-DAG: [[Add:j\d+]] Add [ [[Const1]] [[Const2]] ]
- // CHECK-DAG: Return [ [[Add]] ]
+ // CHECK-DAG: <<Const1:j\d+>> LongConstant 1
+ // CHECK-DAG: <<Const2:j\d+>> LongConstant 2
+ // CHECK-DAG: <<Add:j\d+>> Add [<<Const1>>,<<Const2>>]
+ // CHECK-DAG: Return [<<Add>>]
// CHECK-START: long Main.LongAddition() constant_folding (after)
- // CHECK-DAG: [[Const3:j\d+]] LongConstant 3
- // CHECK-DAG: Return [ [[Const3]] ]
+ // CHECK-DAG: <<Const3:j\d+>> LongConstant 3
+ // CHECK-DAG: Return [<<Const3>>]
public static long LongAddition() {
long a, b, c;
@@ -155,14 +173,14 @@
*/
// CHECK-START: long Main.LongSubtraction() constant_folding (before)
- // CHECK-DAG: [[Const6:j\d+]] LongConstant 6
- // CHECK-DAG: [[Const2:j\d+]] LongConstant 2
- // CHECK-DAG: [[Sub:j\d+]] Sub [ [[Const6]] [[Const2]] ]
- // CHECK-DAG: Return [ [[Sub]] ]
+ // CHECK-DAG: <<Const6:j\d+>> LongConstant 6
+ // CHECK-DAG: <<Const2:j\d+>> LongConstant 2
+ // CHECK-DAG: <<Sub:j\d+>> Sub [<<Const6>>,<<Const2>>]
+ // CHECK-DAG: Return [<<Sub>>]
// CHECK-START: long Main.LongSubtraction() constant_folding (after)
- // CHECK-DAG: [[Const4:j\d+]] LongConstant 4
- // CHECK-DAG: Return [ [[Const4]] ]
+ // CHECK-DAG: <<Const4:j\d+>> LongConstant 4
+ // CHECK-DAG: Return [<<Const4>>]
public static long LongSubtraction() {
long a, b, c;
@@ -177,14 +195,14 @@
*/
// CHECK-START: int Main.StaticCondition() constant_folding (before)
- // CHECK-DAG: [[Const7:i\d+]] IntConstant 7
- // CHECK-DAG: [[Const2:i\d+]] IntConstant 2
- // CHECK-DAG: [[Cond:z\d+]] GreaterThanOrEqual [ [[Const7]] [[Const2]] ]
- // CHECK-DAG: If [ [[Cond]] ]
+ // CHECK-DAG: <<Const7:i\d+>> IntConstant 7
+ // CHECK-DAG: <<Const2:i\d+>> IntConstant 2
+ // CHECK-DAG: <<Cond:z\d+>> GreaterThanOrEqual [<<Const7>>,<<Const2>>]
+ // CHECK-DAG: If [<<Cond>>]
// CHECK-START: int Main.StaticCondition() constant_folding (after)
- // CHECK-DAG: [[Const1:i\d+]] IntConstant 1
- // CHECK-DAG: If [ [[Const1]] ]
+ // CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ // CHECK-DAG: If [<<Const1>>]
public static int StaticCondition() {
int a, b, c;
@@ -207,18 +225,18 @@
*/
// CHECK-START: int Main.JumpsAndConditionals(boolean) constant_folding (before)
- // CHECK-DAG: [[Const2:i\d+]] IntConstant 2
- // CHECK-DAG: [[Const5:i\d+]] IntConstant 5
- // CHECK-DAG: [[Add:i\d+]] Add [ [[Const5]] [[Const2]] ]
- // CHECK-DAG: [[Sub:i\d+]] Sub [ [[Const5]] [[Const2]] ]
- // CHECK-DAG: [[Phi:i\d+]] Phi [ [[Add]] [[Sub]] ]
- // CHECK-DAG: Return [ [[Phi]] ]
+ // CHECK-DAG: <<Const2:i\d+>> IntConstant 2
+ // CHECK-DAG: <<Const5:i\d+>> IntConstant 5
+ // CHECK-DAG: <<Add:i\d+>> Add [<<Const5>>,<<Const2>>]
+ // CHECK-DAG: <<Sub:i\d+>> Sub [<<Const5>>,<<Const2>>]
+ // CHECK-DAG: <<Phi:i\d+>> Phi [<<Add>>,<<Sub>>]
+ // CHECK-DAG: Return [<<Phi>>]
// CHECK-START: int Main.JumpsAndConditionals(boolean) constant_folding (after)
- // CHECK-DAG: [[Const3:i\d+]] IntConstant 3
- // CHECK-DAG: [[Const7:i\d+]] IntConstant 7
- // CHECK-DAG: [[Phi:i\d+]] Phi [ [[Const7]] [[Const3]] ]
- // CHECK-DAG: Return [ [[Phi]] ]
+ // CHECK-DAG: <<Const3:i\d+>> IntConstant 3
+ // CHECK-DAG: <<Const7:i\d+>> IntConstant 7
+ // CHECK-DAG: <<Phi:i\d+>> Phi [<<Const7>>,<<Const3>>]
+ // CHECK-DAG: Return [<<Phi>>]
public static int JumpsAndConditionals(boolean cond) {
int a, b, c;
@@ -236,177 +254,393 @@
*/
// CHECK-START: int Main.And0(int) constant_folding (before)
- // CHECK-DAG: [[Arg:i\d+]] ParameterValue
- // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
- // CHECK-DAG: [[And:i\d+]] And [ [[Arg]] [[Const0]] ]
- // CHECK-DAG: Return [ [[And]] ]
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+ // CHECK-DAG: <<And:i\d+>> And [<<Arg>>,<<Const0>>]
+ // CHECK-DAG: Return [<<And>>]
// CHECK-START: int Main.And0(int) constant_folding (after)
- // CHECK-DAG: [[Arg:i\d+]] ParameterValue
- // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:i\d+>> IntConstant 0
// CHECK-NOT: And
- // CHECK-DAG: Return [ [[Const0]] ]
+ // CHECK-DAG: Return [<<Const0>>]
public static int And0(int arg) {
return arg & 0;
}
// CHECK-START: long Main.Mul0(long) constant_folding (before)
- // CHECK-DAG: [[Arg:j\d+]] ParameterValue
- // CHECK-DAG: [[Const0:j\d+]] LongConstant 0
- // CHECK-DAG: [[Mul:j\d+]] Mul [ [[Arg]] [[Const0]] ]
- // CHECK-DAG: Return [ [[Mul]] ]
+ // CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:j\d+>> LongConstant 0
+ // CHECK-DAG: <<Mul:j\d+>> Mul [<<Arg>>,<<Const0>>]
+ // CHECK-DAG: Return [<<Mul>>]
// CHECK-START: long Main.Mul0(long) constant_folding (after)
- // CHECK-DAG: [[Arg:j\d+]] ParameterValue
- // CHECK-DAG: [[Const0:j\d+]] LongConstant 0
+ // CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:j\d+>> LongConstant 0
// CHECK-NOT: Mul
- // CHECK-DAG: Return [ [[Const0]] ]
+ // CHECK-DAG: Return [<<Const0>>]
public static long Mul0(long arg) {
return arg * 0;
}
// CHECK-START: int Main.OrAllOnes(int) constant_folding (before)
- // CHECK-DAG: [[Arg:i\d+]] ParameterValue
- // CHECK-DAG: [[ConstF:i\d+]] IntConstant -1
- // CHECK-DAG: [[Or:i\d+]] Or [ [[Arg]] [[ConstF]] ]
- // CHECK-DAG: Return [ [[Or]] ]
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: <<ConstF:i\d+>> IntConstant -1
+ // CHECK-DAG: <<Or:i\d+>> Or [<<Arg>>,<<ConstF>>]
+ // CHECK-DAG: Return [<<Or>>]
// CHECK-START: int Main.OrAllOnes(int) constant_folding (after)
- // CHECK-DAG: [[ConstF:i\d+]] IntConstant -1
+ // CHECK-DAG: <<ConstF:i\d+>> IntConstant -1
// CHECK-NOT: Or
- // CHECK-DAG: Return [ [[ConstF]] ]
+ // CHECK-DAG: Return [<<ConstF>>]
public static int OrAllOnes(int arg) {
return arg | -1;
}
// CHECK-START: long Main.Rem0(long) constant_folding (before)
- // CHECK-DAG: [[Arg:j\d+]] ParameterValue
- // CHECK-DAG: [[Const0:j\d+]] LongConstant 0
- // CHECK-DAG: [[DivZeroCheck:j\d+]] DivZeroCheck [ [[Arg]] ]
- // CHECK-DAG: [[Rem:j\d+]] Rem [ [[Const0]] [[DivZeroCheck]] ]
- // CHECK-DAG: Return [ [[Rem]] ]
+ // CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:j\d+>> LongConstant 0
+ // CHECK-DAG: <<DivZeroCheck:j\d+>> DivZeroCheck [<<Arg>>]
+ // CHECK-DAG: <<Rem:j\d+>> Rem [<<Const0>>,<<DivZeroCheck>>]
+ // CHECK-DAG: Return [<<Rem>>]
// CHECK-START: long Main.Rem0(long) constant_folding (after)
- // CHECK-DAG: [[Const0:j\d+]] LongConstant 0
+ // CHECK-DAG: <<Const0:j\d+>> LongConstant 0
// CHECK-NOT: Rem
- // CHECK-DAG: Return [ [[Const0]] ]
+ // CHECK-DAG: Return [<<Const0>>]
public static long Rem0(long arg) {
return 0 % arg;
}
// CHECK-START: int Main.Rem1(int) constant_folding (before)
- // CHECK-DAG: [[Arg:i\d+]] ParameterValue
- // CHECK-DAG: [[Const1:i\d+]] IntConstant 1
- // CHECK-DAG: [[Rem:i\d+]] Rem [ [[Arg]] [[Const1]] ]
- // CHECK-DAG: Return [ [[Rem]] ]
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ // CHECK-DAG: <<Rem:i\d+>> Rem [<<Arg>>,<<Const1>>]
+ // CHECK-DAG: Return [<<Rem>>]
// CHECK-START: int Main.Rem1(int) constant_folding (after)
- // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
+ // CHECK-DAG: <<Const0:i\d+>> IntConstant 0
// CHECK-NOT: Rem
- // CHECK-DAG: Return [ [[Const0]] ]
+ // CHECK-DAG: Return [<<Const0>>]
public static int Rem1(int arg) {
return arg % 1;
}
// CHECK-START: long Main.RemN1(long) constant_folding (before)
- // CHECK-DAG: [[Arg:j\d+]] ParameterValue
- // CHECK-DAG: [[ConstN1:j\d+]] LongConstant -1
- // CHECK-DAG: [[DivZeroCheck:j\d+]] DivZeroCheck [ [[Arg]] ]
- // CHECK-DAG: [[Rem:j\d+]] Rem [ [[Arg]] [[DivZeroCheck]] ]
- // CHECK-DAG: Return [ [[Rem]] ]
+ // CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ // CHECK-DAG: <<ConstN1:j\d+>> LongConstant -1
+ // CHECK-DAG: <<DivZeroCheck:j\d+>> DivZeroCheck [<<ConstN1>>]
+ // CHECK-DAG: <<Rem:j\d+>> Rem [<<Arg>>,<<DivZeroCheck>>]
+ // CHECK-DAG: Return [<<Rem>>]
// CHECK-START: long Main.RemN1(long) constant_folding (after)
- // CHECK-DAG: [[Const0:j\d+]] LongConstant 0
+ // CHECK-DAG: <<Const0:j\d+>> LongConstant 0
// CHECK-NOT: Rem
- // CHECK-DAG: Return [ [[Const0]] ]
+ // CHECK-DAG: Return [<<Const0>>]
public static long RemN1(long arg) {
return arg % -1;
}
// CHECK-START: int Main.Shl0(int) constant_folding (before)
- // CHECK-DAG: [[Arg:i\d+]] ParameterValue
- // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
- // CHECK-DAG: [[Shl:i\d+]] Shl [ [[Const0]] [[Arg]] ]
- // CHECK-DAG: Return [ [[Shl]] ]
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+ // CHECK-DAG: <<Shl:i\d+>> Shl [<<Const0>>,<<Arg>>]
+ // CHECK-DAG: Return [<<Shl>>]
// CHECK-START: int Main.Shl0(int) constant_folding (after)
- // CHECK-DAG: [[Arg:i\d+]] ParameterValue
- // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:i\d+>> IntConstant 0
// CHECK-NOT: Shl
- // CHECK-DAG: Return [ [[Const0]] ]
+ // CHECK-DAG: Return [<<Const0>>]
public static int Shl0(int arg) {
return 0 << arg;
}
// CHECK-START: long Main.Shr0(int) constant_folding (before)
- // CHECK-DAG: [[Arg:i\d+]] ParameterValue
- // CHECK-DAG: [[Const0:j\d+]] LongConstant 0
- // CHECK-DAG: [[Shr:j\d+]] Shr [ [[Const0]] [[Arg]] ]
- // CHECK-DAG: Return [ [[Shr]] ]
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:j\d+>> LongConstant 0
+ // CHECK-DAG: <<Shr:j\d+>> Shr [<<Const0>>,<<Arg>>]
+ // CHECK-DAG: Return [<<Shr>>]
// CHECK-START: long Main.Shr0(int) constant_folding (after)
- // CHECK-DAG: [[Arg:i\d+]] ParameterValue
- // CHECK-DAG: [[Const0:j\d+]] LongConstant 0
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:j\d+>> LongConstant 0
// CHECK-NOT: Shr
- // CHECK-DAG: Return [ [[Const0]] ]
+ // CHECK-DAG: Return [<<Const0>>]
public static long Shr0(int arg) {
return (long)0 >> arg;
}
// CHECK-START: long Main.SubSameLong(long) constant_folding (before)
- // CHECK-DAG: [[Arg:j\d+]] ParameterValue
- // CHECK-DAG: [[Sub:j\d+]] Sub [ [[Arg]] [[Arg]] ]
- // CHECK-DAG: Return [ [[Sub]] ]
+ // CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ // CHECK-DAG: <<Sub:j\d+>> Sub [<<Arg>>,<<Arg>>]
+ // CHECK-DAG: Return [<<Sub>>]
// CHECK-START: long Main.SubSameLong(long) constant_folding (after)
- // CHECK-DAG: [[Arg:j\d+]] ParameterValue
- // CHECK-DAG: [[Const0:j\d+]] LongConstant 0
+ // CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:j\d+>> LongConstant 0
// CHECK-NOT: Sub
- // CHECK-DAG: Return [ [[Const0]] ]
+ // CHECK-DAG: Return [<<Const0>>]
public static long SubSameLong(long arg) {
return arg - arg;
}
// CHECK-START: int Main.UShr0(int) constant_folding (before)
- // CHECK-DAG: [[Arg:i\d+]] ParameterValue
- // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
- // CHECK-DAG: [[UShr:i\d+]] UShr [ [[Const0]] [[Arg]] ]
- // CHECK-DAG: Return [ [[UShr]] ]
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+ // CHECK-DAG: <<UShr:i\d+>> UShr [<<Const0>>,<<Arg>>]
+ // CHECK-DAG: Return [<<UShr>>]
// CHECK-START: int Main.UShr0(int) constant_folding (after)
- // CHECK-DAG: [[Arg:i\d+]] ParameterValue
- // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:i\d+>> IntConstant 0
// CHECK-NOT: UShr
- // CHECK-DAG: Return [ [[Const0]] ]
+ // CHECK-DAG: Return [<<Const0>>]
public static int UShr0(int arg) {
return 0 >>> arg;
}
// CHECK-START: int Main.XorSameInt(int) constant_folding (before)
- // CHECK-DAG: [[Arg:i\d+]] ParameterValue
- // CHECK-DAG: [[Xor:i\d+]] Xor [ [[Arg]] [[Arg]] ]
- // CHECK-DAG: Return [ [[Xor]] ]
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: <<Xor:i\d+>> Xor [<<Arg>>,<<Arg>>]
+ // CHECK-DAG: Return [<<Xor>>]
// CHECK-START: int Main.XorSameInt(int) constant_folding (after)
- // CHECK-DAG: [[Arg:i\d+]] ParameterValue
- // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:i\d+>> IntConstant 0
// CHECK-NOT: Xor
- // CHECK-DAG: Return [ [[Const0]] ]
+ // CHECK-DAG: Return [<<Const0>>]
public static int XorSameInt(int arg) {
return arg ^ arg;
}
+ // CHECK-START: boolean Main.CmpFloatGreaterThanNaN(float) constant_folding (before)
+ // CHECK-DAG: <<Arg:f\d+>> ParameterValue
+ // CHECK-DAG: <<ConstNan:f\d+>> FloatConstant nan
+ // CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+ // CHECK-DAG: IntConstant 1
+ // CHECK-DAG: <<Cmp:i\d+>> Compare [<<Arg>>,<<ConstNan>>]
+ // CHECK-DAG: <<Le:z\d+>> LessThanOrEqual [<<Cmp>>,<<Const0>>]
+ // CHECK-DAG: If [<<Le>>]
+
+ // CHECK-START: boolean Main.CmpFloatGreaterThanNaN(float) constant_folding (after)
+ // CHECK-DAG: ParameterValue
+ // CHECK-DAG: FloatConstant nan
+ // CHECK-DAG: IntConstant 0
+ // CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ // CHECK-DAG: If [<<Const1>>]
+
+ // CHECK-START: boolean Main.CmpFloatGreaterThanNaN(float) constant_folding (after)
+ // CHECK-NOT: Compare
+ // CHECK-NOT: LessThanOrEqual
+
+ public static boolean CmpFloatGreaterThanNaN(float arg) {
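+    // Any ordered comparison with NaN as an operand is false, so this folds
+    // to the constant false regardless of 'arg'.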
+ return arg > Float.NaN;
+ }
+
+ // CHECK-START: boolean Main.CmpDoubleLessThanNaN(double) constant_folding (before)
+ // CHECK-DAG: <<Arg:d\d+>> ParameterValue
+ // CHECK-DAG: <<ConstNan:d\d+>> DoubleConstant nan
+ // CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+ // CHECK-DAG: IntConstant 1
+ // CHECK-DAG: <<Cmp:i\d+>> Compare [<<Arg>>,<<ConstNan>>]
+ // CHECK-DAG: <<Ge:z\d+>> GreaterThanOrEqual [<<Cmp>>,<<Const0>>]
+ // CHECK-DAG: If [<<Ge>>]
+
+ // CHECK-START: boolean Main.CmpDoubleLessThanNaN(double) constant_folding (after)
+ // CHECK-DAG: ParameterValue
+ // CHECK-DAG: DoubleConstant nan
+ // CHECK-DAG: IntConstant 0
+ // CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ // CHECK-DAG: If [<<Const1>>]
+
+ // CHECK-START: boolean Main.CmpDoubleLessThanNaN(double) constant_folding (after)
+ // CHECK-NOT: Compare
+ // CHECK-NOT: GreaterThanOrEqual
+
+ public static boolean CmpDoubleLessThanNaN(double arg) {
+ return arg < Double.NaN;
+ }
+
+ // CHECK-START: int Main.ReturnInt33() constant_folding (before)
+ // CHECK-DAG: <<Const33:j\d+>> LongConstant 33
+ // CHECK-DAG: <<Convert:i\d+>> TypeConversion [<<Const33>>]
+ // CHECK-DAG: Return [<<Convert>>]
+
+ // CHECK-START: int Main.ReturnInt33() constant_folding (after)
+ // CHECK-DAG: <<Const33:i\d+>> IntConstant 33
+ // CHECK-DAG: Return [<<Const33>>]
+
+ public static int ReturnInt33() {
+ long imm = 33L;
+ return (int) imm;
+ }
+
+ // CHECK-START: int Main.ReturnIntMax() constant_folding (before)
+ // CHECK-DAG: <<ConstMax:f\d+>> FloatConstant 1e+34
+ // CHECK-DAG: <<Convert:i\d+>> TypeConversion [<<ConstMax>>]
+ // CHECK-DAG: Return [<<Convert>>]
+
+ // CHECK-START: int Main.ReturnIntMax() constant_folding (after)
+ // CHECK-DAG: <<ConstMax:i\d+>> IntConstant 2147483647
+ // CHECK-DAG: Return [<<ConstMax>>]
+
+ public static int ReturnIntMax() {
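+    // Float-to-int narrowing saturates: 1.0e34f is far above
+    // Integer.MAX_VALUE, so the conversion yields 2147483647.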
+ float imm = 1.0e34f;
+ return (int) imm;
+ }
+
+ // CHECK-START: int Main.ReturnInt0() constant_folding (before)
+ // CHECK-DAG: <<ConstNaN:d\d+>> DoubleConstant nan
+ // CHECK-DAG: <<Convert:i\d+>> TypeConversion [<<ConstNaN>>]
+ // CHECK-DAG: Return [<<Convert>>]
+
+ // CHECK-START: int Main.ReturnInt0() constant_folding (after)
+ // CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+ // CHECK-DAG: Return [<<Const0>>]
+
+ public static int ReturnInt0() {
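+    // NaN narrows to 0 when converted to an integral type.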
+ double imm = Double.NaN;
+ return (int) imm;
+ }
+
+ // CHECK-START: long Main.ReturnLong33() constant_folding (before)
+ // CHECK-DAG: <<Const33:i\d+>> IntConstant 33
+ // CHECK-DAG: <<Convert:j\d+>> TypeConversion [<<Const33>>]
+ // CHECK-DAG: Return [<<Convert>>]
+
+ // CHECK-START: long Main.ReturnLong33() constant_folding (after)
+ // CHECK-DAG: <<Const33:j\d+>> LongConstant 33
+ // CHECK-DAG: Return [<<Const33>>]
+
+ public static long ReturnLong33() {
+ int imm = 33;
+ return (long) imm;
+ }
+
+ // CHECK-START: long Main.ReturnLong34() constant_folding (before)
+ // CHECK-DAG: <<Const34:f\d+>> FloatConstant 34
+ // CHECK-DAG: <<Convert:j\d+>> TypeConversion [<<Const34>>]
+ // CHECK-DAG: Return [<<Convert>>]
+
+ // CHECK-START: long Main.ReturnLong34() constant_folding (after)
+ // CHECK-DAG: <<Const34:j\d+>> LongConstant 34
+ // CHECK-DAG: Return [<<Const34>>]
+
+ public static long ReturnLong34() {
+ float imm = 34.0f;
+ return (long) imm;
+ }
+
+ // CHECK-START: long Main.ReturnLong0() constant_folding (before)
+ // CHECK-DAG: <<ConstNaN:d\d+>> DoubleConstant nan
+ // CHECK-DAG: <<Convert:j\d+>> TypeConversion [<<ConstNaN>>]
+ // CHECK-DAG: Return [<<Convert>>]
+
+ // CHECK-START: long Main.ReturnLong0() constant_folding (after)
+ // CHECK-DAG: <<Const0:j\d+>> LongConstant 0
+ // CHECK-DAG: Return [<<Const0>>]
+
+ public static long ReturnLong0() {
+ double imm = -Double.NaN;
+ return (long) imm;
+ }
+
+ // CHECK-START: float Main.ReturnFloat33() constant_folding (before)
+ // CHECK-DAG: <<Const33:i\d+>> IntConstant 33
+ // CHECK-DAG: <<Convert:f\d+>> TypeConversion [<<Const33>>]
+ // CHECK-DAG: Return [<<Convert>>]
+
+ // CHECK-START: float Main.ReturnFloat33() constant_folding (after)
+ // CHECK-DAG: <<Const33:f\d+>> FloatConstant 33
+ // CHECK-DAG: Return [<<Const33>>]
+
+ public static float ReturnFloat33() {
+ int imm = 33;
+ return (float) imm;
+ }
+
+ // CHECK-START: float Main.ReturnFloat34() constant_folding (before)
+ // CHECK-DAG: <<Const34:j\d+>> LongConstant 34
+ // CHECK-DAG: <<Convert:f\d+>> TypeConversion [<<Const34>>]
+ // CHECK-DAG: Return [<<Convert>>]
+
+ // CHECK-START: float Main.ReturnFloat34() constant_folding (after)
+ // CHECK-DAG: <<Const34:f\d+>> FloatConstant 34
+ // CHECK-DAG: Return [<<Const34>>]
+
+ public static float ReturnFloat34() {
+ long imm = 34L;
+ return (float) imm;
+ }
+
+ // CHECK-START: float Main.ReturnFloat99P25() constant_folding (before)
+ // CHECK-DAG: <<Const:d\d+>> DoubleConstant 99.25
+ // CHECK-DAG: <<Convert:f\d+>> TypeConversion [<<Const>>]
+ // CHECK-DAG: Return [<<Convert>>]
+
+ // CHECK-START: float Main.ReturnFloat99P25() constant_folding (after)
+ // CHECK-DAG: <<Const:f\d+>> FloatConstant 99.25
+ // CHECK-DAG: Return [<<Const>>]
+
+ public static float ReturnFloat99P25() {
+ double imm = 99.25;
+ return (float) imm;
+ }
+
+ // CHECK-START: double Main.ReturnDouble33() constant_folding (before)
+ // CHECK-DAG: <<Const33:i\d+>> IntConstant 33
+ // CHECK-DAG: <<Convert:d\d+>> TypeConversion [<<Const33>>]
+ // CHECK-DAG: Return [<<Convert>>]
+
+ // CHECK-START: double Main.ReturnDouble33() constant_folding (after)
+ // CHECK-DAG: <<Const33:d\d+>> DoubleConstant 33
+ // CHECK-DAG: Return [<<Const33>>]
+
+ public static double ReturnDouble33() {
+ int imm = 33;
+ return (double) imm;
+ }
+
+ // CHECK-START: double Main.ReturnDouble34() constant_folding (before)
+ // CHECK-DAG: <<Const34:j\d+>> LongConstant 34
+ // CHECK-DAG: <<Convert:d\d+>> TypeConversion [<<Const34>>]
+ // CHECK-DAG: Return [<<Convert>>]
+
+ // CHECK-START: double Main.ReturnDouble34() constant_folding (after)
+ // CHECK-DAG: <<Const34:d\d+>> DoubleConstant 34
+ // CHECK-DAG: Return [<<Const34>>]
+
+ public static double ReturnDouble34() {
+ long imm = 34L;
+ return (double) imm;
+ }
+
+ // CHECK-START: double Main.ReturnDouble99P25() constant_folding (before)
+ // CHECK-DAG: <<Const:f\d+>> FloatConstant 99.25
+ // CHECK-DAG: <<Convert:d\d+>> TypeConversion [<<Const>>]
+ // CHECK-DAG: Return [<<Convert>>]
+
+ // CHECK-START: double Main.ReturnDouble99P25() constant_folding (after)
+ // CHECK-DAG: <<Const:d\d+>> DoubleConstant 99.25
+ // CHECK-DAG: Return [<<Const>>]
+
+ public static double ReturnDouble99P25() {
+ float imm = 99.25f;
+ return (double) imm;
+ }
+
public static void main(String[] args) {
assertIntEquals(IntNegation(), -42);
assertIntEquals(IntAddition1(), 3);
@@ -417,17 +651,31 @@
assertIntEquals(StaticCondition(), 5);
assertIntEquals(JumpsAndConditionals(true), 7);
assertIntEquals(JumpsAndConditionals(false), 3);
- int random = 123456; // Chosen randomly.
- assertIntEquals(And0(random), 0);
- assertLongEquals(Mul0(random), 0);
- assertIntEquals(OrAllOnes(random), -1);
- assertLongEquals(Rem0(random), 0);
- assertIntEquals(Rem1(random), 0);
- assertLongEquals(RemN1(random), 0);
- assertIntEquals(Shl0(random), 0);
- assertLongEquals(Shr0(random), 0);
- assertLongEquals(SubSameLong(random), 0);
- assertIntEquals(UShr0(random), 0);
- assertIntEquals(XorSameInt(random), 0);
+ int arbitrary = 123456; // Value chosen arbitrarily.
+ assertIntEquals(And0(arbitrary), 0);
+ assertLongEquals(Mul0(arbitrary), 0);
+ assertIntEquals(OrAllOnes(arbitrary), -1);
+ assertLongEquals(Rem0(arbitrary), 0);
+ assertIntEquals(Rem1(arbitrary), 0);
+ assertLongEquals(RemN1(arbitrary), 0);
+ assertIntEquals(Shl0(arbitrary), 0);
+ assertLongEquals(Shr0(arbitrary), 0);
+ assertLongEquals(SubSameLong(arbitrary), 0);
+ assertIntEquals(UShr0(arbitrary), 0);
+ assertIntEquals(XorSameInt(arbitrary), 0);
+ assertFalse(CmpFloatGreaterThanNaN(arbitrary));
+ assertFalse(CmpDoubleLessThanNaN(arbitrary));
+ assertIntEquals(ReturnInt33(), 33);
+ assertIntEquals(ReturnIntMax(), 2147483647);
+ assertIntEquals(ReturnInt0(), 0);
+ assertLongEquals(ReturnLong33(), 33);
+ assertLongEquals(ReturnLong34(), 34);
+ assertLongEquals(ReturnLong0(), 0);
+ assertFloatEquals(ReturnFloat33(), 33);
+ assertFloatEquals(ReturnFloat34(), 34);
+ assertFloatEquals(ReturnFloat99P25(), 99.25f);
+ assertDoubleEquals(ReturnDouble33(), 33);
+ assertDoubleEquals(ReturnDouble34(), 34);
+ assertDoubleEquals(ReturnDouble99P25(), 99.25);
}
}
diff --git a/test/445-checker-licm/src/Main.java b/test/445-checker-licm/src/Main.java
index 91ac2ed..96918d3 100644
--- a/test/445-checker-licm/src/Main.java
+++ b/test/445-checker-licm/src/Main.java
@@ -17,13 +17,13 @@
public class Main {
// CHECK-START: int Main.div() licm (before)
- // CHECK-DAG: Div ( loop_header:{{B\d+}} )
+ // CHECK-DAG: Div loop:{{B\d+}}
// CHECK-START: int Main.div() licm (after)
- // CHECK-NOT: Div ( loop_header:{{B\d+}} )
+ // CHECK-NOT: Div loop:{{B\d+}}
// CHECK-START: int Main.div() licm (after)
- // CHECK-DAG: Div ( loop_header:null )
+ // CHECK-DAG: Div loop:none
public static int div() {
int result = 0;
@@ -34,13 +34,13 @@
}
// CHECK-START: int Main.innerDiv() licm (before)
- // CHECK-DAG: Div ( loop_header:{{B\d+}} )
+ // CHECK-DAG: Div loop:{{B\d+}}
// CHECK-START: int Main.innerDiv() licm (after)
- // CHECK-NOT: Div ( loop_header:{{B\d+}} )
+ // CHECK-NOT: Div loop:{{B\d+}}
// CHECK-START: int Main.innerDiv() licm (after)
- // CHECK-DAG: Div ( loop_header:null )
+ // CHECK-DAG: Div loop:none
public static int innerDiv() {
int result = 0;
@@ -53,10 +53,10 @@
}
// CHECK-START: int Main.innerDiv2() licm (before)
- // CHECK-DAG: Mul ( loop_header:{{B4}} )
+ // CHECK-DAG: Mul loop:B4
// CHECK-START: int Main.innerDiv2() licm (after)
- // CHECK-DAG: Mul ( loop_header:{{B2}} )
+ // CHECK-DAG: Mul loop:B2
public static int innerDiv2() {
int result = 0;
@@ -72,10 +72,10 @@
}
// CHECK-START: int Main.innerDiv3(int, int) licm (before)
- // CHECK-DAG: Div ( loop_header:{{B\d+}} )
+ // CHECK-DAG: Div loop:{{B\d+}}
// CHECK-START: int Main.innerDiv3(int, int) licm (after)
- // CHECK-DAG: Div ( loop_header:{{B\d+}} )
+ // CHECK-DAG: Div loop:{{B\d+}}
public static int innerDiv3(int a, int b) {
int result = 0;
@@ -88,16 +88,16 @@
}
// CHECK-START: int Main.arrayLength(int[]) licm (before)
- // CHECK-DAG: [[NullCheck:l\d+]] NullCheck ( loop_header:{{B\d+}} )
- // CHECK-DAG: ArrayLength [ [[NullCheck]] ] ( loop_header:{{B\d+}} )
+ // CHECK-DAG: <<NullCheck:l\d+>> NullCheck loop:{{B\d+}}
+ // CHECK-DAG: ArrayLength [<<NullCheck>>] loop:{{B\d+}}
// CHECK-START: int Main.arrayLength(int[]) licm (after)
- // CHECK-NOT: NullCheck ( loop_header:{{B\d+}} )
- // CHECK-NOT: ArrayLength ( loop_header:{{B\d+}} )
+ // CHECK-NOT: NullCheck loop:{{B\d+}}
+ // CHECK-NOT: ArrayLength loop:{{B\d+}}
// CHECK-START: int Main.arrayLength(int[]) licm (after)
- // CHECK-DAG: [[NullCheck:l\d+]] NullCheck ( loop_header:null )
- // CHECK-DAG: ArrayLength [ [[NullCheck]] ] ( loop_header:null )
+ // CHECK-DAG: <<NullCheck:l\d+>> NullCheck loop:none
+ // CHECK-DAG: ArrayLength [<<NullCheck>>] loop:none
public static int arrayLength(int[] array) {
int result = 0;
diff --git a/test/446-checker-inliner2/src/Main.java b/test/446-checker-inliner2/src/Main.java
index ecf071e..9ed66d6 100644
--- a/test/446-checker-inliner2/src/Main.java
+++ b/test/446-checker-inliner2/src/Main.java
@@ -17,15 +17,15 @@
public class Main {
// CHECK-START: int Main.inlineInstanceCall(Main) inliner (before)
- // CHECK-DAG: [[Invoke:i\d+]] InvokeStaticOrDirect
- // CHECK-DAG: Return [ [[Invoke]] ]
+ // CHECK-DAG: <<Invoke:i\d+>> InvokeStaticOrDirect
+ // CHECK-DAG: Return [<<Invoke>>]
// CHECK-START: int Main.inlineInstanceCall(Main) inliner (after)
// CHECK-NOT: InvokeStaticOrDirect
// CHECK-START: int Main.inlineInstanceCall(Main) inliner (after)
- // CHECK-DAG: [[Field:i\d+]] InstanceFieldGet
- // CHECK-DAG: Return [ [[Field]] ]
+ // CHECK-DAG: <<Field:i\d+>> InstanceFieldGet
+ // CHECK-DAG: Return [<<Field>>]
public static int inlineInstanceCall(Main m) {
return m.foo();
@@ -38,15 +38,15 @@
int field = 42;
// CHECK-START: int Main.inlineNestedCall() inliner (before)
- // CHECK-DAG: [[Invoke:i\d+]] InvokeStaticOrDirect
- // CHECK-DAG: Return [ [[Invoke]] ]
+ // CHECK-DAG: <<Invoke:i\d+>> InvokeStaticOrDirect
+ // CHECK-DAG: Return [<<Invoke>>]
// CHECK-START: int Main.inlineNestedCall() inliner (after)
// CHECK-NOT: InvokeStaticOrDirect
// CHECK-START: int Main.inlineNestedCall() inliner (after)
- // CHECK-DAG: [[Const38:i\d+]] IntConstant 38
- // CHECK-DAG: Return [ [[Const38]] ]
+ // CHECK-DAG: <<Const38:i\d+>> IntConstant 38
+ // CHECK-DAG: Return [<<Const38>>]
public static int inlineNestedCall() {
return nestedCall();
diff --git a/test/447-checker-inliner3/src/Main.java b/test/447-checker-inliner3/src/Main.java
index db4b236..9d022b9 100644
--- a/test/447-checker-inliner3/src/Main.java
+++ b/test/447-checker-inliner3/src/Main.java
@@ -17,8 +17,8 @@
public class Main {
// CHECK-START: int Main.inlineIfThenElse() inliner (before)
- // CHECK-DAG: [[Invoke:i\d+]] InvokeStaticOrDirect
- // CHECK-DAG: Return [ [[Invoke]] ]
+ // CHECK-DAG: <<Invoke:i\d+>> InvokeStaticOrDirect
+ // CHECK-DAG: Return [<<Invoke>>]
// CHECK-START: int Main.inlineIfThenElse() inliner (after)
// CHECK-NOT: InvokeStaticOrDirect
diff --git a/test/449-checker-bce/expected.txt b/test/449-checker-bce/expected.txt
index 29d6383..e69de29 100644
--- a/test/449-checker-bce/expected.txt
+++ b/test/449-checker-bce/expected.txt
@@ -1 +0,0 @@
-100
diff --git a/test/449-checker-bce/src/Main.java b/test/449-checker-bce/src/Main.java
index 17039a3..f90d85d 100644
--- a/test/449-checker-bce/src/Main.java
+++ b/test/449-checker-bce/src/Main.java
@@ -608,6 +608,380 @@
}
+ int sum;
+
+ // CHECK-START: void Main.foo1(int[], int, int) BCE (before)
+ // CHECK: BoundsCheck
+ // CHECK: ArraySet
+ // CHECK-NOT: BoundsCheck
+ // CHECK: ArrayGet
+
+ // CHECK-START: void Main.foo1(int[], int, int) BCE (after)
+ // CHECK: Deoptimize
+ // CHECK: Deoptimize
+ // CHECK: Deoptimize
+ // CHECK-NOT: Deoptimize
+ // CHECK: Phi
+ // CHECK-NOT: BoundsCheck
+ // CHECK: ArraySet
+ // CHECK-NOT: BoundsCheck
+ // CHECK: ArrayGet
+
+ void foo1(int[] array, int start, int end) {
+    // Three HDeoptimize instructions will be added: one for
+    // start >= 0, one for end <= array.length, and one for
+    // the null check on array (so that the null check and
+    // array.length can be hoisted out of the loop).
+ for (int i = start ; i < end; i++) {
+ array[i] = 1;
+ sum += array[i];
+ }
+ }
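+
+  // A rough sketch of what BCE conceptually produces for foo1 (not the
+  // actual generated code; the guards are HDeoptimize instructions that
+  // fall back to the interpreter if they fail):
+  //
+  //   if (array == null || start < 0 || end > array.length) deoptimize();
+  //   for (int i = start; i < end; i++) {
+  //     array[i] = 1;      // no BoundsCheck needed
+  //     sum += array[i];   // no BoundsCheck needed
+  //   }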
+
+
+ // CHECK-START: void Main.foo2(int[], int, int) BCE (before)
+ // CHECK: BoundsCheck
+ // CHECK: ArraySet
+ // CHECK-NOT: BoundsCheck
+ // CHECK: ArrayGet
+
+ // CHECK-START: void Main.foo2(int[], int, int) BCE (after)
+ // CHECK: Deoptimize
+ // CHECK: Deoptimize
+ // CHECK: Deoptimize
+ // CHECK-NOT: Deoptimize
+ // CHECK: Phi
+ // CHECK-NOT: BoundsCheck
+ // CHECK: ArraySet
+ // CHECK-NOT: BoundsCheck
+ // CHECK: ArrayGet
+
+ void foo2(int[] array, int start, int end) {
+    // Three HDeoptimize instructions will be added: one for
+    // start >= 0, one for end <= array.length, and one for
+    // the null check on array (so that the null check and
+    // array.length can be hoisted out of the loop).
+ for (int i = start ; i <= end; i++) {
+ array[i] = 1;
+ sum += array[i];
+ }
+ }
+
+
+ // CHECK-START: void Main.foo3(int[], int) BCE (before)
+ // CHECK: BoundsCheck
+ // CHECK: ArraySet
+ // CHECK-NOT: BoundsCheck
+ // CHECK: ArrayGet
+
+ // CHECK-START: void Main.foo3(int[], int) BCE (after)
+ // CHECK: Deoptimize
+ // CHECK: Deoptimize
+ // CHECK-NOT: Deoptimize
+ // CHECK: Phi
+ // CHECK-NOT: BoundsCheck
+ // CHECK: ArraySet
+ // CHECK-NOT: BoundsCheck
+ // CHECK: ArrayGet
+
+ void foo3(int[] array, int end) {
+    // Two HDeoptimize instructions will be added: one for
+    // end < array.length, and one for the null check on array
+    // (so that the null check and array.length can be hoisted
+    // out of the loop).
+ for (int i = 3 ; i <= end; i++) {
+ array[i] = 1;
+ sum += array[i];
+ }
+ }
+
+ // CHECK-START: void Main.foo4(int[], int) BCE (before)
+ // CHECK: BoundsCheck
+ // CHECK: ArraySet
+ // CHECK-NOT: BoundsCheck
+ // CHECK: ArrayGet
+
+ // CHECK-START: void Main.foo4(int[], int) BCE (after)
+ // CHECK: Deoptimize
+ // CHECK: Deoptimize
+ // CHECK-NOT: Deoptimize
+ // CHECK: Phi
+ // CHECK-NOT: BoundsCheck
+ // CHECK: ArraySet
+ // CHECK-NOT: BoundsCheck
+ // CHECK: ArrayGet
+
+ void foo4(int[] array, int end) {
+    // Two HDeoptimize instructions will be added: one for
+    // end <= array.length, and one for the null check on array
+    // (so that the null check and array.length can be hoisted
+    // out of the loop).
+ for (int i = end ; i > 0; i--) {
+ array[i - 1] = 1;
+ sum += array[i - 1];
+ }
+ }
+
+
+ // CHECK-START: void Main.foo5(int[], int) BCE (before)
+ // CHECK: BoundsCheck
+ // CHECK: ArraySet
+ // CHECK: BoundsCheck
+ // CHECK: ArrayGet
+ // CHECK: BoundsCheck
+ // CHECK: ArrayGet
+ // CHECK: BoundsCheck
+ // CHECK: ArrayGet
+
+ // CHECK-START: void Main.foo5(int[], int) BCE (after)
+ // CHECK-NOT: BoundsCheck
+ // CHECK: ArraySet
+ // CHECK: Deoptimize
+ // CHECK-NOT: Deoptimize
+ // CHECK: Phi
+ // CHECK-NOT: BoundsCheck
+ // CHECK: ArrayGet
+ // CHECK-NOT: BoundsCheck
+ // CHECK: ArrayGet
+ // CHECK-NOT: BoundsCheck
+ // CHECK: ArrayGet
+
+ void foo5(int[] array, int end) {
+    // The bounds check in this loop can be eliminated without
+    // deoptimization: the index runs from array.length - 1 down to 0,
+    // so it is provably within bounds.
+ for (int i = array.length - 1 ; i >= 0; i--) {
+ array[i] = 1;
+ }
+    // One HDeoptimize instruction will be added,
+    // for (end - 2 <= array.length - 2).
+ for (int i = end - 2 ; i > 0; i--) {
+ sum += array[i - 1];
+ sum += array[i];
+ sum += array[i + 1];
+ }
+ }
+
+
+ // CHECK-START: void Main.foo6(int[], int, int) BCE (before)
+ // CHECK: BoundsCheck
+ // CHECK: ArrayGet
+ // CHECK: BoundsCheck
+ // CHECK: ArrayGet
+ // CHECK: BoundsCheck
+ // CHECK: ArrayGet
+ // CHECK: BoundsCheck
+ // CHECK: ArrayGet
+ // CHECK: BoundsCheck
+ // CHECK: ArrayGet
+ // CHECK-NOT: BoundsCheck
+ // CHECK: ArraySet
+
+ // CHECK-START: void Main.foo6(int[], int, int) BCE (after)
+ // CHECK: Deoptimize
+ // CHECK: Deoptimize
+ // CHECK: Deoptimize
+ // CHECK-NOT: Deoptimize
+ // CHECK: Phi
+ // CHECK-NOT: BoundsCheck
+ // CHECK: ArrayGet
+ // CHECK-NOT: BoundsCheck
+ // CHECK: ArrayGet
+ // CHECK-NOT: BoundsCheck
+ // CHECK: ArrayGet
+ // CHECK-NOT: BoundsCheck
+ // CHECK: ArrayGet
+ // CHECK-NOT: BoundsCheck
+ // CHECK: ArrayGet
+ // CHECK-NOT: BoundsCheck
+ // CHECK: ArraySet
+
+ void foo6(int[] array, int start, int end) {
+    // Three HDeoptimize instructions will be added: one for
+    // start >= 2, one for end <= array.length - 3, and one for
+    // the null check on array (so that the null check and
+    // array.length can be hoisted out of the loop).
+ for (int i = end; i >= start; i--) {
+ array[i] = (array[i-2] + array[i-1] + array[i] + array[i+1] + array[i+2]) / 5;
+ }
+ }
+
+
+ // CHECK-START: void Main.foo7(int[], int, int, boolean) BCE (before)
+ // CHECK: BoundsCheck
+ // CHECK: ArrayGet
+ // CHECK: BoundsCheck
+ // CHECK: ArrayGet
+
+ // CHECK-START: void Main.foo7(int[], int, int, boolean) BCE (after)
+ // CHECK: Deoptimize
+ // CHECK: Deoptimize
+ // CHECK: Deoptimize
+ // CHECK-NOT: Deoptimize
+ // CHECK: Phi
+ // CHECK: BoundsCheck
+ // CHECK: ArrayGet
+ // CHECK-NOT: BoundsCheck
+ // CHECK: ArrayGet
+
+ void foo7(int[] array, int start, int end, boolean lowEnd) {
+    // Three HDeoptimize instructions will be added: one for
+    // start >= 0, one for end <= array.length, and one for
+    // the null check on array (so that the null check and
+    // array.length can be hoisted out of the loop).
+ for (int i = start ; i < end; i++) {
+ if (lowEnd) {
+        // This array access is not guaranteed to execute, so the
+        // +1000 offset is not taken into account when forming the
+        // deoptimization conditions.
+ sum += array[i + 1000];
+ }
+ sum += array[i];
+ }
+ }
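+
+  // Since the guards above do not cover array[i + 1000], one BoundsCheck
+  // survives inside the loop for that access, which is what the (after)
+  // checks expect.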
+
+
+ // CHECK-START: void Main.partialLooping(int[], int, int) BCE (before)
+ // CHECK: BoundsCheck
+ // CHECK: ArraySet
+
+ // CHECK-START: void Main.partialLooping(int[], int, int) BCE (after)
+ // CHECK-NOT: Deoptimize
+ // CHECK: BoundsCheck
+ // CHECK: ArraySet
+
+ void partialLooping(int[] array, int start, int end) {
+    // This loop doesn't cover the full range of [start, end), so
+    // adding deoptimization would be too aggressive: end can be
+    // greater than array.length even though the loop never touches
+    // more than the first two elements.
+ for (int i = start; i < end; i++) {
+ if (i == 2) {
+ return;
+ }
+ array[i] = 1;
+ }
+ }
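+
+  // For example, partialLooping(new int[3], 0, 4) is perfectly valid: the
+  // loop returns at i == 2 before any out-of-bounds access, so a guard on
+  // end <= array.length would deoptimize a correct execution.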
+
+
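+  // The helpers above are driven both with bounds that hold (the added
+  // guards must not fire, so sum must match) and with bounds that fail
+  // (an ArrayIndexOutOfBoundsException must still be thrown, preserving
+  // the original exception semantics).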
+ static void testUnknownBounds() {
+ boolean caught = false;
+ Main main = new Main();
+ main.foo1(new int[10], 0, 10);
+ if (main.sum != 10) {
+ System.out.println("foo1 failed!");
+ }
+
+ caught = false;
+ main = new Main();
+ try {
+ main.foo1(new int[10], 0, 11);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ caught = true;
+ }
+ if (!caught || main.sum != 10) {
+ System.out.println("foo1 exception failed!");
+ }
+
+ main = new Main();
+ main.foo2(new int[10], 0, 9);
+ if (main.sum != 10) {
+ System.out.println("foo2 failed!");
+ }
+
+ caught = false;
+ main = new Main();
+ try {
+ main.foo2(new int[10], 0, 10);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ caught = true;
+ }
+ if (!caught || main.sum != 10) {
+ System.out.println("foo2 exception failed!");
+ }
+
+ main = new Main();
+ main.foo3(new int[10], 9);
+ if (main.sum != 7) {
+ System.out.println("foo3 failed!");
+ }
+
+ caught = false;
+ main = new Main();
+ try {
+ main.foo3(new int[10], 10);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ caught = true;
+ }
+ if (!caught || main.sum != 7) {
+ System.out.println("foo3 exception failed!");
+ }
+
+ main = new Main();
+ main.foo4(new int[10], 10);
+ if (main.sum != 10) {
+ System.out.println("foo4 failed!");
+ }
+
+ caught = false;
+ main = new Main();
+ try {
+ main.foo4(new int[10], 11);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ caught = true;
+ }
+ if (!caught || main.sum != 0) {
+ System.out.println("foo4 exception failed!");
+ }
+
+ main = new Main();
+ main.foo5(new int[10], 10);
+ if (main.sum != 24) {
+ System.out.println("foo5 failed!");
+ }
+
+ caught = false;
+ main = new Main();
+ try {
+ main.foo5(new int[10], 11);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ caught = true;
+ }
+ if (!caught || main.sum != 2) {
+ System.out.println("foo5 exception failed!");
+ }
+
+ main = new Main();
+ main.foo6(new int[10], 2, 7);
+
+ main = new Main();
+ int[] array = new int[4];
+    main.partialLooping(array, 0, 4);
+    if ((array[0] != 1) || (array[1] != 1) ||
+        (array[2] != 0) || (array[3] != 0)) {
+ System.out.println("partialLooping failed!");
+ }
+
+ caught = false;
+ main = new Main();
+ try {
+ main.foo6(new int[10], 2, 8);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ caught = true;
+ }
+ if (!caught) {
+ System.out.println("foo6 exception failed!");
+ }
+
+ caught = false;
+ main = new Main();
+ try {
+ main.foo6(new int[10], 1, 7);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ caught = true;
+ }
+ if (!caught) {
+ System.out.println("foo6 exception failed!");
+ }
+
+ }
+
// Make sure this method is compiled with optimizing.
// CHECK-START: void Main.main(java.lang.String[]) register (after)
// CHECK: ParallelMove
@@ -643,7 +1017,11 @@
// Make sure this value is kept after deoptimization.
int i = 1;
- System.out.println(foo() + i);
+ if (foo() + i != 100) {
+ System.out.println("foo failed!");
+    }
+
+ testUnknownBounds();
}
}
diff --git a/test/454-get-vreg/get_vreg_jni.cc b/test/454-get-vreg/get_vreg_jni.cc
index 6b4bc11..0ef2964 100644
--- a/test/454-get-vreg/get_vreg_jni.cc
+++ b/test/454-get-vreg/get_vreg_jni.cc
@@ -29,7 +29,9 @@
public:
TestVisitor(Thread* thread, Context* context, mirror::Object* this_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, context), this_value_(this_value), found_method_index_(0) {}
+ : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ this_value_(this_value),
+ found_method_index_(0) {}
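+
+  // Note: kIncludeInlinedFrames makes the walk visit frames inlined by the
+  // optimizing compiler as well as physical stack frames.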
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* m = GetMethod();
diff --git a/test/455-set-vreg/set_vreg_jni.cc b/test/455-set-vreg/set_vreg_jni.cc
index 0a83ac0..dffbfa4 100644
--- a/test/455-set-vreg/set_vreg_jni.cc
+++ b/test/455-set-vreg/set_vreg_jni.cc
@@ -29,7 +29,8 @@
public:
TestVisitor(Thread* thread, Context* context, mirror::Object* this_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, context), this_value_(this_value) {}
+ : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ this_value_(this_value) {}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* m = GetMethod();
diff --git a/test/457-regs/regs_jni.cc b/test/457-regs/regs_jni.cc
index 1b32348..193ab9d 100644
--- a/test/457-regs/regs_jni.cc
+++ b/test/457-regs/regs_jni.cc
@@ -29,7 +29,7 @@
public:
TestVisitor(Thread* thread, Context* context)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, context) {}
+ : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* m = GetMethod();
diff --git a/test/458-checker-instruction-simplification/src/Main.java b/test/458-checker-instruction-simplification/src/Main.java
index 65be6cb..742210c 100644
--- a/test/458-checker-instruction-simplification/src/Main.java
+++ b/test/458-checker-instruction-simplification/src/Main.java
@@ -34,19 +34,31 @@
}
}
+ public static void assertFloatEquals(float expected, float result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void assertDoubleEquals(double expected, double result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
/**
* Tiny programs exercising optimizations of arithmetic identities.
*/
// CHECK-START: long Main.Add0(long) instruction_simplifier (before)
- // CHECK-DAG: [[Arg:j\d+]] ParameterValue
- // CHECK-DAG: [[Const0:j\d+]] LongConstant 0
- // CHECK-DAG: [[Add:j\d+]] Add [ [[Const0]] [[Arg]] ]
- // CHECK-DAG: Return [ [[Add]] ]
+ // CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:j\d+>> LongConstant 0
+ // CHECK-DAG: <<Add:j\d+>> Add [<<Const0>>,<<Arg>>]
+ // CHECK-DAG: Return [<<Add>>]
// CHECK-START: long Main.Add0(long) instruction_simplifier (after)
- // CHECK-DAG: [[Arg:j\d+]] ParameterValue
- // CHECK-DAG: Return [ [[Arg]] ]
+ // CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ // CHECK-DAG: Return [<<Arg>>]
// CHECK-START: long Main.Add0(long) instruction_simplifier (after)
// CHECK-NOT: Add
@@ -56,14 +68,14 @@
}
// CHECK-START: int Main.AndAllOnes(int) instruction_simplifier (before)
- // CHECK-DAG: [[Arg:i\d+]] ParameterValue
- // CHECK-DAG: [[ConstF:i\d+]] IntConstant -1
- // CHECK-DAG: [[And:i\d+]] And [ [[Arg]] [[ConstF]] ]
- // CHECK-DAG: Return [ [[And]] ]
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: <<ConstF:i\d+>> IntConstant -1
+ // CHECK-DAG: <<And:i\d+>> And [<<Arg>>,<<ConstF>>]
+ // CHECK-DAG: Return [<<And>>]
// CHECK-START: int Main.AndAllOnes(int) instruction_simplifier (after)
- // CHECK-DAG: [[Arg:i\d+]] ParameterValue
- // CHECK-DAG: Return [ [[Arg]] ]
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: Return [<<Arg>>]
// CHECK-START: int Main.AndAllOnes(int) instruction_simplifier (after)
// CHECK-NOT: And
@@ -73,14 +85,14 @@
}
// CHECK-START: long Main.Div1(long) instruction_simplifier (before)
- // CHECK-DAG: [[Arg:j\d+]] ParameterValue
- // CHECK-DAG: [[Const1:j\d+]] LongConstant 1
- // CHECK-DAG: [[Div:j\d+]] Div [ [[Arg]] [[Const1]] ]
- // CHECK-DAG: Return [ [[Div]] ]
+ // CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ // CHECK-DAG: <<Const1:j\d+>> LongConstant 1
+ // CHECK-DAG: <<Div:j\d+>> Div [<<Arg>>,<<Const1>>]
+ // CHECK-DAG: Return [<<Div>>]
// CHECK-START: long Main.Div1(long) instruction_simplifier (after)
- // CHECK-DAG: [[Arg:j\d+]] ParameterValue
- // CHECK-DAG: Return [ [[Arg]] ]
+ // CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ // CHECK-DAG: Return [<<Arg>>]
// CHECK-START: long Main.Div1(long) instruction_simplifier (after)
// CHECK-NOT: Div
@@ -90,15 +102,15 @@
}
// CHECK-START: int Main.DivN1(int) instruction_simplifier (before)
- // CHECK-DAG: [[Arg:i\d+]] ParameterValue
- // CHECK-DAG: [[ConstN1:i\d+]] IntConstant -1
- // CHECK-DAG: [[Div:i\d+]] Div [ [[Arg]] [[ConstN1]] ]
- // CHECK-DAG: Return [ [[Div]] ]
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: <<ConstN1:i\d+>> IntConstant -1
+ // CHECK-DAG: <<Div:i\d+>> Div [<<Arg>>,<<ConstN1>>]
+ // CHECK-DAG: Return [<<Div>>]
// CHECK-START: int Main.DivN1(int) instruction_simplifier (after)
- // CHECK-DAG: [[Arg:i\d+]] ParameterValue
- // CHECK-DAG: [[Neg:i\d+]] Neg [ [[Arg]] ]
- // CHECK-DAG: Return [ [[Neg]] ]
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: <<Neg:i\d+>> Neg [<<Arg>>]
+ // CHECK-DAG: Return [<<Neg>>]
// CHECK-START: int Main.DivN1(int) instruction_simplifier (after)
// CHECK-NOT: Div
@@ -108,14 +120,14 @@
}
// CHECK-START: long Main.Mul1(long) instruction_simplifier (before)
- // CHECK-DAG: [[Arg:j\d+]] ParameterValue
- // CHECK-DAG: [[Const1:j\d+]] LongConstant 1
- // CHECK-DAG: [[Mul:j\d+]] Mul [ [[Arg]] [[Const1]] ]
- // CHECK-DAG: Return [ [[Mul]] ]
+ // CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ // CHECK-DAG: <<Const1:j\d+>> LongConstant 1
+ // CHECK-DAG: <<Mul:j\d+>> Mul [<<Arg>>,<<Const1>>]
+ // CHECK-DAG: Return [<<Mul>>]
// CHECK-START: long Main.Mul1(long) instruction_simplifier (after)
- // CHECK-DAG: [[Arg:j\d+]] ParameterValue
- // CHECK-DAG: Return [ [[Arg]] ]
+ // CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ // CHECK-DAG: Return [<<Arg>>]
// CHECK-START: long Main.Mul1(long) instruction_simplifier (after)
// CHECK-NOT: Mul
@@ -125,15 +137,15 @@
}
// CHECK-START: int Main.MulN1(int) instruction_simplifier (before)
- // CHECK-DAG: [[Arg:i\d+]] ParameterValue
- // CHECK-DAG: [[ConstN1:i\d+]] IntConstant -1
- // CHECK-DAG: [[Mul:i\d+]] Mul [ [[Arg]] [[ConstN1]] ]
- // CHECK-DAG: Return [ [[Mul]] ]
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: <<ConstN1:i\d+>> IntConstant -1
+ // CHECK-DAG: <<Mul:i\d+>> Mul [<<Arg>>,<<ConstN1>>]
+ // CHECK-DAG: Return [<<Mul>>]
// CHECK-START: int Main.MulN1(int) instruction_simplifier (after)
- // CHECK-DAG: [[Arg:i\d+]] ParameterValue
- // CHECK-DAG: [[Neg:i\d+]] Neg [ [[Arg]] ]
- // CHECK-DAG: Return [ [[Neg]] ]
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: <<Neg:i\d+>> Neg [<<Arg>>]
+ // CHECK-DAG: Return [<<Neg>>]
// CHECK-START: int Main.MulN1(int) instruction_simplifier (after)
// CHECK-NOT: Mul
@@ -143,16 +155,16 @@
}
// CHECK-START: long Main.MulPowerOfTwo128(long) instruction_simplifier (before)
- // CHECK-DAG: [[Arg:j\d+]] ParameterValue
- // CHECK-DAG: [[Const128:j\d+]] LongConstant 128
- // CHECK-DAG: [[Mul:j\d+]] Mul [ [[Arg]] [[Const128]] ]
- // CHECK-DAG: Return [ [[Mul]] ]
+ // CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ // CHECK-DAG: <<Const128:j\d+>> LongConstant 128
+ // CHECK-DAG: <<Mul:j\d+>> Mul [<<Arg>>,<<Const128>>]
+ // CHECK-DAG: Return [<<Mul>>]
// CHECK-START: long Main.MulPowerOfTwo128(long) instruction_simplifier (after)
- // CHECK-DAG: [[Arg:j\d+]] ParameterValue
- // CHECK-DAG: [[Const7:i\d+]] IntConstant 7
- // CHECK-DAG: [[Shl:j\d+]] Shl [ [[Arg]] [[Const7]] ]
- // CHECK-DAG: Return [ [[Shl]] ]
+ // CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ // CHECK-DAG: <<Const7:i\d+>> IntConstant 7
+ // CHECK-DAG: <<Shl:j\d+>> Shl [<<Arg>>,<<Const7>>]
+ // CHECK-DAG: Return [<<Shl>>]
// CHECK-START: long Main.MulPowerOfTwo128(long) instruction_simplifier (after)
// CHECK-NOT: Mul
@@ -162,14 +174,14 @@
}
// CHECK-START: int Main.Or0(int) instruction_simplifier (before)
- // CHECK-DAG: [[Arg:i\d+]] ParameterValue
- // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
- // CHECK-DAG: [[Or:i\d+]] Or [ [[Arg]] [[Const0]] ]
- // CHECK-DAG: Return [ [[Or]] ]
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+ // CHECK-DAG: <<Or:i\d+>> Or [<<Arg>>,<<Const0>>]
+ // CHECK-DAG: Return [<<Or>>]
// CHECK-START: int Main.Or0(int) instruction_simplifier (after)
- // CHECK-DAG: [[Arg:i\d+]] ParameterValue
- // CHECK-DAG: Return [ [[Arg]] ]
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: Return [<<Arg>>]
// CHECK-START: int Main.Or0(int) instruction_simplifier (after)
// CHECK-NOT: Or
@@ -179,13 +191,13 @@
}
// CHECK-START: long Main.OrSame(long) instruction_simplifier (before)
- // CHECK-DAG: [[Arg:j\d+]] ParameterValue
- // CHECK-DAG: [[Or:j\d+]] Or [ [[Arg]] [[Arg]] ]
- // CHECK-DAG: Return [ [[Or]] ]
+ // CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ // CHECK-DAG: <<Or:j\d+>> Or [<<Arg>>,<<Arg>>]
+ // CHECK-DAG: Return [<<Or>>]
// CHECK-START: long Main.OrSame(long) instruction_simplifier (after)
- // CHECK-DAG: [[Arg:j\d+]] ParameterValue
- // CHECK-DAG: Return [ [[Arg]] ]
+ // CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ // CHECK-DAG: Return [<<Arg>>]
// CHECK-START: long Main.OrSame(long) instruction_simplifier (after)
// CHECK-NOT: Or
@@ -195,14 +207,14 @@
}
// CHECK-START: int Main.Shl0(int) instruction_simplifier (before)
- // CHECK-DAG: [[Arg:i\d+]] ParameterValue
- // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
- // CHECK-DAG: [[Shl:i\d+]] Shl [ [[Arg]] [[Const0]] ]
- // CHECK-DAG: Return [ [[Shl]] ]
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+ // CHECK-DAG: <<Shl:i\d+>> Shl [<<Arg>>,<<Const0>>]
+ // CHECK-DAG: Return [<<Shl>>]
// CHECK-START: int Main.Shl0(int) instruction_simplifier (after)
- // CHECK-DAG: [[Arg:i\d+]] ParameterValue
- // CHECK-DAG: Return [ [[Arg]] ]
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: Return [<<Arg>>]
// CHECK-START: int Main.Shl0(int) instruction_simplifier (after)
// CHECK-NOT: Shl
@@ -211,15 +223,33 @@
return arg << 0;
}
+ // CHECK-START: int Main.Shl1(int) instruction_simplifier (before)
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ // CHECK-DAG: <<Shl:i\d+>> Shl [<<Arg>>,<<Const1>>]
+ // CHECK-DAG: Return [<<Shl>>]
+
+ // CHECK-START: int Main.Shl1(int) instruction_simplifier (after)
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: <<Add:i\d+>> Add [<<Arg>>,<<Arg>>]
+ // CHECK-DAG: Return [<<Add>>]
+
+ // CHECK-START: int Main.Shl1(int) instruction_simplifier (after)
+ // CHECK-NOT: Shl
+
+ public static int Shl1(int arg) {
+ return arg << 1;
+ }
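+
+  // The checks above verify that a shift left by one is strength-reduced
+  // to an addition (arg + arg) rather than being kept as a Shl.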
+
// CHECK-START: long Main.Shr0(long) instruction_simplifier (before)
- // CHECK-DAG: [[Arg:j\d+]] ParameterValue
- // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
- // CHECK-DAG: [[Shr:j\d+]] Shr [ [[Arg]] [[Const0]] ]
- // CHECK-DAG: Return [ [[Shr]] ]
+ // CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+ // CHECK-DAG: <<Shr:j\d+>> Shr [<<Arg>>,<<Const0>>]
+ // CHECK-DAG: Return [<<Shr>>]
// CHECK-START: long Main.Shr0(long) instruction_simplifier (after)
- // CHECK-DAG: [[Arg:j\d+]] ParameterValue
- // CHECK-DAG: Return [ [[Arg]] ]
+ // CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ // CHECK-DAG: Return [<<Arg>>]
// CHECK-START: long Main.Shr0(long) instruction_simplifier (after)
// CHECK-NOT: Shr
@@ -229,14 +259,14 @@
}
// CHECK-START: long Main.Sub0(long) instruction_simplifier (before)
- // CHECK-DAG: [[Arg:j\d+]] ParameterValue
- // CHECK-DAG: [[Const0:j\d+]] LongConstant 0
- // CHECK-DAG: [[Sub:j\d+]] Sub [ [[Arg]] [[Const0]] ]
- // CHECK-DAG: Return [ [[Sub]] ]
+ // CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:j\d+>> LongConstant 0
+ // CHECK-DAG: <<Sub:j\d+>> Sub [<<Arg>>,<<Const0>>]
+ // CHECK-DAG: Return [<<Sub>>]
// CHECK-START: long Main.Sub0(long) instruction_simplifier (after)
- // CHECK-DAG: [[Arg:j\d+]] ParameterValue
- // CHECK-DAG: Return [ [[Arg]] ]
+ // CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ // CHECK-DAG: Return [<<Arg>>]
// CHECK-START: long Main.Sub0(long) instruction_simplifier (after)
// CHECK-NOT: Sub
@@ -246,15 +276,15 @@
}
// CHECK-START: int Main.SubAliasNeg(int) instruction_simplifier (before)
- // CHECK-DAG: [[Arg:i\d+]] ParameterValue
- // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
- // CHECK-DAG: [[Sub:i\d+]] Sub [ [[Const0]] [[Arg]] ]
- // CHECK-DAG: Return [ [[Sub]] ]
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+ // CHECK-DAG: <<Sub:i\d+>> Sub [<<Const0>>,<<Arg>>]
+ // CHECK-DAG: Return [<<Sub>>]
// CHECK-START: int Main.SubAliasNeg(int) instruction_simplifier (after)
- // CHECK-DAG: [[Arg:i\d+]] ParameterValue
- // CHECK-DAG: [[Neg:i\d+]] Neg [ [[Arg]] ]
- // CHECK-DAG: Return [ [[Neg]] ]
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: <<Neg:i\d+>> Neg [<<Arg>>]
+ // CHECK-DAG: Return [<<Neg>>]
// CHECK-START: int Main.SubAliasNeg(int) instruction_simplifier (after)
// CHECK-NOT: Sub
@@ -264,14 +294,14 @@
}
// CHECK-START: long Main.UShr0(long) instruction_simplifier (before)
- // CHECK-DAG: [[Arg:j\d+]] ParameterValue
- // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
- // CHECK-DAG: [[UShr:j\d+]] UShr [ [[Arg]] [[Const0]] ]
- // CHECK-DAG: Return [ [[UShr]] ]
+ // CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+ // CHECK-DAG: <<UShr:j\d+>> UShr [<<Arg>>,<<Const0>>]
+ // CHECK-DAG: Return [<<UShr>>]
// CHECK-START: long Main.UShr0(long) instruction_simplifier (after)
- // CHECK-DAG: [[Arg:j\d+]] ParameterValue
- // CHECK-DAG: Return [ [[Arg]] ]
+ // CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ // CHECK-DAG: Return [<<Arg>>]
// CHECK-START: long Main.UShr0(long) instruction_simplifier (after)
// CHECK-NOT: UShr
@@ -281,14 +311,14 @@
}
// CHECK-START: int Main.Xor0(int) instruction_simplifier (before)
- // CHECK-DAG: [[Arg:i\d+]] ParameterValue
- // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
- // CHECK-DAG: [[Xor:i\d+]] Xor [ [[Arg]] [[Const0]] ]
- // CHECK-DAG: Return [ [[Xor]] ]
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+ // CHECK-DAG: <<Xor:i\d+>> Xor [<<Arg>>,<<Const0>>]
+ // CHECK-DAG: Return [<<Xor>>]
// CHECK-START: int Main.Xor0(int) instruction_simplifier (after)
- // CHECK-DAG: [[Arg:i\d+]] ParameterValue
- // CHECK-DAG: Return [ [[Arg]] ]
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: Return [<<Arg>>]
// CHECK-START: int Main.Xor0(int) instruction_simplifier (after)
// CHECK-NOT: Xor
@@ -298,15 +328,15 @@
}
// CHECK-START: int Main.XorAllOnes(int) instruction_simplifier (before)
- // CHECK-DAG: [[Arg:i\d+]] ParameterValue
- // CHECK-DAG: [[ConstF:i\d+]] IntConstant -1
- // CHECK-DAG: [[Xor:i\d+]] Xor [ [[Arg]] [[ConstF]] ]
- // CHECK-DAG: Return [ [[Xor]] ]
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: <<ConstF:i\d+>> IntConstant -1
+ // CHECK-DAG: <<Xor:i\d+>> Xor [<<Arg>>,<<ConstF>>]
+ // CHECK-DAG: Return [<<Xor>>]
// CHECK-START: int Main.XorAllOnes(int) instruction_simplifier (after)
- // CHECK-DAG: [[Arg:i\d+]] ParameterValue
- // CHECK-DAG: [[Not:i\d+]] Not [ [[Arg]] ]
- // CHECK-DAG: Return [ [[Not]] ]
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: <<Not:i\d+>> Not [<<Arg>>]
+ // CHECK-DAG: Return [<<Not>>]
// CHECK-START: int Main.XorAllOnes(int) instruction_simplifier (after)
// CHECK-NOT: Xor
@@ -323,20 +353,20 @@
*/
// CHECK-START: int Main.AddNegs1(int, int) instruction_simplifier (before)
- // CHECK-DAG: [[Arg1:i\d+]] ParameterValue
- // CHECK-DAG: [[Arg2:i\d+]] ParameterValue
- // CHECK-DAG: [[Neg1:i\d+]] Neg [ [[Arg1]] ]
- // CHECK-DAG: [[Neg2:i\d+]] Neg [ [[Arg2]] ]
- // CHECK-DAG: [[Add:i\d+]] Add [ [[Neg1]] [[Neg2]] ]
- // CHECK-DAG: Return [ [[Add]] ]
+ // CHECK-DAG: <<Arg1:i\d+>> ParameterValue
+ // CHECK-DAG: <<Arg2:i\d+>> ParameterValue
+ // CHECK-DAG: <<Neg1:i\d+>> Neg [<<Arg1>>]
+ // CHECK-DAG: <<Neg2:i\d+>> Neg [<<Arg2>>]
+ // CHECK-DAG: <<Add:i\d+>> Add [<<Neg1>>,<<Neg2>>]
+ // CHECK-DAG: Return [<<Add>>]
// CHECK-START: int Main.AddNegs1(int, int) instruction_simplifier (after)
- // CHECK-DAG: [[Arg1:i\d+]] ParameterValue
- // CHECK-DAG: [[Arg2:i\d+]] ParameterValue
+ // CHECK-DAG: <<Arg1:i\d+>> ParameterValue
+ // CHECK-DAG: <<Arg2:i\d+>> ParameterValue
// CHECK-NOT: Neg
- // CHECK-DAG: [[Add:i\d+]] Add [ [[Arg1]] [[Arg2]] ]
- // CHECK-DAG: [[Neg:i\d+]] Neg [ [[Add]] ]
- // CHECK-DAG: Return [ [[Neg]] ]
+ // CHECK-DAG: <<Add:i\d+>> Add [<<Arg1>>,<<Arg2>>]
+ // CHECK-DAG: <<Neg:i\d+>> Neg [<<Add>>]
+ // CHECK-DAG: Return [<<Neg>>]
public static int AddNegs1(int arg1, int arg2) {
return -arg1 + -arg2;
@@ -354,25 +384,34 @@
*/
// CHECK-START: int Main.AddNegs2(int, int) instruction_simplifier (before)
- // CHECK-DAG: [[Arg1:i\d+]] ParameterValue
- // CHECK-DAG: [[Arg2:i\d+]] ParameterValue
- // CHECK-DAG: [[Neg1:i\d+]] Neg [ [[Arg1]] ]
- // CHECK-DAG: [[Neg2:i\d+]] Neg [ [[Arg2]] ]
- // CHECK-DAG: [[Add1:i\d+]] Add [ [[Neg1]] [[Neg2]] ]
- // CHECK-DAG: [[Add2:i\d+]] Add [ [[Neg1]] [[Neg2]] ]
- // CHECK-DAG: [[Or:i\d+]] Or [ [[Add1]] [[Add2]] ]
- // CHECK-DAG: Return [ [[Or]] ]
+ // CHECK-DAG: <<Arg1:i\d+>> ParameterValue
+ // CHECK-DAG: <<Arg2:i\d+>> ParameterValue
+ // CHECK-DAG: <<Neg1:i\d+>> Neg [<<Arg1>>]
+ // CHECK-DAG: <<Neg2:i\d+>> Neg [<<Arg2>>]
+ // CHECK-DAG: <<Add1:i\d+>> Add [<<Neg1>>,<<Neg2>>]
+ // CHECK-DAG: <<Add2:i\d+>> Add [<<Neg1>>,<<Neg2>>]
+ // CHECK-DAG: <<Or:i\d+>> Or [<<Add1>>,<<Add2>>]
+ // CHECK-DAG: Return [<<Or>>]
// CHECK-START: int Main.AddNegs2(int, int) instruction_simplifier (after)
- // CHECK-DAG: [[Arg1:i\d+]] ParameterValue
- // CHECK-DAG: [[Arg2:i\d+]] ParameterValue
- // CHECK-DAG: [[Neg1:i\d+]] Neg [ [[Arg1]] ]
- // CHECK-DAG: [[Neg2:i\d+]] Neg [ [[Arg2]] ]
- // CHECK-DAG: [[Add1:i\d+]] Add [ [[Neg1]] [[Neg2]] ]
- // CHECK-DAG: [[Add2:i\d+]] Add [ [[Neg1]] [[Neg2]] ]
+ // CHECK-DAG: <<Arg1:i\d+>> ParameterValue
+ // CHECK-DAG: <<Arg2:i\d+>> ParameterValue
+ // CHECK-DAG: <<Neg1:i\d+>> Neg [<<Arg1>>]
+ // CHECK-DAG: <<Neg2:i\d+>> Neg [<<Arg2>>]
+ // CHECK-DAG: <<Add1:i\d+>> Add [<<Neg1>>,<<Neg2>>]
+ // CHECK-DAG: <<Add2:i\d+>> Add [<<Neg1>>,<<Neg2>>]
// CHECK-NOT: Neg
- // CHECK-DAG: [[Or:i\d+]] Or [ [[Add1]] [[Add2]] ]
- // CHECK-DAG: Return [ [[Or]] ]
+ // CHECK-DAG: <<Or:i\d+>> Or [<<Add1>>,<<Add2>>]
+ // CHECK-DAG: Return [<<Or>>]
+
+ // CHECK-START: int Main.AddNegs2(int, int) GVN (after)
+ // CHECK-DAG: <<Arg1:i\d+>> ParameterValue
+ // CHECK-DAG: <<Arg2:i\d+>> ParameterValue
+ // CHECK-DAG: <<Neg1:i\d+>> Neg [<<Arg1>>]
+ // CHECK-DAG: <<Neg2:i\d+>> Neg [<<Arg2>>]
+ // CHECK-DAG: <<Add:i\d+>> Add [<<Neg1>>,<<Neg2>>]
+ // CHECK-DAG: <<Or:i\d+>> Or [<<Add>>,<<Add>>]
+ // CHECK-DAG: Return [<<Or>>]
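+  //
+  // After GVN the two syntactically identical Adds are recognized as the
+  // same value, so a single Add feeds both inputs of the Or, as the checks
+  // above verify.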
public static int AddNegs2(int arg1, int arg2) {
int temp1 = -arg1;
@@ -390,26 +429,26 @@
// CHECK-START: long Main.AddNegs3(long, long) instruction_simplifier (before)
// -------------- Arguments and initial negation operations.
- // CHECK-DAG: [[Arg1:j\d+]] ParameterValue
- // CHECK-DAG: [[Arg2:j\d+]] ParameterValue
- // CHECK-DAG: [[Neg1:j\d+]] Neg [ [[Arg1]] ]
- // CHECK-DAG: [[Neg2:j\d+]] Neg [ [[Arg2]] ]
+ // CHECK-DAG: <<Arg1:j\d+>> ParameterValue
+ // CHECK-DAG: <<Arg2:j\d+>> ParameterValue
+ // CHECK-DAG: <<Neg1:j\d+>> Neg [<<Arg1>>]
+ // CHECK-DAG: <<Neg2:j\d+>> Neg [<<Arg2>>]
// CHECK: Goto
// -------------- Loop
// CHECK: SuspendCheck
- // CHECK: [[Add:j\d+]] Add [ [[Neg1]] [[Neg2]] ]
+ // CHECK: <<Add:j\d+>> Add [<<Neg1>>,<<Neg2>>]
// CHECK: Goto
// CHECK-START: long Main.AddNegs3(long, long) instruction_simplifier (after)
// -------------- Arguments and initial negation operations.
- // CHECK-DAG: [[Arg1:j\d+]] ParameterValue
- // CHECK-DAG: [[Arg2:j\d+]] ParameterValue
- // CHECK-DAG: [[Neg1:j\d+]] Neg [ [[Arg1]] ]
- // CHECK-DAG: [[Neg2:j\d+]] Neg [ [[Arg2]] ]
+ // CHECK-DAG: <<Arg1:j\d+>> ParameterValue
+ // CHECK-DAG: <<Arg2:j\d+>> ParameterValue
+ // CHECK-DAG: <<Neg1:j\d+>> Neg [<<Arg1>>]
+ // CHECK-DAG: <<Neg2:j\d+>> Neg [<<Arg2>>]
// CHECK: Goto
// -------------- Loop
// CHECK: SuspendCheck
- // CHECK: [[Add:j\d+]] Add [ [[Neg1]] [[Neg2]] ]
+ // CHECK: <<Add:j\d+>> Add [<<Neg1>>,<<Neg2>>]
// CHECK-NOT: Neg
// CHECK: Goto
@@ -430,17 +469,17 @@
*/
// CHECK-START: long Main.AddNeg1(long, long) instruction_simplifier (before)
- // CHECK-DAG: [[Arg1:j\d+]] ParameterValue
- // CHECK-DAG: [[Arg2:j\d+]] ParameterValue
- // CHECK-DAG: [[Neg:j\d+]] Neg [ [[Arg1]] ]
- // CHECK-DAG: [[Add:j\d+]] Add [ [[Neg]] [[Arg2]] ]
- // CHECK-DAG: Return [ [[Add]] ]
+ // CHECK-DAG: <<Arg1:j\d+>> ParameterValue
+ // CHECK-DAG: <<Arg2:j\d+>> ParameterValue
+ // CHECK-DAG: <<Neg:j\d+>> Neg [<<Arg1>>]
+ // CHECK-DAG: <<Add:j\d+>> Add [<<Neg>>,<<Arg2>>]
+ // CHECK-DAG: Return [<<Add>>]
// CHECK-START: long Main.AddNeg1(long, long) instruction_simplifier (after)
- // CHECK-DAG: [[Arg1:j\d+]] ParameterValue
- // CHECK-DAG: [[Arg2:j\d+]] ParameterValue
- // CHECK-DAG: [[Sub:j\d+]] Sub [ [[Arg2]] [[Arg1]] ]
- // CHECK-DAG: Return [ [[Sub]] ]
+ // CHECK-DAG: <<Arg1:j\d+>> ParameterValue
+ // CHECK-DAG: <<Arg2:j\d+>> ParameterValue
+ // CHECK-DAG: <<Sub:j\d+>> Sub [<<Arg2>>,<<Arg1>>]
+ // CHECK-DAG: Return [<<Sub>>]
// CHECK-START: long Main.AddNeg1(long, long) instruction_simplifier (after)
// CHECK-NOT: Neg
@@ -460,22 +499,22 @@
*/
// CHECK-START: long Main.AddNeg2(long, long) instruction_simplifier (before)
- // CHECK-DAG: [[Arg1:j\d+]] ParameterValue
- // CHECK-DAG: [[Arg2:j\d+]] ParameterValue
- // CHECK-DAG: [[Neg:j\d+]] Neg [ [[Arg2]] ]
- // CHECK-DAG: [[Add1:j\d+]] Add [ [[Arg1]] [[Neg]] ]
- // CHECK-DAG: [[Add2:j\d+]] Add [ [[Arg1]] [[Neg]] ]
- // CHECK-DAG: [[Res:j\d+]] Or [ [[Add1]] [[Add2]] ]
- // CHECK-DAG: Return [ [[Res]] ]
+ // CHECK-DAG: <<Arg1:j\d+>> ParameterValue
+ // CHECK-DAG: <<Arg2:j\d+>> ParameterValue
+ // CHECK-DAG: <<Neg:j\d+>> Neg [<<Arg2>>]
+ // CHECK-DAG: <<Add1:j\d+>> Add [<<Arg1>>,<<Neg>>]
+ // CHECK-DAG: <<Add2:j\d+>> Add [<<Arg1>>,<<Neg>>]
+ // CHECK-DAG: <<Res:j\d+>> Or [<<Add1>>,<<Add2>>]
+ // CHECK-DAG: Return [<<Res>>]
// CHECK-START: long Main.AddNeg2(long, long) instruction_simplifier (after)
- // CHECK-DAG: [[Arg1:j\d+]] ParameterValue
- // CHECK-DAG: [[Arg2:j\d+]] ParameterValue
- // CHECK-DAG: [[Neg:j\d+]] Neg [ [[Arg2]] ]
- // CHECK-DAG: [[Add1:j\d+]] Add [ [[Arg1]] [[Neg]] ]
- // CHECK-DAG: [[Add2:j\d+]] Add [ [[Arg1]] [[Neg]] ]
- // CHECK-DAG: [[Res:j\d+]] Or [ [[Add1]] [[Add2]] ]
- // CHECK-DAG: Return [ [[Res]] ]
+ // CHECK-DAG: <<Arg1:j\d+>> ParameterValue
+ // CHECK-DAG: <<Arg2:j\d+>> ParameterValue
+ // CHECK-DAG: <<Neg:j\d+>> Neg [<<Arg2>>]
+ // CHECK-DAG: <<Add1:j\d+>> Add [<<Arg1>>,<<Neg>>]
+ // CHECK-DAG: <<Add2:j\d+>> Add [<<Arg1>>,<<Neg>>]
+ // CHECK-DAG: <<Res:j\d+>> Or [<<Add1>>,<<Add2>>]
+ // CHECK-DAG: Return [<<Res>>]
// CHECK-START: long Main.AddNeg2(long, long) instruction_simplifier (after)
// CHECK-NOT: Sub
@@ -491,14 +530,14 @@
*/
// CHECK-START: long Main.NegNeg1(long) instruction_simplifier (before)
- // CHECK-DAG: [[Arg:j\d+]] ParameterValue
- // CHECK-DAG: [[Neg1:j\d+]] Neg [ [[Arg]] ]
- // CHECK-DAG: [[Neg2:j\d+]] Neg [ [[Neg1]] ]
- // CHECK-DAG: Return [ [[Neg2]] ]
+ // CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ // CHECK-DAG: <<Neg1:j\d+>> Neg [<<Arg>>]
+ // CHECK-DAG: <<Neg2:j\d+>> Neg [<<Neg1>>]
+ // CHECK-DAG: Return [<<Neg2>>]
// CHECK-START: long Main.NegNeg1(long) instruction_simplifier (after)
- // CHECK-DAG: [[Arg:j\d+]] ParameterValue
- // CHECK-DAG: Return [ [[Arg]] ]
+ // CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ // CHECK-DAG: Return [<<Arg>>]
// CHECK-START: long Main.NegNeg1(long) instruction_simplifier (after)
// CHECK-NOT: Neg
@@ -515,21 +554,27 @@
*/
// CHECK-START: int Main.NegNeg2(int) instruction_simplifier (before)
- // CHECK-DAG: [[Arg:i\d+]] ParameterValue
- // CHECK-DAG: [[Neg1:i\d+]] Neg [ [[Arg]] ]
- // CHECK-DAG: [[Neg2:i\d+]] Neg [ [[Neg1]] ]
- // CHECK-DAG: [[Add:i\d+]] Add [ [[Neg1]] [[Neg2]] ]
- // CHECK-DAG: Return [ [[Add]] ]
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: <<Neg1:i\d+>> Neg [<<Arg>>]
+ // CHECK-DAG: <<Neg2:i\d+>> Neg [<<Neg1>>]
+ // CHECK-DAG: <<Add:i\d+>> Add [<<Neg1>>,<<Neg2>>]
+ // CHECK-DAG: Return [<<Add>>]
// CHECK-START: int Main.NegNeg2(int) instruction_simplifier (after)
- // CHECK-DAG: [[Arg:i\d+]] ParameterValue
- // CHECK-DAG: [[Sub:i\d+]] Sub [ [[Arg]] [[Arg]] ]
- // CHECK-DAG: Return [ [[Sub]] ]
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: <<Sub:i\d+>> Sub [<<Arg>>,<<Arg>>]
+ // CHECK-DAG: Return [<<Sub>>]
// CHECK-START: int Main.NegNeg2(int) instruction_simplifier (after)
// CHECK-NOT: Neg
// CHECK-NOT: Add
+ // CHECK-START: int Main.NegNeg2(int) constant_folding_after_inlining (after)
+ // CHECK: <<Const0:i\d+>> IntConstant 0
+ // CHECK-NOT: Neg
+ // CHECK-NOT: Add
+ // CHECK: Return [<<Const0>>]
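+  //
+  // The Sub [<<Arg>>,<<Arg>>] left by the simplifier is folded to the
+  // constant 0 once constant folding runs again after inlining, as the
+  // checks above verify.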
+
public static int NegNeg2(int arg) {
int temp = -arg;
return temp + -temp;
@@ -543,15 +588,15 @@
*/
// CHECK-START: long Main.NegNeg3(long) instruction_simplifier (before)
- // CHECK-DAG: [[Arg:j\d+]] ParameterValue
- // CHECK-DAG: [[Const0:j\d+]] LongConstant 0
- // CHECK-DAG: [[Neg:j\d+]] Neg [ [[Arg]] ]
- // CHECK-DAG: [[Sub:j\d+]] Sub [ [[Const0]] [[Neg]] ]
- // CHECK-DAG: Return [ [[Sub]] ]
+ // CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:j\d+>> LongConstant 0
+ // CHECK-DAG: <<Neg:j\d+>> Neg [<<Arg>>]
+ // CHECK-DAG: <<Sub:j\d+>> Sub [<<Const0>>,<<Neg>>]
+ // CHECK-DAG: Return [<<Sub>>]
// CHECK-START: long Main.NegNeg3(long) instruction_simplifier (after)
- // CHECK-DAG: [[Arg:j\d+]] ParameterValue
- // CHECK-DAG: Return [ [[Arg]] ]
+ // CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ // CHECK-DAG: Return [<<Arg>>]
// CHECK-START: long Main.NegNeg3(long) instruction_simplifier (after)
// CHECK-NOT: Neg
@@ -568,17 +613,17 @@
*/
// CHECK-START: int Main.NegSub1(int, int) instruction_simplifier (before)
- // CHECK-DAG: [[Arg1:i\d+]] ParameterValue
- // CHECK-DAG: [[Arg2:i\d+]] ParameterValue
- // CHECK-DAG: [[Sub:i\d+]] Sub [ [[Arg1]] [[Arg2]] ]
- // CHECK-DAG: [[Neg:i\d+]] Neg [ [[Sub]] ]
- // CHECK-DAG: Return [ [[Neg]] ]
+ // CHECK-DAG: <<Arg1:i\d+>> ParameterValue
+ // CHECK-DAG: <<Arg2:i\d+>> ParameterValue
+ // CHECK-DAG: <<Sub:i\d+>> Sub [<<Arg1>>,<<Arg2>>]
+ // CHECK-DAG: <<Neg:i\d+>> Neg [<<Sub>>]
+ // CHECK-DAG: Return [<<Neg>>]
// CHECK-START: int Main.NegSub1(int, int) instruction_simplifier (after)
- // CHECK-DAG: [[Arg1:i\d+]] ParameterValue
- // CHECK-DAG: [[Arg2:i\d+]] ParameterValue
- // CHECK-DAG: [[Sub:i\d+]] Sub [ [[Arg2]] [[Arg1]] ]
- // CHECK-DAG: Return [ [[Sub]] ]
+ // CHECK-DAG: <<Arg1:i\d+>> ParameterValue
+ // CHECK-DAG: <<Arg2:i\d+>> ParameterValue
+ // CHECK-DAG: <<Sub:i\d+>> Sub [<<Arg2>>,<<Arg1>>]
+ // CHECK-DAG: Return [<<Sub>>]
// CHECK-START: int Main.NegSub1(int, int) instruction_simplifier (after)
// CHECK-NOT: Neg
@@ -598,22 +643,22 @@
*/
// CHECK-START: int Main.NegSub2(int, int) instruction_simplifier (before)
- // CHECK-DAG: [[Arg1:i\d+]] ParameterValue
- // CHECK-DAG: [[Arg2:i\d+]] ParameterValue
- // CHECK-DAG: [[Sub:i\d+]] Sub [ [[Arg1]] [[Arg2]] ]
- // CHECK-DAG: [[Neg1:i\d+]] Neg [ [[Sub]] ]
- // CHECK-DAG: [[Neg2:i\d+]] Neg [ [[Sub]] ]
- // CHECK-DAG: [[Or:i\d+]] Or [ [[Neg1]] [[Neg2]] ]
- // CHECK-DAG: Return [ [[Or]] ]
+ // CHECK-DAG: <<Arg1:i\d+>> ParameterValue
+ // CHECK-DAG: <<Arg2:i\d+>> ParameterValue
+ // CHECK-DAG: <<Sub:i\d+>> Sub [<<Arg1>>,<<Arg2>>]
+ // CHECK-DAG: <<Neg1:i\d+>> Neg [<<Sub>>]
+ // CHECK-DAG: <<Neg2:i\d+>> Neg [<<Sub>>]
+ // CHECK-DAG: <<Or:i\d+>> Or [<<Neg1>>,<<Neg2>>]
+ // CHECK-DAG: Return [<<Or>>]
// CHECK-START: int Main.NegSub2(int, int) instruction_simplifier (after)
- // CHECK-DAG: [[Arg1:i\d+]] ParameterValue
- // CHECK-DAG: [[Arg2:i\d+]] ParameterValue
- // CHECK-DAG: [[Sub:i\d+]] Sub [ [[Arg1]] [[Arg2]] ]
- // CHECK-DAG: [[Neg1:i\d+]] Neg [ [[Sub]] ]
- // CHECK-DAG: [[Neg2:i\d+]] Neg [ [[Sub]] ]
- // CHECK-DAG: [[Or:i\d+]] Or [ [[Neg1]] [[Neg2]] ]
- // CHECK-DAG: Return [ [[Or]] ]
+ // CHECK-DAG: <<Arg1:i\d+>> ParameterValue
+ // CHECK-DAG: <<Arg2:i\d+>> ParameterValue
+ // CHECK-DAG: <<Sub:i\d+>> Sub [<<Arg1>>,<<Arg2>>]
+ // CHECK-DAG: <<Neg1:i\d+>> Neg [<<Sub>>]
+ // CHECK-DAG: <<Neg2:i\d+>> Neg [<<Sub>>]
+ // CHECK-DAG: <<Or:i\d+>> Or [<<Neg1>>,<<Neg2>>]
+ // CHECK-DAG: Return [<<Or>>]
public static int NegSub2(int arg1, int arg2) {
int temp = arg1 - arg2;
@@ -626,15 +671,15 @@
*/
// CHECK-START: long Main.NotNot1(long) instruction_simplifier (before)
- // CHECK-DAG: [[Arg:j\d+]] ParameterValue
- // CHECK-DAG: [[ConstF1:j\d+]] LongConstant -1
- // CHECK-DAG: [[Xor1:j\d+]] Xor [ [[Arg]] [[ConstF1]] ]
- // CHECK-DAG: [[Xor2:j\d+]] Xor [ [[Xor1]] [[ConstF1]] ]
- // CHECK-DAG: Return [ [[Xor2]] ]
+ // CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ // CHECK-DAG: <<ConstF1:j\d+>> LongConstant -1
+ // CHECK-DAG: <<Xor1:j\d+>> Xor [<<Arg>>,<<ConstF1>>]
+ // CHECK-DAG: <<Xor2:j\d+>> Xor [<<Xor1>>,<<ConstF1>>]
+ // CHECK-DAG: Return [<<Xor2>>]
// CHECK-START: long Main.NotNot1(long) instruction_simplifier (after)
- // CHECK-DAG: [[Arg:j\d+]] ParameterValue
- // CHECK-DAG: Return [ [[Arg]] ]
+ // CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ // CHECK-DAG: Return [<<Arg>>]
// CHECK-START: long Main.NotNot1(long) instruction_simplifier (after)
// CHECK-NOT: Xor
@@ -644,18 +689,18 @@
}
// CHECK-START: int Main.NotNot2(int) instruction_simplifier (before)
- // CHECK-DAG: [[Arg:i\d+]] ParameterValue
- // CHECK-DAG: [[ConstF1:i\d+]] IntConstant -1
- // CHECK-DAG: [[Xor1:i\d+]] Xor [ [[Arg]] [[ConstF1]] ]
- // CHECK-DAG: [[Xor2:i\d+]] Xor [ [[Xor1]] [[ConstF1]] ]
- // CHECK-DAG: [[Add:i\d+]] Add [ [[Xor1]] [[Xor2]] ]
- // CHECK-DAG: Return [ [[Add]] ]
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: <<ConstF1:i\d+>> IntConstant -1
+ // CHECK-DAG: <<Xor1:i\d+>> Xor [<<Arg>>,<<ConstF1>>]
+ // CHECK-DAG: <<Xor2:i\d+>> Xor [<<Xor1>>,<<ConstF1>>]
+ // CHECK-DAG: <<Add:i\d+>> Add [<<Xor1>>,<<Xor2>>]
+ // CHECK-DAG: Return [<<Add>>]
// CHECK-START: int Main.NotNot2(int) instruction_simplifier (after)
- // CHECK-DAG: [[Arg:i\d+]] ParameterValue
- // CHECK-DAG: [[Not:i\d+]] Not [ [[Arg]] ]
- // CHECK-DAG: [[Add:i\d+]] Add [ [[Not]] [[Arg]] ]
- // CHECK-DAG: Return [ [[Add]] ]
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: <<Not:i\d+>> Not [<<Arg>>]
+ // CHECK-DAG: <<Add:i\d+>> Add [<<Not>>,<<Arg>>]
+ // CHECK-DAG: Return [<<Add>>]
// CHECK-START: int Main.NotNot2(int) instruction_simplifier (after)
// CHECK-NOT: Xor
@@ -671,18 +716,18 @@
*/
// CHECK-START: int Main.SubNeg1(int, int) instruction_simplifier (before)
- // CHECK-DAG: [[Arg1:i\d+]] ParameterValue
- // CHECK-DAG: [[Arg2:i\d+]] ParameterValue
- // CHECK-DAG: [[Neg:i\d+]] Neg [ [[Arg1]] ]
- // CHECK-DAG: [[Sub:i\d+]] Sub [ [[Neg]] [[Arg2]] ]
- // CHECK-DAG: Return [ [[Sub]] ]
+ // CHECK-DAG: <<Arg1:i\d+>> ParameterValue
+ // CHECK-DAG: <<Arg2:i\d+>> ParameterValue
+ // CHECK-DAG: <<Neg:i\d+>> Neg [<<Arg1>>]
+ // CHECK-DAG: <<Sub:i\d+>> Sub [<<Neg>>,<<Arg2>>]
+ // CHECK-DAG: Return [<<Sub>>]
// CHECK-START: int Main.SubNeg1(int, int) instruction_simplifier (after)
- // CHECK-DAG: [[Arg1:i\d+]] ParameterValue
- // CHECK-DAG: [[Arg2:i\d+]] ParameterValue
- // CHECK-DAG: [[Add:i\d+]] Add [ [[Arg1]] [[Arg2]] ]
- // CHECK-DAG: [[Neg:i\d+]] Neg [ [[Add]] ]
- // CHECK-DAG: Return [ [[Neg]] ]
+ // CHECK-DAG: <<Arg1:i\d+>> ParameterValue
+ // CHECK-DAG: <<Arg2:i\d+>> ParameterValue
+ // CHECK-DAG: <<Add:i\d+>> Add [<<Arg1>>,<<Arg2>>]
+ // CHECK-DAG: <<Neg:i\d+>> Neg [<<Add>>]
+ // CHECK-DAG: Return [<<Neg>>]
// CHECK-START: int Main.SubNeg1(int, int) instruction_simplifier (after)
// CHECK-NOT: Sub
@@ -702,22 +747,22 @@
*/
// CHECK-START: int Main.SubNeg2(int, int) instruction_simplifier (before)
- // CHECK-DAG: [[Arg1:i\d+]] ParameterValue
- // CHECK-DAG: [[Arg2:i\d+]] ParameterValue
- // CHECK-DAG: [[Neg:i\d+]] Neg [ [[Arg1]] ]
- // CHECK-DAG: [[Sub1:i\d+]] Sub [ [[Neg]] [[Arg2]] ]
- // CHECK-DAG: [[Sub2:i\d+]] Sub [ [[Neg]] [[Arg2]] ]
- // CHECK-DAG: [[Or:i\d+]] Or [ [[Sub1]] [[Sub2]] ]
- // CHECK-DAG: Return [ [[Or]] ]
+ // CHECK-DAG: <<Arg1:i\d+>> ParameterValue
+ // CHECK-DAG: <<Arg2:i\d+>> ParameterValue
+ // CHECK-DAG: <<Neg:i\d+>> Neg [<<Arg1>>]
+ // CHECK-DAG: <<Sub1:i\d+>> Sub [<<Neg>>,<<Arg2>>]
+ // CHECK-DAG: <<Sub2:i\d+>> Sub [<<Neg>>,<<Arg2>>]
+ // CHECK-DAG: <<Or:i\d+>> Or [<<Sub1>>,<<Sub2>>]
+ // CHECK-DAG: Return [<<Or>>]
// CHECK-START: int Main.SubNeg2(int, int) instruction_simplifier (after)
- // CHECK-DAG: [[Arg1:i\d+]] ParameterValue
- // CHECK-DAG: [[Arg2:i\d+]] ParameterValue
- // CHECK-DAG: [[Neg:i\d+]] Neg [ [[Arg1]] ]
- // CHECK-DAG: [[Sub1:i\d+]] Sub [ [[Neg]] [[Arg2]] ]
- // CHECK-DAG: [[Sub2:i\d+]] Sub [ [[Neg]] [[Arg2]] ]
- // CHECK-DAG: [[Or:i\d+]] Or [ [[Sub1]] [[Sub2]] ]
- // CHECK-DAG: Return [ [[Or]] ]
+ // CHECK-DAG: <<Arg1:i\d+>> ParameterValue
+ // CHECK-DAG: <<Arg2:i\d+>> ParameterValue
+ // CHECK-DAG: <<Neg:i\d+>> Neg [<<Arg1>>]
+ // CHECK-DAG: <<Sub1:i\d+>> Sub [<<Neg>>,<<Arg2>>]
+ // CHECK-DAG: <<Sub2:i\d+>> Sub [<<Neg>>,<<Arg2>>]
+ // CHECK-DAG: <<Or:i\d+>> Or [<<Sub1>>,<<Sub2>>]
+ // CHECK-DAG: Return [<<Or>>]
// CHECK-START: int Main.SubNeg2(int, int) instruction_simplifier (after)
// CHECK-NOT: Add
@@ -736,24 +781,24 @@
// CHECK-START: long Main.SubNeg3(long, long) instruction_simplifier (before)
// -------------- Arguments and initial negation operation.
- // CHECK-DAG: [[Arg1:j\d+]] ParameterValue
- // CHECK-DAG: [[Arg2:j\d+]] ParameterValue
- // CHECK-DAG: [[Neg:j\d+]] Neg [ [[Arg1]] ]
+ // CHECK-DAG: <<Arg1:j\d+>> ParameterValue
+ // CHECK-DAG: <<Arg2:j\d+>> ParameterValue
+ // CHECK-DAG: <<Neg:j\d+>> Neg [<<Arg1>>]
// CHECK: Goto
// -------------- Loop
// CHECK: SuspendCheck
- // CHECK: [[Sub:j\d+]] Sub [ [[Neg]] [[Arg2]] ]
+ // CHECK: <<Sub:j\d+>> Sub [<<Neg>>,<<Arg2>>]
// CHECK: Goto
// CHECK-START: long Main.SubNeg3(long, long) instruction_simplifier (after)
// -------------- Arguments and initial negation operation.
- // CHECK-DAG: [[Arg1:j\d+]] ParameterValue
- // CHECK-DAG: [[Arg2:j\d+]] ParameterValue
- // CHECK-DAG: [[Neg:j\d+]] Neg [ [[Arg1]] ]
+ // CHECK-DAG: <<Arg1:j\d+>> ParameterValue
+ // CHECK-DAG: <<Arg2:j\d+>> ParameterValue
+ // CHECK-DAG: <<Neg:j\d+>> Neg [<<Arg1>>]
// CHECK-DAG: Goto
// -------------- Loop
// CHECK: SuspendCheck
- // CHECK: [[Sub:j\d+]] Sub [ [[Neg]] [[Arg2]] ]
+ // CHECK: <<Sub:j\d+>> Sub [<<Neg>>,<<Arg2>>]
// CHECK-NOT: Neg
// CHECK: Goto
@@ -767,116 +812,116 @@
}
// CHECK-START: int Main.EqualTrueRhs(boolean) instruction_simplifier (before)
- // CHECK-DAG: [[Arg:z\d+]] ParameterValue
- // CHECK-DAG: [[Const1:i\d+]] IntConstant 1
- // CHECK-DAG: [[Cond:z\d+]] Equal [ [[Arg]] [[Const1]] ]
- // CHECK-DAG: If [ [[Cond]] ]
+ // CHECK-DAG: <<Arg:z\d+>> ParameterValue
+ // CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ // CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Const1>>]
+ // CHECK-DAG: If [<<Cond>>]
// CHECK-START: int Main.EqualTrueRhs(boolean) instruction_simplifier (after)
- // CHECK-DAG: [[Arg:z\d+]] ParameterValue
- // CHECK-DAG: If [ [[Arg]] ]
+ // CHECK-DAG: <<Arg:z\d+>> ParameterValue
+ // CHECK-DAG: If [<<Arg>>]
public static int EqualTrueRhs(boolean arg) {
return (arg != true) ? 3 : 5;
}
// CHECK-START: int Main.EqualTrueLhs(boolean) instruction_simplifier (before)
- // CHECK-DAG: [[Arg:z\d+]] ParameterValue
- // CHECK-DAG: [[Const1:i\d+]] IntConstant 1
- // CHECK-DAG: [[Cond:z\d+]] Equal [ [[Const1]] [[Arg]] ]
- // CHECK-DAG: If [ [[Cond]] ]
+ // CHECK-DAG: <<Arg:z\d+>> ParameterValue
+ // CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ // CHECK-DAG: <<Cond:z\d+>> Equal [<<Const1>>,<<Arg>>]
+ // CHECK-DAG: If [<<Cond>>]
// CHECK-START: int Main.EqualTrueLhs(boolean) instruction_simplifier (after)
- // CHECK-DAG: [[Arg:z\d+]] ParameterValue
- // CHECK-DAG: If [ [[Arg]] ]
+ // CHECK-DAG: <<Arg:z\d+>> ParameterValue
+ // CHECK-DAG: If [<<Arg>>]
public static int EqualTrueLhs(boolean arg) {
return (true != arg) ? 3 : 5;
}
// CHECK-START: int Main.EqualFalseRhs(boolean) instruction_simplifier (before)
- // CHECK-DAG: [[Arg:z\d+]] ParameterValue
- // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
- // CHECK-DAG: [[Cond:z\d+]] Equal [ [[Arg]] [[Const0]] ]
- // CHECK-DAG: If [ [[Cond]] ]
+ // CHECK-DAG: <<Arg:z\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+ // CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Const0>>]
+ // CHECK-DAG: If [<<Cond>>]
// CHECK-START: int Main.EqualFalseRhs(boolean) instruction_simplifier (after)
- // CHECK-DAG: [[Arg:z\d+]] ParameterValue
- // CHECK-DAG: [[NotArg:z\d+]] BooleanNot [ [[Arg]] ]
- // CHECK-DAG: If [ [[NotArg]] ]
+ // CHECK-DAG: <<Arg:z\d+>> ParameterValue
+ // CHECK-DAG: <<NotArg:z\d+>> BooleanNot [<<Arg>>]
+ // CHECK-DAG: If [<<NotArg>>]
public static int EqualFalseRhs(boolean arg) {
return (arg != false) ? 3 : 5;
}
// CHECK-START: int Main.EqualFalseLhs(boolean) instruction_simplifier (before)
- // CHECK-DAG: [[Arg:z\d+]] ParameterValue
- // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
- // CHECK-DAG: [[Cond:z\d+]] Equal [ [[Const0]] [[Arg]] ]
- // CHECK-DAG: If [ [[Cond]] ]
+ // CHECK-DAG: <<Arg:z\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+ // CHECK-DAG: <<Cond:z\d+>> Equal [<<Const0>>,<<Arg>>]
+ // CHECK-DAG: If [<<Cond>>]
// CHECK-START: int Main.EqualFalseLhs(boolean) instruction_simplifier (after)
- // CHECK-DAG: [[Arg:z\d+]] ParameterValue
- // CHECK-DAG: [[NotArg:z\d+]] BooleanNot [ [[Arg]] ]
- // CHECK-DAG: If [ [[NotArg]] ]
+ // CHECK-DAG: <<Arg:z\d+>> ParameterValue
+ // CHECK-DAG: <<NotArg:z\d+>> BooleanNot [<<Arg>>]
+ // CHECK-DAG: If [<<NotArg>>]
public static int EqualFalseLhs(boolean arg) {
return (false != arg) ? 3 : 5;
}
// CHECK-START: int Main.NotEqualTrueRhs(boolean) instruction_simplifier (before)
- // CHECK-DAG: [[Arg:z\d+]] ParameterValue
- // CHECK-DAG: [[Const1:i\d+]] IntConstant 1
- // CHECK-DAG: [[Cond:z\d+]] NotEqual [ [[Arg]] [[Const1]] ]
- // CHECK-DAG: If [ [[Cond]] ]
+ // CHECK-DAG: <<Arg:z\d+>> ParameterValue
+ // CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ // CHECK-DAG: <<Cond:z\d+>> NotEqual [<<Arg>>,<<Const1>>]
+ // CHECK-DAG: If [<<Cond>>]
// CHECK-START: int Main.NotEqualTrueRhs(boolean) instruction_simplifier (after)
- // CHECK-DAG: [[Arg:z\d+]] ParameterValue
- // CHECK-DAG: [[NotArg:z\d+]] BooleanNot [ [[Arg]] ]
- // CHECK-DAG: If [ [[NotArg]] ]
+ // CHECK-DAG: <<Arg:z\d+>> ParameterValue
+ // CHECK-DAG: <<NotArg:z\d+>> BooleanNot [<<Arg>>]
+ // CHECK-DAG: If [<<NotArg>>]
public static int NotEqualTrueRhs(boolean arg) {
return (arg == true) ? 3 : 5;
}
// CHECK-START: int Main.NotEqualTrueLhs(boolean) instruction_simplifier (before)
- // CHECK-DAG: [[Arg:z\d+]] ParameterValue
- // CHECK-DAG: [[Const1:i\d+]] IntConstant 1
- // CHECK-DAG: [[Cond:z\d+]] NotEqual [ [[Const1]] [[Arg]] ]
- // CHECK-DAG: If [ [[Cond]] ]
+ // CHECK-DAG: <<Arg:z\d+>> ParameterValue
+ // CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ // CHECK-DAG: <<Cond:z\d+>> NotEqual [<<Const1>>,<<Arg>>]
+ // CHECK-DAG: If [<<Cond>>]
// CHECK-START: int Main.NotEqualTrueLhs(boolean) instruction_simplifier (after)
- // CHECK-DAG: [[Arg:z\d+]] ParameterValue
- // CHECK-DAG: [[NotArg:z\d+]] BooleanNot [ [[Arg]] ]
- // CHECK-DAG: If [ [[NotArg]] ]
+ // CHECK-DAG: <<Arg:z\d+>> ParameterValue
+ // CHECK-DAG: <<NotArg:z\d+>> BooleanNot [<<Arg>>]
+ // CHECK-DAG: If [<<NotArg>>]
public static int NotEqualTrueLhs(boolean arg) {
return (true == arg) ? 3 : 5;
}
// CHECK-START: int Main.NotEqualFalseRhs(boolean) instruction_simplifier (before)
- // CHECK-DAG: [[Arg:z\d+]] ParameterValue
- // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
- // CHECK-DAG: [[Cond:z\d+]] NotEqual [ [[Arg]] [[Const0]] ]
- // CHECK-DAG: If [ [[Cond]] ]
+ // CHECK-DAG: <<Arg:z\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+ // CHECK-DAG: <<Cond:z\d+>> NotEqual [<<Arg>>,<<Const0>>]
+ // CHECK-DAG: If [<<Cond>>]
// CHECK-START: int Main.NotEqualFalseRhs(boolean) instruction_simplifier (after)
- // CHECK-DAG: [[Arg:z\d+]] ParameterValue
- // CHECK-DAG: If [ [[Arg]] ]
+ // CHECK-DAG: <<Arg:z\d+>> ParameterValue
+ // CHECK-DAG: If [<<Arg>>]
public static int NotEqualFalseRhs(boolean arg) {
return (arg == false) ? 3 : 5;
}
// CHECK-START: int Main.NotEqualFalseLhs(boolean) instruction_simplifier (before)
- // CHECK-DAG: [[Arg:z\d+]] ParameterValue
- // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
- // CHECK-DAG: [[Cond:z\d+]] NotEqual [ [[Const0]] [[Arg]] ]
- // CHECK-DAG: If [ [[Cond]] ]
+ // CHECK-DAG: <<Arg:z\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+ // CHECK-DAG: <<Cond:z\d+>> NotEqual [<<Const0>>,<<Arg>>]
+ // CHECK-DAG: If [<<Cond>>]
// CHECK-START: int Main.NotEqualFalseLhs(boolean) instruction_simplifier (after)
- // CHECK-DAG: [[Arg:z\d+]] ParameterValue
- // CHECK-DAG: If [ [[Arg]] ]
+ // CHECK-DAG: <<Arg:z\d+>> ParameterValue
+ // CHECK-DAG: If [<<Arg>>]
public static int NotEqualFalseLhs(boolean arg) {
return (false == arg) ? 3 : 5;
@@ -889,22 +934,100 @@
*/
// CHECK-START: boolean Main.NotNotBool(boolean) instruction_simplifier_after_types (before)
- // CHECK-DAG: [[Arg:z\d+]] ParameterValue
- // CHECK-DAG: [[NotArg:z\d+]] BooleanNot [ [[Arg]] ]
- // CHECK-DAG: [[NotNotArg:z\d+]] BooleanNot [ [[NotArg]] ]
- // CHECK-DAG: Return [ [[NotNotArg]] ]
+ // CHECK-DAG: <<Arg:z\d+>> ParameterValue
+ // CHECK-DAG: <<NotArg:z\d+>> BooleanNot [<<Arg>>]
+ // CHECK-DAG: <<NotNotArg:z\d+>> BooleanNot [<<NotArg>>]
+ // CHECK-DAG: Return [<<NotNotArg>>]
// CHECK-START: boolean Main.NotNotBool(boolean) instruction_simplifier_after_types (after)
- // CHECK-DAG: [[Arg:z\d+]] ParameterValue
- // CHECK-DAG: BooleanNot [ [[Arg]] ]
- // CHECK-DAG: Return [ [[Arg]] ]
+ // CHECK-DAG: <<Arg:z\d+>> ParameterValue
+ // CHECK-DAG: BooleanNot [<<Arg>>]
+ // CHECK-DAG: Return [<<Arg>>]
// CHECK-START: boolean Main.NotNotBool(boolean) instruction_simplifier_after_types (after)
// CHECK: BooleanNot
// CHECK-NOT: BooleanNot
+ public static boolean NegateValue(boolean arg) {
+ return !arg;
+ }
+
public static boolean NotNotBool(boolean arg) {
- return !(!arg);
+ return !(NegateValue(arg));
+ }
+
+ // CHECK-START: float Main.Div2(float) instruction_simplifier (before)
+ // CHECK-DAG: <<Arg:f\d+>> ParameterValue
+ // CHECK-DAG: <<Const2:f\d+>> FloatConstant 2
+ // CHECK-DAG: <<Div:f\d+>> Div [<<Arg>>,<<Const2>>]
+ // CHECK-DAG: Return [<<Div>>]
+
+ // CHECK-START: float Main.Div2(float) instruction_simplifier (after)
+ // CHECK-DAG: <<Arg:f\d+>> ParameterValue
+ // CHECK-DAG: <<ConstP5:f\d+>> FloatConstant 0.5
+ // CHECK-DAG: <<Mul:f\d+>> Mul [<<Arg>>,<<ConstP5>>]
+ // CHECK-DAG: Return [<<Mul>>]
+
+ // CHECK-START: float Main.Div2(float) instruction_simplifier (after)
+ // CHECK-NOT: Div
+
+ public static float Div2(float arg) {
+ return arg / 2.0f;
+ }
+
+ // CHECK-START: double Main.Div2(double) instruction_simplifier (before)
+ // CHECK-DAG: <<Arg:d\d+>> ParameterValue
+ // CHECK-DAG: <<Const2:d\d+>> DoubleConstant 2
+ // CHECK-DAG: <<Div:d\d+>> Div [<<Arg>>,<<Const2>>]
+ // CHECK-DAG: Return [<<Div>>]
+
+ // CHECK-START: double Main.Div2(double) instruction_simplifier (after)
+ // CHECK-DAG: <<Arg:d\d+>> ParameterValue
+ // CHECK-DAG: <<ConstP5:d\d+>> DoubleConstant 0.5
+ // CHECK-DAG: <<Mul:d\d+>> Mul [<<Arg>>,<<ConstP5>>]
+ // CHECK-DAG: Return [<<Mul>>]
+
+ // CHECK-START: double Main.Div2(double) instruction_simplifier (after)
+ // CHECK-NOT: Div
+ public static double Div2(double arg) {
+ return arg / 2.0;
+ }
+
+ // CHECK-START: float Main.DivMP25(float) instruction_simplifier (before)
+ // CHECK-DAG: <<Arg:f\d+>> ParameterValue
+ // CHECK-DAG: <<ConstMP25:f\d+>> FloatConstant -0.25
+ // CHECK-DAG: <<Div:f\d+>> Div [<<Arg>>,<<ConstMP25>>]
+ // CHECK-DAG: Return [<<Div>>]
+
+ // CHECK-START: float Main.DivMP25(float) instruction_simplifier (after)
+ // CHECK-DAG: <<Arg:f\d+>> ParameterValue
+ // CHECK-DAG: <<ConstM4:f\d+>> FloatConstant -4
+ // CHECK-DAG: <<Mul:f\d+>> Mul [<<Arg>>,<<ConstM4>>]
+ // CHECK-DAG: Return [<<Mul>>]
+
+ // CHECK-START: float Main.DivMP25(float) instruction_simplifier (after)
+ // CHECK-NOT: Div
+
+ public static float DivMP25(float arg) {
+ return arg / -0.25f;
+ }
+
+ // CHECK-START: double Main.DivMP25(double) instruction_simplifier (before)
+ // CHECK-DAG: <<Arg:d\d+>> ParameterValue
+ // CHECK-DAG: <<ConstMP25:d\d+>> DoubleConstant -0.25
+ // CHECK-DAG: <<Div:d\d+>> Div [<<Arg>>,<<ConstMP25>>]
+ // CHECK-DAG: Return [<<Div>>]
+
+ // CHECK-START: double Main.DivMP25(double) instruction_simplifier (after)
+ // CHECK-DAG: <<Arg:d\d+>> ParameterValue
+ // CHECK-DAG: <<ConstM4:d\d+>> DoubleConstant -4
+ // CHECK-DAG: <<Mul:d\d+>> Mul [<<Arg>>,<<ConstM4>>]
+ // CHECK-DAG: Return [<<Mul>>]
+
+ // CHECK-START: double Main.DivMP25(double) instruction_simplifier (after)
+ // CHECK-NOT: Div
+ public static double DivMP25(double arg) {
+ return arg / -0.25;
}
public static void main(String[] args) {
@@ -941,7 +1064,6 @@
assertIntEquals(SubNeg1(arg, arg + 1), -(arg + arg + 1));
assertIntEquals(SubNeg2(arg, arg + 1), -(arg + arg + 1));
assertLongEquals(SubNeg3(arg, arg + 1), -(2 * arg + 1));
-
assertIntEquals(EqualTrueRhs(true), 5);
assertIntEquals(EqualTrueLhs(true), 5);
assertIntEquals(EqualFalseRhs(true), 3);
@@ -952,5 +1074,10 @@
assertIntEquals(NotEqualFalseLhs(true), 5);
assertBooleanEquals(NotNotBool(true), true);
assertBooleanEquals(NotNotBool(false), false);
+ assertFloatEquals(Div2(100.0f), 50.0f);
+ assertDoubleEquals(Div2(150.0), 75.0);
+ assertFloatEquals(DivMP25(100.0f), -400.0f);
+ assertDoubleEquals(DivMP25(150.0), -600.0);
+ assertLongEquals(Shl1(100), 200);
}
}
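[Editor's note] The Div2/DivMP25 tests above exercise a strength reduction that is only legal for power-of-two divisors: under IEEE-754, dividing by 2.0 or -0.25 and multiplying by the reciprocal (0.5 or -4.0) round to identical results for every input. A minimal standalone sketch (hypothetical class name, not part of the patch):

    public class DivToMulSketch {
      public static void main(String[] args) {
        float f = 100.0f;
        double d = 150.0;
        // Both divisors are powers of two, so their reciprocals are exactly
        // representable and the rewrite cannot change the result.
        System.out.println(f / 2.0f == f * 0.5f);   // true, mirrors Div2
        System.out.println(d / -0.25 == d * -4.0);  // true, mirrors DivMP25
      }
    }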
diff --git a/test/461-get-reference-vreg/get_reference_vreg_jni.cc b/test/461-get-reference-vreg/get_reference_vreg_jni.cc
index f0b78e1..a8ef684 100644
--- a/test/461-get-reference-vreg/get_reference_vreg_jni.cc
+++ b/test/461-get-reference-vreg/get_reference_vreg_jni.cc
@@ -29,7 +29,9 @@
public:
TestVisitor(Thread* thread, Context* context, mirror::Object* this_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, context), this_value_(this_value), found_method_index_(0) {}
+ : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ this_value_(this_value),
+ found_method_index_(0) {}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* m = GetMethod();
diff --git a/test/462-checker-inlining-across-dex-files/src/Main.java b/test/462-checker-inlining-across-dex-files/src/Main.java
index d5563b8..218c7ce 100644
--- a/test/462-checker-inlining-across-dex-files/src/Main.java
+++ b/test/462-checker-inlining-across-dex-files/src/Main.java
@@ -22,7 +22,7 @@
public class Main {
// CHECK-START: void Main.inlineEmptyMethod() inliner (before)
- // CHECK-DAG: [[Invoke:v\d+]] InvokeStaticOrDirect
+ // CHECK-DAG: <<Invoke:v\d+>> InvokeStaticOrDirect
// CHECK-DAG: ReturnVoid
// CHECK-START: void Main.inlineEmptyMethod() inliner (after)
@@ -33,120 +33,124 @@
}
// CHECK-START: int Main.inlineReturnIntMethod() inliner (before)
- // CHECK-DAG: [[Invoke:i\d+]] InvokeStaticOrDirect
- // CHECK-DAG: Return [ [[Invoke]] ]
+ // CHECK-DAG: <<Invoke:i\d+>> InvokeStaticOrDirect
+ // CHECK-DAG: Return [<<Invoke>>]
// CHECK-START: int Main.inlineReturnIntMethod() inliner (after)
// CHECK-NOT: InvokeStaticOrDirect
// CHECK-START: int Main.inlineReturnIntMethod() inliner (after)
- // CHECK-DAG: [[Const38:i\d+]] IntConstant 38
- // CHECK-DAG: Return [ [[Const38]] ]
+ // CHECK-DAG: <<Const38:i\d+>> IntConstant 38
+ // CHECK-DAG: Return [<<Const38>>]
public static int inlineReturnIntMethod() {
return OtherDex.returnIntMethod();
}
// CHECK-START: int Main.dontInlineOtherDexStatic() inliner (before)
- // CHECK-DAG: [[Invoke:i\d+]] InvokeStaticOrDirect
- // CHECK-DAG: Return [ [[Invoke]] ]
+ // CHECK-DAG: <<Invoke:i\d+>> InvokeStaticOrDirect
+ // CHECK-DAG: Return [<<Invoke>>]
// CHECK-START: int Main.dontInlineOtherDexStatic() inliner (after)
- // CHECK-DAG: [[Invoke:i\d+]] InvokeStaticOrDirect
- // CHECK-DAG: Return [ [[Invoke]] ]
+ // CHECK-DAG: <<Invoke:i\d+>> InvokeStaticOrDirect
+ // CHECK-DAG: Return [<<Invoke>>]
public static int dontInlineOtherDexStatic() {
return OtherDex.returnOtherDexStatic();
}
// CHECK-START: int Main.inlineMainStatic() inliner (before)
- // CHECK-DAG: [[Invoke:i\d+]] InvokeStaticOrDirect
- // CHECK-DAG: Return [ [[Invoke]] ]
+ // CHECK-DAG: <<Invoke:i\d+>> InvokeStaticOrDirect
+ // CHECK-DAG: Return [<<Invoke>>]
// CHECK-START: int Main.inlineMainStatic() inliner (after)
// CHECK-NOT: InvokeStaticOrDirect
// CHECK-START: int Main.inlineMainStatic() inliner (after)
- // CHECK-DAG: [[Static:i\d+]] StaticFieldGet
- // CHECK-DAG: Return [ [[Static]] ]
+ // CHECK-DAG: <<Static:i\d+>> StaticFieldGet
+ // CHECK-DAG: Return [<<Static>>]
public static int inlineMainStatic() {
return OtherDex.returnMainStatic();
}
// CHECK-START: int Main.dontInlineRecursiveCall() inliner (before)
- // CHECK-DAG: [[Invoke:i\d+]] InvokeStaticOrDirect
- // CHECK-DAG: Return [ [[Invoke]] ]
+ // CHECK-DAG: <<Invoke:i\d+>> InvokeStaticOrDirect
+ // CHECK-DAG: Return [<<Invoke>>]
// CHECK-START: int Main.dontInlineRecursiveCall() inliner (after)
- // CHECK-DAG: [[Invoke:i\d+]] InvokeStaticOrDirect
- // CHECK-DAG: Return [ [[Invoke]] ]
+ // CHECK-DAG: <<Invoke:i\d+>> InvokeStaticOrDirect
+ // CHECK-DAG: Return [<<Invoke>>]
public static int dontInlineRecursiveCall() {
return OtherDex.recursiveCall();
}
// CHECK-START: java.lang.String Main.dontInlineReturnString() inliner (before)
- // CHECK-DAG: [[Invoke:l\d+]] InvokeStaticOrDirect
- // CHECK-DAG: Return [ [[Invoke]] ]
+ // CHECK-DAG: <<Invoke:l\d+>> InvokeStaticOrDirect
+ // CHECK-DAG: Return [<<Invoke>>]
// CHECK-START: java.lang.String Main.dontInlineReturnString() inliner (after)
- // CHECK-DAG: [[Invoke:l\d+]] InvokeStaticOrDirect
- // CHECK-DAG: Return [ [[Invoke]] ]
+ // CHECK-DAG: <<Invoke:l\d+>> InvokeStaticOrDirect
+ // CHECK-DAG: Return [<<Invoke>>]
public static String dontInlineReturnString() {
return OtherDex.returnString();
}
// CHECK-START: java.lang.Class Main.dontInlineOtherDexClass() inliner (before)
- // CHECK-DAG: [[Invoke:l\d+]] InvokeStaticOrDirect
- // CHECK-DAG: Return [ [[Invoke]] ]
+ // CHECK-DAG: <<Invoke:l\d+>> InvokeStaticOrDirect
+ // CHECK-DAG: Return [<<Invoke>>]
// CHECK-START: java.lang.Class Main.dontInlineOtherDexClass() inliner (after)
- // CHECK-DAG: [[Invoke:l\d+]] InvokeStaticOrDirect
- // CHECK-DAG: Return [ [[Invoke]] ]
+ // CHECK-DAG: <<Invoke:l\d+>> InvokeStaticOrDirect
+ // CHECK-DAG: Return [<<Invoke>>]
public static Class dontInlineOtherDexClass() {
return OtherDex.returnOtherDexClass();
}
// CHECK-START: java.lang.Class Main.inlineMainClass() inliner (before)
- // CHECK-DAG: [[Invoke:l\d+]] InvokeStaticOrDirect
- // CHECK-DAG: Return [ [[Invoke]] ]
+ // CHECK-DAG: <<Invoke:l\d+>> InvokeStaticOrDirect
+ // CHECK-DAG: Return [<<Invoke>>]
// CHECK-START: java.lang.Class Main.inlineMainClass() inliner (after)
// CHECK-NOT: InvokeStaticOrDirect
// CHECK-START: java.lang.Class Main.inlineMainClass() inliner (after)
- // CHECK-DAG: [[Class:l\d+]] LoadClass
- // CHECK-DAG: Return [ [[Class]] ]
+ // CHECK-DAG: Return [<<Class:l\d+>>]
+ // CHECK-DAG: <<Class>> LoadClass
+ // Note: There are two LoadClass instructions. We obtain the correct
+ // instruction id by matching the Return's input list first.
public static Class inlineMainClass() {
return OtherDex.returnMainClass();
}
// CHECK-START: java.lang.Class Main.dontInlineOtherDexClassStaticCall() inliner (before)
- // CHECK-DAG: [[Invoke:l\d+]] InvokeStaticOrDirect
- // CHECK-DAG: Return [ [[Invoke]] ]
+ // CHECK-DAG: <<Invoke:l\d+>> InvokeStaticOrDirect
+ // CHECK-DAG: Return [<<Invoke>>]
// CHECK-START: java.lang.Class Main.dontInlineOtherDexClassStaticCall() inliner (after)
- // CHECK-DAG: [[Invoke:l\d+]] InvokeStaticOrDirect
- // CHECK-DAG: Return [ [[Invoke]] ]
+ // CHECK-DAG: <<Invoke:l\d+>> InvokeStaticOrDirect
+ // CHECK-DAG: Return [<<Invoke>>]
public static Class dontInlineOtherDexClassStaticCall() {
return OtherDex.returnOtherDexClassStaticCall();
}
// CHECK-START: java.lang.Class Main.inlineOtherDexCallingMain() inliner (before)
- // CHECK-DAG: [[Invoke:l\d+]] InvokeStaticOrDirect
- // CHECK-DAG: Return [ [[Invoke]] ]
+ // CHECK-DAG: <<Invoke:l\d+>> InvokeStaticOrDirect
+ // CHECK-DAG: Return [<<Invoke>>]
// CHECK-START: java.lang.Class Main.inlineOtherDexCallingMain() inliner (after)
// CHECK-NOT: InvokeStaticOrDirect
// CHECK-START: java.lang.Class Main.inlineOtherDexCallingMain() inliner (after)
- // CHECK-DAG: [[Class:l\d+]] LoadClass
- // CHECK-DAG: Return [ [[Class]] ]
+ // CHECK-DAG: Return [<<Class:l\d+>>]
+ // CHECK-DAG: <<Class>> LoadClass
+ // Note: There are two LoadClass instructions. We obtain the correct
+ // instruction id by matching the Return's input list first.
public static Class inlineOtherDexCallingMain() {
return OtherDex.returnOtherDexCallingMain();
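[Editor's note] Throughout this patch the Checker assertions migrate from the old [[Name]] syntax with space-separated input lists to the new <<Name>> syntax with comma-separated lists. The structure is unchanged: a definition binds a name to a matched instruction id, and later uses refer back to it. An illustrative comparison (Add stands in for any two-input instruction):

    // Old syntax:
    //   CHECK-DAG: [[Sum:i\d+]] Add [ [[A]] [[B]] ]
    // New syntax:
    //   CHECK-DAG: <<Sum:i\d+>> Add [<<A>>,<<B>>]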
diff --git a/test/463-checker-boolean-simplifier/src/Main.java b/test/463-checker-boolean-simplifier/src/Main.java
index 3daf693..e237448 100644
--- a/test/463-checker-boolean-simplifier/src/Main.java
+++ b/test/463-checker-boolean-simplifier/src/Main.java
@@ -26,18 +26,24 @@
}
}
+ public static void assertIntEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
/*
* Elementary test negating a boolean. Verifies that blocks are merged and
* empty branches removed.
*/
// CHECK-START: boolean Main.BooleanNot(boolean) boolean_simplifier (before)
- // CHECK-DAG: [[Param:z\d+]] ParameterValue
- // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
- // CHECK-DAG: [[Const1:i\d+]] IntConstant 1
- // CHECK-DAG: If [ [[Param]] ]
- // CHECK-DAG: [[Phi:i\d+]] Phi [ [[Const1]] [[Const0]] ]
- // CHECK-DAG: Return [ [[Phi]] ]
+ // CHECK-DAG: <<Param:z\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+ // CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ // CHECK-DAG: If [<<Param>>]
+ // CHECK-DAG: <<Phi:i\d+>> Phi [<<Const1>>,<<Const0>>]
+ // CHECK-DAG: Return [<<Phi>>]
// CHECK-START: boolean Main.BooleanNot(boolean) boolean_simplifier (before)
// CHECK: Goto
@@ -46,10 +52,10 @@
// CHECK-NOT: Goto
// CHECK-START: boolean Main.BooleanNot(boolean) boolean_simplifier (after)
- // CHECK-DAG: [[Param:z\d+]] ParameterValue
- // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
- // CHECK-DAG: [[NotParam:z\d+]] BooleanNot [ [[Param]] ]
- // CHECK-DAG: Return [ [[NotParam]] ]
+ // CHECK-DAG: <<Param:z\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+ // CHECK-DAG: <<NotParam:z\d+>> BooleanNot [<<Param>>]
+ // CHECK-DAG: Return [<<NotParam>>]
// CHECK-START: boolean Main.BooleanNot(boolean) boolean_simplifier (after)
// CHECK-NOT: If
@@ -69,22 +75,22 @@
*/
// CHECK-START: boolean Main.GreaterThan(int, int) boolean_simplifier (before)
- // CHECK-DAG: [[ParamX:i\d+]] ParameterValue
- // CHECK-DAG: [[ParamY:i\d+]] ParameterValue
- // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
- // CHECK-DAG: [[Const1:i\d+]] IntConstant 1
- // CHECK-DAG: [[Cond:z\d+]] GreaterThan [ [[ParamX]] [[ParamY]] ]
- // CHECK-DAG: If [ [[Cond]] ]
- // CHECK-DAG: [[Phi:i\d+]] Phi [ [[Const0]] [[Const1]] ]
- // CHECK-DAG: Return [ [[Phi]] ]
+ // CHECK-DAG: <<ParamX:i\d+>> ParameterValue
+ // CHECK-DAG: <<ParamY:i\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+ // CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ // CHECK-DAG: <<Cond:z\d+>> GreaterThan [<<ParamX>>,<<ParamY>>]
+ // CHECK-DAG: If [<<Cond>>]
+ // CHECK-DAG: <<Phi:i\d+>> Phi [<<Const0>>,<<Const1>>]
+ // CHECK-DAG: Return [<<Phi>>]
// CHECK-START: boolean Main.GreaterThan(int, int) boolean_simplifier (after)
- // CHECK-DAG: [[ParamX:i\d+]] ParameterValue
- // CHECK-DAG: [[ParamY:i\d+]] ParameterValue
- // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
- // CHECK-DAG: [[Const1:i\d+]] IntConstant 1
- // CHECK-DAG: [[Cond:z\d+]] GreaterThan [ [[ParamX]] [[ParamY]] ]
- // CHECK-DAG: Return [ [[Cond]] ]
+ // CHECK-DAG: <<ParamX:i\d+>> ParameterValue
+ // CHECK-DAG: <<ParamY:i\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+ // CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ // CHECK-DAG: <<Cond:z\d+>> GreaterThan [<<ParamX>>,<<ParamY>>]
+ // CHECK-DAG: Return [<<Cond>>]
public static boolean GreaterThan(int x, int y) {
return (x <= y) ? false : true;
@@ -96,22 +102,22 @@
*/
// CHECK-START: boolean Main.LessThan(int, int) boolean_simplifier (before)
- // CHECK-DAG: [[ParamX:i\d+]] ParameterValue
- // CHECK-DAG: [[ParamY:i\d+]] ParameterValue
- // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
- // CHECK-DAG: [[Const1:i\d+]] IntConstant 1
- // CHECK-DAG: [[Cond:z\d+]] GreaterThanOrEqual [ [[ParamX]] [[ParamY]] ]
- // CHECK-DAG: If [ [[Cond]] ]
- // CHECK-DAG: [[Phi:i\d+]] Phi [ [[Const1]] [[Const0]] ]
- // CHECK-DAG: Return [ [[Phi]] ]
+ // CHECK-DAG: <<ParamX:i\d+>> ParameterValue
+ // CHECK-DAG: <<ParamY:i\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+ // CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ // CHECK-DAG: <<Cond:z\d+>> GreaterThanOrEqual [<<ParamX>>,<<ParamY>>]
+ // CHECK-DAG: If [<<Cond>>]
+ // CHECK-DAG: <<Phi:i\d+>> Phi [<<Const1>>,<<Const0>>]
+ // CHECK-DAG: Return [<<Phi>>]
// CHECK-START: boolean Main.LessThan(int, int) boolean_simplifier (after)
- // CHECK-DAG: [[ParamX:i\d+]] ParameterValue
- // CHECK-DAG: [[ParamY:i\d+]] ParameterValue
- // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
- // CHECK-DAG: [[Const1:i\d+]] IntConstant 1
- // CHECK-DAG: [[Cond:z\d+]] LessThan [ [[ParamX]] [[ParamY]] ]
- // CHECK-DAG: Return [ [[Cond]] ]
+ // CHECK-DAG: <<ParamX:i\d+>> ParameterValue
+ // CHECK-DAG: <<ParamY:i\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+ // CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ // CHECK-DAG: <<Cond:z\d+>> LessThan [<<ParamX>>,<<ParamY>>]
+ // CHECK-DAG: Return [<<Cond>>]
// CHECK-START: boolean Main.LessThan(int, int) boolean_simplifier (after)
// CHECK-NOT: GreaterThanOrEqual
@@ -126,35 +132,65 @@
*/
// CHECK-START: boolean Main.ValuesOrdered(int, int, int) boolean_simplifier (before)
- // CHECK-DAG: [[ParamX:i\d+]] ParameterValue
- // CHECK-DAG: [[ParamY:i\d+]] ParameterValue
- // CHECK-DAG: [[ParamZ:i\d+]] ParameterValue
- // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
- // CHECK-DAG: [[Const1:i\d+]] IntConstant 1
- // CHECK-DAG: [[CondXY:z\d+]] GreaterThan [ [[ParamX]] [[ParamY]] ]
- // CHECK-DAG: If [ [[CondXY]] ]
- // CHECK-DAG: [[CondYZ:z\d+]] GreaterThan [ [[ParamY]] [[ParamZ]] ]
- // CHECK-DAG: If [ [[CondYZ]] ]
- // CHECK-DAG: [[CondXYZ:z\d+]] NotEqual [ [[PhiXY:i\d+]] [[PhiYZ:i\d+]] ]
- // CHECK-DAG: If [ [[CondXYZ]] ]
- // CHECK-DAG: Return [ [[PhiXYZ:i\d+]] ]
- // CHECK-DAG: [[PhiXY]] Phi [ [[Const1]] [[Const0]] ]
- // CHECK-DAG: [[PhiYZ]] Phi [ [[Const1]] [[Const0]] ]
- // CHECK-DAG: [[PhiXYZ]] Phi [ [[Const1]] [[Const0]] ]
+ // CHECK-DAG: <<ParamX:i\d+>> ParameterValue
+ // CHECK-DAG: <<ParamY:i\d+>> ParameterValue
+ // CHECK-DAG: <<ParamZ:i\d+>> ParameterValue
+ // CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+ // CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ // CHECK-DAG: <<CondXY:z\d+>> GreaterThan [<<ParamX>>,<<ParamY>>]
+ // CHECK-DAG: If [<<CondXY>>]
+ // CHECK-DAG: <<CondYZ:z\d+>> GreaterThan [<<ParamY>>,<<ParamZ>>]
+ // CHECK-DAG: If [<<CondYZ>>]
+ // CHECK-DAG: <<CondXYZ:z\d+>> NotEqual [<<PhiXY:i\d+>>,<<PhiYZ:i\d+>>]
+ // CHECK-DAG: If [<<CondXYZ>>]
+ // CHECK-DAG: Return [<<PhiXYZ:i\d+>>]
+ // CHECK-DAG: <<PhiXY>> Phi [<<Const1>>,<<Const0>>]
+ // CHECK-DAG: <<PhiYZ>> Phi [<<Const1>>,<<Const0>>]
+ // CHECK-DAG: <<PhiXYZ>> Phi [<<Const1>>,<<Const0>>]
// CHECK-START: boolean Main.ValuesOrdered(int, int, int) boolean_simplifier (after)
- // CHECK-DAG: [[ParamX:i\d+]] ParameterValue
- // CHECK-DAG: [[ParamY:i\d+]] ParameterValue
- // CHECK-DAG: [[ParamZ:i\d+]] ParameterValue
- // CHECK-DAG: [[CmpXY:z\d+]] LessThanOrEqual [ [[ParamX]] [[ParamY]] ]
- // CHECK-DAG: [[CmpYZ:z\d+]] LessThanOrEqual [ [[ParamY]] [[ParamZ]] ]
- // CHECK-DAG: [[CmpXYZ:z\d+]] Equal [ [[CmpXY]] [[CmpYZ]] ]
- // CHECK-DAG: Return [ [[CmpXYZ]] ]
+ // CHECK-DAG: <<ParamX:i\d+>> ParameterValue
+ // CHECK-DAG: <<ParamY:i\d+>> ParameterValue
+ // CHECK-DAG: <<ParamZ:i\d+>> ParameterValue
+ // CHECK-DAG: <<CmpXY:z\d+>> LessThanOrEqual [<<ParamX>>,<<ParamY>>]
+ // CHECK-DAG: <<CmpYZ:z\d+>> LessThanOrEqual [<<ParamY>>,<<ParamZ>>]
+ // CHECK-DAG: <<CmpXYZ:z\d+>> Equal [<<CmpXY>>,<<CmpYZ>>]
+ // CHECK-DAG: Return [<<CmpXYZ>>]
public static boolean ValuesOrdered(int x, int y, int z) {
return (x <= y) == (y <= z);
}
+ // CHECK-START: int Main.NegatedCondition(boolean) boolean_simplifier (before)
+ // CHECK-DAG: <<Param:z\d+>> ParameterValue
+ // CHECK-DAG: <<Const42:i\d+>> IntConstant 42
+ // CHECK-DAG: <<Const43:i\d+>> IntConstant 43
+ // CHECK-DAG: <<NotParam:z\d+>> BooleanNot [<<Param>>]
+ // CHECK-DAG: If [<<NotParam>>]
+ // CHECK-DAG: <<Phi:i\d+>> Phi [<<Const42>>,<<Const43>>]
+ // CHECK-DAG: Return [<<Phi>>]
+
+ // CHECK-START: int Main.NegatedCondition(boolean) boolean_simplifier (after)
+ // CHECK-DAG: <<Param:z\d+>> ParameterValue
+ // CHECK-DAG: <<Const42:i\d+>> IntConstant 42
+ // CHECK-DAG: <<Const43:i\d+>> IntConstant 43
+ // CHECK-DAG: If [<<Param>>]
+ // CHECK-DAG: <<Phi:i\d+>> Phi [<<Const42>>,<<Const43>>]
+ // CHECK-DAG: Return [<<Phi>>]
+
+ // Note: The fact that branches are swapped is verified by running the test.
+
+ // CHECK-START: int Main.NegatedCondition(boolean) boolean_simplifier (after)
+ // CHECK-NOT: BooleanNot
+
+ public static int NegatedCondition(boolean x) {
+ if (x != false) {
+ return 42;
+ } else {
+ return 43;
+ }
+ }
+
public static void main(String[] args) {
assertBoolEquals(false, BooleanNot(true));
assertBoolEquals(true, BooleanNot(false));
@@ -171,5 +207,7 @@
assertBoolEquals(true, ValuesOrdered(3, 3, 3));
assertBoolEquals(true, ValuesOrdered(3, 3, 5));
assertBoolEquals(false, ValuesOrdered(5, 5, 3));
+ assertIntEquals(42, NegatedCondition(true));
+ assertIntEquals(43, NegatedCondition(false));
}
}
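[Editor's note] A source-level picture of the transformation boolean_simplifier is being tested on: the branchy form materializes the constants 0 and 1 through an If and a Phi, while the simplified form returns the comparison directly. A minimal sketch with hypothetical names:

    public class BooleanSimplifierSketch {
      // Builds If + Phi(0, 1) in the HGraph before simplification.
      static boolean greaterThanBranchy(int x, int y) {
        return (x <= y) ? false : true;
      }
      // Equivalent to the graph after simplification: one GreaterThan, no branch.
      static boolean greaterThanSimplified(int x, int y) {
        return x > y;
      }
      public static void main(String[] args) {
        // Prints true for any pair of arguments.
        System.out.println(greaterThanBranchy(3, 2) == greaterThanSimplified(3, 2));
      }
    }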
diff --git a/test/464-checker-inline-sharpen-calls/src/Main.java b/test/464-checker-inline-sharpen-calls/src/Main.java
index 1b25b42..e451f70 100644
--- a/test/464-checker-inline-sharpen-calls/src/Main.java
+++ b/test/464-checker-inline-sharpen-calls/src/Main.java
@@ -20,7 +20,7 @@
}
// CHECK-START: void Main.inlineSharpenInvokeVirtual(Main) inliner (before)
- // CHECK-DAG: [[Invoke:v\d+]] InvokeStaticOrDirect
+ // CHECK-DAG: <<Invoke:v\d+>> InvokeStaticOrDirect
// CHECK-DAG: ReturnVoid
// CHECK-START: void Main.inlineSharpenInvokeVirtual(Main) inliner (after)
@@ -31,15 +31,15 @@
}
// CHECK-START: int Main.inlineSharpenStringInvoke() inliner (before)
- // CHECK-DAG: [[Invoke:i\d+]] InvokeStaticOrDirect
- // CHECK-DAG: Return [ [[Invoke]] ]
+ // CHECK-DAG: <<Invoke:i\d+>> InvokeStaticOrDirect
+ // CHECK-DAG: Return [<<Invoke>>]
// CHECK-START: int Main.inlineSharpenStringInvoke() inliner (after)
// CHECK-NOT: InvokeStaticOrDirect
// CHECK-START: int Main.inlineSharpenStringInvoke() inliner (after)
- // CHECK-DAG: [[Field:i\d+]] InstanceFieldGet
- // CHECK-DAG: Return [ [[Field]] ]
+ // CHECK-DAG: <<Field:i\d+>> InstanceFieldGet
+ // CHECK-DAG: Return [<<Field>>]
public static int inlineSharpenStringInvoke() {
return "Foo".length();
diff --git a/test/465-checker-clinit-gvn/src/Main.java b/test/465-checker-clinit-gvn/src/Main.java
index dcaef6f..ac2863c 100644
--- a/test/465-checker-clinit-gvn/src/Main.java
+++ b/test/465-checker-clinit-gvn/src/Main.java
@@ -27,14 +27,14 @@
public final class Main {
// CHECK-START: int Main.accessTwoStatics() GVN (before)
- // CHECK-DAG: [[Class1:l\d+]] LoadClass
- // CHECK-DAG: ClinitCheck [ [[Class1]] ]
- // CHECK-DAG: [[Class2:l\d+]] LoadClass
- // CHECK-DAG: ClinitCheck [ [[Class2]] ]
+ // CHECK-DAG: <<Class1:l\d+>> LoadClass
+ // CHECK-DAG: ClinitCheck [<<Class1>>]
+ // CHECK-DAG: <<Class2:l\d+>> LoadClass
+ // CHECK-DAG: ClinitCheck [<<Class2>>]
// CHECK-START: int Main.accessTwoStatics() GVN (after)
- // CHECK-DAG: [[Class:l\d+]] LoadClass
- // CHECK-DAG: ClinitCheck [ [[Class]] ]
+ // CHECK-DAG: <<Class:l\d+>> LoadClass
+ // CHECK-DAG: ClinitCheck [<<Class>>]
// CHECK-NOT: ClinitCheck
public static int accessTwoStatics() {
@@ -42,14 +42,14 @@
}
// CHECK-START: int Main.accessTwoStaticsCallInBetween() GVN (before)
- // CHECK-DAG: [[Class1:l\d+]] LoadClass
- // CHECK-DAG: ClinitCheck [ [[Class1]] ]
- // CHECK-DAG: [[Class2:l\d+]] LoadClass
- // CHECK-DAG: ClinitCheck [ [[Class2]] ]
+ // CHECK-DAG: <<Class1:l\d+>> LoadClass
+ // CHECK-DAG: ClinitCheck [<<Class1>>]
+ // CHECK-DAG: <<Class2:l\d+>> LoadClass
+ // CHECK-DAG: ClinitCheck [<<Class2>>]
// CHECK-START: int Main.accessTwoStaticsCallInBetween() GVN (after)
- // CHECK-DAG: [[Class:l\d+]] LoadClass
- // CHECK-DAG: ClinitCheck [ [[Class]] ]
+ // CHECK-DAG: <<Class:l\d+>> LoadClass
+ // CHECK-DAG: ClinitCheck [<<Class>>]
// CHECK-NOT: ClinitCheck
public static int accessTwoStaticsCallInBetween() {
diff --git a/test/466-get-live-vreg/get_live_vreg_jni.cc b/test/466-get-live-vreg/get_live_vreg_jni.cc
index 6715ba1..4724e8e 100644
--- a/test/466-get-live-vreg/get_live_vreg_jni.cc
+++ b/test/466-get-live-vreg/get_live_vreg_jni.cc
@@ -28,7 +28,7 @@
class TestVisitor : public StackVisitor {
public:
TestVisitor(Thread* thread, Context* context) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, context) {}
+ : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* m = GetMethod();
diff --git a/test/468-checker-bool-simplifier-regression/smali/TestCase.smali b/test/468-checker-bool-simplifier-regression/smali/TestCase.smali
index f36304d..33e6dc3 100644
--- a/test/468-checker-bool-simplifier-regression/smali/TestCase.smali
+++ b/test/468-checker-bool-simplifier-regression/smali/TestCase.smali
@@ -18,6 +18,19 @@
.field public static value:Z
+# CHECK-START: boolean TestCase.testCase() boolean_simplifier (before)
+# CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+# CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+# CHECK-DAG: <<Value:z\d+>> StaticFieldGet
+# CHECK-DAG: If [<<Value>>]
+# CHECK-DAG: <<Phi:i\d+>> Phi [<<Const1>>,<<Const0>>]
+# CHECK-DAG: Return [<<Phi>>]
+
+# CHECK-START: boolean TestCase.testCase() boolean_simplifier (after)
+# CHECK-DAG: <<Value:z\d+>> StaticFieldGet
+# CHECK-DAG: <<Not:z\d+>> BooleanNot [<<Value>>]
+# CHECK-DAG: Return [<<Not>>]
+
.method public static testCase()Z
.registers 2
sget-boolean v0, LTestCase;->value:Z
diff --git a/test/468-checker-bool-simplifier-regression/src/Main.java b/test/468-checker-bool-simplifier-regression/src/Main.java
index d45f3bf..8fe05c7 100644
--- a/test/468-checker-bool-simplifier-regression/src/Main.java
+++ b/test/468-checker-bool-simplifier-regression/src/Main.java
@@ -18,19 +18,6 @@
public class Main {
- // CHECK-START: boolean TestCase.testCase() boolean_simplifier (before)
- // CHECK-DAG: [[Const0:i\d+]] IntConstant 0
- // CHECK-DAG: [[Const1:i\d+]] IntConstant 1
- // CHECK-DAG: [[Value:z\d+]] StaticFieldGet
- // CHECK-DAG: If [ [[Value]] ]
- // CHECK-DAG: [[Phi:i\d+]] Phi [ [[Const1]] [[Const0]] ]
- // CHECK-DAG: Return [ [[Phi]] ]
-
- // CHECK-START: boolean TestCase.testCase() boolean_simplifier (after)
- // CHECK-DAG: [[Value:z\d+]] StaticFieldGet
- // CHECK-DAG: [[Not:z\d+]] BooleanNot [ [[Value]] ]
- // CHECK-DAG: Return [ [[Not]] ]
-
public static boolean runTest(boolean input) throws Exception {
Class<?> c = Class.forName("TestCase");
Method m = c.getMethod("testCase");
diff --git a/test/472-unreachable-if-regression/expected.txt b/test/472-unreachable-if-regression/expected.txt
new file mode 100644
index 0000000..9fc8bea
--- /dev/null
+++ b/test/472-unreachable-if-regression/expected.txt
@@ -0,0 +1,3 @@
+Test started.
+Successfully called UnreachableIf().
+Successfully called UnreachablePackedSwitch().
diff --git a/test/472-unreachable-if-regression/info.txt b/test/472-unreachable-if-regression/info.txt
new file mode 100644
index 0000000..d8b5a45
--- /dev/null
+++ b/test/472-unreachable-if-regression/info.txt
@@ -0,0 +1,3 @@
+Regression test for crashes during compilation of methods which end
+with an if-cc or switch, i.e. there's a fall-through out of method code.
+Also tests a packed-switch with negative offset to its data.
diff --git a/test/472-unreachable-if-regression/smali/Test.smali b/test/472-unreachable-if-regression/smali/Test.smali
new file mode 100644
index 0000000..c7107d1
--- /dev/null
+++ b/test/472-unreachable-if-regression/smali/Test.smali
@@ -0,0 +1,46 @@
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTest;
+
+.super Ljava/lang/Object;
+
+.method public static UnreachableIf()V
+ .registers 1
+ return-void
+ :unreachable
+ not-int v0, v0
+ if-lt v0, v0, :unreachable
+ # fall-through out of code item
+.end method
+
+.method public static UnreachablePackedSwitch()V
+ .registers 1
+ return-void
+ :unreachable
+ goto :pswitch_2
+ :pswitch_data
+ .packed-switch 1
+ :pswitch_1
+ :pswitch_2
+ :pswitch_1
+ :pswitch_2
+ .end packed-switch
+ :pswitch_1
+ not-int v0, v0
+ :pswitch_2
+ packed-switch v0, :pswitch_data
+ # fall-through out of code item
+.end method
diff --git a/test/472-unreachable-if-regression/src/Main.java b/test/472-unreachable-if-regression/src/Main.java
new file mode 100644
index 0000000..c9f9511
--- /dev/null
+++ b/test/472-unreachable-if-regression/src/Main.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+
+ // Workaround for b/18051191.
+ class InnerClass {}
+
+ public static void main(String args[]) throws Exception {
+ System.out.println("Test started.");
+ Class<?> c = Class.forName("Test");
+
+ Method unreachableIf = c.getMethod("UnreachableIf", (Class[]) null);
+ unreachableIf.invoke(null, (Object[]) null);
+ System.out.println("Successfully called UnreachableIf().");
+
+ Method unreachablePackedSwitch = c.getMethod("UnreachablePackedSwitch", (Class[]) null);
+ unreachablePackedSwitch.invoke(null, (Object[]) null);
+ System.out.println("Successfully called UnreachablePackedSwitch().");
+ }
+
+}
diff --git a/test/474-checker-boolean-input/src/Main.java b/test/474-checker-boolean-input/src/Main.java
index 1ebe14e..490f7f9 100644
--- a/test/474-checker-boolean-input/src/Main.java
+++ b/test/474-checker-boolean-input/src/Main.java
@@ -23,37 +23,13 @@
}
/*
- * Test that zero/one constants are accepted as Boolean inputs.
- */
-
- // CHECK-START: boolean Main.TestConstAsBoolean() inliner (before)
- // CHECK-DAG: [[Invoke:z\d+]] InvokeStaticOrDirect
- // CHECK-DAG: BooleanNot [ [[Invoke]] ]
-
- // CHECK-START: boolean Main.TestConstAsBoolean() inliner (after)
- // CHECK-DAG: [[Const:i\d+]] IntConstant 1
- // CHECK-DAG: BooleanNot [ [[Const]] ]
-
- public static boolean InlineConst() {
- return true;
- }
-
- public static boolean TestConstAsBoolean() {
- return InlineConst() != true ? true : false;
- }
-
- /*
* Test that integer Phis are accepted as Boolean inputs until
* we implement a suitable type analysis.
*/
- // CHECK-START: boolean Main.TestPhiAsBoolean(int) inliner (before)
- // CHECK-DAG: [[Invoke:z\d+]] InvokeStaticOrDirect
- // CHECK-DAG: BooleanNot [ [[Invoke]] ]
-
- // CHECK-START: boolean Main.TestPhiAsBoolean(int) inliner (after)
- // CHECK-DAG: [[Phi:i\d+]] Phi
- // CHECK-DAG: BooleanNot [ [[Phi]] ]
+ // CHECK-START: boolean Main.TestPhiAsBoolean(int) boolean_simplifier (after)
+ // CHECK-DAG: <<Phi:i\d+>> Phi
+ // CHECK-DAG: BooleanNot [<<Phi>>]
public static boolean f1;
public static boolean f2;
@@ -71,13 +47,9 @@
* we implement a suitable type analysis.
*/
- // CHECK-START: boolean Main.TestAndAsBoolean(boolean, boolean) inliner (before)
- // CHECK-DAG: [[Invoke:z\d+]] InvokeStaticOrDirect
- // CHECK-DAG: BooleanNot [ [[Invoke]] ]
-
- // CHECK-START: boolean Main.TestAndAsBoolean(boolean, boolean) inliner (after)
- // CHECK-DAG: [[And:i\d+]] And
- // CHECK-DAG: BooleanNot [ [[And]] ]
+ // CHECK-START: boolean Main.TestAndAsBoolean(boolean, boolean) boolean_simplifier (after)
+ // CHECK-DAG: <<And:i\d+>> And
+ // CHECK-DAG: BooleanNot [<<And>>]
public static boolean InlineAnd(boolean x, boolean y) {
return x & y;
@@ -92,13 +64,9 @@
* we implement a suitable type analysis.
*/
- // CHECK-START: boolean Main.TestOrAsBoolean(boolean, boolean) inliner (before)
- // CHECK-DAG: [[Invoke:z\d+]] InvokeStaticOrDirect
- // CHECK-DAG: BooleanNot [ [[Invoke]] ]
-
- // CHECK-START: boolean Main.TestOrAsBoolean(boolean, boolean) inliner (after)
- // CHECK-DAG: [[Or:i\d+]] Or
- // CHECK-DAG: BooleanNot [ [[Or]] ]
+ // CHECK-START: boolean Main.TestOrAsBoolean(boolean, boolean) boolean_simplifier (after)
+ // CHECK-DAG: <<Or:i\d+>> Or
+ // CHECK-DAG: BooleanNot [<<Or>>]
public static boolean InlineOr(boolean x, boolean y) {
return x | y;
@@ -113,13 +81,9 @@
* we implement a suitable type analysis.
*/
- // CHECK-START: boolean Main.TestXorAsBoolean(boolean, boolean) inliner (before)
- // CHECK-DAG: [[Invoke:z\d+]] InvokeStaticOrDirect
- // CHECK-DAG: BooleanNot [ [[Invoke]] ]
-
- // CHECK-START: boolean Main.TestXorAsBoolean(boolean, boolean) inliner (after)
- // CHECK-DAG: [[Xor:i\d+]] Xor
- // CHECK-DAG: BooleanNot [ [[Xor]] ]
+ // CHECK-START: boolean Main.TestXorAsBoolean(boolean, boolean) boolean_simplifier (after)
+ // CHECK-DAG: <<Xor:i\d+>> Xor
+ // CHECK-DAG: BooleanNot [<<Xor>>]
public static boolean InlineXor(boolean x, boolean y) {
return x ^ y;
@@ -132,7 +96,6 @@
public static void main(String[] args) {
f1 = true;
f2 = false;
- assertBoolEquals(false, TestConstAsBoolean());
assertBoolEquals(true, TestPhiAsBoolean(0));
assertBoolEquals(false, TestPhiAsBoolean(42));
assertBoolEquals(true, TestAndAsBoolean(true, false));
diff --git a/test/475-simplify-mul-zero/expected.txt b/test/475-simplify-mul-zero/expected.txt
new file mode 100644
index 0000000..7ed6ff8
--- /dev/null
+++ b/test/475-simplify-mul-zero/expected.txt
@@ -0,0 +1 @@
+5
diff --git a/test/475-simplify-mul-zero/info.txt b/test/475-simplify-mul-zero/info.txt
new file mode 100644
index 0000000..0db11f2
--- /dev/null
+++ b/test/475-simplify-mul-zero/info.txt
@@ -0,0 +1,2 @@
+Regression test for the optimizing compiler's instruction simplification pass.
+Mul should accept a zero constant as input.
\ No newline at end of file
diff --git a/test/475-simplify-mul-zero/src/Main.java b/test/475-simplify-mul-zero/src/Main.java
new file mode 100644
index 0000000..57adcff
--- /dev/null
+++ b/test/475-simplify-mul-zero/src/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ long l3 = 2207693990L;
+ int i12 = 5;
+
+ for (int i = 1; i < 2; ++i) {
+ i12 ^= (int)(-((-(-(l3 - l3))) * i));
+ }
+
+ System.out.println(i12);
+ }
+}
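[Editor's note] Why the expected output is 5: constant folding reduces l3 - l3 to 0, so the multiplication reaches the instruction simplifier as a Mul with a zero-constant input, which must be folded to 0 rather than rejected; i12 ^= 0 then leaves i12 at its initial value. A standalone reduction of the expression (hypothetical class name):

    public class MulZeroSketch {
      public static void main(String[] args) {
        long l3 = 2207693990L;
        int i = 1;
        // l3 - l3 == 0, so the whole product folds to 0.
        int folded = (int) (-((-(-(l3 - l3))) * i));
        System.out.println(folded);      // 0
        System.out.println(5 ^ folded);  // 5, matching expected.txt
      }
    }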
diff --git a/test/476-checker-ctor-memory-barrier/src/Main.java b/test/476-checker-ctor-memory-barrier/src/Main.java
index 10aa2ab..f24dc4a 100644
--- a/test/476-checker-ctor-memory-barrier/src/Main.java
+++ b/test/476-checker-ctor-memory-barrier/src/Main.java
@@ -14,10 +14,11 @@
* limitations under the License.
*/
+// TODO: Add more tests after we can inline functions with calls.
class ClassWithoutFinals {
// CHECK-START: void ClassWithoutFinals.<init>() register (after)
- // CHECK-NOT: MemoryBarrier {{StoreStore}}
+ // CHECK-NOT: MemoryBarrier kind:StoreStore
public ClassWithoutFinals() {}
}
@@ -26,9 +27,8 @@
public ClassWithFinals obj;
// CHECK-START: void ClassWithFinals.<init>(boolean) register (after)
- // CHECK: MemoryBarrier {{StoreStore}}
- // CHECK-NOT: {{.*}}
- // CHECK: ReturnVoid
+ // CHECK: MemoryBarrier kind:StoreStore
+ // CHECK-NEXT: ReturnVoid
public ClassWithFinals(boolean cond) {
x = 0;
if (cond) {
@@ -38,18 +38,16 @@
}
// CHECK-START: void ClassWithFinals.<init>() register (after)
- // CHECK: MemoryBarrier {{StoreStore}}
- // CHECK-NOT: {{.*}}
- // CHECK: ReturnVoid
+ // CHECK: MemoryBarrier kind:StoreStore
+ // CHECK-NEXT: ReturnVoid
public ClassWithFinals() {
x = 0;
}
// CHECK-START: void ClassWithFinals.<init>(int) register (after)
- // CHECK: MemoryBarrier {{StoreStore}}
- // CHECK: MemoryBarrier {{StoreStore}}
- // CHECK-NOT: {{.*}}
- // CHECK: ReturnVoid
+ // CHECK: MemoryBarrier kind:StoreStore
+ // CHECK: MemoryBarrier kind:StoreStore
+ // CHECK-NEXT: ReturnVoid
public ClassWithFinals(int x) {
// This should have two barriers:
// - one for the constructor
@@ -61,87 +59,135 @@
class InheritFromClassWithFinals extends ClassWithFinals {
// CHECK-START: void InheritFromClassWithFinals.<init>() register (after)
- // CHECK: MemoryBarrier {{StoreStore}}
- // CHECK-NOT: {{.*}}
- // CHECK: ReturnVoid
+ // CHECK: MemoryBarrier kind:StoreStore
+ // CHECK-NEXT: ReturnVoid
// CHECK-START: void InheritFromClassWithFinals.<init>() register (after)
- // CHECK-NOT: InvokeStaticOrDirect
+ // CHECK-NOT: InvokeStaticOrDirect
public InheritFromClassWithFinals() {
// Should inline the super constructor.
}
// CHECK-START: void InheritFromClassWithFinals.<init>(boolean) register (after)
- // CHECK: InvokeStaticOrDirect
+ // CHECK: InvokeStaticOrDirect
// CHECK-START: void InheritFromClassWithFinals.<init>(boolean) register (after)
- // CHECK-NOT: MemoryBarrier {{StoreStore}}
+ // CHECK-NOT: MemoryBarrier kind:StoreStore
public InheritFromClassWithFinals(boolean cond) {
super(cond);
// should not inline the super constructor
}
+
+ // CHECK-START: void InheritFromClassWithFinals.<init>(int) register (after)
+ // CHECK: MemoryBarrier kind:StoreStore
+ // CHECK: MemoryBarrier kind:StoreStore
+ // CHECK: ReturnVoid
+
+ // CHECK-START: void InheritFromClassWithFinals.<init>(int) register (after)
+ // CHECK-NOT: InvokeStaticOrDirect
+ public InheritFromClassWithFinals(int unused) {
+ // Should inline the super constructor and insert a memory barrier.
+
+ // Should inline the new instance call and insert another memory barrier.
+ new InheritFromClassWithFinals();
+ }
}
class HaveFinalsAndInheritFromClassWithFinals extends ClassWithFinals {
final int y;
// CHECK-START: void HaveFinalsAndInheritFromClassWithFinals.<init>() register (after)
- // CHECK: MemoryBarrier {{StoreStore}}
- // CHECK: MemoryBarrier {{StoreStore}}
- // CHECK-NOT: {{.*}}
- // CHECK: ReturnVoid
+ // CHECK: MemoryBarrier kind:StoreStore
+ // CHECK-NEXT: ReturnVoid
// CHECK-START: void HaveFinalsAndInheritFromClassWithFinals.<init>() register (after)
// CHECK-NOT: InvokeStaticOrDirect
public HaveFinalsAndInheritFromClassWithFinals() {
- // Should inline the super constructor.
+ // Should inline the super constructor and remove the memory barrier.
y = 0;
}
// CHECK-START: void HaveFinalsAndInheritFromClassWithFinals.<init>(boolean) register (after)
- // CHECK: InvokeStaticOrDirect
- // CHECK: MemoryBarrier {{StoreStore}}
- // CHECK-NOT: {{.*}}
- // CHECK: ReturnVoid
+ // CHECK: InvokeStaticOrDirect
+ // CHECK: MemoryBarrier kind:StoreStore
+ // CHECK-NEXT: ReturnVoid
public HaveFinalsAndInheritFromClassWithFinals(boolean cond) {
super(cond);
// should not inline the super constructor
y = 0;
}
+
+ // CHECK-START: void HaveFinalsAndInheritFromClassWithFinals.<init>(int) register (after)
+ // CHECK: MemoryBarrier kind:StoreStore
+ // CHECK: MemoryBarrier kind:StoreStore
+ // CHECK: MemoryBarrier kind:StoreStore
+ // CHECK-NEXT: ReturnVoid
+
+ // CHECK-START: void HaveFinalsAndInheritFromClassWithFinals.<init>(int) register (after)
+ // CHECK-NOT: InvokeStaticOrDirect
+ public HaveFinalsAndInheritFromClassWithFinals(int unused) {
+ // Should inline the super constructor and keep just one memory barrier.
+ y = 0;
+
+ // Should inline new instance and keep one barrier.
+ new HaveFinalsAndInheritFromClassWithFinals();
+ // Should inline new instance and keep one barrier.
+ new InheritFromClassWithFinals();
+ }
}
public class Main {
// CHECK-START: ClassWithFinals Main.noInlineNoConstructorBarrier() register (after)
- // CHECK: InvokeStaticOrDirect
+ // CHECK: InvokeStaticOrDirect
// CHECK-START: ClassWithFinals Main.noInlineNoConstructorBarrier() register (after)
- // CHECK-NOT: MemoryBarrier {{StoreStore}}
+ // CHECK-NOT: MemoryBarrier kind:StoreStore
public static ClassWithFinals noInlineNoConstructorBarrier() {
return new ClassWithFinals(false);
}
- // CHECK-START: ClassWithFinals Main.inlineConstructorBarrier() register (after)
- // CHECK: MemoryBarrier {{StoreStore}}
- // CHECK-NOT: {{.*}}
- // CHECK: Return
+ // CHECK-START: void Main.inlineNew() register (after)
+ // CHECK: MemoryBarrier kind:StoreStore
+ // CHECK-NEXT: ReturnVoid
- // CHECK-START: ClassWithFinals Main.inlineConstructorBarrier() register (after)
- // CHECK-NOT: InvokeStaticOrDirect
- public static ClassWithFinals inlineConstructorBarrier() {
- return new ClassWithFinals();
+ // CHECK-START: void Main.inlineNew() register (after)
+ // CHECK-NOT: InvokeStaticOrDirect
+ public static void inlineNew() {
+ new ClassWithFinals();
}
- // CHECK-START: InheritFromClassWithFinals Main.doubleInlineConstructorBarrier() register (after)
- // CHECK: MemoryBarrier {{StoreStore}}
- // CHECK-NOT: {{.*}}
- // CHECK: Return
+ // CHECK-START: void Main.inlineNew1() register (after)
+ // CHECK: MemoryBarrier kind:StoreStore
+ // CHECK-NEXT: ReturnVoid
- // CHECK-START: InheritFromClassWithFinals Main.doubleInlineConstructorBarrier() register (after)
- // CHECK-NOT: InvokeStaticOrDirect
- public static InheritFromClassWithFinals doubleInlineConstructorBarrier() {
- return new InheritFromClassWithFinals();
+ // CHECK-START: void Main.inlineNew1() register (after)
+ // CHECK-NOT: InvokeStaticOrDirect
+ public static void inlineNew1() {
+ new InheritFromClassWithFinals();
}
- public static void main(String[] args) { }
+ // CHECK-START: void Main.inlineNew2() register (after)
+ // CHECK: MemoryBarrier kind:StoreStore
+ // CHECK-NEXT: ReturnVoid
+
+ // CHECK-START: void Main.inlineNew2() register (after)
+ // CHECK-NOT: InvokeStaticOrDirect
+ public static void inlineNew2() {
+ new HaveFinalsAndInheritFromClassWithFinals();
+ }
+
+ // CHECK-START: void Main.inlineNew3() register (after)
+ // CHECK: MemoryBarrier kind:StoreStore
+ // CHECK: MemoryBarrier kind:StoreStore
+ // CHECK-NEXT: ReturnVoid
+
+ // CHECK-START: void Main.inlineNew3() register (after)
+ // CHECK-NOT: InvokeStaticOrDirect
+ public static void inlineNew3() {
+ new HaveFinalsAndInheritFromClassWithFinals();
+ new HaveFinalsAndInheritFromClassWithFinals();
+ }
+
+ public static void main(String[] args) {}
}
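[Editor's note] The kind:StoreStore assertions above test the JMM final-field guarantee: stores to final fields must become visible before the reference to the constructed object can be published (assuming `this` does not escape the constructor). A minimal sketch of the pattern being compiled (hypothetical class; unsafe publication shown for illustration):

    class FinalFieldSketch {
      static FinalFieldSketch shared;  // unsafely published reference
      final int v;
      FinalFieldSketch() {
        v = 42;
        // The compiler emits a StoreStore barrier here, before the constructor
        // returns, so a thread that reads a non-null `shared` never observes
        // v == 0.
      }
      static void publish() { shared = new FinalFieldSketch(); }
    }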
diff --git a/test/476-clinit-check-inlining-static-invoke/expected.txt b/test/476-clinit-check-inlining-static-invoke/expected.txt
new file mode 100644
index 0000000..c55bf72
--- /dev/null
+++ b/test/476-clinit-check-inlining-static-invoke/expected.txt
@@ -0,0 +1,2 @@
+checkClinitCheckBeforeStaticMethodInvoke START
+checkClinitCheckBeforeStaticMethodInvoke PASSED
diff --git a/test/476-clinit-check-inlining-static-invoke/info.txt b/test/476-clinit-check-inlining-static-invoke/info.txt
new file mode 100644
index 0000000..1a439fc
--- /dev/null
+++ b/test/476-clinit-check-inlining-static-invoke/info.txt
@@ -0,0 +1,3 @@
+Regression test for a bug where an inlined call to a static method
+failed to emit a prior initialization check of the method's declaring
+class.
diff --git a/test/476-clinit-check-inlining-static-invoke/src/Main.java b/test/476-clinit-check-inlining-static-invoke/src/Main.java
new file mode 100644
index 0000000..a7d3bcd
--- /dev/null
+++ b/test/476-clinit-check-inlining-static-invoke/src/Main.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ public static void main(String[] args) {
+ checkClinitCheckBeforeStaticMethodInvoke();
+ }
+
+ static void checkClinitCheckBeforeStaticMethodInvoke() {
+ System.out.println("checkClinitCheckBeforeStaticMethodInvoke START");
+
+ // Call static method to cause implicit class initialization, even
+ // if it is inlined.
+ ClassWithClinit.$opt$inline$StaticMethod();
+ if (!classWithClinitInitialized) {
+ System.out.println("checkClinitCheckBeforeStaticMethodInvoke FAILED");
+ return;
+ }
+
+ System.out.println("checkClinitCheckBeforeStaticMethodInvoke PASSED");
+ }
+
+ static class ClassWithClinit {
+ static {
+ Main.classWithClinitInitialized = true;
+ }
+
+ static void $opt$inline$StaticMethod() {
+ }
+ }
+
+ static boolean classWithClinitInitialized = false;
+}
diff --git a/test/478-checker-clinit-check-pruning/expected.txt b/test/478-checker-clinit-check-pruning/expected.txt
new file mode 100644
index 0000000..387e1a7
--- /dev/null
+++ b/test/478-checker-clinit-check-pruning/expected.txt
@@ -0,0 +1,6 @@
+Main$ClassWithClinit1's static initializer
+Main$ClassWithClinit2's static initializer
+Main$ClassWithClinit3's static initializer
+Main$ClassWithClinit4's static initializer
+Main$ClassWithClinit5's static initializer
+Main$ClassWithClinit6's static initializer
diff --git a/test/478-checker-clinit-check-pruning/info.txt b/test/478-checker-clinit-check-pruning/info.txt
new file mode 100644
index 0000000..deb64de
--- /dev/null
+++ b/test/478-checker-clinit-check-pruning/info.txt
@@ -0,0 +1,3 @@
+Test ensuring class initialization checks (and load class instructions)
+added by the graph builder during the construction of a static invoke
+are properly pruned.
diff --git a/test/478-checker-clinit-check-pruning/src/Main.java b/test/478-checker-clinit-check-pruning/src/Main.java
new file mode 100644
index 0000000..e8739b8
--- /dev/null
+++ b/test/478-checker-clinit-check-pruning/src/Main.java
@@ -0,0 +1,314 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ /*
+ * Ensure an inlined static invoke explicitly triggers the
+ * initialization check of the called method's declaring class, and
+ * that the corresponding load class instruction does not get
+ * removed before register allocation & code generation.
+ */
+
+ // CHECK-START: void Main.invokeStaticInlined() builder (after)
+ // CHECK-DAG: <<LoadClass:l\d+>> LoadClass gen_clinit_check:false
+ // CHECK-DAG: <<ClinitCheck:l\d+>> ClinitCheck [<<LoadClass>>]
+ // CHECK-DAG: InvokeStaticOrDirect [<<ClinitCheck>>]
+
+ // CHECK-START: void Main.invokeStaticInlined() inliner (after)
+ // CHECK-DAG: <<LoadClass:l\d+>> LoadClass gen_clinit_check:false
+ // CHECK-DAG: <<ClinitCheck:l\d+>> ClinitCheck [<<LoadClass>>]
+
+ // CHECK-START: void Main.invokeStaticInlined() inliner (after)
+ // CHECK-NOT: InvokeStaticOrDirect
+
+ // The following checks ensure the clinit check instruction added by
+ // the builder is pruned by the PrepareForRegisterAllocation, while
+ // the load class instruction is preserved. As the control flow
+ // graph is not dumped after (nor before) this step, we check the
+ // CFG as it is before the next pass (liveness analysis) instead.
+
+ // CHECK-START: void Main.invokeStaticInlined() liveness (before)
+ // CHECK-DAG: LoadClass gen_clinit_check:true
+
+ // CHECK-START: void Main.invokeStaticInlined() liveness (before)
+ // CHECK-NOT: ClinitCheck
+ // CHECK-NOT: InvokeStaticOrDirect
+
+ static void invokeStaticInlined() {
+ ClassWithClinit1.$opt$inline$StaticMethod();
+ }
+
+ static class ClassWithClinit1 {
+ static {
+ System.out.println("Main$ClassWithClinit1's static initializer");
+ }
+
+ static void $opt$inline$StaticMethod() {
+ }
+ }
+
+ /*
+ * Ensure a non-inlined static invoke eventually has an implicit
+ * initialization check of the called method's declaring class.
+ */
+
+ // CHECK-START: void Main.invokeStaticNotInlined() builder (after)
+ // CHECK-DAG: <<LoadClass:l\d+>> LoadClass gen_clinit_check:false
+ // CHECK-DAG: <<ClinitCheck:l\d+>> ClinitCheck [<<LoadClass>>]
+ // CHECK-DAG: InvokeStaticOrDirect [<<ClinitCheck>>]
+
+ // CHECK-START: void Main.invokeStaticNotInlined() inliner (after)
+ // CHECK-DAG: <<LoadClass:l\d+>> LoadClass gen_clinit_check:false
+ // CHECK-DAG: <<ClinitCheck:l\d+>> ClinitCheck [<<LoadClass>>]
+ // CHECK-DAG: InvokeStaticOrDirect [<<ClinitCheck>>]
+
+ // The following checks ensure the clinit check and load class
+ // instructions added by the builder are pruned by the
+ // PrepareForRegisterAllocation. As the control flow graph is not
+ // dumped after (nor before) this step, we check the CFG as it is
+ // before the next pass (liveness analysis) instead.
+
+ // CHECK-START: void Main.invokeStaticNotInlined() liveness (before)
+ // CHECK-DAG: InvokeStaticOrDirect
+
+ // CHECK-START: void Main.invokeStaticNotInlined() liveness (before)
+ // CHECK-NOT: LoadClass
+ // CHECK-NOT: ClinitCheck
+
+ static void invokeStaticNotInlined() {
+ ClassWithClinit2.staticMethod();
+ }
+
+ static class ClassWithClinit2 {
+ static {
+ System.out.println("Main$ClassWithClinit2's static initializer");
+ }
+
+ static boolean doThrow = false;
+
+ static void staticMethod() {
+ if (doThrow) {
+ // Try defeating inlining.
+ throw new Error();
+ }
+ }
+ }
+
+ /*
+ * Ensure an inlined call to a static method whose declaring class
+ * is statically known to have been initialized does not require an
+ * explicit clinit check.
+ */
+
+ // CHECK-START: void Main$ClassWithClinit3.invokeStaticInlined() builder (after)
+ // CHECK-DAG: InvokeStaticOrDirect
+
+ // CHECK-START: void Main$ClassWithClinit3.invokeStaticInlined() builder (after)
+ // CHECK-NOT: LoadClass
+ // CHECK-NOT: ClinitCheck
+
+ // CHECK-START: void Main$ClassWithClinit3.invokeStaticInlined() inliner (after)
+ // CHECK-NOT: LoadClass
+ // CHECK-NOT: ClinitCheck
+ // CHECK-NOT: InvokeStaticOrDirect
+
+ static class ClassWithClinit3 {
+ static void invokeStaticInlined() {
+ // The invocation of invokeStaticInlined triggers the
+ // initialization of ClassWithClinit3, meaning that the
+ // call below to $opt$inline$StaticMethod does not need a
+ // clinit check.
+ $opt$inline$StaticMethod();
+ }
+
+ static {
+ System.out.println("Main$ClassWithClinit3's static initializer");
+ }
+
+ static void $opt$inline$StaticMethod() {
+ }
+ }
+
+ /*
+ * Ensure a non-inlined call to a static method whose declaring
+ * class is statically known to have been initialized does not
+ * require an explicit clinit check.
+ */
+
+ // CHECK-START: void Main$ClassWithClinit4.invokeStaticNotInlined() builder (after)
+ // CHECK-DAG: InvokeStaticOrDirect
+
+ // CHECK-START: void Main$ClassWithClinit4.invokeStaticNotInlined() builder (after)
+ // CHECK-NOT: LoadClass
+ // CHECK-NOT: ClinitCheck
+
+ // CHECK-START: void Main$ClassWithClinit4.invokeStaticNotInlined() inliner (after)
+ // CHECK-DAG: InvokeStaticOrDirect
+
+ // CHECK-START: void Main$ClassWithClinit4.invokeStaticNotInlined() inliner (after)
+ // CHECK-NOT: LoadClass
+ // CHECK-NOT: ClinitCheck
+
+ static class ClassWithClinit4 {
+ static void invokeStaticNotInlined() {
+ // The invocation of invokeStaticNotInlined triggers the
+ // initialization of ClassWithClinit4, meaning that the
+ // call below to staticMethod does not need a clinit
+ // check.
+ staticMethod();
+ }
+
+ static {
+ System.out.println("Main$ClassWithClinit4's static initializer");
+ }
+
+ static boolean doThrow = false;
+
+ static void staticMethod() {
+ if (doThrow) {
+ // Try defeating inlining.
+ throw new Error();
+ }
+ }
+ }
+
+ /*
+ * Ensure an inlined call to a static method whose declaring class
+ * is a super class of the caller's class does not require an
+ * explicit clinit check.
+ */
+
+ // CHECK-START: void Main$SubClassOfClassWithClinit5.invokeStaticInlined() builder (after)
+ // CHECK-DAG: InvokeStaticOrDirect
+
+ // CHECK-START: void Main$SubClassOfClassWithClinit5.invokeStaticInlined() builder (after)
+ // CHECK-NOT: LoadClass
+ // CHECK-NOT: ClinitCheck
+
+ // CHECK-START: void Main$SubClassOfClassWithClinit5.invokeStaticInlined() inliner (after)
+ // CHECK-NOT: LoadClass
+ // CHECK-NOT: ClinitCheck
+ // CHECK-NOT: InvokeStaticOrDirect
+
+ static class ClassWithClinit5 {
+ static void $opt$inline$StaticMethod() {
+ }
+
+ static {
+ System.out.println("Main$ClassWithClinit5's static initializer");
+ }
+ }
+
+ static class SubClassOfClassWithClinit5 extends ClassWithClinit5 {
+ static void invokeStaticInlined() {
+ ClassWithClinit5.$opt$inline$StaticMethod();
+ }
+ }
+
+ /*
+ * Ensure a non-inlined call to a static method whose declaring
+ * class is a super class of the caller's class does not require an
+ * explicit clinit check.
+ */
+
+ // CHECK-START: void Main$SubClassOfClassWithClinit6.invokeStaticNotInlined() builder (after)
+ // CHECK-DAG: InvokeStaticOrDirect
+
+ // CHECK-START: void Main$SubClassOfClassWithClinit6.invokeStaticNotInlined() builder (after)
+ // CHECK-NOT: LoadClass
+ // CHECK-NOT: ClinitCheck
+
+ // CHECK-START: void Main$SubClassOfClassWithClinit6.invokeStaticNotInlined() inliner (after)
+ // CHECK-DAG: InvokeStaticOrDirect
+
+ // CHECK-START: void Main$SubClassOfClassWithClinit6.invokeStaticNotInlined() inliner (after)
+ // CHECK-NOT: LoadClass
+ // CHECK-NOT: ClinitCheck
+
+ static class ClassWithClinit6 {
+ static boolean doThrow = false;
+
+ static void staticMethod() {
+ if (doThrow) {
+ // Try defeating inlining.
+ throw new Error();
+ }
+ }
+
+ static {
+ System.out.println("Main$ClassWithClinit6's static initializer");
+ }
+ }
+
+ static class SubClassOfClassWithClinit6 extends ClassWithClinit6 {
+ static void invokeStaticNotInlined() {
+ ClassWithClinit6.staticMethod();
+ }
+ }
+
+
+ /*
+ * Verify that if we have a static call immediately after the load class,
+ * we don't generate a clinit check.
+ */
+
+ // CHECK-START: void Main.noClinitBecauseOfInvokeStatic() liveness (before)
+ // CHECK-DAG: <<IntConstant:i\d+>> IntConstant 0
+ // CHECK-DAG: <<LoadClass:l\d+>> LoadClass gen_clinit_check:false
+ // CHECK-DAG: InvokeStaticOrDirect
+ // CHECK-DAG: StaticFieldSet [<<LoadClass>>,<<IntConstant>>]
+
+ // CHECK-START: void Main.noClinitBecauseOfInvokeStatic() liveness (before)
+ // CHECK-NOT: ClinitCheck
+
+ static void noClinitBecauseOfInvokeStatic() {
+ ClassWithClinit2.staticMethod();
+ ClassWithClinit2.doThrow = false;
+ }
+
+ /*
+ * Verify that if the static call is after a field access, the load class
+ * will generate a clinit check.
+ */
+
+ // CHECK-START: void Main.clinitBecauseOfFieldAccess() liveness (before)
+ // CHECK-DAG: <<IntConstant:i\d+>> IntConstant 0
+ // CHECK-DAG: <<LoadClass:l\d+>> LoadClass gen_clinit_check:true
+ // CHECK-DAG: StaticFieldSet [<<LoadClass>>,<<IntConstant>>]
+ // CHECK-DAG: InvokeStaticOrDirect
+
+ // CHECK-START: void Main.clinitBecauseOfFieldAccess() liveness (before)
+ // CHECK-NOT: ClinitCheck
+ static void clinitBecauseOfFieldAccess() {
+ ClassWithClinit2.doThrow = false;
+ ClassWithClinit2.staticMethod();
+ }
+
+ // TODO: Add a test for the case of a static method whose declaring
+ // class type index is not available (i.e. when `storage_index`
+ // equals `DexFile::kDexNoIndex` in
+ // art::HGraphBuilder::BuildInvoke).
+
+ public static void main(String[] args) {
+ invokeStaticInlined();
+ invokeStaticNotInlined();
+ ClassWithClinit3.invokeStaticInlined();
+ ClassWithClinit4.invokeStaticNotInlined();
+ SubClassOfClassWithClinit5.invokeStaticInlined();
+ SubClassOfClassWithClinit6.invokeStaticNotInlined();
+ }
+}
diff --git a/test/479-regression-implicit-null-check/expected.txt b/test/479-regression-implicit-null-check/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/479-regression-implicit-null-check/expected.txt
diff --git a/test/479-regression-implicit-null-check/info.txt b/test/479-regression-implicit-null-check/info.txt
new file mode 100644
index 0000000..0bfca8c
--- /dev/null
+++ b/test/479-regression-implicit-null-check/info.txt
@@ -0,0 +1,2 @@
+Tests a regression in which we moved the null check to an instruction which
+checked a different object. This led to valid null checks being elided.
diff --git a/test/479-regression-implicit-null-check/src/Main.java b/test/479-regression-implicit-null-check/src/Main.java
new file mode 100644
index 0000000..6b6f2e4
--- /dev/null
+++ b/test/479-regression-implicit-null-check/src/Main.java
@@ -0,0 +1,50 @@
+/*
+* Copyright (C) 2015 The Android Open Source Project
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+
+public class Main {
+ public int x = 0;
+
+ public Main(Main c) {
+ // After inlining the graph will look like:
+ // NullCheck c
+ // InstanceFieldGet c
+ // InstanceFieldSet this 3
+ // The dead code will eliminate the InstanceFieldGet and we'll end up with:
+ // NullCheck c
+ // InstanceFieldSet this 3
+ // At codegen, when verifying if we can move the null check to the user,
+ // we should check that we actually have the same user (not only that the
+ // next instruction can do implicit null checks).
+ // In this case we should generate code for the NullCheck since the next
+ // instruction checks a different object.
+ c.willBeInlined();
+ x = 3;
+ }
+
+ private int willBeInlined() {
+ return x;
+ }
+
+ public static void main(String[] args) {
+ try {
+ new Main(null);
+ throw new RuntimeException("Failed to throw NullPointerException");
+ } catch (NullPointerException e) {
+ // expected
+ }
+ }
+}
diff --git a/test/480-checker-dead-blocks/expected.txt b/test/480-checker-dead-blocks/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/480-checker-dead-blocks/expected.txt
diff --git a/test/480-checker-dead-blocks/info.txt b/test/480-checker-dead-blocks/info.txt
new file mode 100644
index 0000000..5aeafac
--- /dev/null
+++ b/test/480-checker-dead-blocks/info.txt
@@ -0,0 +1 @@
+Test removal of dead blocks.
\ No newline at end of file
diff --git a/test/480-checker-dead-blocks/src/Main.java b/test/480-checker-dead-blocks/src/Main.java
new file mode 100644
index 0000000..b76755e
--- /dev/null
+++ b/test/480-checker-dead-blocks/src/Main.java
@@ -0,0 +1,194 @@
+/*
+* Copyright (C) 2015 The Android Open Source Project
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+public class Main {
+
+ public static void assertIntEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static boolean inlineTrue() {
+ return true;
+ }
+
+ public static boolean inlineFalse() {
+ return false;
+ }
+
+ // CHECK-START: int Main.testTrueBranch(int, int) dead_code_elimination_final (before)
+ // CHECK-DAG: <<ArgX:i\d+>> ParameterValue
+ // CHECK-DAG: <<ArgY:i\d+>> ParameterValue
+ // CHECK-DAG: If
+ // CHECK-DAG: <<Add:i\d+>> Add [<<ArgX>>,<<ArgY>>]
+ // CHECK-DAG: <<Sub:i\d+>> Sub [<<ArgX>>,<<ArgY>>]
+ // CHECK-DAG: <<Phi:i\d+>> Phi [<<Add>>,<<Sub>>]
+ // CHECK-DAG: Return [<<Phi>>]
+
+ // CHECK-START: int Main.testTrueBranch(int, int) dead_code_elimination_final (after)
+ // CHECK-DAG: <<ArgX:i\d+>> ParameterValue
+ // CHECK-DAG: <<ArgY:i\d+>> ParameterValue
+ // CHECK-DAG: <<Add:i\d+>> Add [<<ArgX>>,<<ArgY>>]
+ // CHECK-DAG: Return [<<Add>>]
+
+ // CHECK-START: int Main.testTrueBranch(int, int) dead_code_elimination_final (after)
+ // CHECK-NOT: If
+ // CHECK-NOT: Sub
+ // CHECK-NOT: Phi
+
+ public static int testTrueBranch(int x, int y) {
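+ // inlineTrue() is inlined to the constant 'true'; the final DCE pass can
+ // then fold the If, removing the false branch (the Sub) and the Phi.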
+ int z;
+ if (inlineTrue()) {
+ z = x + y;
+ } else {
+ z = x - y;
+ }
+ return z;
+ }
+
+ // CHECK-START: int Main.testFalseBranch(int, int) dead_code_elimination_final (before)
+ // CHECK-DAG: <<ArgX:i\d+>> ParameterValue
+ // CHECK-DAG: <<ArgY:i\d+>> ParameterValue
+ // CHECK-DAG: If
+ // CHECK-DAG: <<Add:i\d+>> Add [<<ArgX>>,<<ArgY>>]
+ // CHECK-DAG: <<Sub:i\d+>> Sub [<<ArgX>>,<<ArgY>>]
+ // CHECK-DAG: <<Phi:i\d+>> Phi [<<Add>>,<<Sub>>]
+ // CHECK-DAG: Return [<<Phi>>]
+
+ // CHECK-START: int Main.testFalseBranch(int, int) dead_code_elimination_final (after)
+ // CHECK-DAG: <<ArgX:i\d+>> ParameterValue
+ // CHECK-DAG: <<ArgY:i\d+>> ParameterValue
+ // CHECK-DAG: <<Sub:i\d+>> Sub [<<ArgX>>,<<ArgY>>]
+ // CHECK-DAG: Return [<<Sub>>]
+
+ // CHECK-START: int Main.testFalseBranch(int, int) dead_code_elimination_final (after)
+ // CHECK-NOT: If
+ // CHECK-NOT: Add
+ // CHECK-NOT: Phi
+
+ public static int testFalseBranch(int x, int y) {
+ int z;
+ if (inlineFalse()) {
+ z = x + y;
+ } else {
+ z = x - y;
+ }
+ return z;
+ }
+
+ // CHECK-START: int Main.testRemoveLoop(int) dead_code_elimination_final (before)
+ // CHECK: Mul
+
+ // CHECK-START: int Main.testRemoveLoop(int) dead_code_elimination_final (after)
+ // CHECK-NOT: Mul
+
+ public static int testRemoveLoop(int x) {
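+ // After inlining, the 'if (false)' guard makes the entire loop dead, so
+ // DCE removes it together with its Mul.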
+ if (inlineFalse()) {
+ for (int i = 0; i < x; ++i) {
+ x *= x;
+ }
+ }
+ return x;
+ }
+
+ // CHECK-START: int Main.testInfiniteLoop(int) dead_code_elimination_final (before)
+ // CHECK-DAG: Return
+ // CHECK-DAG: Exit
+
+ // CHECK-START: int Main.testInfiniteLoop(int) dead_code_elimination_final (after)
+ // CHECK-NOT: Return
+ // CHECK-NOT: Exit
+
+ public static int testInfiniteLoop(int x) {
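+ // The loop condition is constant true after inlining, so the code after
+ // the loop becomes unreachable; Return and Exit are removed.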
+ while (inlineTrue()) {
+ x++;
+ }
+ return x;
+ }
+
+ // CHECK-START: int Main.testDeadLoop(int) dead_code_elimination_final (before)
+ // CHECK-DAG: If
+ // CHECK-DAG: Add
+
+ // CHECK-START: int Main.testDeadLoop(int) dead_code_elimination_final (after)
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: Return [<<Arg>>]
+
+ // CHECK-START: int Main.testDeadLoop(int) dead_code_elimination_final (after)
+ // CHECK-NOT: If
+ // CHECK-NOT: Add
+
+ public static int testDeadLoop(int x) {
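+ // The statically false condition makes the loop body dead; only
+ // 'return x' survives, as the CHECK lines above verify.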
+ while (inlineFalse()) {
+ x++;
+ }
+ return x;
+ }
+
+ // CHECK-START: int Main.testUpdateLoopInformation(int) dead_code_elimination_final (before)
+ // CHECK-DAG: If
+ // CHECK-DAG: If
+ // CHECK-DAG: Add
+
+ // CHECK-START: int Main.testUpdateLoopInformation(int) dead_code_elimination_final (after)
+ // CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ // CHECK-DAG: Return [<<Arg>>]
+
+ // CHECK-START: int Main.testUpdateLoopInformation(int) dead_code_elimination_final (after)
+ // CHECK-NOT: If
+ // CHECK-NOT: Add
+
+ public static int testUpdateLoopInformation(int x) {
+ // Using Or in the condition generates a dead loop in which not all of the
+ // blocks are removed. This forces DCE to update the loop information of
+ // the remaining blocks.
+ while (inlineFalse() || !inlineTrue()) {
+ x++;
+ }
+ return x;
+ }
+
+ // CHECK-START: int Main.testRemoveSuspendCheck(int, int) dead_code_elimination_final (before)
+ // CHECK: SuspendCheck
+ // CHECK: SuspendCheck
+ // CHECK: SuspendCheck
+ // CHECK-NOT: SuspendCheck
+
+ // CHECK-START: int Main.testRemoveSuspendCheck(int, int) dead_code_elimination_final (after)
+ // CHECK: SuspendCheck
+ // CHECK: SuspendCheck
+ // CHECK-NOT: SuspendCheck
+
+ public static int testRemoveSuspendCheck(int x, int y) {
+ // The inner loop will leave behind its header with its SuspendCheck. DCE
+ // must remove it; otherwise the outer loop would end up with two.
+ while (y > 0) {
+ while (inlineFalse() || !inlineTrue()) {
+ x++;
+ }
+ y--;
+ }
+ return x;
+ }
+
+ public static void main(String[] args) {
+ assertIntEquals(7, testTrueBranch(4, 3));
+ assertIntEquals(1, testFalseBranch(4, 3));
+ assertIntEquals(42, testRemoveLoop(42));
+ assertIntEquals(23, testUpdateLoopInformation(23));
+ assertIntEquals(12, testRemoveSuspendCheck(12, 5));
+ }
+}
diff --git a/test/481-regression-phi-cond/expected.txt b/test/481-regression-phi-cond/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/481-regression-phi-cond/expected.txt
diff --git a/test/481-regression-phi-cond/info.txt b/test/481-regression-phi-cond/info.txt
new file mode 100644
index 0000000..7ac3bb6
--- /dev/null
+++ b/test/481-regression-phi-cond/info.txt
@@ -0,0 +1,2 @@
+Tests a regression in which simplification of a boolean selection could attempt
+to remove a Phi from the wrong instruction list.
diff --git a/test/481-regression-phi-cond/src/Main.java b/test/481-regression-phi-cond/src/Main.java
new file mode 100644
index 0000000..bad9669
--- /dev/null
+++ b/test/481-regression-phi-cond/src/Main.java
@@ -0,0 +1,51 @@
+/*
+* Copyright (C) 2015 The Android Open Source Project
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+
+public class Main {
+ public static void assertBooleanEquals(boolean expected, boolean result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static boolean inlinePhi(boolean x, boolean y, boolean z) {
+ boolean phi;
+ if (z) {
+ phi = x;
+ } else {
+ phi = y;
+ }
+ return phi;
+ }
+
+ public static boolean dontUseParam(boolean x) {
+ return false;
+ }
+
+ public static boolean testCase(boolean x, boolean y, boolean z) {
+ // First create a Phi(x, y).
+ boolean phi = inlinePhi(x, y, z);
+ // Now use the phi as a condition which the boolean simplifier will try to
+ // optimize out. If the result is not used, the algorithm used to try to
+ // remove the original condition (phi) and crash.
+ return dontUseParam(phi == false ? false : true);
+ }
+
+ public static void main(String[] args) {
+ assertBooleanEquals(false, testCase(true, true, true));
+ }
+}
diff --git a/test/482-checker-loop-back-edge-use/expected.txt b/test/482-checker-loop-back-edge-use/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/482-checker-loop-back-edge-use/expected.txt
diff --git a/test/482-checker-loop-back-edge-use/info.txt b/test/482-checker-loop-back-edge-use/info.txt
new file mode 100644
index 0000000..f7fdeff
--- /dev/null
+++ b/test/482-checker-loop-back-edge-use/info.txt
@@ -0,0 +1,2 @@
+Tests the register allocator's optimization of adding synthesized uses
+at back edges.
diff --git a/test/482-checker-loop-back-edge-use/src/Main.java b/test/482-checker-loop-back-edge-use/src/Main.java
new file mode 100644
index 0000000..0ed9267
--- /dev/null
+++ b/test/482-checker-loop-back-edge-use/src/Main.java
@@ -0,0 +1,131 @@
+/*
+* Copyright (C) 2015 The Android Open Source Project
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+
+public class Main {
+
+ // CHECK-START: void Main.loop1(boolean) liveness (after)
+ // CHECK: ParameterValue liveness:2 ranges:{[2,22)} uses:[17,22]
+ // CHECK: Goto liveness:20
+ public static void loop1(boolean incoming) {
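+ // 'incoming' is read by the loop condition, so a synthesized use is added
+ // at the back edge (position 22 in the liveness CHECK above) to keep it
+ // live across iterations.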
+ while (incoming) {}
+ }
+
+ // CHECK-START: void Main.loop2(boolean) liveness (after)
+ // CHECK: ParameterValue liveness:2 ranges:{[2,42)} uses:[33,38,42]
+ // CHECK: Goto liveness:36
+ // CHECK: Goto liveness:40
+ public static void loop2(boolean incoming) {
+ while (true) {
+ System.out.println("foo");
+ while (incoming) {}
+ }
+ }
+
+ // CHECK-START: void Main.loop3(boolean) liveness (after)
+ // CHECK: ParameterValue liveness:2 ranges:{[2,60)} uses:[56,60]
+ // CHECK: Goto liveness:58
+
+ // CHECK-START: void Main.loop3(boolean) liveness (after)
+ // CHECK-NOT: Goto liveness:54
+ public static void loop3(boolean incoming) {
+ // 'incoming' only needs a use at the outer loop's back edge.
+ while (System.currentTimeMillis() != 42) {
+ while (Runtime.getRuntime() != null) {}
+ System.out.println(incoming);
+ }
+ }
+
+ // CHECK-START: void Main.loop4(boolean) liveness (after)
+ // CHECK: ParameterValue liveness:2 ranges:{[2,22)} uses:[22]
+
+ // CHECK-START: void Main.loop4(boolean) liveness (after)
+ // CHECK-NOT: Goto liveness:20
+ public static void loop4(boolean incoming) {
+ // 'incoming' has no loop use, so should not have back edge uses.
+ System.out.println(incoming);
+ while (System.currentTimeMillis() != 42) {
+ while (Runtime.getRuntime() != null) {}
+ }
+ }
+
+ // CHECK-START: void Main.loop5(boolean) liveness (after)
+ // CHECK: ParameterValue liveness:2 ranges:{[2,50)} uses:[33,42,46,50]
+ // CHECK: Goto liveness:44
+ // CHECK: Goto liveness:48
+ public static void loop5(boolean incoming) {
+ // 'incoming' must have a use at both back edges.
+ while (Runtime.getRuntime() != null) {
+ while (incoming) {
+ System.out.println(incoming);
+ }
+ }
+ }
+
+ // CHECK-START: void Main.loop6(boolean) liveness (after)
+ // CHECK: ParameterValue liveness:2 ranges:{[2,46)} uses:[24,46]
+ // CHECK: Goto liveness:44
+
+ // CHECK-START: void Main.loop6(boolean) liveness (after)
+ // CHECK-NOT: Goto liveness:22
+ public static void loop6(boolean incoming) {
+ // 'incoming' must have a use only at the first loop's back edge.
+ while (true) {
+ System.out.println(incoming);
+ while (Runtime.getRuntime() != null) {}
+ }
+ }
+
+ // CHECK-START: void Main.loop7(boolean) liveness (after)
+ // CHECK: ParameterValue liveness:2 ranges:{[2,50)} uses:[32,41,46,50]
+ // CHECK: Goto liveness:44
+ // CHECK: Goto liveness:48
+ public static void loop7(boolean incoming) {
+ // 'incoming' must have a use at both back edges.
+ while (Runtime.getRuntime() != null) {
+ System.out.println(incoming);
+ while (incoming) {}
+ }
+ }
+
+ // CHECK-START: void Main.loop8() liveness (after)
+ // CHECK: StaticFieldGet liveness:12 ranges:{[12,44)} uses:[35,40,44]
+ // CHECK: Goto liveness:38
+ // CHECK: Goto liveness:42
+ public static void loop8() {
+ // 'incoming' must have a use at both back edges.
+ boolean incoming = field;
+ while (Runtime.getRuntime() != null) {
+ while (incoming) {}
+ }
+ }
+
+ // CHECK-START: void Main.loop9() liveness (after)
+ // CHECK: StaticFieldGet liveness:22 ranges:{[22,36)} uses:[31,36]
+ // CHECK: Goto liveness:38
+ public static void loop9() {
+ while (Runtime.getRuntime() != null) {
+ // 'incoming' must only have a use in the inner loop.
+ boolean incoming = field;
+ while (incoming) {}
+ }
+ }
+
+ public static void main(String[] args) {
+ }
+
+ static boolean field;
+}
diff --git a/test/483-dce-block/expected.txt b/test/483-dce-block/expected.txt
new file mode 100644
index 0000000..ef48625
--- /dev/null
+++ b/test/483-dce-block/expected.txt
@@ -0,0 +1 @@
+class Main
diff --git a/test/483-dce-block/info.txt b/test/483-dce-block/info.txt
new file mode 100644
index 0000000..3db88ab
--- /dev/null
+++ b/test/483-dce-block/info.txt
@@ -0,0 +1,2 @@
+Regression test for the optimizing compiler, which used to
+crash while compiling the `foo` method.
diff --git a/test/483-dce-block/src/Main.java b/test/483-dce-block/src/Main.java
new file mode 100644
index 0000000..2f66a74
--- /dev/null
+++ b/test/483-dce-block/src/Main.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void foo(Object o, int a) {
+ Object result = null;
+ if (o instanceof Main) {
+ // The compiler optimizes the type of `o` by introducing
+ // a `HBoundType` in this block.
+ while (a != 3) {
+ if (a == 2) {
+ a++;
+ result = o;
+ continue;
+ } else if (willInline()) {
+ // This block will be detected as dead after inlining.
+ result = new Object();
+ continue;
+ }
+ result = new Object();
+ }
+ // The compiler produces a phi at the back edge for `result`.
+ // Before dead block elimination, the phi has three inputs:
+ // result = (new Object(), new Object(), HBoundType)
+ //
+ // After dead block elimination, the phi has now two inputs:
+ // result = (new Object(), HBoundType)
+ //
+ // Our internal data structure for linking users and inputs expects
+ // the input index stored in that data structure to be the index
+ // in the inputs array. So before dead block elimination the index
+ // of the `HBoundType` input would be 2. Dead block elimination
+ // must update that index to 1.
+ }
+ System.out.println(result.getClass());
+ }
+
+ public static boolean willInline() {
+ return false;
+ }
+
+ public static void main(String[] args) {
+ foo(new Main(), 2);
+ }
+}
diff --git a/test/484-checker-register-hints/expected.txt b/test/484-checker-register-hints/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/484-checker-register-hints/expected.txt
diff --git a/test/484-checker-register-hints/info.txt b/test/484-checker-register-hints/info.txt
new file mode 100644
index 0000000..8923680
--- /dev/null
+++ b/test/484-checker-register-hints/info.txt
@@ -0,0 +1,4 @@
+Checks that the register allocator does not punish other
+blocks because one block forced spilling. The block that
+forces the spilling should restore the registers at the merge
+point.
diff --git a/test/484-checker-register-hints/src/Main.java b/test/484-checker-register-hints/src/Main.java
new file mode 100644
index 0000000..33952d9
--- /dev/null
+++ b/test/484-checker-register-hints/src/Main.java
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ // CHECK-START: void Main.test1(boolean, int, int, int, int, int) register (after)
+ // CHECK: name "B0"
+ // CHECK-NOT: ParallelMove
+ // CHECK: name "B1"
+ // CHECK-NOT: end_block
+ // CHECK: If
+ // CHECK-NOT: ParallelMove
+ // CHECK: name "B3"
+ // CHECK-NOT: end_block
+ // CHECK: ArraySet
+ // We could check here that there is a parallel move, but it's only valid
+ // for some architectures (for example x86), as other architectures may
+ // not need a move at all.
+ // CHECK: end_block
+ // CHECK-NOT: ParallelMove
+
+ public static void test1(boolean z, int a, int b, int c, int d, int m) {
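+ // e, f and g are live across the If below. Only the else block forces
+ // spilling; at the merge point the allocator should restore them, so the
+ // code after the If needs no ParallelMove (see the CHECK lines above).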
+ int e = live1;
+ int f = live2;
+ int g = live3;
+ if (z) {
+ } else {
+ // Create enough live instructions to force spilling on x86.
+ int h = live4;
+ int i = live5;
+ array[2] = e + i + h;
+ array[3] = f + i + h;
+ array[4] = g + i + h;
+ array[0] = h;
+ array[1] = i + h;
+
+ }
+ live1 = e + f + g;
+ }
+
+ // CHECK-START: void Main.test2(boolean, int, int, int, int, int) register (after)
+ // CHECK: name "B0"
+ // CHECK-NOT: ParallelMove
+ // CHECK: name "B1"
+ // CHECK-NOT: end_block
+ // CHECK: If
+ // CHECK-NOT: ParallelMove
+ // CHECK: name "B3"
+ // CHECK-NOT: end_block
+ // CHECK: ArraySet
+ // We could check here that there is a parallel move, but it's only valid
+ // for some architectures (for example x86), as other architectures may
+ // not need a move at all.
+ // CHECK: end_block
+ // CHECK-NOT: ParallelMove
+
+ public static void test2(boolean z, int a, int b, int c, int d, int m) {
+ int e = live1;
+ int f = live2;
+ int g = live3;
+ if (z) {
+ if (y) {
+ int h = live4;
+ int i = live5;
+ array[2] = e + i + h;
+ array[3] = f + i + h;
+ array[4] = g + i + h;
+ array[0] = h;
+ array[1] = i + h;
+ }
+ }
+ live1 = e + f + g;
+ }
+
+ // CHECK-START: void Main.test3(boolean, int, int, int, int, int) register (after)
+ // CHECK: name "B0"
+ // CHECK-NOT: ParallelMove
+ // CHECK: name "B1"
+ // CHECK-NOT: end_block
+ // CHECK: If
+ // CHECK-NOT: ParallelMove
+ // CHECK: name "B6"
+ // CHECK-NOT: end_block
+ // CHECK: ArraySet
+ // We could check here that there is a parallel move, but it's only valid
+ // for some architectures (for example x86), as other architectures may
+ // not need a move at all.
+ // CHECK: end_block
+ // CHECK-NOT: ParallelMove
+
+ public static void test3(boolean z, int a, int b, int c, int d, int m) {
+ // Same version as test2, but with branches reversed, to ensure
+ // whatever linear order is computed, we will get the same results.
+ int e = live1;
+ int f = live2;
+ int g = live3;
+ if (z) {
+ live1 = e;
+ } else {
+ if (y) {
+ live1 = e;
+ } else {
+ int h = live4;
+ int i = live5;
+ array[2] = e + i + h;
+ array[3] = f + i + h;
+ array[4] = g + i + h;
+ array[0] = h;
+ array[1] = i + h;
+ }
+ }
+ live1 = e + f + g;
+ }
+
+ public static void main(String[] args) {
+ }
+
+ static boolean y;
+ static int live1;
+ static int live2;
+ static int live3;
+ static int live4;
+ static int live5;
+ static int[] array;
+}
diff --git a/test/485-checker-dce-loop-update/expected.txt b/test/485-checker-dce-loop-update/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/485-checker-dce-loop-update/expected.txt
diff --git a/test/485-checker-dce-loop-update/info.txt b/test/485-checker-dce-loop-update/info.txt
new file mode 100644
index 0000000..fccf10c
--- /dev/null
+++ b/test/485-checker-dce-loop-update/info.txt
@@ -0,0 +1,2 @@
+Tests that loop information is updated after DCE, since block removal can disconnect
+loops, leaving live blocks outside the loop they previously belonged to.
\ No newline at end of file
diff --git a/test/485-checker-dce-loop-update/smali/TestCase.smali b/test/485-checker-dce-loop-update/smali/TestCase.smali
new file mode 100644
index 0000000..487a5df
--- /dev/null
+++ b/test/485-checker-dce-loop-update/smali/TestCase.smali
@@ -0,0 +1,274 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTestCase;
+
+.super Ljava/lang/Object;
+
+.method public static $inline$True()Z
+ .registers 1
+ const/4 v0, 1
+ return v0
+.end method
+
+
+# CHECK-START: int TestCase.testSingleExit(int, boolean) dead_code_elimination_final (before)
+# CHECK-DAG: <<ArgX:i\d+>> ParameterValue
+# CHECK-DAG: <<ArgY:z\d+>> ParameterValue
+# CHECK-DAG: <<Cst1:i\d+>> IntConstant 1
+# CHECK-DAG: <<Cst5:i\d+>> IntConstant 5
+# CHECK-DAG: <<Cst7:i\d+>> IntConstant 7
+# CHECK-DAG: <<PhiX:i\d+>> Phi [<<ArgX>>,<<Add5:i\d+>>,<<Add7:i\d+>>] loop:<<HeaderY:B\d+>>
+# CHECK-DAG: If [<<ArgY>>] loop:<<HeaderY>>
+# CHECK-DAG: If [<<Cst1>>] loop:<<HeaderY>>
+# CHECK-DAG: <<Add5>> Add [<<PhiX>>,<<Cst5>>] loop:<<HeaderY>>
+# CHECK-DAG: <<Add7>> Add [<<PhiX>>,<<Cst7>>] loop:<<HeaderY>>
+# CHECK-DAG: Return [<<PhiX>>] loop:none
+
+# CHECK-START: int TestCase.testSingleExit(int, boolean) dead_code_elimination_final (after)
+# CHECK-DAG: <<ArgX:i\d+>> ParameterValue
+# CHECK-DAG: <<ArgY:z\d+>> ParameterValue
+# CHECK-DAG: <<Cst7:i\d+>> IntConstant 7
+# CHECK-DAG: <<PhiX:i\d+>> Phi [<<ArgX>>,<<AddX:i\d+>>] loop:<<HeaderY:B\d+>>
+# CHECK-DAG: If [<<ArgY>>] loop:<<HeaderY>>
+# CHECK-DAG: <<AddX>> Add [<<PhiX>>,<<Cst7>>] loop:<<HeaderY>>
+# CHECK-DAG: Return [<<PhiX>>] loop:none
+
+.method public static testSingleExit(IZ)I
+ .registers 3
+
+ # p0 = int X
+ # p1 = boolean Y
+ # v0 = true
+
+ invoke-static {}, LTestCase;->$inline$True()Z
+ move-result v0
+
+ :loop_start
+ if-eqz p1, :loop_body # cannot be determined statically
+ if-nez v0, :loop_end # will always exit
+
+ # Dead block
+ add-int/lit8 p0, p0, 5
+ goto :loop_start
+
+ # Live block
+ :loop_body
+ add-int/lit8 p0, p0, 7
+ goto :loop_start
+
+ :loop_end
+ return p0
+.end method
+
+
+# CHECK-START: int TestCase.testMultipleExits(int, boolean, boolean) dead_code_elimination_final (before)
+# CHECK-DAG: <<ArgX:i\d+>> ParameterValue
+# CHECK-DAG: <<ArgY:z\d+>> ParameterValue
+# CHECK-DAG: <<ArgZ:z\d+>> ParameterValue
+# CHECK-DAG: <<Cst1:i\d+>> IntConstant 1
+# CHECK-DAG: <<Cst5:i\d+>> IntConstant 5
+# CHECK-DAG: <<Cst7:i\d+>> IntConstant 7
+# CHECK-DAG: <<PhiX:i\d+>> Phi [<<ArgX>>,<<Add5:i\d+>>,<<Add7:i\d+>>] loop:<<HeaderY:B\d+>>
+# CHECK-DAG: If [<<ArgY>>] loop:<<HeaderY>>
+# CHECK-DAG: If [<<ArgZ>>] loop:<<HeaderY>>
+# CHECK-DAG: If [<<Cst1>>] loop:<<HeaderY>>
+# CHECK-DAG: <<Add5>> Add [<<PhiX>>,<<Cst5>>] loop:<<HeaderY>>
+# CHECK-DAG: <<Add7>> Add [<<PhiX>>,<<Cst7>>] loop:<<HeaderY>>
+# CHECK-DAG: Return [<<PhiX>>] loop:none
+
+# CHECK-START: int TestCase.testMultipleExits(int, boolean, boolean) dead_code_elimination_final (after)
+# CHECK-DAG: <<ArgX:i\d+>> ParameterValue
+# CHECK-DAG: <<ArgY:z\d+>> ParameterValue
+# CHECK-DAG: <<ArgZ:z\d+>> ParameterValue
+# CHECK-DAG: <<Cst7:i\d+>> IntConstant 7
+# CHECK-DAG: <<PhiX:i\d+>> Phi [<<ArgX>>,<<Add7:i\d+>>] loop:<<HeaderY:B\d+>>
+# CHECK-DAG: If [<<ArgY>>] loop:<<HeaderY>>
+# CHECK-DAG: <<Add7>> Add [<<PhiX>>,<<Cst7>>] loop:<<HeaderY>>
+# CHECK-DAG: If [<<ArgZ>>] loop:none
+# CHECK-DAG: Return [<<PhiX>>] loop:none
+
+.method public static testMultipleExits(IZZ)I
+ .registers 4
+
+ # p0 = int X
+ # p1 = boolean Y
+ # p2 = boolean Z
+ # v0 = true
+
+ invoke-static {}, LTestCase;->$inline$True()Z
+ move-result v0
+
+ :loop_start
+ if-eqz p1, :loop_body # cannot be determined statically
+ if-nez p2, :loop_end # may exit
+ if-nez v0, :loop_end # will always exit
+
+ # Dead block
+ add-int/lit8 p0, p0, 5
+ goto :loop_start
+
+ # Live block
+ :loop_body
+ add-int/lit8 p0, p0, 7
+ goto :loop_start
+
+ :loop_end
+ return p0
+.end method
+
+
+# CHECK-START: int TestCase.testExitPredecessors(int, boolean, boolean) dead_code_elimination_final (before)
+# CHECK-DAG: <<ArgX:i\d+>> ParameterValue
+# CHECK-DAG: <<ArgY:z\d+>> ParameterValue
+# CHECK-DAG: <<ArgZ:z\d+>> ParameterValue
+# CHECK-DAG: <<Cst1:i\d+>> IntConstant 1
+# CHECK-DAG: <<Cst5:i\d+>> IntConstant 5
+# CHECK-DAG: <<Cst7:i\d+>> IntConstant 7
+# CHECK-DAG: <<Cst9:i\d+>> IntConstant 9
+# CHECK-DAG: <<PhiX1:i\d+>> Phi [<<ArgX>>,<<Add5:i\d+>>,<<Add7:i\d+>>] loop:<<HeaderY:B\d+>>
+# CHECK-DAG: If [<<ArgY>>] loop:<<HeaderY>>
+# CHECK-DAG: If [<<ArgZ>>] loop:<<HeaderY>>
+# CHECK-DAG: <<Mul9:i\d+>> Mul [<<PhiX1>>,<<Cst9>>] loop:<<HeaderY>>
+# CHECK-DAG: <<PhiX2:i\d+>> Phi [<<Mul9>>,<<PhiX1>>] loop:<<HeaderY>>
+# CHECK-DAG: If [<<Cst1>>] loop:<<HeaderY>>
+# CHECK-DAG: <<Add5>> Add [<<PhiX2>>,<<Cst5>>] loop:<<HeaderY>>
+# CHECK-DAG: <<Add7>> Add [<<PhiX1>>,<<Cst7>>] loop:<<HeaderY>>
+# CHECK-DAG: Return [<<PhiX2>>] loop:none
+
+# CHECK-START: int TestCase.testExitPredecessors(int, boolean, boolean) dead_code_elimination_final (after)
+# CHECK-DAG: <<ArgX:i\d+>> ParameterValue
+# CHECK-DAG: <<ArgY:z\d+>> ParameterValue
+# CHECK-DAG: <<ArgZ:z\d+>> ParameterValue
+# CHECK-DAG: <<Cst7:i\d+>> IntConstant 7
+# CHECK-DAG: <<Cst9:i\d+>> IntConstant 9
+# CHECK-DAG: <<PhiX1:i\d+>> Phi [<<ArgX>>,<<Add7:i\d+>>] loop:<<HeaderY:B\d+>>
+# CHECK-DAG: If [<<ArgY>>] loop:<<HeaderY>>
+# CHECK-DAG: <<Add7>> Add [<<PhiX1>>,<<Cst7>>] loop:<<HeaderY>>
+# CHECK-DAG: If [<<ArgZ>>] loop:none
+# CHECK-DAG: <<Mul9:i\d+>> Mul [<<PhiX1>>,<<Cst9>>] loop:none
+# CHECK-DAG: <<PhiX2:i\d+>> Phi [<<Mul9>>,<<PhiX1>>] loop:none
+# CHECK-DAG: Return [<<PhiX2>>] loop:none
+
+.method public static testExitPredecessors(IZZ)I
+ .registers 4
+
+ # p0 = int X
+ # p1 = boolean Y
+ # p2 = boolean Z
+ # v0 = true
+
+ invoke-static {}, LTestCase;->$inline$True()Z
+ move-result v0
+
+ :loop_start
+ if-eqz p1, :loop_body # cannot be determined statically
+
+ # Additional logic which will end up outside the loop
+ if-eqz p2, :skip_if
+ mul-int/lit8 p0, p0, 9
+ :skip_if
+
+ if-nez v0, :loop_end # will always take the branch
+
+ # Dead block
+ add-int/lit8 p0, p0, 5
+ goto :loop_start
+
+ # Live block
+ :loop_body
+ add-int/lit8 p0, p0, 7
+ goto :loop_start
+
+ :loop_end
+ return p0
+.end method
+
+
+# CHECK-START: int TestCase.testInnerLoop(int, boolean, boolean) dead_code_elimination_final (before)
+# CHECK-DAG: <<ArgX:i\d+>> ParameterValue
+# CHECK-DAG: <<ArgY:z\d+>> ParameterValue
+# CHECK-DAG: <<ArgZ:z\d+>> ParameterValue
+# CHECK-DAG: <<Cst0:i\d+>> IntConstant 0
+# CHECK-DAG: <<Cst1:i\d+>> IntConstant 1
+# CHECK-DAG: <<Cst5:i\d+>> IntConstant 5
+# CHECK-DAG: <<Cst7:i\d+>> IntConstant 7
+#
+# CHECK-DAG: <<PhiX:i\d+>> Phi [<<ArgX>>,<<Add5:i\d+>>,<<Add7:i\d+>>] loop:<<HeaderY:B\d+>>
+# CHECK-DAG: <<PhiZ1:i\d+>> Phi [<<ArgZ>>,<<XorZ:i\d+>>,<<PhiZ1>>] loop:<<HeaderY>>
+# CHECK-DAG: If [<<ArgY>>] loop:<<HeaderY>>
+#
+# ### Inner loop ###
+# CHECK-DAG: <<PhiZ2:i\d+>> Phi [<<PhiZ1>>,<<XorZ>>] loop:<<HeaderZ:B\d+>>
+# CHECK-DAG: <<XorZ>> Xor [<<PhiZ2>>,<<Cst1>>] loop:<<HeaderZ>>
+# CHECK-DAG: <<CondZ:z\d+>> Equal [<<XorZ>>,<<Cst0>>] loop:<<HeaderZ>>
+# CHECK-DAG: If [<<CondZ>>] loop:<<HeaderZ>>
+#
+# CHECK-DAG: <<Add5>> Add [<<PhiX>>,<<Cst5>>] loop:<<HeaderY>>
+# CHECK-DAG: <<Add7>> Add [<<PhiX>>,<<Cst7>>] loop:<<HeaderY>>
+# CHECK-DAG: Return [<<PhiX>>] loop:none
+
+# CHECK-START: int TestCase.testInnerLoop(int, boolean, boolean) dead_code_elimination_final (after)
+# CHECK-DAG: <<ArgX:i\d+>> ParameterValue
+# CHECK-DAG: <<ArgY:z\d+>> ParameterValue
+# CHECK-DAG: <<ArgZ:z\d+>> ParameterValue
+# CHECK-DAG: <<Cst0:i\d+>> IntConstant 0
+# CHECK-DAG: <<Cst1:i\d+>> IntConstant 1
+# CHECK-DAG: <<Cst7:i\d+>> IntConstant 7
+#
+# CHECK-DAG: <<PhiX:i\d+>> Phi [<<ArgX>>,<<Add7:i\d+>>] loop:<<HeaderY:B\d+>>
+# CHECK-DAG: If [<<ArgY>>] loop:<<HeaderY>>
+# CHECK-DAG: <<Add7>> Add [<<PhiX>>,<<Cst7>>] loop:<<HeaderY>>
+#
+# ### Inner loop ###
+# CHECK-DAG: <<PhiZ:i\d+>> Phi [<<ArgZ>>,<<XorZ:i\d+>>] loop:<<HeaderZ:B\d+>>
+# CHECK-DAG: <<XorZ>> Xor [<<PhiZ>>,<<Cst1>>] loop:<<HeaderZ>>
+# CHECK-DAG: <<CondZ:z\d+>> Equal [<<XorZ>>,<<Cst0>>] loop:<<HeaderZ>>
+# CHECK-DAG: If [<<CondZ>>] loop:<<HeaderZ>>
+#
+# CHECK-DAG: Return [<<PhiX>>] loop:none
+
+.method public static testInnerLoop(IZZ)I
+ .registers 4
+
+ # p0 = int X
+ # p1 = boolean Y
+ # p2 = boolean Z
+ # v0 = true
+
+ invoke-static {}, LTestCase;->$inline$True()Z
+ move-result v0
+
+ :loop_start
+ if-eqz p1, :loop_body # cannot be determined statically
+
+ # Inner loop which will end up outside its parent
+ :inner_loop_start
+ xor-int/lit8 p2, p2, 1
+ if-eqz p2, :inner_loop_start
+
+ if-nez v0, :loop_end # will always take the branch
+
+ # Dead block
+ add-int/lit8 p0, p0, 5
+ goto :loop_start
+
+ # Live block
+ :loop_body
+ add-int/lit8 p0, p0, 7
+ goto :loop_start
+
+ :loop_end
+ return p0
+.end method
diff --git a/test/485-checker-dce-loop-update/src/Main.java b/test/485-checker-dce-loop-update/src/Main.java
new file mode 100644
index 0000000..6bfe08b
--- /dev/null
+++ b/test/485-checker-dce-loop-update/src/Main.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+
+ // Workaround for b/18051191.
+ class InnerClass {}
+
+ public static void main(String[] args) throws Exception {
+ return;
+ }
+}
diff --git a/test/486-checker-must-do-null-check/expected.txt b/test/486-checker-must-do-null-check/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/486-checker-must-do-null-check/expected.txt
diff --git a/test/486-checker-must-do-null-check/info.txt b/test/486-checker-must-do-null-check/info.txt
new file mode 100644
index 0000000..494ff1c
--- /dev/null
+++ b/test/486-checker-must-do-null-check/info.txt
@@ -0,0 +1 @@
+Verifies MustDoNullCheck() on InstanceOf and CheckCast
diff --git a/test/486-checker-must-do-null-check/src/Main.java b/test/486-checker-must-do-null-check/src/Main.java
new file mode 100644
index 0000000..f285566
--- /dev/null
+++ b/test/486-checker-must-do-null-check/src/Main.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ // CHECK-START: void Main.InstanceOfPreChecked(java.lang.Object) instruction_simplifier (after)
+ // CHECK: InstanceOf must_do_null_check:false
+ public void InstanceOfPreChecked(Object o) throws Exception {
+ o.toString();
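+ // The call above throws NPE when o is null, so o is known to be non-null
+ // here and the InstanceOf does not need its own null check.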
+ if (o instanceof Main) {
+ throw new Exception();
+ }
+ }
+
+ // CHECK-START: void Main.InstanceOf(java.lang.Object) instruction_simplifier (after)
+ // CHECK: InstanceOf must_do_null_check:true
+ public void InstanceOf(Object o) throws Exception {
+ if (o instanceof Main) {
+ throw new Exception();
+ }
+ }
+
+ // CHECK-START: void Main.CheckCastPreChecked(java.lang.Object) instruction_simplifier (after)
+ // CHECK: CheckCast must_do_null_check:false
+ public void CheckCastPreChecked(Object o) {
+ o.toString();
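+ // As above, o is known non-null after the call, so the CheckCast can
+ // skip the null check.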
+ ((Main)o).Bar();
+ }
+
+ // CHECK-START: void Main.CheckCast(java.lang.Object) instruction_simplifier (after)
+ // CHECK: CheckCast must_do_null_check:true
+ public void CheckCast(Object o) {
+ ((Main)o).Bar();
+ }
+
+ void Bar() {throw new RuntimeException();}
+
+ public static void main(String[] sa) {
+ Main t = new Main();
+ }
+}
diff --git a/test/701-easy-div-rem/build b/test/701-easy-div-rem/build
new file mode 100644
index 0000000..1dc8452
--- /dev/null
+++ b/test/701-easy-div-rem/build
@@ -0,0 +1,28 @@
+#!/bin/bash
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Stop if something fails.
+set -e
+
+# Write out the source file.
+mkdir src
+python ./genMain.py
+
+# Increase the file size limit for classes.lst, as the machine-generated
+# source file contains a lot of methods and is quite large.
+ulimit -S 4096
+
+./default-build
diff --git a/test/701-easy-div-rem/genMain.py b/test/701-easy-div-rem/genMain.py
index 80eac34..75eee17 100644
--- a/test/701-easy-div-rem/genMain.py
+++ b/test/701-easy-div-rem/genMain.py
@@ -12,15 +12,28 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+upper_bound_int_pow2 = 31
+upper_bound_long_pow2 = 63
+upper_bound_constant = 100
all_tests = [
({'@INT@': 'int', '@SUFFIX@':''},
- [('CheckDiv', 'idiv_by_pow2_', [2**i for i in range(31)]),
- ('CheckDiv', 'idiv_by_small_', [i for i in range(3, 16) if i not in (4, 8)]),
- ('CheckRem', 'irem_by_pow2_', [2**i for i in range(31)])]),
+ [('CheckDiv', 'idiv_by_pow2_', [2**i for i in range(upper_bound_int_pow2)]),
+ ('CheckDiv', 'idiv_by_pow2_neg_', [-2**i for i in range(upper_bound_int_pow2)]),
+ ('CheckDiv', 'idiv_by_constant_', [i for i in range(1, upper_bound_constant)]),
+ ('CheckDiv', 'idiv_by_constant_neg_', [-i for i in range(1, upper_bound_constant)]),
+ ('CheckRem', 'irem_by_pow2_', [2**i for i in range(upper_bound_int_pow2)]),
+ ('CheckRem', 'irem_by_pow2_neg_', [-2**i for i in range(upper_bound_int_pow2)]),
+ ('CheckRem', 'irem_by_constant_', [i for i in range(1, upper_bound_constant)]),
+ ('CheckRem', 'irem_by_constant_neg_', [-i for i in range(1, upper_bound_constant)])]),
({'@INT@': 'long', '@SUFFIX@': 'l'},
- [('CheckDiv', 'ldiv_by_pow2_', [2**i for i in range(63)]),
- ('CheckDiv', 'ldiv_by_small_', [i for i in range(3, 16) if i not in (4, 8)]),
- ('CheckRem', 'lrem_by_pow2_', [2**i for i in range(63)])])
+ [('CheckDiv', 'ldiv_by_pow2_', [2**i for i in range(upper_bound_long_pow2)]),
+ ('CheckDiv', 'ldiv_by_pow2_neg_', [-2**i for i in range(upper_bound_long_pow2)]),
+ ('CheckDiv', 'ldiv_by_constant_', [i for i in range(1, upper_bound_constant)]),
+ ('CheckDiv', 'ldiv_by_constant_neg_', [-i for i in range(1, upper_bound_constant)]),
+ ('CheckRem', 'lrem_by_pow2_', [2**i for i in range(upper_bound_long_pow2)]),
+ ('CheckRem', 'lrem_by_pow2_neg_', [-2**i for i in range(upper_bound_long_pow2)]),
+ ('CheckRem', 'lrem_by_constant_', [i for i in range(1, upper_bound_constant)]),
+ ('CheckRem', 'lrem_by_constant_neg_', [-i for i in range(1, upper_bound_constant)])])
]
def subst_vars(variables, text):
diff --git a/test/701-easy-div-rem/src/Main.java b/test/701-easy-div-rem/src/Main.java
deleted file mode 100644
index f995f61..0000000
--- a/test/701-easy-div-rem/src/Main.java
+++ /dev/null
@@ -1,529 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class Main {
- public static int num_errors = 0;
-
- public static void reportError(String message) {
- if (num_errors == 10) {
- System.out.println("Omitting other error messages...");
- } else if (num_errors < 10) {
- System.out.println(message);
- }
- num_errors += 1;
- }
-
- public static void intCheckDiv(String desc, int result, int dividend, int divisor) {
- int correct_result = dividend / divisor;
- if (result != correct_result) {
- reportError(desc + "(" + dividend + ") == " + result +
- " should be " + correct_result);
- }
- }
- public static void intCheckRem(String desc, int result, int dividend, int divisor) {
- int correct_result = dividend % divisor;
- if (result != correct_result) {
- reportError(desc + "(" + dividend + ") == " + result +
- " should be " + correct_result);
- }
- }
- public static void longCheckDiv(String desc, long result, long dividend, long divisor) {
- long correct_result = dividend / divisor;
- if (result != correct_result) {
- reportError(desc + "(" + dividend + ") == " + result +
- " should be " + correct_result);
- }
- }
- public static void longCheckRem(String desc, long result, long dividend, long divisor) {
- long correct_result = dividend % divisor;
- if (result != correct_result) {
- reportError(desc + "(" + dividend + ") == " + result +
- " should be " + correct_result);
- }
- }
-
- public static int idiv_by_pow2_0(int x) {return x / 1;}
- public static int idiv_by_pow2_1(int x) {return x / 2;}
- public static int idiv_by_pow2_2(int x) {return x / 4;}
- public static int idiv_by_pow2_3(int x) {return x / 8;}
- public static int idiv_by_pow2_4(int x) {return x / 16;}
- public static int idiv_by_pow2_5(int x) {return x / 32;}
- public static int idiv_by_pow2_6(int x) {return x / 64;}
- public static int idiv_by_pow2_7(int x) {return x / 128;}
- public static int idiv_by_pow2_8(int x) {return x / 256;}
- public static int idiv_by_pow2_9(int x) {return x / 512;}
- public static int idiv_by_pow2_10(int x) {return x / 1024;}
- public static int idiv_by_pow2_11(int x) {return x / 2048;}
- public static int idiv_by_pow2_12(int x) {return x / 4096;}
- public static int idiv_by_pow2_13(int x) {return x / 8192;}
- public static int idiv_by_pow2_14(int x) {return x / 16384;}
- public static int idiv_by_pow2_15(int x) {return x / 32768;}
- public static int idiv_by_pow2_16(int x) {return x / 65536;}
- public static int idiv_by_pow2_17(int x) {return x / 131072;}
- public static int idiv_by_pow2_18(int x) {return x / 262144;}
- public static int idiv_by_pow2_19(int x) {return x / 524288;}
- public static int idiv_by_pow2_20(int x) {return x / 1048576;}
- public static int idiv_by_pow2_21(int x) {return x / 2097152;}
- public static int idiv_by_pow2_22(int x) {return x / 4194304;}
- public static int idiv_by_pow2_23(int x) {return x / 8388608;}
- public static int idiv_by_pow2_24(int x) {return x / 16777216;}
- public static int idiv_by_pow2_25(int x) {return x / 33554432;}
- public static int idiv_by_pow2_26(int x) {return x / 67108864;}
- public static int idiv_by_pow2_27(int x) {return x / 134217728;}
- public static int idiv_by_pow2_28(int x) {return x / 268435456;}
- public static int idiv_by_pow2_29(int x) {return x / 536870912;}
- public static int idiv_by_pow2_30(int x) {return x / 1073741824;}
- public static int idiv_by_small_0(int x) {return x / 3;}
- public static int idiv_by_small_1(int x) {return x / 5;}
- public static int idiv_by_small_2(int x) {return x / 6;}
- public static int idiv_by_small_3(int x) {return x / 7;}
- public static int idiv_by_small_4(int x) {return x / 9;}
- public static int idiv_by_small_5(int x) {return x / 10;}
- public static int idiv_by_small_6(int x) {return x / 11;}
- public static int idiv_by_small_7(int x) {return x / 12;}
- public static int idiv_by_small_8(int x) {return x / 13;}
- public static int idiv_by_small_9(int x) {return x / 14;}
- public static int idiv_by_small_10(int x) {return x / 15;}
- public static int irem_by_pow2_0(int x) {return x % 1;}
- public static int irem_by_pow2_1(int x) {return x % 2;}
- public static int irem_by_pow2_2(int x) {return x % 4;}
- public static int irem_by_pow2_3(int x) {return x % 8;}
- public static int irem_by_pow2_4(int x) {return x % 16;}
- public static int irem_by_pow2_5(int x) {return x % 32;}
- public static int irem_by_pow2_6(int x) {return x % 64;}
- public static int irem_by_pow2_7(int x) {return x % 128;}
- public static int irem_by_pow2_8(int x) {return x % 256;}
- public static int irem_by_pow2_9(int x) {return x % 512;}
- public static int irem_by_pow2_10(int x) {return x % 1024;}
- public static int irem_by_pow2_11(int x) {return x % 2048;}
- public static int irem_by_pow2_12(int x) {return x % 4096;}
- public static int irem_by_pow2_13(int x) {return x % 8192;}
- public static int irem_by_pow2_14(int x) {return x % 16384;}
- public static int irem_by_pow2_15(int x) {return x % 32768;}
- public static int irem_by_pow2_16(int x) {return x % 65536;}
- public static int irem_by_pow2_17(int x) {return x % 131072;}
- public static int irem_by_pow2_18(int x) {return x % 262144;}
- public static int irem_by_pow2_19(int x) {return x % 524288;}
- public static int irem_by_pow2_20(int x) {return x % 1048576;}
- public static int irem_by_pow2_21(int x) {return x % 2097152;}
- public static int irem_by_pow2_22(int x) {return x % 4194304;}
- public static int irem_by_pow2_23(int x) {return x % 8388608;}
- public static int irem_by_pow2_24(int x) {return x % 16777216;}
- public static int irem_by_pow2_25(int x) {return x % 33554432;}
- public static int irem_by_pow2_26(int x) {return x % 67108864;}
- public static int irem_by_pow2_27(int x) {return x % 134217728;}
- public static int irem_by_pow2_28(int x) {return x % 268435456;}
- public static int irem_by_pow2_29(int x) {return x % 536870912;}
- public static int irem_by_pow2_30(int x) {return x % 1073741824;}
- public static long ldiv_by_pow2_0(long x) {return x / 1l;}
- public static long ldiv_by_pow2_1(long x) {return x / 2l;}
- public static long ldiv_by_pow2_2(long x) {return x / 4l;}
- public static long ldiv_by_pow2_3(long x) {return x / 8l;}
- public static long ldiv_by_pow2_4(long x) {return x / 16l;}
- public static long ldiv_by_pow2_5(long x) {return x / 32l;}
- public static long ldiv_by_pow2_6(long x) {return x / 64l;}
- public static long ldiv_by_pow2_7(long x) {return x / 128l;}
- public static long ldiv_by_pow2_8(long x) {return x / 256l;}
- public static long ldiv_by_pow2_9(long x) {return x / 512l;}
- public static long ldiv_by_pow2_10(long x) {return x / 1024l;}
- public static long ldiv_by_pow2_11(long x) {return x / 2048l;}
- public static long ldiv_by_pow2_12(long x) {return x / 4096l;}
- public static long ldiv_by_pow2_13(long x) {return x / 8192l;}
- public static long ldiv_by_pow2_14(long x) {return x / 16384l;}
- public static long ldiv_by_pow2_15(long x) {return x / 32768l;}
- public static long ldiv_by_pow2_16(long x) {return x / 65536l;}
- public static long ldiv_by_pow2_17(long x) {return x / 131072l;}
- public static long ldiv_by_pow2_18(long x) {return x / 262144l;}
- public static long ldiv_by_pow2_19(long x) {return x / 524288l;}
- public static long ldiv_by_pow2_20(long x) {return x / 1048576l;}
- public static long ldiv_by_pow2_21(long x) {return x / 2097152l;}
- public static long ldiv_by_pow2_22(long x) {return x / 4194304l;}
- public static long ldiv_by_pow2_23(long x) {return x / 8388608l;}
- public static long ldiv_by_pow2_24(long x) {return x / 16777216l;}
- public static long ldiv_by_pow2_25(long x) {return x / 33554432l;}
- public static long ldiv_by_pow2_26(long x) {return x / 67108864l;}
- public static long ldiv_by_pow2_27(long x) {return x / 134217728l;}
- public static long ldiv_by_pow2_28(long x) {return x / 268435456l;}
- public static long ldiv_by_pow2_29(long x) {return x / 536870912l;}
- public static long ldiv_by_pow2_30(long x) {return x / 1073741824l;}
- public static long ldiv_by_pow2_31(long x) {return x / 2147483648l;}
- public static long ldiv_by_pow2_32(long x) {return x / 4294967296l;}
- public static long ldiv_by_pow2_33(long x) {return x / 8589934592l;}
- public static long ldiv_by_pow2_34(long x) {return x / 17179869184l;}
- public static long ldiv_by_pow2_35(long x) {return x / 34359738368l;}
- public static long ldiv_by_pow2_36(long x) {return x / 68719476736l;}
- public static long ldiv_by_pow2_37(long x) {return x / 137438953472l;}
- public static long ldiv_by_pow2_38(long x) {return x / 274877906944l;}
- public static long ldiv_by_pow2_39(long x) {return x / 549755813888l;}
- public static long ldiv_by_pow2_40(long x) {return x / 1099511627776l;}
- public static long ldiv_by_pow2_41(long x) {return x / 2199023255552l;}
- public static long ldiv_by_pow2_42(long x) {return x / 4398046511104l;}
- public static long ldiv_by_pow2_43(long x) {return x / 8796093022208l;}
- public static long ldiv_by_pow2_44(long x) {return x / 17592186044416l;}
- public static long ldiv_by_pow2_45(long x) {return x / 35184372088832l;}
- public static long ldiv_by_pow2_46(long x) {return x / 70368744177664l;}
- public static long ldiv_by_pow2_47(long x) {return x / 140737488355328l;}
- public static long ldiv_by_pow2_48(long x) {return x / 281474976710656l;}
- public static long ldiv_by_pow2_49(long x) {return x / 562949953421312l;}
- public static long ldiv_by_pow2_50(long x) {return x / 1125899906842624l;}
- public static long ldiv_by_pow2_51(long x) {return x / 2251799813685248l;}
- public static long ldiv_by_pow2_52(long x) {return x / 4503599627370496l;}
- public static long ldiv_by_pow2_53(long x) {return x / 9007199254740992l;}
- public static long ldiv_by_pow2_54(long x) {return x / 18014398509481984l;}
- public static long ldiv_by_pow2_55(long x) {return x / 36028797018963968l;}
- public static long ldiv_by_pow2_56(long x) {return x / 72057594037927936l;}
- public static long ldiv_by_pow2_57(long x) {return x / 144115188075855872l;}
- public static long ldiv_by_pow2_58(long x) {return x / 288230376151711744l;}
- public static long ldiv_by_pow2_59(long x) {return x / 576460752303423488l;}
- public static long ldiv_by_pow2_60(long x) {return x / 1152921504606846976l;}
- public static long ldiv_by_pow2_61(long x) {return x / 2305843009213693952l;}
- public static long ldiv_by_pow2_62(long x) {return x / 4611686018427387904l;}
- public static long ldiv_by_small_0(long x) {return x / 3l;}
- public static long ldiv_by_small_1(long x) {return x / 5l;}
- public static long ldiv_by_small_2(long x) {return x / 6l;}
- public static long ldiv_by_small_3(long x) {return x / 7l;}
- public static long ldiv_by_small_4(long x) {return x / 9l;}
- public static long ldiv_by_small_5(long x) {return x / 10l;}
- public static long ldiv_by_small_6(long x) {return x / 11l;}
- public static long ldiv_by_small_7(long x) {return x / 12l;}
- public static long ldiv_by_small_8(long x) {return x / 13l;}
- public static long ldiv_by_small_9(long x) {return x / 14l;}
- public static long ldiv_by_small_10(long x) {return x / 15l;}
- public static long lrem_by_pow2_0(long x) {return x % 1l;}
- public static long lrem_by_pow2_1(long x) {return x % 2l;}
- public static long lrem_by_pow2_2(long x) {return x % 4l;}
- public static long lrem_by_pow2_3(long x) {return x % 8l;}
- public static long lrem_by_pow2_4(long x) {return x % 16l;}
- public static long lrem_by_pow2_5(long x) {return x % 32l;}
- public static long lrem_by_pow2_6(long x) {return x % 64l;}
- public static long lrem_by_pow2_7(long x) {return x % 128l;}
- public static long lrem_by_pow2_8(long x) {return x % 256l;}
- public static long lrem_by_pow2_9(long x) {return x % 512l;}
- public static long lrem_by_pow2_10(long x) {return x % 1024l;}
- public static long lrem_by_pow2_11(long x) {return x % 2048l;}
- public static long lrem_by_pow2_12(long x) {return x % 4096l;}
- public static long lrem_by_pow2_13(long x) {return x % 8192l;}
- public static long lrem_by_pow2_14(long x) {return x % 16384l;}
- public static long lrem_by_pow2_15(long x) {return x % 32768l;}
- public static long lrem_by_pow2_16(long x) {return x % 65536l;}
- public static long lrem_by_pow2_17(long x) {return x % 131072l;}
- public static long lrem_by_pow2_18(long x) {return x % 262144l;}
- public static long lrem_by_pow2_19(long x) {return x % 524288l;}
- public static long lrem_by_pow2_20(long x) {return x % 1048576l;}
- public static long lrem_by_pow2_21(long x) {return x % 2097152l;}
- public static long lrem_by_pow2_22(long x) {return x % 4194304l;}
- public static long lrem_by_pow2_23(long x) {return x % 8388608l;}
- public static long lrem_by_pow2_24(long x) {return x % 16777216l;}
- public static long lrem_by_pow2_25(long x) {return x % 33554432l;}
- public static long lrem_by_pow2_26(long x) {return x % 67108864l;}
- public static long lrem_by_pow2_27(long x) {return x % 134217728l;}
- public static long lrem_by_pow2_28(long x) {return x % 268435456l;}
- public static long lrem_by_pow2_29(long x) {return x % 536870912l;}
- public static long lrem_by_pow2_30(long x) {return x % 1073741824l;}
- public static long lrem_by_pow2_31(long x) {return x % 2147483648l;}
- public static long lrem_by_pow2_32(long x) {return x % 4294967296l;}
- public static long lrem_by_pow2_33(long x) {return x % 8589934592l;}
- public static long lrem_by_pow2_34(long x) {return x % 17179869184l;}
- public static long lrem_by_pow2_35(long x) {return x % 34359738368l;}
- public static long lrem_by_pow2_36(long x) {return x % 68719476736l;}
- public static long lrem_by_pow2_37(long x) {return x % 137438953472l;}
- public static long lrem_by_pow2_38(long x) {return x % 274877906944l;}
- public static long lrem_by_pow2_39(long x) {return x % 549755813888l;}
- public static long lrem_by_pow2_40(long x) {return x % 1099511627776l;}
- public static long lrem_by_pow2_41(long x) {return x % 2199023255552l;}
- public static long lrem_by_pow2_42(long x) {return x % 4398046511104l;}
- public static long lrem_by_pow2_43(long x) {return x % 8796093022208l;}
- public static long lrem_by_pow2_44(long x) {return x % 17592186044416l;}
- public static long lrem_by_pow2_45(long x) {return x % 35184372088832l;}
- public static long lrem_by_pow2_46(long x) {return x % 70368744177664l;}
- public static long lrem_by_pow2_47(long x) {return x % 140737488355328l;}
- public static long lrem_by_pow2_48(long x) {return x % 281474976710656l;}
- public static long lrem_by_pow2_49(long x) {return x % 562949953421312l;}
- public static long lrem_by_pow2_50(long x) {return x % 1125899906842624l;}
- public static long lrem_by_pow2_51(long x) {return x % 2251799813685248l;}
- public static long lrem_by_pow2_52(long x) {return x % 4503599627370496l;}
- public static long lrem_by_pow2_53(long x) {return x % 9007199254740992l;}
- public static long lrem_by_pow2_54(long x) {return x % 18014398509481984l;}
- public static long lrem_by_pow2_55(long x) {return x % 36028797018963968l;}
- public static long lrem_by_pow2_56(long x) {return x % 72057594037927936l;}
- public static long lrem_by_pow2_57(long x) {return x % 144115188075855872l;}
- public static long lrem_by_pow2_58(long x) {return x % 288230376151711744l;}
- public static long lrem_by_pow2_59(long x) {return x % 576460752303423488l;}
- public static long lrem_by_pow2_60(long x) {return x % 1152921504606846976l;}
- public static long lrem_by_pow2_61(long x) {return x % 2305843009213693952l;}
- public static long lrem_by_pow2_62(long x) {return x % 4611686018427387904l;}
-
- public static void intCheckAll(int x) {
- intCheckDiv("idiv_by_pow2_0", idiv_by_pow2_0(x), x, 1);
- intCheckDiv("idiv_by_pow2_1", idiv_by_pow2_1(x), x, 2);
- intCheckDiv("idiv_by_pow2_2", idiv_by_pow2_2(x), x, 4);
- intCheckDiv("idiv_by_pow2_3", idiv_by_pow2_3(x), x, 8);
- intCheckDiv("idiv_by_pow2_4", idiv_by_pow2_4(x), x, 16);
- intCheckDiv("idiv_by_pow2_5", idiv_by_pow2_5(x), x, 32);
- intCheckDiv("idiv_by_pow2_6", idiv_by_pow2_6(x), x, 64);
- intCheckDiv("idiv_by_pow2_7", idiv_by_pow2_7(x), x, 128);
- intCheckDiv("idiv_by_pow2_8", idiv_by_pow2_8(x), x, 256);
- intCheckDiv("idiv_by_pow2_9", idiv_by_pow2_9(x), x, 512);
- intCheckDiv("idiv_by_pow2_10", idiv_by_pow2_10(x), x, 1024);
- intCheckDiv("idiv_by_pow2_11", idiv_by_pow2_11(x), x, 2048);
- intCheckDiv("idiv_by_pow2_12", idiv_by_pow2_12(x), x, 4096);
- intCheckDiv("idiv_by_pow2_13", idiv_by_pow2_13(x), x, 8192);
- intCheckDiv("idiv_by_pow2_14", idiv_by_pow2_14(x), x, 16384);
- intCheckDiv("idiv_by_pow2_15", idiv_by_pow2_15(x), x, 32768);
- intCheckDiv("idiv_by_pow2_16", idiv_by_pow2_16(x), x, 65536);
- intCheckDiv("idiv_by_pow2_17", idiv_by_pow2_17(x), x, 131072);
- intCheckDiv("idiv_by_pow2_18", idiv_by_pow2_18(x), x, 262144);
- intCheckDiv("idiv_by_pow2_19", idiv_by_pow2_19(x), x, 524288);
- intCheckDiv("idiv_by_pow2_20", idiv_by_pow2_20(x), x, 1048576);
- intCheckDiv("idiv_by_pow2_21", idiv_by_pow2_21(x), x, 2097152);
- intCheckDiv("idiv_by_pow2_22", idiv_by_pow2_22(x), x, 4194304);
- intCheckDiv("idiv_by_pow2_23", idiv_by_pow2_23(x), x, 8388608);
- intCheckDiv("idiv_by_pow2_24", idiv_by_pow2_24(x), x, 16777216);
- intCheckDiv("idiv_by_pow2_25", idiv_by_pow2_25(x), x, 33554432);
- intCheckDiv("idiv_by_pow2_26", idiv_by_pow2_26(x), x, 67108864);
- intCheckDiv("idiv_by_pow2_27", idiv_by_pow2_27(x), x, 134217728);
- intCheckDiv("idiv_by_pow2_28", idiv_by_pow2_28(x), x, 268435456);
- intCheckDiv("idiv_by_pow2_29", idiv_by_pow2_29(x), x, 536870912);
- intCheckDiv("idiv_by_pow2_30", idiv_by_pow2_30(x), x, 1073741824);
- intCheckDiv("idiv_by_small_0", idiv_by_small_0(x), x, 3);
- intCheckDiv("idiv_by_small_1", idiv_by_small_1(x), x, 5);
- intCheckDiv("idiv_by_small_2", idiv_by_small_2(x), x, 6);
- intCheckDiv("idiv_by_small_3", idiv_by_small_3(x), x, 7);
- intCheckDiv("idiv_by_small_4", idiv_by_small_4(x), x, 9);
- intCheckDiv("idiv_by_small_5", idiv_by_small_5(x), x, 10);
- intCheckDiv("idiv_by_small_6", idiv_by_small_6(x), x, 11);
- intCheckDiv("idiv_by_small_7", idiv_by_small_7(x), x, 12);
- intCheckDiv("idiv_by_small_8", idiv_by_small_8(x), x, 13);
- intCheckDiv("idiv_by_small_9", idiv_by_small_9(x), x, 14);
- intCheckDiv("idiv_by_small_10", idiv_by_small_10(x), x, 15);
- intCheckRem("irem_by_pow2_0", irem_by_pow2_0(x), x, 1);
- intCheckRem("irem_by_pow2_1", irem_by_pow2_1(x), x, 2);
- intCheckRem("irem_by_pow2_2", irem_by_pow2_2(x), x, 4);
- intCheckRem("irem_by_pow2_3", irem_by_pow2_3(x), x, 8);
- intCheckRem("irem_by_pow2_4", irem_by_pow2_4(x), x, 16);
- intCheckRem("irem_by_pow2_5", irem_by_pow2_5(x), x, 32);
- intCheckRem("irem_by_pow2_6", irem_by_pow2_6(x), x, 64);
- intCheckRem("irem_by_pow2_7", irem_by_pow2_7(x), x, 128);
- intCheckRem("irem_by_pow2_8", irem_by_pow2_8(x), x, 256);
- intCheckRem("irem_by_pow2_9", irem_by_pow2_9(x), x, 512);
- intCheckRem("irem_by_pow2_10", irem_by_pow2_10(x), x, 1024);
- intCheckRem("irem_by_pow2_11", irem_by_pow2_11(x), x, 2048);
- intCheckRem("irem_by_pow2_12", irem_by_pow2_12(x), x, 4096);
- intCheckRem("irem_by_pow2_13", irem_by_pow2_13(x), x, 8192);
- intCheckRem("irem_by_pow2_14", irem_by_pow2_14(x), x, 16384);
- intCheckRem("irem_by_pow2_15", irem_by_pow2_15(x), x, 32768);
- intCheckRem("irem_by_pow2_16", irem_by_pow2_16(x), x, 65536);
- intCheckRem("irem_by_pow2_17", irem_by_pow2_17(x), x, 131072);
- intCheckRem("irem_by_pow2_18", irem_by_pow2_18(x), x, 262144);
- intCheckRem("irem_by_pow2_19", irem_by_pow2_19(x), x, 524288);
- intCheckRem("irem_by_pow2_20", irem_by_pow2_20(x), x, 1048576);
- intCheckRem("irem_by_pow2_21", irem_by_pow2_21(x), x, 2097152);
- intCheckRem("irem_by_pow2_22", irem_by_pow2_22(x), x, 4194304);
- intCheckRem("irem_by_pow2_23", irem_by_pow2_23(x), x, 8388608);
- intCheckRem("irem_by_pow2_24", irem_by_pow2_24(x), x, 16777216);
- intCheckRem("irem_by_pow2_25", irem_by_pow2_25(x), x, 33554432);
- intCheckRem("irem_by_pow2_26", irem_by_pow2_26(x), x, 67108864);
- intCheckRem("irem_by_pow2_27", irem_by_pow2_27(x), x, 134217728);
- intCheckRem("irem_by_pow2_28", irem_by_pow2_28(x), x, 268435456);
- intCheckRem("irem_by_pow2_29", irem_by_pow2_29(x), x, 536870912);
- intCheckRem("irem_by_pow2_30", irem_by_pow2_30(x), x, 1073741824);
- }
-
- public static void longCheckAll(long x) {
- longCheckDiv("ldiv_by_pow2_0", ldiv_by_pow2_0(x), x, 1l);
- longCheckDiv("ldiv_by_pow2_1", ldiv_by_pow2_1(x), x, 2l);
- longCheckDiv("ldiv_by_pow2_2", ldiv_by_pow2_2(x), x, 4l);
- longCheckDiv("ldiv_by_pow2_3", ldiv_by_pow2_3(x), x, 8l);
- longCheckDiv("ldiv_by_pow2_4", ldiv_by_pow2_4(x), x, 16l);
- longCheckDiv("ldiv_by_pow2_5", ldiv_by_pow2_5(x), x, 32l);
- longCheckDiv("ldiv_by_pow2_6", ldiv_by_pow2_6(x), x, 64l);
- longCheckDiv("ldiv_by_pow2_7", ldiv_by_pow2_7(x), x, 128l);
- longCheckDiv("ldiv_by_pow2_8", ldiv_by_pow2_8(x), x, 256l);
- longCheckDiv("ldiv_by_pow2_9", ldiv_by_pow2_9(x), x, 512l);
- longCheckDiv("ldiv_by_pow2_10", ldiv_by_pow2_10(x), x, 1024l);
- longCheckDiv("ldiv_by_pow2_11", ldiv_by_pow2_11(x), x, 2048l);
- longCheckDiv("ldiv_by_pow2_12", ldiv_by_pow2_12(x), x, 4096l);
- longCheckDiv("ldiv_by_pow2_13", ldiv_by_pow2_13(x), x, 8192l);
- longCheckDiv("ldiv_by_pow2_14", ldiv_by_pow2_14(x), x, 16384l);
- longCheckDiv("ldiv_by_pow2_15", ldiv_by_pow2_15(x), x, 32768l);
- longCheckDiv("ldiv_by_pow2_16", ldiv_by_pow2_16(x), x, 65536l);
- longCheckDiv("ldiv_by_pow2_17", ldiv_by_pow2_17(x), x, 131072l);
- longCheckDiv("ldiv_by_pow2_18", ldiv_by_pow2_18(x), x, 262144l);
- longCheckDiv("ldiv_by_pow2_19", ldiv_by_pow2_19(x), x, 524288l);
- longCheckDiv("ldiv_by_pow2_20", ldiv_by_pow2_20(x), x, 1048576l);
- longCheckDiv("ldiv_by_pow2_21", ldiv_by_pow2_21(x), x, 2097152l);
- longCheckDiv("ldiv_by_pow2_22", ldiv_by_pow2_22(x), x, 4194304l);
- longCheckDiv("ldiv_by_pow2_23", ldiv_by_pow2_23(x), x, 8388608l);
- longCheckDiv("ldiv_by_pow2_24", ldiv_by_pow2_24(x), x, 16777216l);
- longCheckDiv("ldiv_by_pow2_25", ldiv_by_pow2_25(x), x, 33554432l);
- longCheckDiv("ldiv_by_pow2_26", ldiv_by_pow2_26(x), x, 67108864l);
- longCheckDiv("ldiv_by_pow2_27", ldiv_by_pow2_27(x), x, 134217728l);
- longCheckDiv("ldiv_by_pow2_28", ldiv_by_pow2_28(x), x, 268435456l);
- longCheckDiv("ldiv_by_pow2_29", ldiv_by_pow2_29(x), x, 536870912l);
- longCheckDiv("ldiv_by_pow2_30", ldiv_by_pow2_30(x), x, 1073741824l);
- longCheckDiv("ldiv_by_pow2_31", ldiv_by_pow2_31(x), x, 2147483648l);
- longCheckDiv("ldiv_by_pow2_32", ldiv_by_pow2_32(x), x, 4294967296l);
- longCheckDiv("ldiv_by_pow2_33", ldiv_by_pow2_33(x), x, 8589934592l);
- longCheckDiv("ldiv_by_pow2_34", ldiv_by_pow2_34(x), x, 17179869184l);
- longCheckDiv("ldiv_by_pow2_35", ldiv_by_pow2_35(x), x, 34359738368l);
- longCheckDiv("ldiv_by_pow2_36", ldiv_by_pow2_36(x), x, 68719476736l);
- longCheckDiv("ldiv_by_pow2_37", ldiv_by_pow2_37(x), x, 137438953472l);
- longCheckDiv("ldiv_by_pow2_38", ldiv_by_pow2_38(x), x, 274877906944l);
- longCheckDiv("ldiv_by_pow2_39", ldiv_by_pow2_39(x), x, 549755813888l);
- longCheckDiv("ldiv_by_pow2_40", ldiv_by_pow2_40(x), x, 1099511627776l);
- longCheckDiv("ldiv_by_pow2_41", ldiv_by_pow2_41(x), x, 2199023255552l);
- longCheckDiv("ldiv_by_pow2_42", ldiv_by_pow2_42(x), x, 4398046511104l);
- longCheckDiv("ldiv_by_pow2_43", ldiv_by_pow2_43(x), x, 8796093022208l);
- longCheckDiv("ldiv_by_pow2_44", ldiv_by_pow2_44(x), x, 17592186044416l);
- longCheckDiv("ldiv_by_pow2_45", ldiv_by_pow2_45(x), x, 35184372088832l);
- longCheckDiv("ldiv_by_pow2_46", ldiv_by_pow2_46(x), x, 70368744177664l);
- longCheckDiv("ldiv_by_pow2_47", ldiv_by_pow2_47(x), x, 140737488355328l);
- longCheckDiv("ldiv_by_pow2_48", ldiv_by_pow2_48(x), x, 281474976710656l);
- longCheckDiv("ldiv_by_pow2_49", ldiv_by_pow2_49(x), x, 562949953421312l);
- longCheckDiv("ldiv_by_pow2_50", ldiv_by_pow2_50(x), x, 1125899906842624l);
- longCheckDiv("ldiv_by_pow2_51", ldiv_by_pow2_51(x), x, 2251799813685248l);
- longCheckDiv("ldiv_by_pow2_52", ldiv_by_pow2_52(x), x, 4503599627370496l);
- longCheckDiv("ldiv_by_pow2_53", ldiv_by_pow2_53(x), x, 9007199254740992l);
- longCheckDiv("ldiv_by_pow2_54", ldiv_by_pow2_54(x), x, 18014398509481984l);
- longCheckDiv("ldiv_by_pow2_55", ldiv_by_pow2_55(x), x, 36028797018963968l);
- longCheckDiv("ldiv_by_pow2_56", ldiv_by_pow2_56(x), x, 72057594037927936l);
- longCheckDiv("ldiv_by_pow2_57", ldiv_by_pow2_57(x), x, 144115188075855872l);
- longCheckDiv("ldiv_by_pow2_58", ldiv_by_pow2_58(x), x, 288230376151711744l);
- longCheckDiv("ldiv_by_pow2_59", ldiv_by_pow2_59(x), x, 576460752303423488l);
- longCheckDiv("ldiv_by_pow2_60", ldiv_by_pow2_60(x), x, 1152921504606846976l);
- longCheckDiv("ldiv_by_pow2_61", ldiv_by_pow2_61(x), x, 2305843009213693952l);
- longCheckDiv("ldiv_by_pow2_62", ldiv_by_pow2_62(x), x, 4611686018427387904l);
- longCheckDiv("ldiv_by_small_0", ldiv_by_small_0(x), x, 3l);
- longCheckDiv("ldiv_by_small_1", ldiv_by_small_1(x), x, 5l);
- longCheckDiv("ldiv_by_small_2", ldiv_by_small_2(x), x, 6l);
- longCheckDiv("ldiv_by_small_3", ldiv_by_small_3(x), x, 7l);
- longCheckDiv("ldiv_by_small_4", ldiv_by_small_4(x), x, 9l);
- longCheckDiv("ldiv_by_small_5", ldiv_by_small_5(x), x, 10l);
- longCheckDiv("ldiv_by_small_6", ldiv_by_small_6(x), x, 11l);
- longCheckDiv("ldiv_by_small_7", ldiv_by_small_7(x), x, 12l);
- longCheckDiv("ldiv_by_small_8", ldiv_by_small_8(x), x, 13l);
- longCheckDiv("ldiv_by_small_9", ldiv_by_small_9(x), x, 14l);
- longCheckDiv("ldiv_by_small_10", ldiv_by_small_10(x), x, 15l);
- longCheckRem("lrem_by_pow2_0", lrem_by_pow2_0(x), x, 1l);
- longCheckRem("lrem_by_pow2_1", lrem_by_pow2_1(x), x, 2l);
- longCheckRem("lrem_by_pow2_2", lrem_by_pow2_2(x), x, 4l);
- longCheckRem("lrem_by_pow2_3", lrem_by_pow2_3(x), x, 8l);
- longCheckRem("lrem_by_pow2_4", lrem_by_pow2_4(x), x, 16l);
- longCheckRem("lrem_by_pow2_5", lrem_by_pow2_5(x), x, 32l);
- longCheckRem("lrem_by_pow2_6", lrem_by_pow2_6(x), x, 64l);
- longCheckRem("lrem_by_pow2_7", lrem_by_pow2_7(x), x, 128l);
- longCheckRem("lrem_by_pow2_8", lrem_by_pow2_8(x), x, 256l);
- longCheckRem("lrem_by_pow2_9", lrem_by_pow2_9(x), x, 512l);
- longCheckRem("lrem_by_pow2_10", lrem_by_pow2_10(x), x, 1024l);
- longCheckRem("lrem_by_pow2_11", lrem_by_pow2_11(x), x, 2048l);
- longCheckRem("lrem_by_pow2_12", lrem_by_pow2_12(x), x, 4096l);
- longCheckRem("lrem_by_pow2_13", lrem_by_pow2_13(x), x, 8192l);
- longCheckRem("lrem_by_pow2_14", lrem_by_pow2_14(x), x, 16384l);
- longCheckRem("lrem_by_pow2_15", lrem_by_pow2_15(x), x, 32768l);
- longCheckRem("lrem_by_pow2_16", lrem_by_pow2_16(x), x, 65536l);
- longCheckRem("lrem_by_pow2_17", lrem_by_pow2_17(x), x, 131072l);
- longCheckRem("lrem_by_pow2_18", lrem_by_pow2_18(x), x, 262144l);
- longCheckRem("lrem_by_pow2_19", lrem_by_pow2_19(x), x, 524288l);
- longCheckRem("lrem_by_pow2_20", lrem_by_pow2_20(x), x, 1048576l);
- longCheckRem("lrem_by_pow2_21", lrem_by_pow2_21(x), x, 2097152l);
- longCheckRem("lrem_by_pow2_22", lrem_by_pow2_22(x), x, 4194304l);
- longCheckRem("lrem_by_pow2_23", lrem_by_pow2_23(x), x, 8388608l);
- longCheckRem("lrem_by_pow2_24", lrem_by_pow2_24(x), x, 16777216l);
- longCheckRem("lrem_by_pow2_25", lrem_by_pow2_25(x), x, 33554432l);
- longCheckRem("lrem_by_pow2_26", lrem_by_pow2_26(x), x, 67108864l);
- longCheckRem("lrem_by_pow2_27", lrem_by_pow2_27(x), x, 134217728l);
- longCheckRem("lrem_by_pow2_28", lrem_by_pow2_28(x), x, 268435456l);
- longCheckRem("lrem_by_pow2_29", lrem_by_pow2_29(x), x, 536870912l);
- longCheckRem("lrem_by_pow2_30", lrem_by_pow2_30(x), x, 1073741824l);
- longCheckRem("lrem_by_pow2_31", lrem_by_pow2_31(x), x, 2147483648l);
- longCheckRem("lrem_by_pow2_32", lrem_by_pow2_32(x), x, 4294967296l);
- longCheckRem("lrem_by_pow2_33", lrem_by_pow2_33(x), x, 8589934592l);
- longCheckRem("lrem_by_pow2_34", lrem_by_pow2_34(x), x, 17179869184l);
- longCheckRem("lrem_by_pow2_35", lrem_by_pow2_35(x), x, 34359738368l);
- longCheckRem("lrem_by_pow2_36", lrem_by_pow2_36(x), x, 68719476736l);
- longCheckRem("lrem_by_pow2_37", lrem_by_pow2_37(x), x, 137438953472l);
- longCheckRem("lrem_by_pow2_38", lrem_by_pow2_38(x), x, 274877906944l);
- longCheckRem("lrem_by_pow2_39", lrem_by_pow2_39(x), x, 549755813888l);
- longCheckRem("lrem_by_pow2_40", lrem_by_pow2_40(x), x, 1099511627776l);
- longCheckRem("lrem_by_pow2_41", lrem_by_pow2_41(x), x, 2199023255552l);
- longCheckRem("lrem_by_pow2_42", lrem_by_pow2_42(x), x, 4398046511104l);
- longCheckRem("lrem_by_pow2_43", lrem_by_pow2_43(x), x, 8796093022208l);
- longCheckRem("lrem_by_pow2_44", lrem_by_pow2_44(x), x, 17592186044416l);
- longCheckRem("lrem_by_pow2_45", lrem_by_pow2_45(x), x, 35184372088832l);
- longCheckRem("lrem_by_pow2_46", lrem_by_pow2_46(x), x, 70368744177664l);
- longCheckRem("lrem_by_pow2_47", lrem_by_pow2_47(x), x, 140737488355328l);
- longCheckRem("lrem_by_pow2_48", lrem_by_pow2_48(x), x, 281474976710656l);
- longCheckRem("lrem_by_pow2_49", lrem_by_pow2_49(x), x, 562949953421312l);
- longCheckRem("lrem_by_pow2_50", lrem_by_pow2_50(x), x, 1125899906842624l);
- longCheckRem("lrem_by_pow2_51", lrem_by_pow2_51(x), x, 2251799813685248l);
- longCheckRem("lrem_by_pow2_52", lrem_by_pow2_52(x), x, 4503599627370496l);
- longCheckRem("lrem_by_pow2_53", lrem_by_pow2_53(x), x, 9007199254740992l);
- longCheckRem("lrem_by_pow2_54", lrem_by_pow2_54(x), x, 18014398509481984l);
- longCheckRem("lrem_by_pow2_55", lrem_by_pow2_55(x), x, 36028797018963968l);
- longCheckRem("lrem_by_pow2_56", lrem_by_pow2_56(x), x, 72057594037927936l);
- longCheckRem("lrem_by_pow2_57", lrem_by_pow2_57(x), x, 144115188075855872l);
- longCheckRem("lrem_by_pow2_58", lrem_by_pow2_58(x), x, 288230376151711744l);
- longCheckRem("lrem_by_pow2_59", lrem_by_pow2_59(x), x, 576460752303423488l);
- longCheckRem("lrem_by_pow2_60", lrem_by_pow2_60(x), x, 1152921504606846976l);
- longCheckRem("lrem_by_pow2_61", lrem_by_pow2_61(x), x, 2305843009213693952l);
- longCheckRem("lrem_by_pow2_62", lrem_by_pow2_62(x), x, 4611686018427387904l);
- }
-
- public static void main(String[] args) {
- int i;
- long l;
-
- System.out.println("Begin");
-
- System.out.println("Int: checking some equally spaced dividends...");
- for (i = -1000; i < 1000; i += 300) {
- intCheckAll(i);
- intCheckAll(-i);
- }
-
- System.out.println("Int: checking small dividends...");
- for (i = 1; i < 100; i += 1) {
- intCheckAll(i);
- intCheckAll(-i);
- }
-
- System.out.println("Int: checking big dividends...");
- for (i = 0; i < 100; i += 1) {
- intCheckAll(Integer.MAX_VALUE - i);
- intCheckAll(Integer.MIN_VALUE + i);
- }
-
- System.out.println("Long: checking some equally spaced dividends...");
- for (l = 0l; l < 1000000000000l; l += 300000000000l) {
- longCheckAll(l);
- longCheckAll(-l);
- }
-
- System.out.println("Long: checking small dividends...");
- for (l = 1l; l < 100l; l += 1l) {
- longCheckAll(l);
- longCheckAll(-l);
- }
-
- System.out.println("Long: checking big dividends...");
- for (l = 0l; l < 100l; l += 1l) {
- longCheckAll(Long.MAX_VALUE - l);
- longCheckAll(Long.MIN_VALUE + l);
- }
-
- System.out.println("End");
- }
-}
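The intCheckDiv/intCheckRem and longCheckDiv/longCheckRem helpers invoked above are defined earlier in this deleted file and do not appear in this hunk. A minimal sketch of their likely shape — the call sites fix the signature (name, result of the constant-divisor method, dividend, divisor); the body is assumed:

public class DivRemCheckSketch {
  // The tested methods divide by a compile-time constant, which the compiler
  // may strength-reduce (e.g. into shifts for powers of two). Here 'divisor'
  // is a runtime value, so this division is compiled the ordinary way and
  // serves as the reference result.
  public static void longCheckDiv(String name, long result, long dividend, long divisor) {
    long expected = dividend / divisor;
    if (result != expected) {
      System.out.println(name + "(" + dividend + ") == " + result + " should be " + expected);
    }
  }

  public static void longCheckRem(String name, long result, long dividend, long divisor) {
    long expected = dividend % divisor;
    if (result != expected) {
      System.out.println(name + "(" + dividend + ") == " + result + " should be " + expected);
    }
  }
}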
diff --git a/test/702-LargeBranchOffset/build b/test/702-LargeBranchOffset/build
index eacf730..20030fa 100644
--- a/test/702-LargeBranchOffset/build
+++ b/test/702-LargeBranchOffset/build
@@ -17,11 +17,7 @@
# Stop if something fails.
set -e
-# Write out a bunch of source files.
+# Write out the source file.
cpp -P src/Main.java.in src/Main.java
-mkdir classes
-${JAVAC} -d classes src/*.java
-
-${DX} --debug --dex --output=classes.dex classes
-zip $TEST_NAME.jar classes.dex
+./default-build
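The cpp invocation above survives because Main.java.in builds its method bodies with C-preprocessor macros; once expanded, the result is ordinary Java that the shared ./default-build script can compile and dex like any other test. An illustration of the kind of source the expansion produces (the real macro names and repetition counts live in Main.java.in):

public class Main {
  public static void main(String[] args) {
    System.out.println(foo(42));
  }

  public static int foo(int x) {
    if (x == 0) {
      return -1;
    }
    // In the real test, a cpp macro repeats a filler statement thousands of
    // times here, so the branch over this block needs a long-form offset.
    x++; x++; x++;
    return x;
  }
}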
diff --git a/test/Android.libarttest.mk b/test/Android.libarttest.mk
index 5e768ee..57d06c4 100644
--- a/test/Android.libarttest.mk
+++ b/test/Android.libarttest.mk
@@ -28,6 +28,8 @@
116-nodex2oat/nodex2oat.cc \
117-nopatchoat/nopatchoat.cc \
118-noimage-dex2oat/noimage-dex2oat.cc \
+ 137-cfi/cfi.cc \
+ 139-register-natives/regnative.cc \
454-get-vreg/get_vreg_jni.cc \
455-set-vreg/set_vreg_jni.cc \
457-regs/regs_jni.cc \
@@ -56,7 +58,7 @@
LOCAL_MODULE_TAGS := tests
endif
LOCAL_SRC_FILES := $(LIBARTTEST_COMMON_SRC_FILES)
- LOCAL_SHARED_LIBRARIES += libartd
+ LOCAL_SHARED_LIBRARIES += libartd libbacktrace
LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.libarttest.mk
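Each source file listed in LIBARTTEST_COMMON_SRC_FILES implements the native half of one run-test; libbacktrace joins the shared libraries because 137-cfi/cfi.cc unwinds the test's own stack in-process. On the Java side, a test reaches this library roughly as follows (sketch; the native method name here is hypothetical — the real natives are declared by the individual tests):

public class Main {
  static {
    System.loadLibrary("arttest");  // libarttest.so, built from the sources above
  }

  // Hypothetical hook standing in for the natives that 137-cfi/cfi.cc and
  // 139-register-natives/regnative.cc actually implement.
  private static native boolean unwindInProcess();

  public static void main(String[] args) {
    System.out.println(unwindInProcess() ? "unwind OK" : "unwind failed");
  }
}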
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 731c040..a3a639b 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -95,7 +95,7 @@
RELOCATE_TYPES += no-relocate
endif
ifeq ($(ART_TEST_RUN_TEST_RELOCATE_NO_PATCHOAT),true)
- RELOCATE_TYPES := relocate-npatchoat
+ RELOCATE_TYPES += relocate-npatchoat
endif
TRACE_TYPES := ntrace
ifeq ($(ART_TEST_TRACE),true)
@@ -250,6 +250,12 @@
$(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),130-hprof,$(ALL_ADDRESS_SIZES))
+# 131 is an old test. The functionality is now implemented at an earlier stage and is
+# checked in test 138.
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+ $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
+ $(PICTEST_TYPES),$(DEBUGGABLE_TYPES),131-structural-change,$(ALL_ADDRESS_SIZES))
+
# All these tests check that we have sane behavior if we don't have a patchoat or dex2oat.
# Therefore we shouldn't run them in situations where we actually don't have these since they
# explicitly test for them. These all also assume we have an image.
@@ -257,7 +263,14 @@
116-nodex2oat \
117-nopatchoat \
118-noimage-dex2oat \
- 119-noimage-patchoat
+ 119-noimage-patchoat \
+ 137-cfi \
+ 138-duplicate-classes-check2
+
+# This test fails without an image.
+TEST_ART_BROKEN_NO_IMAGE_RUN_TESTS := \
+ 137-cfi \
+ 138-duplicate-classes-check
ifneq (,$(filter no-dex2oat,$(PREBUILD_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),no-dex2oat, \
@@ -270,6 +283,9 @@
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
$(COMPILER_TYPES), $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),no-image, \
$(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_FALLBACK_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+ $(COMPILER_TYPES), $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),no-image, \
+ $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_NO_IMAGE_RUN_TESTS),$(ALL_ADDRESS_SIZES))
endif
ifneq (,$(filter relocate-npatchoat,$(RELOCATE_TYPES)))
@@ -280,9 +296,13 @@
TEST_ART_BROKEN_FALLBACK_RUN_TESTS :=
+# 137:
+# This test unwinds the stack and expects managed frames, but tracing means we run the interpreter.
+# 802:
# This test dynamically enables tracing to force a deoptimization. This makes the test meaningless
# when already tracing, and writes an error message that we do not want to check for.
TEST_ART_BROKEN_TRACING_RUN_TESTS := \
+ 137-cfi \
802-deoptimization
ifneq (,$(filter trace,$(TRACE_TYPES)))
@@ -309,6 +329,8 @@
118-noimage-dex2oat \
119-noimage-patchoat \
131-structural-change \
+ 137-cfi \
+ 139-register-natives \
454-get-vreg \
455-set-vreg \
457-regs \
@@ -323,6 +345,33 @@
TEST_ART_BROKEN_NDEBUG_TESTS :=
+# Known broken tests for the interpreter.
+# CFI unwinding expects managed frames.
+TEST_ART_BROKEN_INTERPRETER_RUN_TESTS := \
+ 137-cfi
+
+ifneq (,$(filter interpreter,$(COMPILER_TYPES)))
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+ interpreter,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_INTERPRETER_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+endif
+
+TEST_ART_BROKEN_INTERPRETER_RUN_TESTS :=
+
+# Known broken tests for the JIT.
+# CFI unwinding expects managed frames, and the test does not iterate enough to even compile. JIT
+# also uses Generic JNI instead of the JNI compiler.
+TEST_ART_BROKEN_JIT_RUN_TESTS := \
+ 137-cfi
+
+ifneq (,$(filter jit,$(COMPILER_TYPES)))
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+ jit,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_JIT_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+endif
+
+TEST_ART_BROKEN_JIT_RUN_TESTS :=
+
# Known broken tests for the default compiler (Quick).
TEST_ART_BROKEN_DEFAULT_RUN_TESTS := \
457-regs
@@ -346,6 +395,16 @@
endif
endif
+ifdef TARGET_2ND_ARCH
+ ifeq ($(TARGET_2ND_ARCH),arm)
+ ifneq (,$(filter 32,$(ALL_ADDRESS_SIZES)))
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
+ $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_ARM_RUN_TESTS),32)
+ endif
+ endif
+endif
+
TEST_ART_BROKEN_ARM_RUN_TESTS :=
# Known broken tests for the arm64 optimizing compiler backend.
@@ -361,8 +420,6 @@
# Known broken tests for the optimizing compiler.
TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS :=
-TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS += 099-vmdebug # b/18098594
-TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS += 802-deoptimization # b/18547544
ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
@@ -405,6 +462,23 @@
TEST_ART_BROKEN_OPTIMIZING_DEBUGGABLE_RUN_TESTS :=
+# Tests that should fail in the read barrier configuration.
+# 137: Read barrier forces interpreter. Cannot run this with the interpreter.
+TEST_ART_BROKEN_READ_BARRIER_RUN_TESTS := \
+ 137-cfi
+
+ifeq ($(ART_USE_READ_BARRIER),true)
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+ $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_READ_BARRIER_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+endif
+
+TEST_ART_BROKEN_READ_BARRIER_RUN_TESTS :=
+
+# Test 137-cfi works in 32-bit only until we enable 64-bit ELF files.
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+ $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),137-cfi,64)
# Clear variables ahead of appending to them when defining tests.
$(foreach target, $(TARGET_TYPES), $(eval ART_RUN_TEST_$(call name-to-var,$(target))_RULES :=))
diff --git a/test/Instrumentation/Instrumentation.java b/test/Instrumentation/Instrumentation.java
new file mode 100644
index 0000000..09d4342
--- /dev/null
+++ b/test/Instrumentation/Instrumentation.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Instrumentation {
+ // Direct method
+ private void instanceMethod() {
+ System.out.println("instanceMethod");
+ }
+}
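Instrumentation here is a fixture class, presumably dexed for runtime gtests that instrument its private — and therefore invoke-direct — method. From Java code the method is only reachable reflectively; a sketch of a caller:

import java.lang.reflect.Method;

public class InstrumentationCaller {
  public static void main(String[] args) throws Exception {
    Method m = Instrumentation.class.getDeclaredMethod("instanceMethod");
    m.setAccessible(true);  // private, so suppress access checks
    m.invoke(new Instrumentation());  // prints "instanceMethod"
  }
}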
diff --git a/test/MultiDexModifiedSecondary/Main.java b/test/MultiDexModifiedSecondary/Main.java
new file mode 100644
index 0000000..659dba9
--- /dev/null
+++ b/test/MultiDexModifiedSecondary/Main.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Main {
+ public static void main(String args[]) {
+ Second second = new Second();
+ System.out.println(second.getSecond());
+ }
+}
diff --git a/test/MultiDexModifiedSecondary/README.txt b/test/MultiDexModifiedSecondary/README.txt
new file mode 100644
index 0000000..4cf3a56
--- /dev/null
+++ b/test/MultiDexModifiedSecondary/README.txt
@@ -0,0 +1,4 @@
+MultiDexModifiedSecondary is designed to result in a multidex file that has
+the same classes.dex file as MultiDex, but a different classes2.dex.
+
+This is used in the OatFileAssistantTest.MultiDexSecondaryOutOfDate gtest.
diff --git a/test/MultiDexModifiedSecondary/Second.java b/test/MultiDexModifiedSecondary/Second.java
new file mode 100644
index 0000000..3555a7f
--- /dev/null
+++ b/test/MultiDexModifiedSecondary/Second.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Second {
+ public String getThird() {
+ return "I Third That.";
+ }
+
+ public String getSecond() {
+ return "I Second That.";
+ }
+}
diff --git a/test/MultiDexModifiedSecondary/main.jpp b/test/MultiDexModifiedSecondary/main.jpp
new file mode 100644
index 0000000..a5d7a6c
--- /dev/null
+++ b/test/MultiDexModifiedSecondary/main.jpp
@@ -0,0 +1,3 @@
+main:
+ @@com.android.jack.annotations.ForceInMainDex
+ class Main
diff --git a/test/MultiDexModifiedSecondary/main.list b/test/MultiDexModifiedSecondary/main.list
new file mode 100644
index 0000000..44ba78e
--- /dev/null
+++ b/test/MultiDexModifiedSecondary/main.list
@@ -0,0 +1 @@
+Main.class
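Taken together, main.list (consumed by dx/multidex) and main.jpp (the Jack equivalent) pin Main.class into classes.dex, leaving Second as the sole occupant of classes2.dex. Because Main.java matches its MultiDex counterpart, classes.dex comes out byte-identical, while the extra getThird() method perturbs only classes2.dex — exactly the property the README above describes. For contrast, the MultiDex flavor's Second presumably looks like this (sketch, not the verbatim source):

class Second {
  public String getSecond() {
    return "I Second That.";
  }
}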
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 414e4df..240ed41 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -225,7 +225,12 @@
fi
if [ "$USE_JVM" = "y" ]; then
- ${JAVA} ${DEBUGGER_OPTS} ${JVM_VERIFY_ARG} -classpath classes $MAIN "$@"
+ # Xmx is necessary since we don't pass down the ART flags to the JVM.
+ cmdline="${JAVA} ${DEBUGGER_OPTS} ${JVM_VERIFY_ARG} -Xmx256m -classpath classes ${FLAGS} $MAIN $@"
+ if [ "$DEV_MODE" = "y" ]; then
+ echo $cmdline
+ fi
+ $cmdline
exit
fi
@@ -363,6 +368,7 @@
export ANDROID_ROOT=$ANDROID_ROOT && \
$mkdir_cmdline && \
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH && \
+ export PATH=$ANDROID_ROOT/bin:$PATH && \
$dex2oat_cmdline && \
$dalvikvm_cmdline"
diff --git a/test/run-test b/test/run-test
index 2873a35..ed33099 100755
--- a/test/run-test
+++ b/test/run-test
@@ -39,7 +39,7 @@
else
tmp_dir="${TMPDIR}/$USER/${test_dir}"
fi
-checker="${progdir}/../tools/checker.py"
+checker="${progdir}/../tools/checker/checker.py"
export JAVA="java"
export JAVAC="javac -g"
@@ -75,7 +75,7 @@
check_cmd="check"
output="output.txt"
build_output="build-output.txt"
-cfg_output="cfg-output.txt"
+cfg_output="graph.cfg"
lib="libartd.so"
run_args="--quiet"
build_args=""
@@ -368,6 +368,9 @@
else
run_args="${run_args} --no-relocate"
fi
+elif [ "$runtime" = "jvm" ]; then
+ # TODO: Detect whether the host is 32-bit or 64-bit.
+ run_args="${run_args} --runtime-option -Djava.library.path=${ANDROID_HOST_OUT}/lib64"
fi
if [ "$have_image" = "no" ]; then
@@ -501,14 +504,20 @@
if [ '!' -r "$build" ]; then
cp "${progdir}/etc/default-build" build
+else
+ cp "${progdir}/etc/default-build" .
fi
if [ '!' -r "$run" ]; then
cp "${progdir}/etc/default-run" run
+else
+ cp "${progdir}/etc/default-run" .
fi
if [ '!' -r "$check_cmd" ]; then
cp "${progdir}/etc/default-check" check
+else
+ cp "${progdir}/etc/default-check" .
fi
chmod 755 "$build"
diff --git a/tools/art b/tools/art
index 6c89a60..f167a73 100644
--- a/tools/art
+++ b/tools/art
@@ -92,8 +92,10 @@
ANDROID_DATA=$ANDROID_DATA \
ANDROID_ROOT=$ANDROID_ROOT \
LD_LIBRARY_PATH=$LD_LIBRARY_PATH \
+ PATH=$ANDROID_ROOT/bin:$PATH \
$invoke_with $ANDROID_ROOT/bin/$DALVIKVM $lib \
-XXlib:$LIBART \
+ -Xnorelocate \
-Ximage:$ANDROID_ROOT/framework/core.art \
-Xcompiler-option --include-debug-symbols \
"$@"
diff --git a/tools/checker.py b/tools/checker.py
deleted file mode 100755
index 0bce236..0000000
--- a/tools/checker.py
+++ /dev/null
@@ -1,777 +0,0 @@
-#!/usr/bin/env python2
-#
-# Copyright (C) 2014 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Checker is a testing tool which compiles a given test file and compares the
-# state of the control-flow graph before and after each optimization pass
-# against a set of assertions specified alongside the tests.
-#
-# Tests are written in Java, turned into DEX and compiled with the Optimizing
-# compiler. "Check lines" are assertions formatted as comments of the Java file.
-# They begin with prefix 'CHECK' followed by a pattern that the engine attempts
-# to match in the compiler-generated output.
-#
-# Assertions are tested in groups which correspond to the individual compiler
-# passes. Each group of check lines therefore must start with a 'CHECK-START'
-# header which specifies the output group it should be tested against. The group
-# name must exactly match one of the groups recognized in the output (they can
-# be listed with the '--list-groups' command-line flag).
-#
-# Matching of check lines is carried out in the order of appearance in the
-# source file. There are three types of check lines:
-# - CHECK: Must match an output line which appears in the output group
-# later than lines matched against any preceeding checks. Output
-# lines must therefore match the check lines in the same order.
-# These are referred to as "in-order" checks in the code.
-# - CHECK-DAG: Must match an output line which appears in the output group
-# later than lines matched against any preceeding in-order checks.
-# In other words, the order of output lines does not matter
-# between consecutive DAG checks.
-# - CHECK-NOT: Must not match any output line which appears in the output group
-# later than lines matched against any preceeding checks and
-# earlier than lines matched against any subsequent checks.
-# Surrounding non-negative checks (or boundaries of the group)
-# therefore create a scope within which the assertion is verified.
-#
-# Check-line patterns are treated as plain text rather than regular expressions
-# but are whitespace agnostic.
-#
-# Actual regex patterns can be inserted enclosed in '{{' and '}}' brackets. If
-# curly brackets need to be used inside the body of the regex, they need to be
-# enclosed in round brackets. For example, the pattern '{{foo{2}}}' will parse
-# the invalid regex 'foo{2', but '{{(fo{2})}}' will match 'foo'.
-#
-# Regex patterns can be named and referenced later. A new variable is defined
-# with '[[name:regex]]' and can be referenced with '[[name]]'. Variables are
-# only valid within the scope of the defining group. Within a group they cannot
-# be redefined or used undefined.
-#
-# Example:
-# The following assertions can be placed in a Java source file:
-#
-# // CHECK-START: int MyClass.MyMethod() constant_folding (after)
-# // CHECK: [[ID:i[0-9]+]] IntConstant {{11|22}}
-# // CHECK: Return [ [[ID]] ]
-#
-# The engine will attempt to match the check lines against the output of the
-# group named on the first line. Together they verify that the CFG after
-# constant folding returns an integer constant with value either 11 or 22.
-#
-
-from __future__ import print_function
-import argparse
-import os
-import re
-import shutil
-import sys
-import tempfile
-
-class Logger(object):
-
- class Level(object):
- NoOutput, Error, Info = range(3)
-
- class Color(object):
- Default, Blue, Gray, Purple, Red = range(5)
-
- @staticmethod
- def terminalCode(color, out=sys.stdout):
- if not out.isatty():
- return ''
- elif color == Logger.Color.Blue:
- return '\033[94m'
- elif color == Logger.Color.Gray:
- return '\033[37m'
- elif color == Logger.Color.Purple:
- return '\033[95m'
- elif color == Logger.Color.Red:
- return '\033[91m'
- else:
- return '\033[0m'
-
- Verbosity = Level.Info
-
- @staticmethod
- def log(text, level=Level.Info, color=Color.Default, newLine=True, out=sys.stdout):
- if level <= Logger.Verbosity:
- text = Logger.Color.terminalCode(color, out) + text + \
- Logger.Color.terminalCode(Logger.Color.Default, out)
- if newLine:
- print(text, file=out)
- else:
- print(text, end="", file=out)
- out.flush()
-
- @staticmethod
- def fail(msg, file=None, line=-1):
- location = ""
- if file:
- location += file + ":"
- if line > 0:
- location += str(line) + ":"
- if location:
- location += " "
-
- Logger.log(location, Logger.Level.Error, color=Logger.Color.Gray, newLine=False, out=sys.stderr)
- Logger.log("error: ", Logger.Level.Error, color=Logger.Color.Red, newLine=False, out=sys.stderr)
- Logger.log(msg, Logger.Level.Error, out=sys.stderr)
- sys.exit(msg)
-
- @staticmethod
- def startTest(name):
- Logger.log("TEST ", color=Logger.Color.Purple, newLine=False)
- Logger.log(name + "... ", newLine=False)
-
- @staticmethod
- def testPassed():
- Logger.log("PASS", color=Logger.Color.Blue)
-
- @staticmethod
- def testFailed(msg, file=None, line=-1):
- Logger.log("FAIL", color=Logger.Color.Red)
- Logger.fail(msg, file, line)
-
-class CommonEqualityMixin:
- """Mixin for class equality as equality of the fields."""
- def __eq__(self, other):
- return (isinstance(other, self.__class__)
- and self.__dict__ == other.__dict__)
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- def __repr__(self):
- return "<%s: %s>" % (type(self).__name__, str(self.__dict__))
-
-
-class CheckElement(CommonEqualityMixin):
- """Single element of the check line."""
-
- class Variant(object):
- """Supported language constructs."""
- Text, Pattern, VarRef, VarDef, Separator = range(5)
-
- rStartOptional = r"("
- rEndOptional = r")?"
-
- rName = r"([a-zA-Z][a-zA-Z0-9]*)"
- rRegex = r"(.+?)"
- rPatternStartSym = r"(\{\{)"
- rPatternEndSym = r"(\}\})"
- rVariableStartSym = r"(\[\[)"
- rVariableEndSym = r"(\]\])"
- rVariableSeparator = r"(:)"
-
- regexPattern = rPatternStartSym + rRegex + rPatternEndSym
- regexVariable = rVariableStartSym + \
- rName + \
- (rStartOptional + rVariableSeparator + rRegex + rEndOptional) + \
- rVariableEndSym
-
- def __init__(self, variant, name, pattern):
- self.variant = variant
- self.name = name
- self.pattern = pattern
-
- @staticmethod
- def newSeparator():
- return CheckElement(CheckElement.Variant.Separator, None, None)
-
- @staticmethod
- def parseText(text):
- return CheckElement(CheckElement.Variant.Text, None, re.escape(text))
-
- @staticmethod
- def parsePattern(patternElem):
- return CheckElement(CheckElement.Variant.Pattern, None, patternElem[2:-2])
-
- @staticmethod
- def parseVariable(varElem):
- colonPos = varElem.find(":")
- if colonPos == -1:
- # Variable reference
- name = varElem[2:-2]
- return CheckElement(CheckElement.Variant.VarRef, name, None)
- else:
- # Variable definition
- name = varElem[2:colonPos]
- body = varElem[colonPos+1:-2]
- return CheckElement(CheckElement.Variant.VarDef, name, body)
-
-class CheckLine(CommonEqualityMixin):
- """Representation of a single assertion in the check file formed of one or
- more regex elements. Matching against an output line is successful only
- if all regex elements can be matched in the given order."""
-
- class Variant(object):
- """Supported types of assertions."""
- InOrder, DAG, Not = range(3)
-
- def __init__(self, content, variant=Variant.InOrder, fileName=None, lineNo=-1):
- self.fileName = fileName
- self.lineNo = lineNo
- self.content = content.strip()
-
- self.variant = variant
- self.lineParts = self.__parse(self.content)
- if not self.lineParts:
- Logger.fail("Empty check line", self.fileName, self.lineNo)
-
- if self.variant == CheckLine.Variant.Not:
- for elem in self.lineParts:
- if elem.variant == CheckElement.Variant.VarDef:
- Logger.fail("CHECK-NOT lines cannot define variables", self.fileName, self.lineNo)
-
- def __eq__(self, other):
- return (isinstance(other, self.__class__) and
- self.variant == other.variant and
- self.lineParts == other.lineParts)
-
- # Returns True if the given Match object was at the beginning of the line.
- def __isMatchAtStart(self, match):
- return (match is not None) and (match.start() == 0)
-
- # Takes in a list of Match objects and returns the minimal start point among
- # them. If there aren't any successful matches it returns the length of
- # the searched string.
- def __firstMatch(self, matches, string):
- starts = map(lambda m: len(string) if m is None else m.start(), matches)
- return min(starts)
-
- # This method parses the content of a check line stripped of the initial
- # comment symbol and the CHECK keyword.
- def __parse(self, line):
- lineParts = []
- # Loop as long as there is something to parse.
- while line:
- # Search for the nearest occurrence of the special markers.
- matchWhitespace = re.search(r"\s+", line)
- matchPattern = re.search(CheckElement.regexPattern, line)
- matchVariable = re.search(CheckElement.regexVariable, line)
-
- # If one of the above was identified at the current position, extract them
- # from the line, parse them and add to the list of line parts.
- if self.__isMatchAtStart(matchWhitespace):
- # A whitespace in the check line creates a new separator of line parts.
- # This allows for ignored output between the previous and next parts.
- line = line[matchWhitespace.end():]
- lineParts.append(CheckElement.newSeparator())
- elif self.__isMatchAtStart(matchPattern):
- pattern = line[0:matchPattern.end()]
- line = line[matchPattern.end():]
- lineParts.append(CheckElement.parsePattern(pattern))
- elif self.__isMatchAtStart(matchVariable):
- var = line[0:matchVariable.end()]
- line = line[matchVariable.end():]
- lineParts.append(CheckElement.parseVariable(var))
- else:
- # If we're not currently looking at a special marker, this is a plain
- # text match all the way until the first special marker (or the end
- # of the line).
- firstMatch = self.__firstMatch([ matchWhitespace, matchPattern, matchVariable ], line)
- text = line[0:firstMatch]
- line = line[firstMatch:]
- lineParts.append(CheckElement.parseText(text))
- return lineParts
-
- # Returns the regex pattern to be matched in the output line. Variable
- # references are substituted with their current values provided in the
- # 'varState' argument.
- # An exception is raised if a referenced variable is undefined.
- def __generatePattern(self, linePart, varState):
- if linePart.variant == CheckElement.Variant.VarRef:
- try:
- return re.escape(varState[linePart.name])
- except KeyError:
- Logger.testFailed("Use of undefined variable \"" + linePart.name + "\"",
- self.fileName, self.lineNo)
- else:
- return linePart.pattern
-
- def __isSeparated(self, outputLine, matchStart):
- return (matchStart == 0) or (outputLine[matchStart - 1:matchStart].isspace())
-
- # Attempts to match the check line against a line from the output file with
- # the given initial variable values. It returns the new variable state if
- # successful and None otherwise.
- def match(self, outputLine, initialVarState):
- # Do the full matching on a shadow copy of the variable state. If the
- # matching fails half-way, we will not need to revert the state.
- varState = dict(initialVarState)
-
- matchStart = 0
- isAfterSeparator = True
-
- # Now try to parse all of the parts of the check line in the right order.
- # Variable values are updated on-the-fly, meaning that a variable can
- # be referenced immediately after its definition.
- for part in self.lineParts:
- if part.variant == CheckElement.Variant.Separator:
- isAfterSeparator = True
- continue
-
- # Find the earliest match for this line part.
- pattern = self.__generatePattern(part, varState)
- while True:
- match = re.search(pattern, outputLine[matchStart:])
- if (match is None) or (not isAfterSeparator and not self.__isMatchAtStart(match)):
- return None
- matchEnd = matchStart + match.end()
- matchStart += match.start()
-
- # Check if this is a valid match if we expect a whitespace separator
- # before the matched text. Otherwise loop and look for another match.
- if not isAfterSeparator or self.__isSeparated(outputLine, matchStart):
- break
- else:
- matchStart += 1
-
- if part.variant == CheckElement.Variant.VarDef:
- if part.name in varState:
- Logger.testFailed("Multiple definitions of variable \"" + part.name + "\"",
- self.fileName, self.lineNo)
- varState[part.name] = outputLine[matchStart:matchEnd]
-
- matchStart = matchEnd
- isAfterSeparator = False
-
- # All parts were successfully matched. Return the new variable state.
- return varState
-
-
-class CheckGroup(CommonEqualityMixin):
- """Represents a named collection of check lines which are to be matched
- against an output group of the same name."""
-
- def __init__(self, name, lines, fileName=None, lineNo=-1):
- self.fileName = fileName
- self.lineNo = lineNo
-
- if not name:
- Logger.fail("Check group does not have a name", self.fileName, self.lineNo)
- if not lines:
- Logger.fail("Check group does not have a body", self.fileName, self.lineNo)
-
- self.name = name
- self.lines = lines
-
- def __eq__(self, other):
- return (isinstance(other, self.__class__) and
- self.name == other.name and
- self.lines == other.lines)
-
- def __headAndTail(self, list):
- return list[0], list[1:]
-
- # Splits a list of check lines at index 'i' such that lines[i] is the first
- # element whose variant is not equal to the given parameter.
- def __splitByVariant(self, lines, variant):
- i = 0
- while i < len(lines) and lines[i].variant == variant:
- i += 1
- return lines[:i], lines[i:]
-
- # Extracts the first sequence of check lines which are independent of each
- # other's match location, i.e. either consecutive DAG lines or a single
- # InOrder line. Any Not lines preceeding this sequence are also extracted.
- def __nextIndependentChecks(self, checkLines):
- notChecks, checkLines = self.__splitByVariant(checkLines, CheckLine.Variant.Not)
- if not checkLines:
- return notChecks, [], []
-
- head, tail = self.__headAndTail(checkLines)
- if head.variant == CheckLine.Variant.InOrder:
- return notChecks, [head], tail
- else:
- assert head.variant == CheckLine.Variant.DAG
- independentChecks, checkLines = self.__splitByVariant(checkLines, CheckLine.Variant.DAG)
- return notChecks, independentChecks, checkLines
-
- # If successful, returns the line number of the first output line matching the
- # check line and the updated variable state. Otherwise returns -1 and None,
- # respectively. The 'lineFilter' parameter can be used to supply a list of
- # line numbers (counting from 1) which should be skipped.
- def __findFirstMatch(self, checkLine, outputLines, startLineNo, lineFilter, varState):
- matchLineNo = startLineNo
- for outputLine in outputLines:
- if matchLineNo not in lineFilter:
- newVarState = checkLine.match(outputLine, varState)
- if newVarState is not None:
- return matchLineNo, newVarState
- matchLineNo += 1
- return -1, None
-
- # Matches the given positive check lines against the output in order of
- # appearance. Variable state is propagated but the scope of the search remains
- # the same for all checks. Each output line can only be matched once.
- # If all check lines are matched, the resulting variable state is returned
- # together with the remaining output. The function also returns output lines
- # which appear before either of the matched lines so they can be tested
- # against Not checks.
- def __matchIndependentChecks(self, checkLines, outputLines, startLineNo, varState):
- # If no checks are provided, skip over the entire output.
- if not checkLines:
- return outputLines, [], startLineNo + len(outputLines), varState
-
- # Keep track of which lines have been matched.
- matchedLines = []
-
- # Find first unused output line which matches each check line.
- for checkLine in checkLines:
- matchLineNo, varState = \
- self.__findFirstMatch(checkLine, outputLines, startLineNo, matchedLines, varState)
- if varState is None:
- Logger.testFailed("Could not match check line \"" + checkLine.content + "\" " +
- "starting from output line " + str(startLineNo),
- self.fileName, checkLine.lineNo)
- matchedLines.append(matchLineNo)
-
- # Return new variable state and the output lines which lie outside the
- # match locations of this independent group.
- minMatchLineNo = min(matchedLines)
- maxMatchLineNo = max(matchedLines)
- preceedingLines = outputLines[:minMatchLineNo - startLineNo]
- remainingLines = outputLines[maxMatchLineNo - startLineNo + 1:]
- return preceedingLines, remainingLines, maxMatchLineNo + 1, varState
-
- # Makes sure that the given check lines do not match any of the given output
- # lines. Variable state does not change.
- def __matchNotLines(self, checkLines, outputLines, startLineNo, varState):
- for checkLine in checkLines:
- assert checkLine.variant == CheckLine.Variant.Not
- matchLineNo, matchVarState = \
- self.__findFirstMatch(checkLine, outputLines, startLineNo, [], varState)
- if matchVarState is not None:
- Logger.testFailed("CHECK-NOT line \"" + checkLine.content + "\" matches output line " + \
- str(matchLineNo), self.fileName, checkLine.lineNo)
-
- # Matches the check lines in this group against an output group. It is
- # responsible for running the checks in the right order and scope, and
- # for propagating the variable state between the check lines.
- def match(self, outputGroup):
- varState = {}
- checkLines = self.lines
- outputLines = outputGroup.body
- startLineNo = outputGroup.lineNo
-
- while checkLines:
- # Extract the next sequence of location-independent checks to be matched.
- notChecks, independentChecks, checkLines = self.__nextIndependentChecks(checkLines)
-
- # Match the independent checks.
- notOutput, outputLines, newStartLineNo, newVarState = \
- self.__matchIndependentChecks(independentChecks, outputLines, startLineNo, varState)
-
- # Run the Not checks against the output lines which lie between the last
- # two independent groups or the bounds of the output.
- self.__matchNotLines(notChecks, notOutput, startLineNo, varState)
-
- # Update variable state.
- startLineNo = newStartLineNo
- varState = newVarState
-
-class OutputGroup(CommonEqualityMixin):
- """Represents a named part of the test output against which a check group of
- the same name is to be matched."""
-
- def __init__(self, name, body, fileName=None, lineNo=-1):
- if not name:
- Logger.fail("Output group does not have a name", fileName, lineNo)
- if not body:
- Logger.fail("Output group does not have a body", fileName, lineNo)
-
- self.name = name
- self.body = body
- self.lineNo = lineNo
-
- def __eq__(self, other):
- return (isinstance(other, self.__class__) and
- self.name == other.name and
- self.body == other.body)
-
-
-class FileSplitMixin(object):
- """Mixin for representing text files which need to be split into smaller
- chunks before being parsed."""
-
- def _parseStream(self, stream):
- lineNo = 0
- allGroups = []
- currentGroup = None
-
- for line in stream:
- lineNo += 1
- line = line.strip()
- if not line:
- continue
-
- # Let the child class process the line and return information about it.
- # The _processLine method can modify the content of the line (or delete it
- # entirely) and specify whether it starts a new group.
- processedLine, newGroupName = self._processLine(line, lineNo)
- if newGroupName is not None:
- currentGroup = (newGroupName, [], lineNo)
- allGroups.append(currentGroup)
- if processedLine is not None:
- if currentGroup is not None:
- currentGroup[1].append(processedLine)
- else:
- self._exceptionLineOutsideGroup(line, lineNo)
-
- # Finally, take the generated line groups and let the child class process
- # each one before storing the final outcome.
- return list(map(lambda group: self._processGroup(group[0], group[1], group[2]), allGroups))
-
-
-class CheckFile(FileSplitMixin):
- """Collection of check groups extracted from the input test file."""
-
- def __init__(self, prefix, checkStream, fileName=None):
- self.fileName = fileName
- self.prefix = prefix
- self.groups = self._parseStream(checkStream)
-
- # Attempts to parse a check line. The regex searches for a comment symbol
- # followed by the CHECK keyword, given attribute and a colon at the very
- # beginning of the line. Whitespaces are ignored.
- def _extractLine(self, prefix, line):
- rIgnoreWhitespace = r"\s*"
- rCommentSymbols = [r"//", r"#"]
- regexPrefix = rIgnoreWhitespace + \
- r"(" + r"|".join(rCommentSymbols) + r")" + \
- rIgnoreWhitespace + \
- prefix + r":"
-
- # The 'match' function succeeds only if the pattern is matched at the
- # beginning of the line.
- match = re.match(regexPrefix, line)
- if match is not None:
- return line[match.end():].strip()
- else:
- return None
-
- # This function is invoked on each line of the check file and returns a pair
- # which instructs the parser how the line should be handled. If the line is to
- # be included in the current check group, it is returned in the first value.
- # If the line starts a new check group, the name of the group is returned in
- # the second value.
- def _processLine(self, line, lineNo):
- # Lines beginning with 'CHECK-START' start a new check group.
- startLine = self._extractLine(self.prefix + "-START", line)
- if startLine is not None:
- return None, startLine
-
- # Lines starting only with 'CHECK' are matched in order.
- plainLine = self._extractLine(self.prefix, line)
- if plainLine is not None:
- return (plainLine, CheckLine.Variant.InOrder, lineNo), None
-
- # 'CHECK-DAG' lines are no-order assertions.
- dagLine = self._extractLine(self.prefix + "-DAG", line)
- if dagLine is not None:
- return (dagLine, CheckLine.Variant.DAG, lineNo), None
-
- # 'CHECK-NOT' lines are no-order negative assertions.
- notLine = self._extractLine(self.prefix + "-NOT", line)
- if notLine is not None:
- return (notLine, CheckLine.Variant.Not, lineNo), None
-
- # Other lines are ignored.
- return None, None
-
- def _exceptionLineOutsideGroup(self, line, lineNo):
- Logger.fail("Check line not inside a group", self.fileName, lineNo)
-
- # Constructs a check group from the parser-collected check lines.
- def _processGroup(self, name, lines, lineNo):
- checkLines = list(map(lambda line: CheckLine(line[0], line[1], self.fileName, line[2]), lines))
- return CheckGroup(name, checkLines, self.fileName, lineNo)
-
- def match(self, outputFile):
- for checkGroup in self.groups:
- # TODO: Currently does not handle multiple occurrences of the same group
- # name, e.g. when a pass is run multiple times. It will always try to
- # match a check group against the first output group of the same name.
- outputGroup = outputFile.findGroup(checkGroup.name)
- if outputGroup is None:
- Logger.fail("Group \"" + checkGroup.name + "\" not found in the output",
- self.fileName, checkGroup.lineNo)
- Logger.startTest(checkGroup.name)
- checkGroup.match(outputGroup)
- Logger.testPassed()
-
-
-class OutputFile(FileSplitMixin):
- """Representation of the output generated by the test and split into groups
- within which the checks are performed.
-
- C1visualizer format is parsed with a state machine which differentiates
- between the 'compilation' and 'cfg' blocks. The former marks the beginning
- of a method. It is parsed for the method's name but otherwise ignored. Each
- subsequent CFG block represents one stage of the compilation pipeline and
- is parsed into an output group named "<method name> <pass name>".
- """
-
- class ParsingState:
- OutsideBlock, InsideCompilationBlock, StartingCfgBlock, InsideCfgBlock = range(4)
-
- def __init__(self, outputStream, fileName=None):
- self.fileName = fileName
-
- # Initialize the state machine
- self.lastMethodName = None
- self.state = OutputFile.ParsingState.OutsideBlock
- self.groups = self._parseStream(outputStream)
-
- # This function is invoked on each line of the output file and returns a pair
- # which instructs the parser how the line should be handled. If the line is to
- # be included in the current group, it is returned in the first value. If the
- # line starts a new output group, the name of the group is returned in the
- # second value.
- def _processLine(self, line, lineNo):
- if self.state == OutputFile.ParsingState.StartingCfgBlock:
- # Previous line started a new 'cfg' block which means that this one must
- # contain the name of the pass (this is enforced by C1visualizer).
- if re.match("name\s+\"[^\"]+\"", line):
- # Extract the pass name, prepend it with the name of the method and
- # return as the beginning of a new group.
- self.state = OutputFile.ParsingState.InsideCfgBlock
- return (None, self.lastMethodName + " " + line.split("\"")[1])
- else:
- Logger.fail("Expected output group name", self.fileName, lineNo)
-
- elif self.state == OutputFile.ParsingState.InsideCfgBlock:
- if line == "end_cfg":
- self.state = OutputFile.ParsingState.OutsideBlock
- return (None, None)
- else:
- return (line, None)
-
- elif self.state == OutputFile.ParsingState.InsideCompilationBlock:
- # Search for the method's name. Format: method "<name>"
- if re.match("method\s+\"[^\"]*\"", line):
- methodName = line.split("\"")[1].strip()
- if not methodName:
- Logger.fail("Empty method name in output", self.fileName, lineNo)
- self.lastMethodName = methodName
- elif line == "end_compilation":
- self.state = OutputFile.ParsingState.OutsideBlock
- return (None, None)
-
- else:
- assert self.state == OutputFile.ParsingState.OutsideBlock
- if line == "begin_cfg":
- # The line starts a new group but we'll wait until the next line from
- # which we can extract the name of the pass.
- if self.lastMethodName is None:
- Logger.fail("Expected method header", self.fileName, lineNo)
- self.state = OutputFile.ParsingState.StartingCfgBlock
- return (None, None)
- elif line == "begin_compilation":
- self.state = OutputFile.ParsingState.InsideCompilationBlock
- return (None, None)
- else:
- Logger.fail("Output line not inside a group", self.fileName, lineNo)
-
- # Constructs an output group from the parser-collected output lines.
- def _processGroup(self, name, lines, lineNo):
- return OutputGroup(name, lines, self.fileName, lineNo + 1)
-
- def findGroup(self, name):
- for group in self.groups:
- if group.name == name:
- return group
- return None
-
-
-def ParseArguments():
- parser = argparse.ArgumentParser()
- parser.add_argument("tested_file",
- help="text file the checks should be verified against")
- parser.add_argument("source_path", nargs="?",
- help="path to file/folder with checking annotations")
- parser.add_argument("--check-prefix", dest="check_prefix", default="CHECK", metavar="PREFIX",
- help="prefix of checks in the test files (default: CHECK)")
- parser.add_argument("--list-groups", dest="list_groups", action="store_true",
- help="print a list of all groups found in the tested file")
- parser.add_argument("--dump-group", dest="dump_group", metavar="GROUP",
- help="print the contents of an output group")
- parser.add_argument("-q", "--quiet", action="store_true",
- help="print only errors")
- return parser.parse_args()
-
-
-def ListGroups(outputFilename):
- outputFile = OutputFile(open(outputFilename, "r"))
- for group in outputFile.groups:
- Logger.log(group.name)
-
-
-def DumpGroup(outputFilename, groupName):
- outputFile = OutputFile(open(outputFilename, "r"))
- group = outputFile.findGroup(groupName)
- if group:
- lineNo = group.lineNo
- maxLineNo = lineNo + len(group.body)
- lenLineNo = len(str(maxLineNo)) + 2
- for line in group.body:
- Logger.log((str(lineNo) + ":").ljust(lenLineNo) + line)
- lineNo += 1
- else:
- Logger.fail("Group \"" + groupName + "\" not found in the output")
-
-
-# Returns a list of files to scan for check annotations in the given path. Path
-# to a file is returned as a single-element list, directories are recursively
-# traversed and all '.java' files returned.
-def FindCheckFiles(path):
- if not path:
- Logger.fail("No source path provided")
- elif os.path.isfile(path):
- return [ path ]
- elif os.path.isdir(path):
- foundFiles = []
- for root, dirs, files in os.walk(path):
- for file in files:
- if os.path.splitext(file)[1] == ".java":
- foundFiles.append(os.path.join(root, file))
- return foundFiles
- else:
- Logger.fail("Source path \"" + path + "\" not found")
-
-
-def RunChecks(checkPrefix, checkPath, outputFilename):
- outputBaseName = os.path.basename(outputFilename)
- outputFile = OutputFile(open(outputFilename, "r"), outputBaseName)
-
- for checkFilename in FindCheckFiles(checkPath):
- checkBaseName = os.path.basename(checkFilename)
- checkFile = CheckFile(checkPrefix, open(checkFilename, "r"), checkBaseName)
- checkFile.match(outputFile)
-
-
-if __name__ == "__main__":
- args = ParseArguments()
-
- if args.quiet:
- Logger.Verbosity = Logger.Level.Error
-
- if args.list_groups:
- ListGroups(args.tested_file)
- elif args.dump_group:
- DumpGroup(args.tested_file, args.dump_group)
- else:
- RunChecks(args.check_prefix, args.source_path, args.tested_file)
diff --git a/tools/checker/README b/tools/checker/README
new file mode 100644
index 0000000..858a773
--- /dev/null
+++ b/tools/checker/README
@@ -0,0 +1,54 @@
+Checker is a testing tool which compiles a given test file and compares the
+state of the control-flow graph before and after each optimization pass
+against a set of assertions specified alongside the tests.
+
+Tests are written in Java, turned into DEX and compiled with the Optimizing
+compiler. "Check lines" are assertions formatted as comments in the Java file.
+They begin with the prefix 'CHECK', followed by a pattern that the engine
+attempts to match in the compiler-generated output.
+
+Assertions are tested in groups which correspond to the individual compiler
+passes. Each group of check lines therefore must start with a 'CHECK-START'
+header which specifies the output group it should be tested against. The group
+name must exactly match one of the groups recognized in the output (they can
+be listed with the '--list-passes' command-line flag).
+
+Matching of check lines is carried out in the order of appearance in the
+source file. There are four types of check lines:
+ - CHECK:      Must match an output line which appears in the output group
+               later than lines matched against any preceding checks. Output
+               lines must therefore match the check lines in the same order.
+               These are referred to as "in-order" checks in the code.
+ - CHECK-NEXT: Must match the output line which immediately follows the line
+               matched by the preceding check. Can only be placed after an
+               in-order check or another next-line check.
+ - CHECK-DAG:  Must match an output line which appears in the output group
+               later than lines matched against any preceding in-order checks.
+               In other words, the order of output lines does not matter
+               between consecutive DAG checks.
+ - CHECK-NOT:  Must not match any output line which appears in the output group
+               later than lines matched against any preceding checks and
+               earlier than lines matched against any subsequent checks.
+               Surrounding non-negative checks (or boundaries of the group)
+               therefore create a scope within which the assertion is verified.
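+
+For illustration, consider a hypothetical group combining these check types
+(the instruction names are made up):
+
+  // CHECK:     add
+  // CHECK-DAG: mul
+  // CHECK-DAG: div
+  // CHECK-NOT: sub
+  // CHECK:     ret
+
+Here 'mul' and 'div' may match in either order, but only on lines following
+the one matched against 'add'. No line between those matched so far and the
+line matched against 'ret' may contain 'sub'.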
+
+Check-line patterns are treated as plain text rather than regular expressions
+but are whitespace agnostic.
+
+Actual regular expressions can be inserted by enclosing them in '{{' and '}}'
+brackets. If curly brackets need to be used inside the body of the regex, they
+need to be enclosed in round brackets. For example, the pattern '{{foo{2}}}'
+would be parsed as the invalid regex 'foo{2', whereas '{{(fo{2})}}' will
+match 'foo'.
+
+Regex patterns can be named and referenced later. A new variable is defined
+with '<<name:regex>>' and can be referenced with '<<name>>'. Variables are
+only valid within the scope of the defining group. Within a group, they can
+neither be redefined nor referenced before being defined.
+
+Example:
+ The following assertions can be placed in a Java source file:
+
+ // CHECK-START: int MyClass.MyMethod() constant_folding (after)
+ // CHECK: <<ID:i\d+>> IntConstant {{11|22}}
+ // CHECK: Return [<<ID>>]
+
+ The engine will attempt to match the check lines against the output of the
+ group named on the first line. Together they verify that the CFG after
+ constant folding returns an integer constant with value either 11 or 22.
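+
+Usage:
+  A sketch of the command-line interface as defined by the argument parser in
+  checker.py; only 'tested_file' is mandatory:
+
+    checker.py tested_file [source_path] [--check-prefix PREFIX]
+               [--list-passes] [--dump-pass PASS] [--quiet]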
diff --git a/tools/checker/checker.py b/tools/checker/checker.py
new file mode 100755
index 0000000..ed630e3
--- /dev/null
+++ b/tools/checker/checker.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python2
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import os
+
+from common.logger import Logger
+from file_format.c1visualizer.parser import ParseC1visualizerStream
+from file_format.checker.parser import ParseCheckerStream
+from match.file import MatchFiles
+
+def ParseArguments():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("tested_file",
+ help="text file the checks should be verified against")
+ parser.add_argument("source_path", nargs="?",
+ help="path to file/folder with checking annotations")
+ parser.add_argument("--check-prefix", dest="check_prefix", default="CHECK", metavar="PREFIX",
+ help="prefix of checks in the test files (default: CHECK)")
+ parser.add_argument("--list-passes", dest="list_passes", action="store_true",
+ help="print a list of all passes found in the tested file")
+ parser.add_argument("--dump-pass", dest="dump_pass", metavar="PASS",
+ help="print a compiler pass dump")
+ parser.add_argument("-q", "--quiet", action="store_true",
+ help="print only errors")
+ return parser.parse_args()
+
+
+def ListPasses(outputFilename):
+ c1File = ParseC1visualizerStream(os.path.basename(outputFilename), open(outputFilename, "r"))
+ for compiler_pass in c1File.passes:
+ Logger.log(compiler_pass.name)
+
+
+def DumpPass(outputFilename, passName):
+ c1File = ParseC1visualizerStream(os.path.basename(outputFilename), open(outputFilename, "r"))
+ compiler_pass = c1File.findPass(passName)
+ if compiler_pass:
+ maxLineNo = compiler_pass.startLineNo + len(compiler_pass.body)
+ lenLineNo = len(str(maxLineNo)) + 2
+ curLineNo = compiler_pass.startLineNo
+ for line in compiler_pass.body:
+ Logger.log((str(curLineNo) + ":").ljust(lenLineNo) + line)
+ curLineNo += 1
+ else:
+ Logger.fail("Pass \"" + passName + "\" not found in the output")
+
+
+def FindCheckerFiles(path):
+ """ Returns a list of files to scan for check annotations in the given path.
+ Path to a file is returned as a single-element list; directories are
+ recursively traversed and all '.java' and '.smali' files are returned.
+ """
+ if not path:
+ Logger.fail("No source path provided")
+ elif os.path.isfile(path):
+ return [ path ]
+ elif os.path.isdir(path):
+ foundFiles = []
+ for root, dirs, files in os.walk(path):
+ for file in files:
+ extension = os.path.splitext(file)[1]
+ if extension in [".java", ".smali"]:
+ foundFiles.append(os.path.join(root, file))
+ return foundFiles
+ else:
+ Logger.fail("Source path \"" + path + "\" not found")
+
+
+def RunTests(checkPrefix, checkPath, outputFilename):
+ c1File = ParseC1visualizerStream(os.path.basename(outputFilename), open(outputFilename, "r"))
+ for checkFilename in FindCheckerFiles(checkPath):
+ checkerFile = ParseCheckerStream(os.path.basename(checkFilename),
+ checkPrefix,
+ open(checkFilename, "r"))
+ MatchFiles(checkerFile, c1File)
+
+
+if __name__ == "__main__":
+ args = ParseArguments()
+
+ if args.quiet:
+ Logger.Verbosity = Logger.Level.Error
+
+ if args.list_passes:
+ ListPasses(args.tested_file)
+ elif args.dump_pass:
+ DumpPass(args.tested_file, args.dump_pass)
+ else:
+ RunTests(args.check_prefix, args.source_path, args.tested_file)
diff --git a/tools/checker/common/__init__.py b/tools/checker/common/__init__.py
new file mode 100644
index 0000000..d0a140b
--- /dev/null
+++ b/tools/checker/common/__init__.py
@@ -0,0 +1,13 @@
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/tools/checker/common/immutables.py b/tools/checker/common/immutables.py
new file mode 100644
index 0000000..e016867
--- /dev/null
+++ b/tools/checker/common/immutables.py
@@ -0,0 +1,25 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+class ImmutableDict(dict):
+ def __setitem__(self, key, value):
+ raise RuntimeError("Cannot modify ImmutableDict")
+
+ def __delitem__(self, key):
+ raise RuntimeError("Cannot modify ImmutableDict")
+
+ def copyWith(self, key, value):
+ newDict = ImmutableDict(self)
+ dict.__setitem__(newDict, key, value)
+ return newDict
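+
+# A minimal usage sketch (hypothetical values): copyWith leaves the receiver
+# untouched and returns an extended copy, while direct mutation raises.
+#
+#   d1 = ImmutableDict({"X": "v1"})
+#   d2 = d1.copyWith("Y", "v2")
+#   assert "Y" not in d1 and d2["Y"] == "v2"
+#   d1["X"] = "v3"  # raises RuntimeError("Cannot modify ImmutableDict")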
diff --git a/tools/checker/common/logger.py b/tools/checker/common/logger.py
new file mode 100644
index 0000000..28bb458
--- /dev/null
+++ b/tools/checker/common/logger.py
@@ -0,0 +1,81 @@
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+import sys
+
+class Logger(object):
+
+ class Level(object):
+ NoOutput, Error, Info = range(3)
+
+ class Color(object):
+ Default, Blue, Gray, Purple, Red = range(5)
+
+ @staticmethod
+ def terminalCode(color, out=sys.stdout):
+ if not out.isatty():
+ return ''
+ elif color == Logger.Color.Blue:
+ return '\033[94m'
+ elif color == Logger.Color.Gray:
+ return '\033[37m'
+ elif color == Logger.Color.Purple:
+ return '\033[95m'
+ elif color == Logger.Color.Red:
+ return '\033[91m'
+ else:
+ return '\033[0m'
+
+ Verbosity = Level.Info
+
+ @staticmethod
+ def log(text, level=Level.Info, color=Color.Default, newLine=True, out=sys.stdout):
+ if level <= Logger.Verbosity:
+ text = Logger.Color.terminalCode(color, out) + text + \
+ Logger.Color.terminalCode(Logger.Color.Default, out)
+ if newLine:
+ print(text, file=out)
+ else:
+ print(text, end="", file=out)
+ out.flush()
+
+ @staticmethod
+ def fail(msg, file=None, line=-1):
+ location = ""
+ if file:
+ location += file + ":"
+ if line > 0:
+ location += str(line) + ":"
+ if location:
+ location += " "
+
+ Logger.log(location, Logger.Level.Error, color=Logger.Color.Gray, newLine=False, out=sys.stderr)
+ Logger.log("error: ", Logger.Level.Error, color=Logger.Color.Red, newLine=False, out=sys.stderr)
+ Logger.log(msg, Logger.Level.Error, out=sys.stderr)
+ sys.exit(msg)
+
+ @staticmethod
+ def startTest(name):
+ Logger.log("TEST ", color=Logger.Color.Purple, newLine=False)
+ Logger.log(name + "... ", newLine=False)
+
+ @staticmethod
+ def testPassed():
+ Logger.log("PASS", color=Logger.Color.Blue)
+
+ @staticmethod
+ def testFailed(msg, file=None, line=-1):
+ Logger.log("FAIL", color=Logger.Color.Red)
+ Logger.fail(msg, file, line)
diff --git a/tools/checker/common/mixins.py b/tools/checker/common/mixins.py
new file mode 100644
index 0000000..819de24
--- /dev/null
+++ b/tools/checker/common/mixins.py
@@ -0,0 +1,26 @@
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+class EqualityMixin:
+ """ Object equality via equality of dictionaries. """
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) \
+ and self.__dict__ == other.__dict__
+
+class PrintableMixin:
+ """ Prints object as name-dictionary pair. """
+
+ def __repr__(self):
+ return "<%s: %s>" % (type(self).__name__, str(self.__dict__))
diff --git a/tools/checker/common/testing.py b/tools/checker/common/testing.py
new file mode 100644
index 0000000..1299c07
--- /dev/null
+++ b/tools/checker/common/testing.py
@@ -0,0 +1,22 @@
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+def ToUnicode(string):
+ """ Converts a string into Unicode.
+
+ This is a thin wrapper around the built-in `unicode`. It asserts that the
+ input is not `None`, because `unicode` would otherwise turn it into the
+ literal string "None".
+ """
+ assert string is not None
+ return unicode(string)
diff --git a/tools/checker/file_format/__init__.py b/tools/checker/file_format/__init__.py
new file mode 100644
index 0000000..d0a140b
--- /dev/null
+++ b/tools/checker/file_format/__init__.py
@@ -0,0 +1,13 @@
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/tools/checker/file_format/c1visualizer/__init__.py b/tools/checker/file_format/c1visualizer/__init__.py
new file mode 100644
index 0000000..d0a140b
--- /dev/null
+++ b/tools/checker/file_format/c1visualizer/__init__.py
@@ -0,0 +1,13 @@
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/tools/checker/file_format/c1visualizer/parser.py b/tools/checker/file_format/c1visualizer/parser.py
new file mode 100644
index 0000000..335a195
--- /dev/null
+++ b/tools/checker/file_format/c1visualizer/parser.py
@@ -0,0 +1,87 @@
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.logger import Logger
+from file_format.common import SplitStream
+from file_format.c1visualizer.struct import C1visualizerFile, C1visualizerPass
+
+import re
+
+class C1ParserState:
+ OutsideBlock, InsideCompilationBlock, StartingCfgBlock, InsideCfgBlock = range(4)
+
+ def __init__(self):
+ self.currentState = C1ParserState.OutsideBlock
+ self.lastMethodName = None
+
+def __parseC1Line(line, lineNo, state, fileName):
+ """ This function is invoked on each line of the output file and returns
+ a pair which instructs the parser how the line should be handled. If the
+ line is to be included in the current group, it is returned in the first
+ value. If the line starts a new output group, the name of the group is
+ returned in the second value.
+ """
+ if state.currentState == C1ParserState.StartingCfgBlock:
+ # Previous line started a new 'cfg' block which means that this one must
+ # contain the name of the pass (this is enforced by C1visualizer).
+ if re.match("name\s+\"[^\"]+\"", line):
+ # Extract the pass name, prepend it with the name of the method and
+ # return as the beginning of a new group.
+ state.currentState = C1ParserState.InsideCfgBlock
+ return (None, state.lastMethodName + " " + line.split("\"")[1])
+ else:
+ Logger.fail("Expected output group name", fileName, lineNo)
+
+ elif state.currentState == C1ParserState.InsideCfgBlock:
+ if line == "end_cfg":
+ state.currentState = C1ParserState.OutsideBlock
+ return (None, None)
+ else:
+ return (line, None)
+
+ elif state.currentState == C1ParserState.InsideCompilationBlock:
+ # Search for the method's name. Format: method "<name>"
+ if re.match("method\s+\"[^\"]*\"", line):
+ methodName = line.split("\"")[1].strip()
+ if not methodName:
+ Logger.fail("Empty method name in output", fileName, lineNo)
+ state.lastMethodName = methodName
+ elif line == "end_compilation":
+ state.currentState = C1ParserState.OutsideBlock
+ return (None, None)
+
+ else:
+ assert state.currentState == C1ParserState.OutsideBlock
+ if line == "begin_cfg":
+ # The line starts a new group but we'll wait until the next line from
+ # which we can extract the name of the pass.
+ if state.lastMethodName is None:
+ Logger.fail("Expected method header", fileName, lineNo)
+ state.currentState = C1ParserState.StartingCfgBlock
+ return (None, None)
+ elif line == "begin_compilation":
+ state.currentState = C1ParserState.InsideCompilationBlock
+ return (None, None)
+ else:
+ Logger.fail("C1visualizer line not inside a group", fileName, lineNo)
+
+def ParseC1visualizerStream(fileName, stream):
+ c1File = C1visualizerFile(fileName)
+ state = C1ParserState()
+ fnProcessLine = lambda line, lineNo: __parseC1Line(line, lineNo, state, fileName)
+ fnLineOutsideChunk = lambda line, lineNo: \
+ Logger.fail("C1visualizer line not inside a group", fileName, lineNo)
+ for passName, passLines, startLineNo in SplitStream(stream, fnProcessLine, fnLineOutsideChunk):
+ C1visualizerPass(c1File, passName, passLines, startLineNo + 1)
+ return c1File
diff --git a/tools/checker/file_format/c1visualizer/struct.py b/tools/checker/file_format/c1visualizer/struct.py
new file mode 100644
index 0000000..991564e
--- /dev/null
+++ b/tools/checker/file_format/c1visualizer/struct.py
@@ -0,0 +1,60 @@
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.logger import Logger
+from common.mixins import PrintableMixin
+
+class C1visualizerFile(PrintableMixin):
+
+ def __init__(self, fileName):
+ self.fileName = fileName
+ self.passes = []
+
+ def addPass(self, new_pass):
+ self.passes.append(new_pass)
+
+ def findPass(self, name):
+ for entry in self.passes:
+ if entry.name == name:
+ return entry
+ return None
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) \
+ and self.passes == other.passes
+
+
+class C1visualizerPass(PrintableMixin):
+
+ def __init__(self, parent, name, body, startLineNo):
+ self.parent = parent
+ self.name = name
+ self.body = body
+ self.startLineNo = startLineNo
+
+ if not self.name:
+ Logger.fail("C1visualizer pass does not have a name", self.fileName, self.startLineNo)
+ if not self.body:
+ Logger.fail("C1visualizer pass does not have a body", self.fileName, self.startLineNo)
+
+ self.parent.addPass(self)
+
+ @property
+ def fileName(self):
+ return self.parent.fileName
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) \
+ and self.name == other.name \
+ and self.body == other.body
diff --git a/tools/checker/file_format/c1visualizer/test.py b/tools/checker/file_format/c1visualizer/test.py
new file mode 100644
index 0000000..812a4cf
--- /dev/null
+++ b/tools/checker/file_format/c1visualizer/test.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python2
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.testing import ToUnicode
+from file_format.c1visualizer.parser import ParseC1visualizerStream
+from file_format.c1visualizer.struct import C1visualizerFile, C1visualizerPass
+
+import io
+import unittest
+
+class C1visualizerParser_Test(unittest.TestCase):
+
+ def createFile(self, passList):
+ """ Creates an instance of CheckerFile from provided info.
+
+ Data format: [ ( <case-name>, [ ( <text>, <assert-variant> ), ... ] ), ... ]
+ """
+ c1File = C1visualizerFile("<c1_file>")
+ for passEntry in passList:
+ passName = passEntry[0]
+ passBody = passEntry[1]
+ c1Pass = C1visualizerPass(c1File, passName, passBody, 0)
+ return c1File
+
+ def assertParsesTo(self, c1Text, expectedData):
+ expectedFile = self.createFile(expectedData)
+ actualFile = ParseC1visualizerStream("<c1_file>", io.StringIO(ToUnicode(c1Text)))
+ return self.assertEqual(expectedFile, actualFile)
+
+ def test_EmptyFile(self):
+ self.assertParsesTo("", [])
+
+ def test_SingleGroup(self):
+ self.assertParsesTo(
+ """
+ begin_compilation
+ method "MyMethod"
+ end_compilation
+ begin_cfg
+ name "pass1"
+ foo
+ bar
+ end_cfg
+ """,
+ [ ( "MyMethod pass1", [ "foo", "bar" ] ) ])
+
+ def test_MultipleGroups(self):
+ self.assertParsesTo(
+ """
+ begin_compilation
+ name "xyz1"
+ method "MyMethod1"
+ date 1234
+ end_compilation
+ begin_cfg
+ name "pass1"
+ foo
+ bar
+ end_cfg
+ begin_cfg
+ name "pass2"
+ abc
+ def
+ end_cfg
+ """,
+ [ ( "MyMethod1 pass1", [ "foo", "bar" ] ),
+ ( "MyMethod1 pass2", [ "abc", "def" ] ) ])
+ self.assertParsesTo(
+ """
+ begin_compilation
+ name "xyz1"
+ method "MyMethod1"
+ date 1234
+ end_compilation
+ begin_cfg
+ name "pass1"
+ foo
+ bar
+ end_cfg
+ begin_compilation
+ name "xyz2"
+ method "MyMethod2"
+ date 5678
+ end_compilation
+ begin_cfg
+ name "pass2"
+ abc
+ def
+ end_cfg
+ """,
+ [ ( "MyMethod1 pass1", [ "foo", "bar" ] ),
+ ( "MyMethod2 pass2", [ "abc", "def" ] ) ])
diff --git a/tools/checker/file_format/checker/__init__.py b/tools/checker/file_format/checker/__init__.py
new file mode 100644
index 0000000..d0a140b
--- /dev/null
+++ b/tools/checker/file_format/checker/__init__.py
@@ -0,0 +1,13 @@
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/tools/checker/file_format/checker/parser.py b/tools/checker/file_format/checker/parser.py
new file mode 100644
index 0000000..4eed391
--- /dev/null
+++ b/tools/checker/file_format/checker/parser.py
@@ -0,0 +1,147 @@
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.logger import Logger
+from file_format.common import SplitStream
+from file_format.checker.struct import CheckerFile, TestCase, TestAssertion, RegexExpression
+
+import re
+
+def __extractLine(prefix, line):
+ """ Attempts to parse a check line. The regex searches for a comment symbol
+ followed by the CHECK keyword, the given attribute and a colon at the very
+ beginning of the line. Whitespace is ignored.
+ """
+ rIgnoreWhitespace = r"\s*"
+ rCommentSymbols = [r"//", r"#"]
+ regexPrefix = rIgnoreWhitespace + \
+ r"(" + r"|".join(rCommentSymbols) + r")" + \
+ rIgnoreWhitespace + \
+ prefix + r":"
+
+ # The 'match' function succeeds only if the pattern is matched at the
+ # beginning of the line.
+ match = re.match(regexPrefix, line)
+ if match is not None:
+ return line[match.end():].strip()
+ else:
+ return None
+
+def __processLine(line, lineNo, prefix):
+ """ This function is invoked on each line of the check file and returns a pair
+ which instructs the parser how the line should be handled. If the line is
+ to be included in the current check group, it is returned in the first
+ value. If the line starts a new check group, the name of the group is
+ returned in the second value.
+ """
+ # Lines beginning with 'CHECK-START' start a new test case.
+ startLine = __extractLine(prefix + "-START", line)
+ if startLine is not None:
+ return None, startLine
+
+ # Lines starting only with 'CHECK' are matched in order.
+ plainLine = __extractLine(prefix, line)
+ if plainLine is not None:
+ return (plainLine, TestAssertion.Variant.InOrder, lineNo), None
+
+ # 'CHECK-NEXT' lines are in-order but must match the very next line.
+ nextLine = __extractLine(prefix + "-NEXT", line)
+ if nextLine is not None:
+ return (nextLine, TestAssertion.Variant.NextLine, lineNo), None
+
+ # 'CHECK-DAG' lines are no-order assertions.
+ dagLine = __extractLine(prefix + "-DAG", line)
+ if dagLine is not None:
+ return (dagLine, TestAssertion.Variant.DAG, lineNo), None
+
+ # 'CHECK-NOT' lines are no-order negative assertions.
+ notLine = __extractLine(prefix + "-NOT", line)
+ if notLine is not None:
+ return (notLine, TestAssertion.Variant.Not, lineNo), None
+
+ # Other lines are ignored.
+ return None, None
+
+def __isMatchAtStart(match):
+ """ Tests if the given Match occurred at the beginning of the line. """
+ return (match is not None) and (match.start() == 0)
+
+def __firstMatch(matches, string):
+ """ Takes in a list of Match objects and returns the minimal start point among
+ them. If there aren't any successful matches it returns the length of
+ the searched string.
+ """
+ starts = map(lambda m: len(string) if m is None else m.start(), matches)
+ return min(starts)
+
+def ParseCheckerAssertion(parent, line, variant, lineNo):
+ """ This method parses the content of a check line stripped of the initial
+ comment symbol and the CHECK keyword.
+ """
+ assertion = TestAssertion(parent, variant, line, lineNo)
+ # Loop as long as there is something to parse.
+ while line:
+ # Search for the nearest occurrence of the special markers.
+ matchWhitespace = re.search(r"\s+", line)
+ matchPattern = re.search(RegexExpression.Regex.regexPattern, line)
+ matchVariableReference = re.search(RegexExpression.Regex.regexVariableReference, line)
+ matchVariableDefinition = re.search(RegexExpression.Regex.regexVariableDefinition, line)
+
+ # If one of the above was identified at the current position, extract them
+ # from the line, parse them and add to the list of line parts.
+ if __isMatchAtStart(matchWhitespace):
+ # A whitespace in the check line creates a new separator of line parts.
+ # This allows for ignored output between the previous and next parts.
+ line = line[matchWhitespace.end():]
+ assertion.addExpression(RegexExpression.createSeparator())
+ elif __isMatchAtStart(matchPattern):
+ pattern = line[0:matchPattern.end()]
+ pattern = pattern[2:-2]
+ line = line[matchPattern.end():]
+ assertion.addExpression(RegexExpression.createPattern(pattern))
+ elif __isMatchAtStart(matchVariableReference):
+ var = line[0:matchVariableReference.end()]
+ line = line[matchVariableReference.end():]
+ name = var[2:-2]
+ assertion.addExpression(RegexExpression.createVariableReference(name))
+ elif __isMatchAtStart(matchVariableDefinition):
+ var = line[0:matchVariableDefinition.end()]
+ line = line[matchVariableDefinition.end():]
+ colonPos = var.find(":")
+ name = var[2:colonPos]
+ body = var[colonPos+1:-2]
+ assertion.addExpression(RegexExpression.createVariableDefinition(name, body))
+ else:
+ # If we're not currently looking at a special marker, this is a plain
+ # text match all the way until the first special marker (or the end
+ # of the line).
+ firstMatch = __firstMatch([ matchWhitespace,
+ matchPattern,
+ matchVariableReference,
+ matchVariableDefinition ],
+ line)
+ text = line[0:firstMatch]
+ line = line[firstMatch:]
+ assertion.addExpression(RegexExpression.createText(text))
+ return assertion
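+
+# A worked sketch of the tokenizer above, on the hypothetical check line
+#
+#   foo <<ID:i\d+>> {{.*}} bar
+#
+# the assertion is built from Text("foo"), a separator, VarDef("ID", "i\d+"),
+# a separator, Pattern(".*"), a separator and Text("bar").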
+
+def ParseCheckerStream(fileName, prefix, stream):
+ checkerFile = CheckerFile(fileName)
+ fnProcessLine = lambda line, lineNo: __processLine(line, lineNo, prefix)
+ fnLineOutsideChunk = lambda line, lineNo: \
+ Logger.fail("C1visualizer line not inside a group", fileName, lineNo)
+ for caseName, caseLines, startLineNo in SplitStream(stream, fnProcessLine, fnLineOutsideChunk):
+ testCase = TestCase(checkerFile, caseName, startLineNo)
+ for caseLine in caseLines:
+ ParseCheckerAssertion(testCase, caseLine[0], caseLine[1], caseLine[2])
+ return checkerFile
diff --git a/tools/checker/file_format/checker/struct.py b/tools/checker/file_format/checker/struct.py
new file mode 100644
index 0000000..6a54142
--- /dev/null
+++ b/tools/checker/file_format/checker/struct.py
@@ -0,0 +1,163 @@
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.logger import Logger
+from common.mixins import EqualityMixin, PrintableMixin
+
+import re
+
+class CheckerFile(PrintableMixin):
+
+ def __init__(self, fileName):
+ self.fileName = fileName
+ self.testCases = []
+
+ def addTestCase(self, new_test_case):
+ self.testCases.append(new_test_case)
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) \
+ and self.testCases == other.testCases
+
+
+class TestCase(PrintableMixin):
+
+ def __init__(self, parent, name, startLineNo):
+ assert isinstance(parent, CheckerFile)
+
+ self.parent = parent
+ self.name = name
+ self.assertions = []
+ self.startLineNo = startLineNo
+
+ if not self.name:
+ Logger.fail("Test case does not have a name", self.fileName, self.startLineNo)
+
+ self.parent.addTestCase(self)
+
+ @property
+ def fileName(self):
+ return self.parent.fileName
+
+ def addAssertion(self, new_assertion):
+ if new_assertion.variant == TestAssertion.Variant.NextLine:
+ if not self.assertions or \
+ (self.assertions[-1].variant != TestAssertion.Variant.InOrder and \
+ self.assertions[-1].variant != TestAssertion.Variant.NextLine):
+ Logger.fail("A next-line assertion can only be placed after an "
+ "in-order assertion or another next-line assertion.",
+ new_assertion.fileName, new_assertion.lineNo)
+ self.assertions.append(new_assertion)
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) \
+ and self.name == other.name \
+ and self.assertions == other.assertions
+
+
+class TestAssertion(PrintableMixin):
+
+ class Variant(object):
+ """Supported types of assertions."""
+ InOrder, NextLine, DAG, Not = range(4)
+
+ def __init__(self, parent, variant, originalText, lineNo):
+ assert isinstance(parent, TestCase)
+
+ self.parent = parent
+ self.variant = variant
+ self.expressions = []
+ self.lineNo = lineNo
+ self.originalText = originalText
+
+ self.parent.addAssertion(self)
+
+ @property
+ def fileName(self):
+ return self.parent.fileName
+
+ def addExpression(self, new_expression):
+ assert isinstance(new_expression, RegexExpression)
+ if self.variant == TestAssertion.Variant.Not:
+ if new_expression.variant == RegexExpression.Variant.VarDef:
+ Logger.fail("CHECK-NOT lines cannot define variables", self.fileName, self.lineNo)
+ self.expressions.append(new_expression)
+
+ def toRegex(self):
+ """ Returns a regex pattern for this entire assertion. Only used in tests. """
+ regex = ""
+ for expression in self.expressions:
+ if expression.variant == RegexExpression.Variant.Separator:
+ regex = regex + ", "
+ else:
+ regex = regex + "(" + expression.pattern + ")"
+ return regex
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) \
+ and self.variant == other.variant \
+ and self.expressions == other.expressions
+
+
+class RegexExpression(EqualityMixin, PrintableMixin):
+
+ class Variant(object):
+ """Supported language constructs."""
+ Text, Pattern, VarRef, VarDef, Separator = range(5)
+
+ class Regex(object):
+ rName = r"([a-zA-Z][a-zA-Z0-9]*)"
+ rRegex = r"(.+?)"
+ rPatternStartSym = r"(\{\{)"
+ rPatternEndSym = r"(\}\})"
+ rVariableStartSym = r"(<<)"
+ rVariableEndSym = r"(>>)"
+ rVariableSeparator = r"(:)"
+
+ regexPattern = rPatternStartSym + rRegex + rPatternEndSym
+ regexVariableReference = rVariableStartSym + rName + rVariableEndSym
+ regexVariableDefinition = rVariableStartSym + rName + rVariableSeparator + rRegex + rVariableEndSym
+
+ def __init__(self, variant, name, pattern):
+ self.variant = variant
+ self.name = name
+ self.pattern = pattern
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) \
+ and self.variant == other.variant \
+ and self.name == other.name \
+ and self.pattern == other.pattern
+
+ @staticmethod
+ def createSeparator():
+ return RegexExpression(RegexExpression.Variant.Separator, None, None)
+
+ @staticmethod
+ def createText(text):
+ return RegexExpression(RegexExpression.Variant.Text, None, re.escape(text))
+
+ @staticmethod
+ def createPattern(pattern):
+ return RegexExpression(RegexExpression.Variant.Pattern, None, pattern)
+
+ @staticmethod
+ def createVariableReference(name):
+ assert re.match(RegexExpression.Regex.rName, name)
+ return RegexExpression(RegexExpression.Variant.VarRef, name, None)
+
+ @staticmethod
+ def createVariableDefinition(name, pattern):
+ assert re.match(RegexExpression.Regex.rName, name)
+ return RegexExpression(RegexExpression.Variant.VarDef, name, pattern)
diff --git a/tools/checker/file_format/checker/test.py b/tools/checker/file_format/checker/test.py
new file mode 100644
index 0000000..453deed
--- /dev/null
+++ b/tools/checker/file_format/checker/test.py
@@ -0,0 +1,269 @@
+#!/usr/bin/env python2
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.testing import ToUnicode
+from file_format.checker.parser import ParseCheckerStream
+from file_format.checker.struct import CheckerFile, TestCase, TestAssertion, RegexExpression
+
+import io
+import unittest
+
+CheckerException = SystemExit
+
+class CheckerParser_PrefixTest(unittest.TestCase):
+
+ def tryParse(self, string):
+ checkerText = u"// CHECK-START: pass\n" + ToUnicode(string)
+ checkFile = ParseCheckerStream("<test-file>", "CHECK", io.StringIO(checkerText))
+ self.assertEqual(len(checkFile.testCases), 1)
+ testCase = checkFile.testCases[0]
+ return len(testCase.assertions) != 0
+
+ def test_InvalidFormat(self):
+ self.assertFalse(self.tryParse("CHECK"))
+ self.assertFalse(self.tryParse(":CHECK"))
+ self.assertFalse(self.tryParse("CHECK:"))
+ self.assertFalse(self.tryParse("//CHECK"))
+ self.assertFalse(self.tryParse("#CHECK"))
+
+ self.assertTrue(self.tryParse("//CHECK:foo"))
+ self.assertTrue(self.tryParse("#CHECK:bar"))
+
+ def test_InvalidLabel(self):
+ self.assertFalse(self.tryParse("//ACHECK:foo"))
+ self.assertFalse(self.tryParse("#ACHECK:foo"))
+
+ def test_NotFirstOnTheLine(self):
+ self.assertFalse(self.tryParse("A// CHECK: foo"))
+ self.assertFalse(self.tryParse("A # CHECK: foo"))
+ self.assertFalse(self.tryParse("// // CHECK: foo"))
+ self.assertFalse(self.tryParse("# # CHECK: foo"))
+
+ def test_WhitespaceAgnostic(self):
+ self.assertTrue(self.tryParse(" //CHECK: foo"))
+ self.assertTrue(self.tryParse("// CHECK: foo"))
+ self.assertTrue(self.tryParse(" //CHECK: foo"))
+ self.assertTrue(self.tryParse("// CHECK: foo"))
+
+
+class CheckerParser_RegexExpressionTest(unittest.TestCase):
+
+ def parseAssertion(self, string, variant=""):
+ checkerText = u"// CHECK-START: pass\n// CHECK" + ToUnicode(variant) + u": " + ToUnicode(string)
+ checkerFile = ParseCheckerStream("<test-file>", "CHECK", io.StringIO(checkerText))
+ self.assertEqual(len(checkerFile.testCases), 1)
+ testCase = checkerFile.testCases[0]
+ self.assertEqual(len(testCase.assertions), 1)
+ return testCase.assertions[0]
+
+ def parseExpression(self, string):
+ line = self.parseAssertion(string)
+ self.assertEqual(1, len(line.expressions))
+ return line.expressions[0]
+
+ def assertEqualsRegex(self, string, expected):
+ self.assertEqual(expected, self.parseAssertion(string).toRegex())
+
+ def assertEqualsText(self, string, text):
+ self.assertEqual(self.parseExpression(string), RegexExpression.createText(text))
+
+ def assertEqualsPattern(self, string, pattern):
+ self.assertEqual(self.parseExpression(string), RegexExpression.createPattern(pattern))
+
+ def assertEqualsVarRef(self, string, name):
+ self.assertEqual(self.parseExpression(string), RegexExpression.createVariableReference(name))
+
+ def assertEqualsVarDef(self, string, name, pattern):
+ self.assertEqual(self.parseExpression(string),
+ RegexExpression.createVariableDefinition(name, pattern))
+
+ def assertVariantNotEqual(self, string, variant):
+ self.assertNotEqual(variant, self.parseExpression(string).variant)
+
+ # Test that individual parts of the line are recognized
+
+ def test_TextOnly(self):
+ self.assertEqualsText("foo", "foo")
+ self.assertEqualsText(" foo ", "foo")
+ self.assertEqualsRegex("f$o^o", "(f\$o\^o)")
+
+ def test_PatternOnly(self):
+ self.assertEqualsPattern("{{a?b.c}}", "a?b.c")
+
+ def test_VarRefOnly(self):
+ self.assertEqualsVarRef("<<ABC>>", "ABC")
+
+ def test_VarDefOnly(self):
+ self.assertEqualsVarDef("<<ABC:a?b.c>>", "ABC", "a?b.c")
+
+ def test_TextWithWhitespace(self):
+ self.assertEqualsRegex("foo bar", "(foo), (bar)")
+ self.assertEqualsRegex("foo bar", "(foo), (bar)")
+
+ def test_TextWithRegex(self):
+ self.assertEqualsRegex("foo{{abc}}bar", "(foo)(abc)(bar)")
+
+ def test_TextWithVar(self):
+ self.assertEqualsRegex("foo<<ABC:abc>>bar", "(foo)(abc)(bar)")
+
+ def test_PlainWithRegexAndWhitespaces(self):
+ self.assertEqualsRegex("foo {{abc}}bar", "(foo), (abc)(bar)")
+ self.assertEqualsRegex("foo{{abc}} bar", "(foo)(abc), (bar)")
+ self.assertEqualsRegex("foo {{abc}} bar", "(foo), (abc), (bar)")
+
+ def test_PlainWithVarAndWhitespaces(self):
+ self.assertEqualsRegex("foo <<ABC:abc>>bar", "(foo), (abc)(bar)")
+ self.assertEqualsRegex("foo<<ABC:abc>> bar", "(foo)(abc), (bar)")
+ self.assertEqualsRegex("foo <<ABC:abc>> bar", "(foo), (abc), (bar)")
+
+ def test_AllKinds(self):
+ self.assertEqualsRegex("foo <<ABC:abc>>{{def}}bar", "(foo), (abc)(def)(bar)")
+ self.assertEqualsRegex("foo<<ABC:abc>> {{def}}bar", "(foo)(abc), (def)(bar)")
+ self.assertEqualsRegex("foo <<ABC:abc>> {{def}} bar", "(foo), (abc), (def), (bar)")
+
+ # Test that variables and patterns are parsed correctly
+
+ def test_ValidPattern(self):
+ self.assertEqualsPattern("{{abc}}", "abc")
+ self.assertEqualsPattern("{{a[b]c}}", "a[b]c")
+ self.assertEqualsPattern("{{(a{bc})}}", "(a{bc})")
+
+ def test_ValidRef(self):
+ self.assertEqualsVarRef("<<ABC>>", "ABC")
+ self.assertEqualsVarRef("<<A1BC2>>", "A1BC2")
+
+ def test_ValidDef(self):
+ self.assertEqualsVarDef("<<ABC:abc>>", "ABC", "abc")
+ self.assertEqualsVarDef("<<ABC:ab:c>>", "ABC", "ab:c")
+ self.assertEqualsVarDef("<<ABC:a[b]c>>", "ABC", "a[b]c")
+ self.assertEqualsVarDef("<<ABC:(a[bc])>>", "ABC", "(a[bc])")
+
+ def test_Empty(self):
+ self.assertVariantNotEqual("{{}}", RegexExpression.Variant.Pattern)
+ self.assertVariantNotEqual("<<>>", RegexExpression.Variant.VarRef)
+ self.assertVariantNotEqual("<<:>>", RegexExpression.Variant.VarDef)
+
+ def test_InvalidVarName(self):
+ self.assertVariantNotEqual("<<0ABC>>", RegexExpression.Variant.VarRef)
+ self.assertVariantNotEqual("<<AB=C>>", RegexExpression.Variant.VarRef)
+ self.assertVariantNotEqual("<<ABC=>>", RegexExpression.Variant.VarRef)
+ self.assertVariantNotEqual("<<0ABC:abc>>", RegexExpression.Variant.VarDef)
+ self.assertVariantNotEqual("<<AB=C:abc>>", RegexExpression.Variant.VarDef)
+ self.assertVariantNotEqual("<<ABC=:abc>>", RegexExpression.Variant.VarDef)
+
+ def test_BodyMatchNotGreedy(self):
+ self.assertEqualsRegex("{{abc}}{{def}}", "(abc)(def)")
+ self.assertEqualsRegex("<<ABC:abc>><<DEF:def>>", "(abc)(def)")
+
+ def test_NoVarDefsInNotChecks(self):
+ with self.assertRaises(CheckerException):
+ self.parseAssertion("<<ABC:abc>>", "-NOT")
+
+
+class CheckerParser_FileLayoutTest(unittest.TestCase):
+
+ # Creates an instance of CheckerFile from provided info.
+ # Data format: [ ( <case-name>, [ ( <text>, <assert-variant> ), ... ] ), ... ]
+ def createFile(self, caseList):
+ testFile = CheckerFile("<test_file>")
+ for caseEntry in caseList:
+ caseName = caseEntry[0]
+ testCase = TestCase(testFile, caseName, 0)
+ assertionList = caseEntry[1]
+ for assertionEntry in assertionList:
+ content = assertionEntry[0]
+ variant = assertionEntry[1]
+ assertion = TestAssertion(testCase, variant, content, 0)
+ assertion.addExpression(RegexExpression.createText(content))
+ return testFile
+
+ def assertParsesTo(self, checkerText, expectedData):
+ expectedFile = self.createFile(expectedData)
+ actualFile = self.parse(checkerText)
+ return self.assertEqual(expectedFile, actualFile)
+
+ def parse(self, checkerText):
+ return ParseCheckerStream("<test_file>", "CHECK", io.StringIO(ToUnicode(checkerText)))
+
+ def test_EmptyFile(self):
+ self.assertParsesTo("", [])
+
+ def test_SingleGroup(self):
+ self.assertParsesTo(
+ """
+ // CHECK-START: Example Group
+ // CHECK: foo
+ // CHECK: bar
+ """,
+ [ ( "Example Group", [ ("foo", TestAssertion.Variant.InOrder),
+ ("bar", TestAssertion.Variant.InOrder) ] ) ])
+
+ def test_MultipleGroups(self):
+ self.assertParsesTo(
+ """
+ // CHECK-START: Example Group1
+ // CHECK: foo
+ // CHECK: bar
+ // CHECK-START: Example Group2
+ // CHECK: abc
+ // CHECK: def
+ """,
+ [ ( "Example Group1", [ ("foo", TestAssertion.Variant.InOrder),
+ ("bar", TestAssertion.Variant.InOrder) ] ),
+ ( "Example Group2", [ ("abc", TestAssertion.Variant.InOrder),
+ ("def", TestAssertion.Variant.InOrder) ] ) ])
+
+ def test_AssertionVariants(self):
+ self.assertParsesTo(
+ """
+ // CHECK-START: Example Group
+ // CHECK: foo1
+ // CHECK: foo2
+ // CHECK-NEXT: foo3
+ // CHECK-NEXT: foo4
+ // CHECK-NOT: bar
+ // CHECK-DAG: abc
+ // CHECK-DAG: def
+ """,
+ [ ( "Example Group", [ ("foo1", TestAssertion.Variant.InOrder),
+ ("foo2", TestAssertion.Variant.InOrder),
+ ("foo3", TestAssertion.Variant.NextLine),
+ ("foo4", TestAssertion.Variant.NextLine),
+ ("bar", TestAssertion.Variant.Not),
+ ("abc", TestAssertion.Variant.DAG),
+ ("def", TestAssertion.Variant.DAG) ] ) ])
+
+ def test_MisplacedNext(self):
+ with self.assertRaises(CheckerException):
+ self.parse(
+ """
+ // CHECK-START: Example Group
+ // CHECK-DAG: foo
+ // CHECK-NEXT: bar
+ """)
+ with self.assertRaises(CheckerException):
+ self.parse(
+ """
+ // CHECK-START: Example Group
+ // CHECK-NOT: foo
+ // CHECK-NEXT: bar
+ """)
+ with self.assertRaises(CheckerException):
+ self.parse(
+ """
+ // CHECK-START: Example Group
+ // CHECK-NEXT: bar
+ """)
diff --git a/tools/checker/file_format/common.py b/tools/checker/file_format/common.py
new file mode 100644
index 0000000..f91fdeb
--- /dev/null
+++ b/tools/checker/file_format/common.py
@@ -0,0 +1,48 @@
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+def SplitStream(stream, fnProcessLine, fnLineOutsideChunk):
+ """ Reads the given input stream and splits it into chunks based on
+ information extracted from individual lines.
+
+ Arguments:
+ - fnProcessLine: Called on each line with the text and line number. Must
+ return a pair: data extracted from this line and the name of the chunk
+ started on this line (either value may be None).
+ - fnLineOutsideChunk: Called on attempt to attach data prior to creating
+ a chunk.
+ """
+ lineNo = 0
+ allChunks = []
+ currentChunk = None
+
+ for line in stream:
+ lineNo += 1
+ line = line.strip()
+ if not line:
+ continue
+
+ # Let the callback process the line and return information about it.
+ # The fnProcessLine callback can modify the content of the line (or delete
+ # it entirely) and specify whether it starts a new chunk.
+ processedLine, newChunkName = fnProcessLine(line, lineNo)
+ if newChunkName is not None:
+ currentChunk = (newChunkName, [], lineNo)
+ allChunks.append(currentChunk)
+ if processedLine is not None:
+ if currentChunk is not None:
+ currentChunk[1].append(processedLine)
+ else:
+ fnLineOutsideChunk(line, lineNo)
+ return allChunks
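+
+# A minimal sketch of the contract (hypothetical callbacks): given the lines
+#
+#   header A
+#   data 1
+#   header B
+#   data 2
+#
+# and an fnProcessLine returning (None, <name>) for 'header' lines and
+# (line, None) otherwise, SplitStream returns
+# [("A", ["data 1"], 1), ("B", ["data 2"], 3)].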
diff --git a/tools/checker/match/__init__.py b/tools/checker/match/__init__.py
new file mode 100644
index 0000000..d0a140b
--- /dev/null
+++ b/tools/checker/match/__init__.py
@@ -0,0 +1,13 @@
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/tools/checker/match/file.py b/tools/checker/match/file.py
new file mode 100644
index 0000000..b22211a
--- /dev/null
+++ b/tools/checker/match/file.py
@@ -0,0 +1,174 @@
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from collections import namedtuple
+from common.immutables import ImmutableDict
+from common.logger import Logger
+from file_format.c1visualizer.struct import C1visualizerFile, C1visualizerPass
+from file_format.checker.struct import CheckerFile, TestCase, TestAssertion
+from match.line import MatchLines
+
+MatchScope = namedtuple("MatchScope", ["start", "end"])
+MatchInfo = namedtuple("MatchInfo", ["scope", "variables"])
+
+class MatchFailedException(Exception):
+ def __init__(self, assertion, lineNo):
+ self.assertion = assertion
+ self.lineNo = lineNo
+
+def splitIntoGroups(assertions):
+ """ Breaks up a list of assertions, grouping instructions which should be
+ tested in the same scope (consecutive DAG and NOT instructions).
+ """
+ splitAssertions = []
+ lastVariant = None
+ for assertion in assertions:
+ if (assertion.variant == lastVariant and
+ assertion.variant in [TestAssertion.Variant.DAG, TestAssertion.Variant.Not]):
+ splitAssertions[-1].append(assertion)
+ else:
+ splitAssertions.append([assertion])
+ lastVariant = assertion.variant
+ return splitAssertions
+
+def findMatchingLine(assertion, c1Pass, scope, variables, excludeLines=[]):
+ """ Finds the first line in `c1Pass` which matches `assertion`.
+
+ Only lines numbered between `scope.start` and `scope.end` and not on the
+ `excludeLines` list are scanned.
+
+ Returns the index of the `c1Pass` line matching the assertion and the
+ variable values after the match.
+
+ Raises MatchFailedException if no such `c1Pass` line can be found.
+ """
+ for i in range(scope.start, scope.end):
+ if i in excludeLines: continue
+ newVariables = MatchLines(assertion, c1Pass.body[i], variables)
+ if newVariables is not None:
+ return MatchInfo(MatchScope(i, i), newVariables)
+ raise MatchFailedException(assertion, scope.start)
+
+def matchDagGroup(assertions, c1Pass, scope, variables):
+ """ Attempts to find matching `c1Pass` lines for a group of DAG assertions.
+
+ Assertions are matched in the list order and variable values propagated. Only
+ lines in `scope` are scanned and each line can only match one assertion.
+
+ Returns the range of `c1Pass` lines covered by this group (min/max of matching
+ line numbers) and the variable values after the match of the last assertion.
+
+ Raises MatchFailedException when an assertion cannot be satisfied.
+ """
+ matchedLines = []
+ for assertion in assertions:
+ assert assertion.variant == TestAssertion.Variant.DAG
+ match = findMatchingLine(assertion, c1Pass, scope, variables, matchedLines)
+ variables = match.variables
+ assert match.scope.start == match.scope.end
+ assert match.scope.start not in matchedLines
+ matchedLines.append(match.scope.start)
+ return MatchInfo(MatchScope(min(matchedLines), max(matchedLines)), variables)
+
+def testNotGroup(assertions, c1Pass, scope, variables):
+ """ Verifies that none of the given NOT assertions matches a line inside
+ the given `scope` of `c1Pass` lines.
+
+ Raises MatchFailedException if an assertion matches a line in the scope.
+ """
+ for i in range(scope.start, scope.end):
+ line = c1Pass.body[i]
+ for assertion in assertions:
+ assert assertion.variant == TestAssertion.Variant.Not
+ if MatchLines(assertion, line, variables) is not None:
+ raise MatchFailedException(assertion, i)
+
+def MatchTestCase(testCase, c1Pass):
+ """ Runs a test case against a C1visualizer graph dump.
+
+ Raises MatchFailedException when an assertion cannot be satisfied.
+ """
+ assert testCase.name == c1Pass.name
+
+ matchFrom = 0
+ variables = ImmutableDict()
+ c1Length = len(c1Pass.body)
+
+ # NOT assertions are verified retrospectively, once the scope is known.
+ pendingNotAssertions = None
+
+ # Prepare assertions by grouping those that are verified in the same scope.
+ # We also append None as an EOF marker which closes the scope of any
+ # trailing NOT assertions.
+ assertionGroups = splitIntoGroups(testCase.assertions)
+ assertionGroups.append(None)
+
+ for assertionGroup in assertionGroups:
+ if assertionGroup is None:
+ # EOF marker always matches the last+1 line of c1Pass.
+ match = MatchInfo(MatchScope(c1Length, c1Length), None)
+ elif assertionGroup[0].variant == TestAssertion.Variant.Not:
+ # NOT assertions will be tested together with the next group.
+ assert not pendingNotAssertions
+ pendingNotAssertions = assertionGroup
+ continue
+ elif assertionGroup[0].variant == TestAssertion.Variant.InOrder:
+ # Single in-order assertion. Find the first line that matches.
+ assert len(assertionGroup) == 1
+ scope = MatchScope(matchFrom, c1Length)
+ match = findMatchingLine(assertionGroup[0], c1Pass, scope, variables)
+ elif assertionGroup[0].variant == TestAssertion.Variant.NextLine:
+ # Single next-line assertion. Test if the current line matches.
+ assert len(assertionGroup) == 1
+ scope = MatchScope(matchFrom, matchFrom + 1)
+ match = findMatchingLine(assertionGroup[0], c1Pass, scope, variables)
+ else:
+ # A group of DAG assertions. Match them all starting from the same point.
+ assert assertionGroup[0].variant == TestAssertion.Variant.DAG
+ scope = MatchScope(matchFrom, c1Length)
+ match = matchDagGroup(assertionGroup, c1Pass, scope, variables)
+
+ if pendingNotAssertions:
+ # The previous group contained NOT assertions. Make sure they do not match
+ # any lines in the [matchFrom, match.scope.start) scope.
+ scope = MatchScope(matchFrom, match.scope.start)
+ testNotGroup(pendingNotAssertions, c1Pass, scope, variables)
+ pendingNotAssertions = None
+
+ # Update state.
+ assert matchFrom <= match.scope.end
+ matchFrom = match.scope.end + 1
+ variables = match.variables
+
+def MatchFiles(checkerFile, c1File):
+ for testCase in checkerFile.testCases:
+ # TODO: Currently does not handle multiple occurrences of the same group
+ # name, e.g. when a pass is run multiple times. It will always try to
+ # match a check group against the first output group of the same name.
+ c1Pass = c1File.findPass(testCase.name)
+ if c1Pass is None:
+ Logger.fail("Test case \"{}\" not found in the CFG file".format(testCase.name),
+ testCase.fileName, testCase.startLineNo)
+
+ Logger.startTest(testCase.name)
+ try:
+ MatchTestCase(testCase, c1Pass)
+ Logger.testPassed()
+ except MatchFailedException as e:
+ lineNo = c1Pass.startLineNo + e.lineNo
+ if e.assertion.variant == TestAssertion.Variant.Not:
+ Logger.testFailed("NOT assertion matched line {}".format(lineNo),
+ e.assertion.fileName, e.assertion.lineNo)
+ else:
+ Logger.testFailed("Assertion could not be matched starting from line {}".format(lineNo),
+ e.assertion.fileName, e.assertion.lineNo)
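
For readers following the matching logic above: MatchTestCase advances a cursor (matchFrom) through the pass body. In-order and next-line assertions pin the cursor down, a DAG group may match its assertions in any order anywhere after the cursor, and NOT assertions are only tested once the next match bounds their scope. Below is a minimal sketch of those scope rules, using plain substring matching instead of the checker's regex expressions and the hypothetical helper names find_in_order, find_dag_group and check_not:

    def find_in_order(pattern, lines, start):
      # Index of the first line at or after `start` containing `pattern`.
      for i in range(start, len(lines)):
        if pattern in lines[i]:
          return i
      return None

    def find_dag_group(patterns, lines, start):
      # Each pattern must match a distinct line at or after `start`.
      matched = []
      for pattern in patterns:
        hit = next((i for i in range(start, len(lines))
                    if pattern in lines[i] and i not in matched), None)
        if hit is None:
          return None
        matched.append(hit)
      return max(matched)  # the group's scope ends at the furthest match

    def check_not(patterns, lines, start, end):
      # NOT patterns must not occur anywhere in [start, end).
      return not any(p in lines[i] for p in patterns for i in range(start, end))

    lines = ["foo", "def", "abc", "bar"]
    pos = find_in_order("foo", lines, 0)                  # 0
    pos = find_dag_group(["abc", "def"], lines, pos + 1)  # 2, order-free
    assert check_not(["baz"], lines, 0, pos)              # scope [0, 2)
    assert find_in_order("bar", lines, pos + 1) == 3
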
diff --git a/tools/checker/match/line.py b/tools/checker/match/line.py
new file mode 100644
index 0000000..711d814
--- /dev/null
+++ b/tools/checker/match/line.py
@@ -0,0 +1,96 @@
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.logger import Logger
+from file_format.checker.struct import RegexExpression
+
+import re
+
+def headAndTail(list):
+ return list[0], list[1:]
+
+def splitAtSeparators(expressions):
+ """ Splits a list of RegexExpressions at separators. """
+ splitExpressions = []
+ wordStart = 0
+ for index, expression in enumerate(expressions):
+ if expression.variant == RegexExpression.Variant.Separator:
+ splitExpressions.append(expressions[wordStart:index])
+ wordStart = index + 1
+ splitExpressions.append(expressions[wordStart:])
+ return splitExpressions
+
+def matchWords(checkerWord, stringWord, variables, pos):
+  """ Attempts to match a list of RegexExpressions against a single word.
+      Returns the updated variable dictionary if successful, None otherwise.
+ """
+ for expression in checkerWord:
+ # If `expression` is a variable reference, replace it with the value.
+ if expression.variant == RegexExpression.Variant.VarRef:
+ if expression.name in variables:
+ pattern = re.escape(variables[expression.name])
+ else:
+        Logger.testFailed("Missing definition of variable \"{}\"".format(expression.name),
+ pos.fileName, pos.lineNo)
+ else:
+ pattern = expression.pattern
+
+ # Match the expression's regex pattern against the remainder of the word.
+ # Note: re.match will succeed only if matched from the beginning.
+    # Note: re.match anchors the match at the beginning of the string.
+ if not match:
+ return None
+
+ # If `expression` was a variable definition, set the variable's value.
+ if expression.variant == RegexExpression.Variant.VarDef:
+ if expression.name not in variables:
+ variables = variables.copyWith(expression.name, stringWord[:match.end()])
+ else:
+ Logger.testFailed("Multiple definitions of variable \"{}\"".format(expression.name),
+ pos.fileName, pos.lineNo)
+
+ # Move cursor by deleting the matched characters.
+ stringWord = stringWord[match.end():]
+
+ # Make sure the entire word matched, i.e. `stringWord` is empty.
+ if stringWord:
+ return None
+
+ return variables
+
+def MatchLines(checkerLine, stringLine, variables):
+ """ Attempts to match a CHECK line against a string. Returns variable state
+ after the match if successful and None otherwise.
+ """
+ checkerWords = splitAtSeparators(checkerLine.expressions)
+ stringWords = stringLine.split()
+
+ while checkerWords:
+ # Get the next run of RegexExpressions which must match one string word.
+ checkerWord, checkerWords = headAndTail(checkerWords)
+
+ # Keep reading words until a match is found.
+ wordMatched = False
+ while stringWords:
+ stringWord, stringWords = headAndTail(stringWords)
+ newVariables = matchWords(checkerWord, stringWord, variables, checkerLine)
+ if newVariables is not None:
+ wordMatched = True
+ variables = newVariables
+ break
+ if not wordMatched:
+ return None
+
+ # All RegexExpressions matched. Return new variable state.
+ return variables
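
Note that matchWords records a successful `<<X:...>>` capture through variables.copyWith instead of mutating the dictionary, so a partially matched word cannot leak definitions into the caller's state. The copy-on-write container comes from common/immutables.py, which is not part of this hunk; a sketch of the behaviour the matcher assumes it has:

    class ImmutableDict(dict):
      def __setitem__(self, key, value):
        raise RuntimeError("ImmutableDict cannot be modified in place")

      def copyWith(self, key, value):
        # Return an extended copy, leaving `self` untouched.
        newDict = ImmutableDict(self)
        dict.__setitem__(newDict, key, value)
        return newDict

    variables = ImmutableDict({"X": "1234"})
    extended = variables.copyWith("Y", "abcd")
    assert "Y" not in variables and extended["Y"] == "abcd"
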
diff --git a/tools/checker/match/test.py b/tools/checker/match/test.py
new file mode 100644
index 0000000..348c1d2
--- /dev/null
+++ b/tools/checker/match/test.py
@@ -0,0 +1,388 @@
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.immutables import ImmutableDict
+from common.testing import ToUnicode
+from file_format.c1visualizer.parser import ParseC1visualizerStream
+from file_format.c1visualizer.struct import C1visualizerFile, C1visualizerPass
+from file_format.checker.parser import ParseCheckerStream, ParseCheckerAssertion
+from file_format.checker.struct import CheckerFile, TestCase, TestAssertion, RegexExpression
+from match.file import MatchTestCase, MatchFailedException
+from match.line import MatchLines
+
+import io
+import unittest
+
+CheckerException = SystemExit
+
+class MatchLines_Test(unittest.TestCase):
+
+ def createTestAssertion(self, checkerString):
+ checkerFile = CheckerFile("<checker-file>")
+ testCase = TestCase(checkerFile, "TestMethod TestPass", 0)
+ return ParseCheckerAssertion(testCase, checkerString, TestAssertion.Variant.InOrder, 0)
+
+ def tryMatch(self, checkerString, c1String, varState={}):
+ return MatchLines(self.createTestAssertion(checkerString),
+ ToUnicode(c1String),
+ ImmutableDict(varState))
+
+ def assertMatches(self, checkerString, c1String, varState={}):
+ self.assertIsNotNone(self.tryMatch(checkerString, c1String, varState))
+
+ def assertDoesNotMatch(self, checkerString, c1String, varState={}):
+ self.assertIsNone(self.tryMatch(checkerString, c1String, varState))
+
+ def test_TextAndWhitespace(self):
+ self.assertMatches("foo", "foo")
+ self.assertMatches("foo", " foo ")
+ self.assertMatches("foo", "foo bar")
+ self.assertDoesNotMatch("foo", "XfooX")
+ self.assertDoesNotMatch("foo", "zoo")
+
+ self.assertMatches("foo bar", "foo bar")
+ self.assertMatches("foo bar", "abc foo bar def")
+ self.assertMatches("foo bar", "foo foo bar bar")
+
+ self.assertMatches("foo bar", "foo X bar")
+ self.assertDoesNotMatch("foo bar", "foo Xbar")
+
+ def test_Pattern(self):
+ self.assertMatches("foo{{A|B}}bar", "fooAbar")
+ self.assertMatches("foo{{A|B}}bar", "fooBbar")
+ self.assertDoesNotMatch("foo{{A|B}}bar", "fooCbar")
+
+ def test_VariableReference(self):
+ self.assertMatches("foo<<X>>bar", "foobar", {"X": ""})
+ self.assertMatches("foo<<X>>bar", "fooAbar", {"X": "A"})
+ self.assertMatches("foo<<X>>bar", "fooBbar", {"X": "B"})
+ self.assertDoesNotMatch("foo<<X>>bar", "foobar", {"X": "A"})
+ self.assertDoesNotMatch("foo<<X>>bar", "foo bar", {"X": "A"})
+ with self.assertRaises(CheckerException):
+ self.tryMatch("foo<<X>>bar", "foobar", {})
+
+ def test_VariableDefinition(self):
+ self.assertMatches("foo<<X:A|B>>bar", "fooAbar")
+ self.assertMatches("foo<<X:A|B>>bar", "fooBbar")
+ self.assertDoesNotMatch("foo<<X:A|B>>bar", "fooCbar")
+
+ env = self.tryMatch("foo<<X:A.*B>>bar", "fooABbar", {})
+ self.assertEqual(env, {"X": "AB"})
+ env = self.tryMatch("foo<<X:A.*B>>bar", "fooAxxBbar", {})
+ self.assertEqual(env, {"X": "AxxB"})
+
+ self.assertMatches("foo<<X:A|B>>bar<<X>>baz", "fooAbarAbaz")
+ self.assertMatches("foo<<X:A|B>>bar<<X>>baz", "fooBbarBbaz")
+ self.assertDoesNotMatch("foo<<X:A|B>>bar<<X>>baz", "fooAbarBbaz")
+
+ def test_NoVariableRedefinition(self):
+ with self.assertRaises(CheckerException):
+ self.tryMatch("<<X:...>><<X>><<X:...>><<X>>", "foofoobarbar")
+
+ def test_EnvNotChangedOnPartialMatch(self):
+ env = {"Y": "foo"}
+ self.assertDoesNotMatch("<<X:A>>bar", "Abaz", env)
+ self.assertFalse("X" in env.keys())
+
+ def test_VariableContentEscaped(self):
+ self.assertMatches("<<X:..>>foo<<X>>", ".*foo.*")
+ self.assertDoesNotMatch("<<X:..>>foo<<X>>", ".*fooAAAA")
+
+
+class MatchFiles_Test(unittest.TestCase):
+
+ def assertMatches(self, checkerString, c1String):
+ checkerString = \
+ """
+ // CHECK-START: MyMethod MyPass
+ """ + checkerString
+ c1String = \
+ """
+ begin_compilation
+ name "MyMethod"
+ method "MyMethod"
+ date 1234
+ end_compilation
+ begin_cfg
+ name "MyPass"
+ """ + c1String + \
+ """
+ end_cfg
+ """
+ checkerFile = ParseCheckerStream("<test-file>", "CHECK", io.StringIO(ToUnicode(checkerString)))
+ c1File = ParseC1visualizerStream("<c1-file>", io.StringIO(ToUnicode(c1String)))
+ assert len(checkerFile.testCases) == 1
+ assert len(c1File.passes) == 1
+ MatchTestCase(checkerFile.testCases[0], c1File.passes[0])
+
+ def assertDoesNotMatch(self, checkerString, c1String):
+ with self.assertRaises(MatchFailedException):
+ self.assertMatches(checkerString, c1String)
+
+ def test_Text(self):
+ self.assertMatches("// CHECK: foo bar", "foo bar")
+ self.assertDoesNotMatch("// CHECK: foo bar", "abc def")
+
+ def test_Pattern(self):
+ self.assertMatches("// CHECK: abc {{de.}}", "abc de#")
+ self.assertDoesNotMatch("// CHECK: abc {{de.}}", "abc d#f")
+
+ def test_Variables(self):
+ self.assertMatches(
+ """
+ // CHECK: foo<<X:.>>bar
+ // CHECK: abc<<X>>def
+ """,
+ """
+ foo0bar
+ abc0def
+ """)
+ self.assertMatches(
+ """
+ // CHECK: foo<<X:([0-9]+)>>bar
+ // CHECK: abc<<X>>def
+ // CHECK: ### <<X>> ###
+ """,
+ """
+ foo1234bar
+ abc1234def
+ ### 1234 ###
+ """)
+ self.assertDoesNotMatch(
+ """
+ // CHECK: foo<<X:([0-9]+)>>bar
+ // CHECK: abc<<X>>def
+ """,
+ """
+ foo1234bar
+ abc1235def
+ """)
+
+ def test_WholeWordMustMatch(self):
+ self.assertMatches("// CHECK: b{{.}}r", "abc bar def")
+ self.assertDoesNotMatch("// CHECK: b{{.}}r", "abc Xbar def")
+ self.assertDoesNotMatch("// CHECK: b{{.}}r", "abc barX def")
+ self.assertDoesNotMatch("// CHECK: b{{.}}r", "abc b r def")
+
+ def test_InOrderAssertions(self):
+ self.assertMatches(
+ """
+ // CHECK: foo
+ // CHECK: bar
+ """,
+ """
+ foo
+ bar
+ """)
+ self.assertDoesNotMatch(
+ """
+ // CHECK: foo
+ // CHECK: bar
+ """,
+ """
+ bar
+ foo
+ """)
+
+ def test_NextLineAssertions(self):
+ self.assertMatches(
+ """
+ // CHECK: foo
+ // CHECK-NEXT: bar
+ // CHECK-NEXT: abc
+ // CHECK: def
+ """,
+ """
+ foo
+ bar
+ abc
+ def
+ """)
+ self.assertMatches(
+ """
+ // CHECK: foo
+ // CHECK-NEXT: bar
+ // CHECK: def
+ """,
+ """
+ foo
+ bar
+ abc
+ def
+ """)
+ self.assertDoesNotMatch(
+ """
+ // CHECK: foo
+ // CHECK-NEXT: bar
+ """,
+ """
+ foo
+ abc
+ bar
+ """)
+
+ self.assertDoesNotMatch(
+ """
+ // CHECK: foo
+ // CHECK-NEXT: bar
+ """,
+ """
+ bar
+ foo
+ abc
+ """)
+
+ def test_DagAssertions(self):
+ self.assertMatches(
+ """
+ // CHECK-DAG: foo
+ // CHECK-DAG: bar
+ """,
+ """
+ foo
+ bar
+ """)
+ self.assertMatches(
+ """
+ // CHECK-DAG: foo
+ // CHECK-DAG: bar
+ """,
+ """
+ bar
+ foo
+ """)
+
+ def test_DagAssertionsScope(self):
+ self.assertMatches(
+ """
+ // CHECK: foo
+ // CHECK-DAG: abc
+ // CHECK-DAG: def
+ // CHECK: bar
+ """,
+ """
+ foo
+ def
+ abc
+ bar
+ """)
+ self.assertDoesNotMatch(
+ """
+ // CHECK: foo
+ // CHECK-DAG: abc
+ // CHECK-DAG: def
+ // CHECK: bar
+ """,
+ """
+ foo
+ abc
+ bar
+ def
+ """)
+ self.assertDoesNotMatch(
+ """
+ // CHECK: foo
+ // CHECK-DAG: abc
+ // CHECK-DAG: def
+ // CHECK: bar
+ """,
+ """
+ foo
+ def
+ bar
+ abc
+ """)
+
+ def test_NotAssertions(self):
+ self.assertMatches(
+ """
+ // CHECK-NOT: foo
+ """,
+ """
+ abc
+ def
+ """)
+ self.assertDoesNotMatch(
+ """
+ // CHECK-NOT: foo
+ """,
+ """
+ abc foo
+ def
+ """)
+ self.assertDoesNotMatch(
+ """
+ // CHECK-NOT: foo
+ // CHECK-NOT: bar
+ """,
+ """
+ abc
+ def bar
+ """)
+
+ def test_NotAssertionsScope(self):
+ self.assertMatches(
+ """
+ // CHECK: abc
+ // CHECK-NOT: foo
+ // CHECK: def
+ """,
+ """
+ abc
+ def
+ """)
+ self.assertMatches(
+ """
+ // CHECK: abc
+ // CHECK-NOT: foo
+ // CHECK: def
+ """,
+ """
+ abc
+ def
+ foo
+ """)
+ self.assertDoesNotMatch(
+ """
+ // CHECK: abc
+ // CHECK-NOT: foo
+ // CHECK: def
+ """,
+ """
+ abc
+ foo
+ def
+ """)
+
+ def test_LineOnlyMatchesOnce(self):
+ self.assertMatches(
+ """
+ // CHECK-DAG: foo
+ // CHECK-DAG: foo
+ """,
+ """
+ foo
+ abc
+ foo
+ """)
+ self.assertDoesNotMatch(
+ """
+ // CHECK-DAG: foo
+ // CHECK-DAG: foo
+ """,
+ """
+ foo
+ abc
+ bar
+ """)
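
The tryMatch/assertMatches helpers above hide the parsing plumbing. Driving the line matcher by hand looks roughly like this, assuming the tools/checker modules are importable (e.g. the interpreter is started from that directory):

    from common.immutables import ImmutableDict
    from common.testing import ToUnicode
    from file_format.checker.parser import ParseCheckerAssertion
    from file_format.checker.struct import CheckerFile, TestCase, TestAssertion
    from match.line import MatchLines

    testCase = TestCase(CheckerFile("<checker-file>"), "MyMethod MyPass", 0)
    assertion = ParseCheckerAssertion(testCase, "foo<<X:[0-9]+>>bar",
                                      TestAssertion.Variant.InOrder, 0)
    # Prints the variable state after the match ({"X": "1234"}), or None.
    print(MatchLines(assertion, ToUnicode("foo1234bar"), ImmutableDict()))
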
diff --git a/tools/checker/run_unit_tests.py b/tools/checker/run_unit_tests.py
new file mode 100755
index 0000000..01708db
--- /dev/null
+++ b/tools/checker/run_unit_tests.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python2
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.logger import Logger
+from file_format.c1visualizer.test import C1visualizerParser_Test
+from file_format.checker.test import CheckerParser_PrefixTest, \
+ CheckerParser_RegexExpressionTest, \
+ CheckerParser_FileLayoutTest
+from match.test import MatchLines_Test, \
+ MatchFiles_Test
+
+import unittest
+
+if __name__ == '__main__':
+ Logger.Verbosity = Logger.Level.NoOutput
+ unittest.main(verbosity=2)
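
Besides running the script wholesale, unittest can load a single suite, which is handy when iterating on one matcher. A sketch, again assuming it is launched from tools/checker:

    import unittest

    from common.logger import Logger
    from match.test import MatchLines_Test

    # Silence the checker's own output, as run_unit_tests.py does above.
    Logger.Verbosity = Logger.Level.NoOutput
    suite = unittest.TestLoader().loadTestsFromTestCase(MatchLines_Test)
    unittest.TextTestRunner(verbosity=2).run(suite)
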
diff --git a/tools/checker_test.py b/tools/checker_test.py
deleted file mode 100755
index 667ca90..0000000
--- a/tools/checker_test.py
+++ /dev/null
@@ -1,474 +0,0 @@
-#!/usr/bin/env python2
-#
-# Copyright (C) 2014 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This is a test file which exercises all features supported by the domain-
-# specific markup language implemented by Checker.
-
-import checker
-import io
-import unittest
-
-# The parent type of exception expected to be thrown by Checker during tests.
-# It must be specific enough to not cover exceptions thrown due to actual flaws
-# in Checker.
-CheckerException = SystemExit
-
-
-class TestCheckFile_PrefixExtraction(unittest.TestCase):
- def __tryParse(self, string):
- checkFile = checker.CheckFile(None, [])
- return checkFile._extractLine("CHECK", string)
-
- def test_InvalidFormat(self):
- self.assertIsNone(self.__tryParse("CHECK"))
- self.assertIsNone(self.__tryParse(":CHECK"))
- self.assertIsNone(self.__tryParse("CHECK:"))
- self.assertIsNone(self.__tryParse("//CHECK"))
- self.assertIsNone(self.__tryParse("#CHECK"))
-
- self.assertIsNotNone(self.__tryParse("//CHECK:foo"))
- self.assertIsNotNone(self.__tryParse("#CHECK:bar"))
-
- def test_InvalidLabel(self):
- self.assertIsNone(self.__tryParse("//ACHECK:foo"))
- self.assertIsNone(self.__tryParse("#ACHECK:foo"))
-
- def test_NotFirstOnTheLine(self):
- self.assertIsNone(self.__tryParse("A// CHECK: foo"))
- self.assertIsNone(self.__tryParse("A # CHECK: foo"))
- self.assertIsNone(self.__tryParse("// // CHECK: foo"))
- self.assertIsNone(self.__tryParse("# # CHECK: foo"))
-
- def test_WhitespaceAgnostic(self):
- self.assertIsNotNone(self.__tryParse(" //CHECK: foo"))
- self.assertIsNotNone(self.__tryParse("// CHECK: foo"))
- self.assertIsNotNone(self.__tryParse(" //CHECK: foo"))
- self.assertIsNotNone(self.__tryParse("// CHECK: foo"))
-
-
-class TestCheckLine_Parse(unittest.TestCase):
- def __getPartPattern(self, linePart):
- if linePart.variant == checker.CheckElement.Variant.Separator:
- return "\s+"
- else:
- return linePart.pattern
-
- def __getRegex(self, checkLine):
- return "".join(map(lambda x: "(" + self.__getPartPattern(x) + ")", checkLine.lineParts))
-
- def __tryParse(self, string):
- return checker.CheckLine(string)
-
- def __parsesTo(self, string, expected):
- self.assertEqual(expected, self.__getRegex(self.__tryParse(string)))
-
- def __tryParseNot(self, string):
- return checker.CheckLine(string, checker.CheckLine.Variant.Not)
-
- def __parsesPattern(self, string, pattern):
- line = self.__tryParse(string)
- self.assertEqual(1, len(line.lineParts))
- self.assertEqual(checker.CheckElement.Variant.Pattern, line.lineParts[0].variant)
- self.assertEqual(pattern, line.lineParts[0].pattern)
-
- def __parsesVarRef(self, string, name):
- line = self.__tryParse(string)
- self.assertEqual(1, len(line.lineParts))
- self.assertEqual(checker.CheckElement.Variant.VarRef, line.lineParts[0].variant)
- self.assertEqual(name, line.lineParts[0].name)
-
- def __parsesVarDef(self, string, name, body):
- line = self.__tryParse(string)
- self.assertEqual(1, len(line.lineParts))
- self.assertEqual(checker.CheckElement.Variant.VarDef, line.lineParts[0].variant)
- self.assertEqual(name, line.lineParts[0].name)
- self.assertEqual(body, line.lineParts[0].pattern)
-
- def __doesNotParse(self, string, partType):
- line = self.__tryParse(string)
- self.assertEqual(1, len(line.lineParts))
- self.assertNotEqual(partType, line.lineParts[0].variant)
-
- # Test that individual parts of the line are recognized
-
- def test_TextOnly(self):
- self.__parsesTo("foo", "(foo)")
- self.__parsesTo(" foo ", "(foo)")
- self.__parsesTo("f$o^o", "(f\$o\^o)")
-
- def test_TextWithWhitespace(self):
- self.__parsesTo("foo bar", "(foo)(\s+)(bar)")
- self.__parsesTo("foo bar", "(foo)(\s+)(bar)")
-
- def test_RegexOnly(self):
- self.__parsesPattern("{{a?b.c}}", "a?b.c")
-
- def test_VarRefOnly(self):
- self.__parsesVarRef("[[ABC]]", "ABC")
-
- def test_VarDefOnly(self):
- self.__parsesVarDef("[[ABC:a?b.c]]", "ABC", "a?b.c")
-
- def test_TextWithRegex(self):
- self.__parsesTo("foo{{abc}}bar", "(foo)(abc)(bar)")
-
- def test_TextWithVar(self):
- self.__parsesTo("foo[[ABC:abc]]bar", "(foo)(abc)(bar)")
-
- def test_PlainWithRegexAndWhitespaces(self):
- self.__parsesTo("foo {{abc}}bar", "(foo)(\s+)(abc)(bar)")
- self.__parsesTo("foo{{abc}} bar", "(foo)(abc)(\s+)(bar)")
- self.__parsesTo("foo {{abc}} bar", "(foo)(\s+)(abc)(\s+)(bar)")
-
- def test_PlainWithVarAndWhitespaces(self):
- self.__parsesTo("foo [[ABC:abc]]bar", "(foo)(\s+)(abc)(bar)")
- self.__parsesTo("foo[[ABC:abc]] bar", "(foo)(abc)(\s+)(bar)")
- self.__parsesTo("foo [[ABC:abc]] bar", "(foo)(\s+)(abc)(\s+)(bar)")
-
- def test_AllKinds(self):
- self.__parsesTo("foo [[ABC:abc]]{{def}}bar", "(foo)(\s+)(abc)(def)(bar)")
- self.__parsesTo("foo[[ABC:abc]] {{def}}bar", "(foo)(abc)(\s+)(def)(bar)")
- self.__parsesTo("foo [[ABC:abc]] {{def}} bar", "(foo)(\s+)(abc)(\s+)(def)(\s+)(bar)")
-
- # Test that variables and patterns are parsed correctly
-
- def test_ValidPattern(self):
- self.__parsesPattern("{{abc}}", "abc")
- self.__parsesPattern("{{a[b]c}}", "a[b]c")
- self.__parsesPattern("{{(a{bc})}}", "(a{bc})")
-
- def test_ValidRef(self):
- self.__parsesVarRef("[[ABC]]", "ABC")
- self.__parsesVarRef("[[A1BC2]]", "A1BC2")
-
- def test_ValidDef(self):
- self.__parsesVarDef("[[ABC:abc]]", "ABC", "abc")
- self.__parsesVarDef("[[ABC:ab:c]]", "ABC", "ab:c")
- self.__parsesVarDef("[[ABC:a[b]c]]", "ABC", "a[b]c")
- self.__parsesVarDef("[[ABC:(a[bc])]]", "ABC", "(a[bc])")
-
- def test_Empty(self):
- self.__doesNotParse("{{}}", checker.CheckElement.Variant.Pattern)
- self.__doesNotParse("[[]]", checker.CheckElement.Variant.VarRef)
- self.__doesNotParse("[[:]]", checker.CheckElement.Variant.VarDef)
-
- def test_InvalidVarName(self):
- self.__doesNotParse("[[0ABC]]", checker.CheckElement.Variant.VarRef)
- self.__doesNotParse("[[AB=C]]", checker.CheckElement.Variant.VarRef)
- self.__doesNotParse("[[ABC=]]", checker.CheckElement.Variant.VarRef)
- self.__doesNotParse("[[0ABC:abc]]", checker.CheckElement.Variant.VarDef)
- self.__doesNotParse("[[AB=C:abc]]", checker.CheckElement.Variant.VarDef)
- self.__doesNotParse("[[ABC=:abc]]", checker.CheckElement.Variant.VarDef)
-
- def test_BodyMatchNotGreedy(self):
- self.__parsesTo("{{abc}}{{def}}", "(abc)(def)")
- self.__parsesTo("[[ABC:abc]][[DEF:def]]", "(abc)(def)")
-
- def test_NoVarDefsInNotChecks(self):
- with self.assertRaises(CheckerException):
- self.__tryParseNot("[[ABC:abc]]")
-
-class TestCheckLine_Match(unittest.TestCase):
- def __matchSingle(self, checkString, outputString, varState={}):
- checkLine = checker.CheckLine(checkString)
- newVarState = checkLine.match(outputString, varState)
- self.assertIsNotNone(newVarState)
- return newVarState
-
- def __notMatchSingle(self, checkString, outputString, varState={}):
- checkLine = checker.CheckLine(checkString)
- self.assertIsNone(checkLine.match(outputString, varState))
-
- def test_TextAndWhitespace(self):
- self.__matchSingle("foo", "foo")
- self.__matchSingle("foo", " foo ")
- self.__matchSingle("foo", "foo bar")
- self.__notMatchSingle("foo", "XfooX")
- self.__notMatchSingle("foo", "zoo")
-
- self.__matchSingle("foo bar", "foo bar")
- self.__matchSingle("foo bar", "abc foo bar def")
- self.__matchSingle("foo bar", "foo foo bar bar")
-
- self.__matchSingle("foo bar", "foo X bar")
- self.__notMatchSingle("foo bar", "foo Xbar")
-
- def test_Pattern(self):
- self.__matchSingle("foo{{A|B}}bar", "fooAbar")
- self.__matchSingle("foo{{A|B}}bar", "fooBbar")
- self.__notMatchSingle("foo{{A|B}}bar", "fooCbar")
-
- def test_VariableReference(self):
- self.__matchSingle("foo[[X]]bar", "foobar", {"X": ""})
- self.__matchSingle("foo[[X]]bar", "fooAbar", {"X": "A"})
- self.__matchSingle("foo[[X]]bar", "fooBbar", {"X": "B"})
- self.__notMatchSingle("foo[[X]]bar", "foobar", {"X": "A"})
- self.__notMatchSingle("foo[[X]]bar", "foo bar", {"X": "A"})
- with self.assertRaises(CheckerException):
- self.__matchSingle("foo[[X]]bar", "foobar", {})
-
- def test_VariableDefinition(self):
- self.__matchSingle("foo[[X:A|B]]bar", "fooAbar")
- self.__matchSingle("foo[[X:A|B]]bar", "fooBbar")
- self.__notMatchSingle("foo[[X:A|B]]bar", "fooCbar")
-
- env = self.__matchSingle("foo[[X:A.*B]]bar", "fooABbar", {})
- self.assertEqual(env, {"X": "AB"})
- env = self.__matchSingle("foo[[X:A.*B]]bar", "fooAxxBbar", {})
- self.assertEqual(env, {"X": "AxxB"})
-
- self.__matchSingle("foo[[X:A|B]]bar[[X]]baz", "fooAbarAbaz")
- self.__matchSingle("foo[[X:A|B]]bar[[X]]baz", "fooBbarBbaz")
- self.__notMatchSingle("foo[[X:A|B]]bar[[X]]baz", "fooAbarBbaz")
-
- def test_NoVariableRedefinition(self):
- with self.assertRaises(CheckerException):
- self.__matchSingle("[[X:...]][[X]][[X:...]][[X]]", "foofoobarbar")
-
- def test_EnvNotChangedOnPartialMatch(self):
- env = {"Y": "foo"}
- self.__notMatchSingle("[[X:A]]bar", "Abaz", env)
- self.assertFalse("X" in env.keys())
-
- def test_VariableContentEscaped(self):
- self.__matchSingle("[[X:..]]foo[[X]]", ".*foo.*")
- self.__notMatchSingle("[[X:..]]foo[[X]]", ".*fooAAAA")
-
-
-CheckVariant = checker.CheckLine.Variant
-
-def prepareSingleCheck(line):
- if isinstance(line, str):
- return checker.CheckLine(line)
- else:
- return checker.CheckLine(line[0], line[1])
-
-def prepareChecks(lines):
- if isinstance(lines, str):
- lines = lines.splitlines()
- return list(map(lambda line: prepareSingleCheck(line), lines))
-
-
-class TestCheckGroup_Match(unittest.TestCase):
- def __matchMulti(self, checkLines, outputString):
- checkGroup = checker.CheckGroup("MyGroup", prepareChecks(checkLines))
- outputGroup = checker.OutputGroup("MyGroup", outputString.splitlines())
- return checkGroup.match(outputGroup)
-
- def __notMatchMulti(self, checkString, outputString):
- with self.assertRaises(CheckerException):
- self.__matchMulti(checkString, outputString)
-
- def test_TextAndPattern(self):
- self.__matchMulti("""foo bar
- abc {{def}}""",
- """foo bar
- abc def""");
- self.__matchMulti("""foo bar
- abc {{de.}}""",
- """=======
- foo bar
- =======
- abc de#
- =======""");
- self.__notMatchMulti("""//XYZ: foo bar
- //XYZ: abc {{def}}""",
- """=======
- foo bar
- =======
- abc de#
- =======""");
-
- def test_Variables(self):
- self.__matchMulti("""foo[[X:.]]bar
- abc[[X]]def""",
- """foo bar
- abc def""");
- self.__matchMulti("""foo[[X:([0-9]+)]]bar
- abc[[X]]def
- ### [[X]] ###""",
- """foo1234bar
- abc1234def
- ### 1234 ###""");
-
- def test_Ordering(self):
- self.__matchMulti([("foo", CheckVariant.InOrder),
- ("bar", CheckVariant.InOrder)],
- """foo
- bar""")
- self.__notMatchMulti([("foo", CheckVariant.InOrder),
- ("bar", CheckVariant.InOrder)],
- """bar
- foo""")
- self.__matchMulti([("abc", CheckVariant.DAG),
- ("def", CheckVariant.DAG)],
- """abc
- def""")
- self.__matchMulti([("abc", CheckVariant.DAG),
- ("def", CheckVariant.DAG)],
- """def
- abc""")
- self.__matchMulti([("foo", CheckVariant.InOrder),
- ("abc", CheckVariant.DAG),
- ("def", CheckVariant.DAG),
- ("bar", CheckVariant.InOrder)],
- """foo
- def
- abc
- bar""")
- self.__notMatchMulti([("foo", CheckVariant.InOrder),
- ("abc", CheckVariant.DAG),
- ("def", CheckVariant.DAG),
- ("bar", CheckVariant.InOrder)],
- """foo
- abc
- bar""")
- self.__notMatchMulti([("foo", CheckVariant.InOrder),
- ("abc", CheckVariant.DAG),
- ("def", CheckVariant.DAG),
- ("bar", CheckVariant.InOrder)],
- """foo
- def
- bar""")
-
- def test_NotAssertions(self):
- self.__matchMulti([("foo", CheckVariant.Not)],
- """abc
- def""")
- self.__notMatchMulti([("foo", CheckVariant.Not)],
- """abc foo
- def""")
- self.__notMatchMulti([("foo", CheckVariant.Not),
- ("bar", CheckVariant.Not)],
- """abc
- def bar""")
-
- def test_LineOnlyMatchesOnce(self):
- self.__matchMulti([("foo", CheckVariant.DAG),
- ("foo", CheckVariant.DAG)],
- """foo
- foo""")
- self.__notMatchMulti([("foo", CheckVariant.DAG),
- ("foo", CheckVariant.DAG)],
- """foo
- bar""")
-
-class TestOutputFile_Parse(unittest.TestCase):
- def __parsesTo(self, string, expected):
- if isinstance(string, str):
- string = unicode(string)
- outputStream = io.StringIO(string)
- return self.assertEqual(checker.OutputFile(outputStream).groups, expected)
-
- def test_NoInput(self):
- self.__parsesTo(None, [])
- self.__parsesTo("", [])
-
- def test_SingleGroup(self):
- self.__parsesTo("""begin_compilation
- method "MyMethod"
- end_compilation
- begin_cfg
- name "pass1"
- foo
- bar
- end_cfg""",
- [ checker.OutputGroup("MyMethod pass1", [ "foo", "bar" ]) ])
-
- def test_MultipleGroups(self):
- self.__parsesTo("""begin_compilation
- name "xyz1"
- method "MyMethod1"
- date 1234
- end_compilation
- begin_cfg
- name "pass1"
- foo
- bar
- end_cfg
- begin_cfg
- name "pass2"
- abc
- def
- end_cfg""",
- [ checker.OutputGroup("MyMethod1 pass1", [ "foo", "bar" ]),
- checker.OutputGroup("MyMethod1 pass2", [ "abc", "def" ]) ])
-
- self.__parsesTo("""begin_compilation
- name "xyz1"
- method "MyMethod1"
- date 1234
- end_compilation
- begin_cfg
- name "pass1"
- foo
- bar
- end_cfg
- begin_compilation
- name "xyz2"
- method "MyMethod2"
- date 5678
- end_compilation
- begin_cfg
- name "pass2"
- abc
- def
- end_cfg""",
- [ checker.OutputGroup("MyMethod1 pass1", [ "foo", "bar" ]),
- checker.OutputGroup("MyMethod2 pass2", [ "abc", "def" ]) ])
-
-class TestCheckFile_Parse(unittest.TestCase):
- def __parsesTo(self, string, expected):
- if isinstance(string, str):
- string = unicode(string)
- checkStream = io.StringIO(string)
- return self.assertEqual(checker.CheckFile("CHECK", checkStream).groups, expected)
-
- def test_NoInput(self):
- self.__parsesTo(None, [])
- self.__parsesTo("", [])
-
- def test_SingleGroup(self):
- self.__parsesTo("""// CHECK-START: Example Group
- // CHECK: foo
- // CHECK: bar""",
- [ checker.CheckGroup("Example Group", prepareChecks([ "foo", "bar" ])) ])
-
- def test_MultipleGroups(self):
- self.__parsesTo("""// CHECK-START: Example Group1
- // CHECK: foo
- // CHECK: bar
- // CHECK-START: Example Group2
- // CHECK: abc
- // CHECK: def""",
- [ checker.CheckGroup("Example Group1", prepareChecks([ "foo", "bar" ])),
- checker.CheckGroup("Example Group2", prepareChecks([ "abc", "def" ])) ])
-
- def test_CheckVariants(self):
- self.__parsesTo("""// CHECK-START: Example Group
- // CHECK: foo
- // CHECK-NOT: bar
- // CHECK-DAG: abc
- // CHECK-DAG: def""",
- [ checker.CheckGroup("Example Group",
- prepareChecks([ ("foo", CheckVariant.InOrder),
- ("bar", CheckVariant.Not),
- ("abc", CheckVariant.DAG),
- ("def", CheckVariant.DAG) ])) ])
-
-if __name__ == '__main__':
- checker.Logger.Verbosity = checker.Logger.Level.NoOutput
- unittest.main()
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 2040b57..de45c49 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -21,28 +21,19 @@
name: "libcore.java.lang.SystemTest#testSystemProperties_mutable"
},
{
- description: "Differences between vogar and cts",
+ description: "Differences between vogar and cts. Passes with --mode activity",
result: EXEC_FAILED,
modes: [device],
- names: ["libcore.java.lang.OldSystemTest#test_getProperties",
- "org.apache.harmony.tests.java.lang.Process2Test#test_getErrorStream",
- "org.apache.harmony.tests.java.lang.ProcessTest#test_exitValue"]
+ names: ["libcore.java.lang.OldSystemTest#test_getProperties"]
},
{
- description: "Failures needing investigation",
+  description: "Differences between vogar and cts. EACCES when run with vogar.
+ Passes on host, passes with cts. Passes with vogar with su
+ (--invoke-with \"su root\"). Does not pass after setting chmod
+ 777 all directories on path to socket (on device without su).",
result: EXEC_FAILED,
modes: [device],
- names: ["libcore.java.util.TimeZoneTest#testDisplayNames",
- "libcore.java.util.TimeZoneTest#test_useDaylightTime_Taiwan",
- "libcore.java.util.TimeZoneTest#testAllDisplayNames",
- "libcore.io.OsTest#testUnixDomainSockets_in_file_system",
- "org.apache.harmony.luni.tests.java.net.URLConnectionTest#test_setReadTimeoutI",
- "org.apache.harmony.tests.java.util.DateTest#test_Constructor",
- "org.apache.harmony.tests.java.util.ScannerTest#test_Constructor_LReadableByteChannel",
- "org.apache.harmony.tests.java.util.TimeZoneTest#test_hasSameRules_Ljava_util_TimeZone",
- "org.apache.harmony.tests.java.text.ChoiceFormatTest#testEscapedPatternWithConsecutiveQuotes",
- "org.apache.harmony.tests.java.text.ChoiceFormatTest#testToPatternWithInfinities",
- "org.apache.harmony.tests.java.text.MessageFormatTest#test19011159"]
+ names: ["libcore.io.OsTest#testUnixDomainSockets_in_file_system"]
},
{
description: "Failing due to a locale problem on hammerhead.",
@@ -109,20 +100,20 @@
bug: 19165288
},
{
- description: "Bug in libcore",
- result: EXEC_FAILED,
- names: ["libcore.javax.crypto.ECDHKeyAgreementTest#testInit_withUnsupportedPrivateKeyType"],
- bug: 19730263
-},
-{
- description: "Needs to be run as root",
- result: EXEC_FAILED,
- modes: [host],
- names: ["libcore.io.OsTest#test_PacketSocketAddress"]
-},
-{
description: "Needs kernel updates on host/device",
result: EXEC_FAILED,
names: ["libcore.io.OsTest#test_socketPing"]
+},
+{
+ description: "Linker issues in chrooted environment",
+ modes: [device],
+ result: EXEC_FAILED,
+ names: ["org.apache.harmony.tests.java.lang.ProcessManagerTest#testEnvironment"]
+},
+{
+ description: "Crypto failures",
+ result: EXEC_FAILED,
+ names: ["libcore.javax.crypto.CipherTest#testCipher_ShortBlock_Failure",
+ "libcore.javax.crypto.CipherTest#testCipher_Success"]
}
]
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index 301708b..77e8004 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -19,22 +19,17 @@
exit 1
fi
-if [[ $ANDROID_SERIAL == HT4CTJT03670 ]] || [[ $ANDROID_SERIAL == HT49CJT00070 ]]; then
- echo "Not running on buildbot because of failures on volantis. Investigating."
- exit 0
-fi
-
# Jar containing all the tests.
test_jar=out/host/linux-x86/framework/apache-harmony-jdwp-tests-hostdex.jar
-junit_jar=out/host/linux-x86/framework/junit.jar
-if [ ! -f $test_jar -o ! -f $junit_jar ]; then
+if [ ! -f $test_jar ]; then
echo "Before running, you must build jdwp tests and vogar:" \
- "make junit apache-harmony-jdwp-tests-hostdex vogar vogar.jar"
+ "make apache-harmony-jdwp-tests-hostdex vogar vogar.jar"
exit 1
fi
art="/data/local/tmp/system/bin/art"
+art_debugee="sh /data/local/tmp/system/bin/art"
# We use Quick's image on target because optimizing's image is not compiled debuggable.
image="-Ximage:/data/art-test/core.art"
args=$@
@@ -50,6 +45,7 @@
# Specify bash explicitly since the art script cannot, since it has to run on the device
# with mksh.
art="bash out/host/linux-x86/bin/art"
+ art_debugee="bash out/host/linux-x86/bin/art"
# We force generation of a new image to avoid build-time and run-time classpath differences.
image="-Ximage:/system/non/existent"
# We do not need a device directory on host.
@@ -77,13 +73,12 @@
$args \
$device_dir \
$image_compiler_option \
- --timeout 600 \
+ --timeout 800 \
--vm-arg -Djpda.settings.verbose=true \
--vm-arg -Djpda.settings.syncPort=34016 \
--vm-arg -Djpda.settings.transportAddress=127.0.0.1:55107 \
- --vm-arg -Djpda.settings.debuggeeJavaPath="$art $image $debuggee_args" \
+ --vm-arg -Djpda.settings.debuggeeJavaPath="\"$art_debugee $image $debuggee_args\"" \
--classpath $test_jar \
- --classpath $junit_jar \
--vm-arg -Xcompiler-option --vm-arg --compiler-backend=Optimizing \
--vm-arg -Xcompiler-option --vm-arg --debuggable \
org.apache.harmony.jpda.tests.share.AllTests