Merge "ART: Add Security cutout to unstarted runtime"
diff --git a/compiler/dwarf/debug_frame_opcode_writer.h b/compiler/dwarf/debug_frame_opcode_writer.h
index d0d1821..4112c84 100644
--- a/compiler/dwarf/debug_frame_opcode_writer.h
+++ b/compiler/dwarf/debug_frame_opcode_writer.h
@@ -17,9 +17,9 @@
#ifndef ART_COMPILER_DWARF_DEBUG_FRAME_OPCODE_WRITER_H_
#define ART_COMPILER_DWARF_DEBUG_FRAME_OPCODE_WRITER_H_
-#include "dwarf.h"
-#include "register.h"
-#include "writer.h"
+#include "dwarf/dwarf_constants.h"
+#include "dwarf/register.h"
+#include "dwarf/writer.h"
#include "utils.h"
namespace art {
diff --git a/compiler/dwarf/debug_info_entry_writer.h b/compiler/dwarf/debug_info_entry_writer.h
index a2c9f5f..f5b9ca5 100644
--- a/compiler/dwarf/debug_info_entry_writer.h
+++ b/compiler/dwarf/debug_info_entry_writer.h
@@ -20,9 +20,9 @@
#include <cstdint>
#include <unordered_map>
-#include "dwarf.h"
+#include "dwarf/dwarf_constants.h"
+#include "dwarf/writer.h"
#include "leb128.h"
-#include "writer.h"
namespace art {
namespace dwarf {
diff --git a/compiler/dwarf/debug_line_opcode_writer.h b/compiler/dwarf/debug_line_opcode_writer.h
index 77ed154..bdc25e4 100644
--- a/compiler/dwarf/debug_line_opcode_writer.h
+++ b/compiler/dwarf/debug_line_opcode_writer.h
@@ -19,8 +19,8 @@
#include <cstdint>
-#include "dwarf.h"
-#include "writer.h"
+#include "dwarf/dwarf_constants.h"
+#include "dwarf/writer.h"
namespace art {
namespace dwarf {
diff --git a/runtime/dwarf.h b/compiler/dwarf/dwarf_constants.h
similarity index 98%
rename from runtime/dwarf.h
rename to compiler/dwarf/dwarf_constants.h
index b491f47..8e39ca7 100644
--- a/runtime/dwarf.h
+++ b/compiler/dwarf/dwarf_constants.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_DWARF_H_
-#define ART_RUNTIME_DWARF_H_
+#ifndef ART_COMPILER_DWARF_DWARF_CONSTANTS_H_
+#define ART_COMPILER_DWARF_DWARF_CONSTANTS_H_
namespace art {
namespace dwarf {
@@ -661,4 +661,4 @@
} // namespace dwarf
} // namespace art
-#endif // ART_RUNTIME_DWARF_H_
+#endif // ART_COMPILER_DWARF_DWARF_CONSTANTS_H_
diff --git a/compiler/dwarf/headers.h b/compiler/dwarf/headers.h
index d17d327..760f53c 100644
--- a/compiler/dwarf/headers.h
+++ b/compiler/dwarf/headers.h
@@ -19,11 +19,11 @@
#include <cstdint>
-#include "debug_frame_opcode_writer.h"
-#include "debug_info_entry_writer.h"
-#include "debug_line_opcode_writer.h"
-#include "register.h"
-#include "writer.h"
+#include "dwarf/debug_frame_opcode_writer.h"
+#include "dwarf/debug_info_entry_writer.h"
+#include "dwarf/debug_line_opcode_writer.h"
+#include "dwarf/register.h"
+#include "dwarf/writer.h"
namespace art {
namespace dwarf {
diff --git a/compiler/elf_writer_debug.cc b/compiler/elf_writer_debug.cc
index f7811dd..6fe05a4 100644
--- a/compiler/elf_writer_debug.cc
+++ b/compiler/elf_writer_debug.cc
@@ -16,6 +16,8 @@
#include "elf_writer_debug.h"
+#include <unordered_set>
+
#include "compiled_method.h"
#include "driver/compiler_driver.h"
#include "dex_file-inl.h"
@@ -193,6 +195,15 @@
cunit_high_pc = std::max(cunit_high_pc, method_info.high_pc_);
}
+ // Find all addresses (low_pc) which contain deduped methods.
+ // The first instance of a method is not marked deduped_, but the rest are.
+ std::unordered_set<uint32_t> deduped_addresses;
+ for (auto it = method_infos.begin(); it != method_infos.end(); ++it) {
+ if (it->deduped_) {
+ deduped_addresses.insert(it->low_pc_);
+ }
+ }
+
// Write .debug_info section.
size_t debug_abbrev_offset = debug_abbrev->size();
DebugInfoEntryWriter<> info(false /* 32 bit */, debug_abbrev);
@@ -205,10 +216,8 @@
for (auto method_info : method_infos) {
std::string method_name = PrettyMethod(method_info.dex_method_index_,
*method_info.dex_file_, true);
- if (method_info.deduped_) {
- // TODO We should place the DEDUPED tag on the first instance of a deduplicated symbol
- // so that it will show up in a debuggerd crash report.
- method_name += " [ DEDUPED ]";
+ if (deduped_addresses.find(method_info.low_pc_) != deduped_addresses.end()) {
+ method_name += " [DEDUPED]";
}
info.StartTag(DW_TAG_subprogram, DW_CHILDREN_no);
info.WriteStrp(DW_AT_name, method_name.data(), debug_str);
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index 737b9d6..429cd85 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -17,6 +17,7 @@
#include "elf_writer_quick.h"
#include <unordered_map>
+#include <unordered_set>
#include "base/logging.h"
#include "base/unix_file/fd_file.h"
@@ -181,16 +182,23 @@
ElfBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>* builder,
OatWriter* oat_writer) {
- // Iterate over the compiled methods.
const std::vector<OatWriter::DebugInfo>& method_info = oat_writer->GetMethodDebugInfo();
+
+ // Find all addresses (low_pc) which contain deduped methods.
+ // The first instance of a method is not marked deduped_, but the rest are.
+ std::unordered_set<uint32_t> deduped_addresses;
+ for (auto it = method_info.begin(); it != method_info.end(); ++it) {
+ if (it->deduped_) {
+ deduped_addresses.insert(it->low_pc_);
+ }
+ }
+
ElfSymtabBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Sym, Elf_Shdr>* symtab =
builder->GetSymtabBuilder();
for (auto it = method_info.begin(); it != method_info.end(); ++it) {
std::string name = PrettyMethod(it->dex_method_index_, *it->dex_file_, true);
- if (it->deduped_) {
- // TODO We should place the DEDUPED tag on the first instance of a deduplicated symbol
- // so that it will show up in a debuggerd crash report.
- name += " [ DEDUPED ]";
+ if (deduped_addresses.find(it->low_pc_) != deduped_addresses.end()) {
+ name += " [DEDUPED]";
}
uint32_t low_pc = it->low_pc_;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 33eacba..0fa4fa4 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1380,7 +1380,7 @@
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->InputAt(1), instruction));
if (instruction->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
}
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index 966165b..53f1f3c 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -194,7 +194,8 @@
int64_t value = CodeGenerator::GetInt64ValueOf(constant);
- if (instr->IsAdd() || instr->IsSub() || instr->IsCondition() || instr->IsCompare()) {
+ if (instr->IsAdd() || instr->IsSub() || instr->IsCondition() ||
+ instr->IsCompare() || instr->IsBoundsCheck()) {
// Uses aliases of ADD/SUB instructions.
return vixl::Assembler::IsImmAddSub(value);
} else if (instr->IsAnd() || instr->IsOr() || instr->IsXor()) {
diff --git a/compiler/optimizing/primitive_type_propagation.cc b/compiler/optimizing/primitive_type_propagation.cc
index c20c8a1..af93438 100644
--- a/compiler/optimizing/primitive_type_propagation.cc
+++ b/compiler/optimizing/primitive_type_propagation.cc
@@ -65,6 +65,10 @@
if (equivalent->IsPhi()) {
equivalent->AsPhi()->SetLive();
AddToWorklist(equivalent->AsPhi());
+ } else if (equivalent == input) {
+ // The input has changed its type. It can be an input of other phis,
+ // so we need to put phi users in the work list.
+ AddDependentInstructionsToWorklist(equivalent);
}
}
}
@@ -117,10 +121,10 @@
worklist_.Add(instruction);
}
-void PrimitiveTypePropagation::AddDependentInstructionsToWorklist(HPhi* instruction) {
+void PrimitiveTypePropagation::AddDependentInstructionsToWorklist(HInstruction* instruction) {
for (HUseIterator<HInstruction*> it(instruction->GetUses()); !it.Done(); it.Advance()) {
HPhi* phi = it.Current()->GetUser()->AsPhi();
- if (phi != nullptr && phi->IsLive()) {
+ if (phi != nullptr && phi->IsLive() && phi->GetType() != instruction->GetType()) {
AddToWorklist(phi);
}
}
diff --git a/compiler/optimizing/primitive_type_propagation.h b/compiler/optimizing/primitive_type_propagation.h
index 1374cbb..6d370ed 100644
--- a/compiler/optimizing/primitive_type_propagation.h
+++ b/compiler/optimizing/primitive_type_propagation.h
@@ -33,7 +33,7 @@
void VisitBasicBlock(HBasicBlock* block);
void ProcessWorklist();
void AddToWorklist(HPhi* phi);
- void AddDependentInstructionsToWorklist(HPhi* phi);
+ void AddDependentInstructionsToWorklist(HInstruction* instruction);
bool UpdateType(HPhi* phi);
HGraph* const graph_;
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.h b/runtime/arch/arm64/instruction_set_features_arm64.h
index 3b3e2c9..e59ff58 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.h
+++ b/runtime/arch/arm64/instruction_set_features_arm64.h
@@ -66,13 +66,12 @@
return fix_cortex_a53_843419_;
}
- // TODO: Tune this on a per CPU basis. For now, we pessimistically assume
- // that all ARM64 CPUs prefer explicit memory barriers over acquire-release.
- //
- // NOTE: This should not be the case! However we want to exercise the
- // explicit memory barriers code paths in the Optimizing Compiler.
+ // NOTE: This flag can be tuned on a per-CPU basis. In general, all ARMv8 CPUs
+ // should prefer the acquire-release semantics over explicit DMBs when
+ // handling load/store-volatile. For a specific use case see the ARM64
+ // Optimizing backend.
bool PreferAcquireRelease() const {
- return false;
+ return true;
}
virtual ~Arm64InstructionSetFeatures() {}
diff --git a/runtime/arch/arm64/instruction_set_features_arm64_test.cc b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
index 753107b..599f24e 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64_test.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
@@ -31,7 +31,7 @@
EXPECT_STREQ("smp,a53", arm64_features->GetFeatureString().c_str());
EXPECT_EQ(arm64_features->AsBitmap(), 3U);
// See the comments in instruction_set_features_arm64.h.
- EXPECT_FALSE(arm64_features->AsArm64InstructionSetFeatures()->PreferAcquireRelease());
+ EXPECT_TRUE(arm64_features->AsArm64InstructionSetFeatures()->PreferAcquireRelease());
}
} // namespace art
diff --git a/runtime/base/histogram-inl.h b/runtime/base/histogram-inl.h
index 812ed86..0f969b9 100644
--- a/runtime/base/histogram-inl.h
+++ b/runtime/base/histogram-inl.h
@@ -165,6 +165,23 @@
}
template <class Value>
+inline void Histogram<Value>::DumpBins(std::ostream& os) const {
+ DCHECK_GT(sample_size_, 0ull);
+ bool dumped_one = false;
+ for (size_t bin_idx = 0; bin_idx < frequency_.size(); ++bin_idx) {
+ if (frequency_[bin_idx] != 0U) {
+ if (dumped_one) {
+ // Prepend a comma if not the first bin.
+ os << ",";
+ } else {
+ dumped_one = true;
+ }
+ os << GetRange(bin_idx) << ":" << frequency_[bin_idx];
+ }
+ }
+}
+
+template <class Value>
inline void Histogram<Value>::PrintConfidenceIntervals(std::ostream &os, double interval,
const CumulativeData& data) const {
static constexpr size_t kFractionalDigits = 3;
@@ -249,4 +266,3 @@
} // namespace art
#endif // ART_RUNTIME_BASE_HISTOGRAM_INL_H_
-
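For reference, a minimal standalone sketch (not part of this patch) of the comma-separated "range:frequency" text that the new Histogram::DumpBins() emits; the bin values here are hypothetical, chosen only to illustrate the format.

    #include <cstdint>
    #include <iostream>
    #include <utility>
    #include <vector>

    int main() {
      // Hypothetical (bucket range start, frequency) pairs after a few AddValue() calls.
      std::vector<std::pair<uint64_t, uint64_t>> bins = {{0, 3}, {1, 0}, {2, 5}};
      bool dumped_one = false;
      for (const auto& bin : bins) {
        if (bin.second == 0u) {
          continue;  // Empty bins are skipped, mirroring DumpBins().
        }
        if (dumped_one) {
          std::cout << ",";  // Comma-separate every bin after the first.
        }
        dumped_one = true;
        std::cout << bin.first << ":" << bin.second;
      }
      std::cout << std::endl;  // Prints: 0:3,2:5
    }

The checkHistogram() helper added to test 099-vmdebug below parses exactly this shape: comma-separated buckets, each a non-negative "key:value" pair with non-decreasing keys.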
diff --git a/runtime/base/histogram.h b/runtime/base/histogram.h
index 78f6e1c..c312fb2 100644
--- a/runtime/base/histogram.h
+++ b/runtime/base/histogram.h
@@ -61,6 +61,7 @@
void PrintConfidenceIntervals(std::ostream& os, double interval,
const CumulativeData& data) const;
void PrintBins(std::ostream& os, const CumulativeData& data) const;
+ void DumpBins(std::ostream& os) const;
Value GetRange(size_t bucket_idx) const;
size_t GetBucketCount() const;
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 935c401..b380fed 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1259,94 +1259,124 @@
return ClassPathEntry(nullptr, nullptr);
}
-mirror::Class* ClassLinker::FindClassInPathClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
- Thread* self, const char* descriptor,
- size_t hash,
- Handle<mirror::ClassLoader> class_loader) {
- // Can we special case for a well understood PathClassLoader with the BootClassLoader as parent?
- if (class_loader->GetClass() !=
- soa.Decode<mirror::Class*>(WellKnownClasses::dalvik_system_PathClassLoader) ||
- class_loader->GetParent()->GetClass() !=
- soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_BootClassLoader)) {
- return nullptr;
- }
- ClassPathEntry pair = FindInClassPath(descriptor, hash, boot_class_path_);
- // Check if this would be found in the parent boot class loader.
- if (pair.second != nullptr) {
- mirror::Class* klass = LookupClass(self, descriptor, hash, nullptr);
- if (klass != nullptr) {
- // May return null if resolution on another thread fails.
- klass = EnsureResolved(self, descriptor, klass);
+static bool IsBootClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
+ mirror::ClassLoader* class_loader)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return class_loader == nullptr ||
+ class_loader->GetClass() ==
+ soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_BootClassLoader);
+}
+
+bool ClassLinker::FindClassInPathClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
+ Thread* self, const char* descriptor,
+ size_t hash,
+ Handle<mirror::ClassLoader> class_loader,
+ mirror::Class** result) {
+ // Termination case: boot class-loader.
+ if (IsBootClassLoader(soa, class_loader.Get())) {
+ // The boot class loader, search the boot class path.
+ ClassPathEntry pair = FindInClassPath(descriptor, hash, boot_class_path_);
+ if (pair.second != nullptr) {
+ mirror::Class* klass = LookupClass(self, descriptor, hash, nullptr);
+ if (klass != nullptr) {
+ *result = EnsureResolved(self, descriptor, klass);
+ } else {
+ *result = DefineClass(self, descriptor, hash, NullHandle<mirror::ClassLoader>(),
+ *pair.first, *pair.second);
+ }
+ if (*result == nullptr) {
+ CHECK(self->IsExceptionPending()) << descriptor;
+ self->ClearException();
+ }
} else {
- // May OOME.
- klass = DefineClass(self, descriptor, hash, NullHandle<mirror::ClassLoader>(), *pair.first,
- *pair.second);
+ *result = nullptr;
}
- if (klass == nullptr) {
- CHECK(self->IsExceptionPending()) << descriptor;
- self->ClearException();
- }
- return klass;
- } else {
- // Handle as if this is the child PathClassLoader.
- // Handles as RegisterDexFile may allocate dex caches (and cause thread suspension).
- StackHandleScope<3> hs(self);
- // The class loader is a PathClassLoader which inherits from BaseDexClassLoader.
- // We need to get the DexPathList and loop through it.
- ArtField* const cookie_field = soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie);
- ArtField* const dex_file_field =
- soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
- mirror::Object* dex_path_list =
- soa.DecodeField(WellKnownClasses::dalvik_system_PathClassLoader_pathList)->
- GetObject(class_loader.Get());
- if (dex_path_list != nullptr && dex_file_field != nullptr && cookie_field != nullptr) {
- // DexPathList has an array dexElements of Elements[] which each contain a dex file.
- mirror::Object* dex_elements_obj =
- soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements)->
- GetObject(dex_path_list);
- // Loop through each dalvik.system.DexPathList$Element's dalvik.system.DexFile and look
- // at the mCookie which is a DexFile vector.
- if (dex_elements_obj != nullptr) {
- Handle<mirror::ObjectArray<mirror::Object>> dex_elements =
- hs.NewHandle(dex_elements_obj->AsObjectArray<mirror::Object>());
- for (int32_t i = 0; i < dex_elements->GetLength(); ++i) {
- mirror::Object* element = dex_elements->GetWithoutChecks(i);
- if (element == nullptr) {
- // Should never happen, fall back to java code to throw a NPE.
+ return true;
+ }
+
+ // Unsupported class-loader?
+ if (class_loader->GetClass() !=
+ soa.Decode<mirror::Class*>(WellKnownClasses::dalvik_system_PathClassLoader)) {
+ *result = nullptr;
+ return false;
+ }
+
+ // Handles as RegisterDexFile may allocate dex caches (and cause thread suspension).
+ StackHandleScope<4> hs(self);
+ Handle<mirror::ClassLoader> h_parent(hs.NewHandle(class_loader->GetParent()));
+ bool recursive_result = FindClassInPathClassLoader(soa, self, descriptor, hash, h_parent, result);
+
+ if (!recursive_result) {
+ // Something wrong up the chain.
+ return false;
+ }
+
+ if (*result != nullptr) {
+ // Found the class up the chain.
+ return true;
+ }
+
+ // Handle this step.
+ // Handle as if this is the child PathClassLoader.
+ // The class loader is a PathClassLoader which inherits from BaseDexClassLoader.
+ // We need to get the DexPathList and loop through it.
+ ArtField* const cookie_field = soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie);
+ ArtField* const dex_file_field =
+ soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
+ mirror::Object* dex_path_list =
+ soa.DecodeField(WellKnownClasses::dalvik_system_PathClassLoader_pathList)->
+ GetObject(class_loader.Get());
+ if (dex_path_list != nullptr && dex_file_field != nullptr && cookie_field != nullptr) {
+ // DexPathList has an array dexElements of Elements[] which each contain a dex file.
+ mirror::Object* dex_elements_obj =
+ soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements)->
+ GetObject(dex_path_list);
+ // Loop through each dalvik.system.DexPathList$Element's dalvik.system.DexFile and look
+ // at the mCookie which is a DexFile vector.
+ if (dex_elements_obj != nullptr) {
+ Handle<mirror::ObjectArray<mirror::Object>> dex_elements =
+ hs.NewHandle(dex_elements_obj->AsObjectArray<mirror::Object>());
+ for (int32_t i = 0; i < dex_elements->GetLength(); ++i) {
+ mirror::Object* element = dex_elements->GetWithoutChecks(i);
+ if (element == nullptr) {
+ // Should never happen, fall back to java code to throw a NPE.
+ break;
+ }
+ mirror::Object* dex_file = dex_file_field->GetObject(element);
+ if (dex_file != nullptr) {
+ mirror::LongArray* long_array = cookie_field->GetObject(dex_file)->AsLongArray();
+ if (long_array == nullptr) {
+ // This should never happen so log a warning.
+ LOG(WARNING) << "Null DexFile::mCookie for " << descriptor;
break;
}
- mirror::Object* dex_file = dex_file_field->GetObject(element);
- if (dex_file != nullptr) {
- mirror::LongArray* long_array = cookie_field->GetObject(dex_file)->AsLongArray();
- if (long_array == nullptr) {
- // This should never happen so log a warning.
- LOG(WARNING) << "Null DexFile::mCookie for " << descriptor;
- break;
- }
- int32_t long_array_size = long_array->GetLength();
- for (int32_t j = 0; j < long_array_size; ++j) {
- const DexFile* cp_dex_file = reinterpret_cast<const DexFile*>(static_cast<uintptr_t>(
- long_array->GetWithoutChecks(j)));
- const DexFile::ClassDef* dex_class_def = cp_dex_file->FindClassDef(descriptor, hash);
- if (dex_class_def != nullptr) {
- RegisterDexFile(*cp_dex_file);
- mirror::Class* klass = DefineClass(self, descriptor, hash, class_loader,
- *cp_dex_file, *dex_class_def);
- if (klass == nullptr) {
- CHECK(self->IsExceptionPending()) << descriptor;
- self->ClearException();
- return nullptr;
- }
- return klass;
+ int32_t long_array_size = long_array->GetLength();
+ for (int32_t j = 0; j < long_array_size; ++j) {
+ const DexFile* cp_dex_file = reinterpret_cast<const DexFile*>(static_cast<uintptr_t>(
+ long_array->GetWithoutChecks(j)));
+ const DexFile::ClassDef* dex_class_def = cp_dex_file->FindClassDef(descriptor, hash);
+ if (dex_class_def != nullptr) {
+ RegisterDexFile(*cp_dex_file);
+ mirror::Class* klass = DefineClass(self, descriptor, hash, class_loader,
+ *cp_dex_file, *dex_class_def);
+ if (klass == nullptr) {
+ CHECK(self->IsExceptionPending()) << descriptor;
+ self->ClearException();
+ // TODO: Is it really right to break here, and not check the other dex files?
+ return true;
}
+ *result = klass;
+ return true;
}
}
}
}
}
self->AssertNoPendingException();
- return nullptr;
}
+
+ // Result is still null from the parent call, no need to set it again...
+ return true;
}
mirror::Class* ClassLinker::FindClass(Thread* self, const char* descriptor,
@@ -1384,10 +1414,18 @@
}
} else {
ScopedObjectAccessUnchecked soa(self);
- mirror::Class* cp_klass = FindClassInPathClassLoader(soa, self, descriptor, hash,
- class_loader);
- if (cp_klass != nullptr) {
- return cp_klass;
+ mirror::Class* cp_klass;
+ if (FindClassInPathClassLoader(soa, self, descriptor, hash, class_loader, &cp_klass)) {
+ // The chain was understood. So the value in cp_klass is either the class we were looking
+ // for, or not found.
+ if (cp_klass != nullptr) {
+ return cp_klass;
+ }
+ // TODO: We handle the boot classpath loader in FindClassInPathClassLoader. Try to unify this
+ // and the branch above. TODO: throw the right exception here.
+
+ // We'll let the Java-side rediscover all this and throw the exception with the right stack
+ // trace.
}
if (Runtime::Current()->IsAotCompiler()) {
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 2427462..68624b0 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -117,11 +117,15 @@
Handle<mirror::ClassLoader> class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Find a class in the path class loader, loading it if necessary without using JNI. Hash
- // function is supposed to be ComputeModifiedUtf8Hash(descriptor).
- mirror::Class* FindClassInPathClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
- Thread* self, const char* descriptor, size_t hash,
- Handle<mirror::ClassLoader> class_loader)
+ // Finds a class in the path class loader, loading it if necessary without using JNI. Hash
+ // function is supposed to be ComputeModifiedUtf8Hash(descriptor). Returns true if the
+ // class-loader chain could be handled, false otherwise, i.e., a non-supported class-loader
+ // was encountered while walking the parent chain (currently only BootClassLoader and
+ // PathClassLoader are supported).
+ bool FindClassInPathClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
+ Thread* self, const char* descriptor, size_t hash,
+ Handle<mirror::ClassLoader> class_loader,
+ mirror::Class** result)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Finds a class by its descriptor using the "system" class loader, ie by searching the
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index ed2e295..bb8d876 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -490,29 +490,21 @@
class VerifyRootVisitor : public SingleRootVisitor {
public:
- explicit VerifyRootVisitor(MarkSweep* collector) : collector_(collector) { }
-
void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
- collector_->VerifyRoot(root, info);
- }
-
- private:
- MarkSweep* const collector_;
-};
-
-void MarkSweep::VerifyRoot(const Object* root, const RootInfo& root_info) {
- // See if the root is on any space bitmap.
- if (heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
- space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
- if (large_object_space != nullptr && !large_object_space->Contains(root)) {
- LOG(ERROR) << "Found invalid root: " << root << " " << root_info;
+ // See if the root is on any space bitmap.
+ auto* heap = Runtime::Current()->GetHeap();
+ if (heap->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
+ space::LargeObjectSpace* large_object_space = heap->GetLargeObjectsSpace();
+ if (large_object_space != nullptr && !large_object_space->Contains(root)) {
+ LOG(ERROR) << "Found invalid root: " << root << " " << info;
+ }
}
}
-}
+};
void MarkSweep::VerifyRoots() {
- VerifyRootVisitor visitor(this);
+ VerifyRootVisitor visitor;
Runtime::Current()->GetThreadList()->VisitRoots(&visitor);
}
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 31cea17..fad3403 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -248,9 +248,6 @@
// whether or not we care about pauses.
size_t GetThreadCount(bool paused) const;
- void VerifyRoot(const mirror::Object* root, const RootInfo& root_info)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
-
// Push a single reference on a mark stack.
void PushOnMarkStack(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 83da5a8..beaf067 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -195,7 +195,17 @@
last_time_homogeneous_space_compaction_by_oom_(NanoTime()),
pending_collector_transition_(nullptr),
pending_heap_trim_(nullptr),
- use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom) {
+ use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom),
+ running_collection_is_blocking_(false),
+ blocking_gc_count_(0U),
+ blocking_gc_time_(0U),
+ last_update_time_gc_count_rate_histograms_( // Round down by the window duration.
+ (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration),
+ gc_count_last_window_(0U),
+ blocking_gc_count_last_window_(0U),
+ gc_count_rate_histogram_("gc count rate histogram", 1U, kGcCountRateMaxBucketCount),
+ blocking_gc_count_rate_histogram_("blocking gc count rate histogram", 1U,
+ kGcCountRateMaxBucketCount) {
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Heap() entering";
}
@@ -926,7 +936,6 @@
total_duration += collector->GetCumulativeTimings().GetTotalNs();
total_paused_time += collector->GetTotalPausedTimeNs();
collector->DumpPerformanceInfo(os);
- collector->ResetMeasurements();
}
uint64_t allocation_time =
static_cast<uint64_t>(total_allocation_time_.LoadRelaxed()) * kTimeAdjust;
@@ -940,8 +949,8 @@
}
uint64_t total_objects_allocated = GetObjectsAllocatedEver();
os << "Total number of allocations " << total_objects_allocated << "\n";
- uint64_t total_bytes_allocated = GetBytesAllocatedEver();
- os << "Total bytes allocated " << PrettySize(total_bytes_allocated) << "\n";
+ os << "Total bytes allocated " << PrettySize(GetBytesAllocatedEver()) << "\n";
+ os << "Total bytes freed " << PrettySize(GetBytesFreedEver()) << "\n";
os << "Free memory " << PrettySize(GetFreeMemory()) << "\n";
os << "Free memory until GC " << PrettySize(GetFreeMemoryUntilGC()) << "\n";
os << "Free memory until OOME " << PrettySize(GetFreeMemoryUntilOOME()) << "\n";
@@ -956,10 +965,68 @@
os << "Zygote space size " << PrettySize(zygote_space_->Size()) << "\n";
}
os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
- os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_);
+ os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
+ os << "Total GC count: " << GetGcCount() << "\n";
+ os << "Total GC time: " << PrettyDuration(GetGcTime()) << "\n";
+ os << "Total blocking GC count: " << GetBlockingGcCount() << "\n";
+ os << "Total blocking GC time: " << PrettyDuration(GetBlockingGcTime()) << "\n";
+
+ {
+ MutexLock mu(Thread::Current(), *gc_complete_lock_);
+ if (gc_count_rate_histogram_.SampleSize() > 0U) {
+ os << "Histogram of GC count per " << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
+ gc_count_rate_histogram_.DumpBins(os);
+ os << "\n";
+ }
+ if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
+ os << "Histogram of blocking GC count per "
+ << NsToMs(kGcCountRateHistogramWindowDuration) << " ms: ";
+ blocking_gc_count_rate_histogram_.DumpBins(os);
+ os << "\n";
+ }
+ }
+
BaseMutex::DumpAll(os);
}
+uint64_t Heap::GetGcCount() const {
+ uint64_t gc_count = 0U;
+ for (auto& collector : garbage_collectors_) {
+ gc_count += collector->GetCumulativeTimings().GetIterations();
+ }
+ return gc_count;
+}
+
+uint64_t Heap::GetGcTime() const {
+ uint64_t gc_time = 0U;
+ for (auto& collector : garbage_collectors_) {
+ gc_time += collector->GetCumulativeTimings().GetTotalNs();
+ }
+ return gc_time;
+}
+
+uint64_t Heap::GetBlockingGcCount() const {
+ return blocking_gc_count_;
+}
+
+uint64_t Heap::GetBlockingGcTime() const {
+ return blocking_gc_time_;
+}
+
+void Heap::DumpGcCountRateHistogram(std::ostream& os) const {
+ MutexLock mu(Thread::Current(), *gc_complete_lock_);
+ if (gc_count_rate_histogram_.SampleSize() > 0U) {
+ gc_count_rate_histogram_.DumpBins(os);
+ }
+}
+
+void Heap::DumpBlockingGcCountRateHistogram(std::ostream& os) const {
+ MutexLock mu(Thread::Current(), *gc_complete_lock_);
+ if (blocking_gc_count_rate_histogram_.SampleSize() > 0U) {
+ blocking_gc_count_rate_histogram_.DumpBins(os);
+ }
+}
+
Heap::~Heap() {
VLOG(heap) << "Starting ~Heap()";
STLDeleteElements(&garbage_collectors_);
@@ -2274,7 +2341,6 @@
}
collector_type_running_ = collector_type_;
}
-
if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
++runtime->GetStats()->gc_for_alloc_count;
++self->GetStats()->gc_for_alloc_count;
@@ -2389,11 +2455,55 @@
collector_type_running_ = kCollectorTypeNone;
if (gc_type != collector::kGcTypeNone) {
last_gc_type_ = gc_type;
+
+ // Update stats.
+ ++gc_count_last_window_;
+ if (running_collection_is_blocking_) {
+ // If the currently running collection was a blocking one,
+ // increment the counters and reset the flag.
+ ++blocking_gc_count_;
+ blocking_gc_time_ += GetCurrentGcIteration()->GetDurationNs();
+ ++blocking_gc_count_last_window_;
+ }
+ // Update the gc count rate histograms if due.
+ UpdateGcCountRateHistograms();
}
+ // Reset.
+ running_collection_is_blocking_ = false;
// Wake anyone who may have been waiting for the GC to complete.
gc_complete_cond_->Broadcast(self);
}
+void Heap::UpdateGcCountRateHistograms() {
+ // Invariant: if the time since the last update includes more than
+ // one windows, all the GC runs (if > 0) must have happened in first
+ // window because otherwise the update must have already taken place
+ // at an earlier GC run. So, we report the non-first windows with
+ // zero counts to the histograms.
+ DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
+ uint64_t now = NanoTime();
+ DCHECK_GE(now, last_update_time_gc_count_rate_histograms_);
+ uint64_t time_since_last_update = now - last_update_time_gc_count_rate_histograms_;
+ uint64_t num_of_windows = time_since_last_update / kGcCountRateHistogramWindowDuration;
+ if (time_since_last_update >= kGcCountRateHistogramWindowDuration) {
+ // Record the first window.
+ gc_count_rate_histogram_.AddValue(gc_count_last_window_ - 1); // Exclude the current run.
+ blocking_gc_count_rate_histogram_.AddValue(running_collection_is_blocking_ ?
+ blocking_gc_count_last_window_ - 1 : blocking_gc_count_last_window_);
+ // Record the other windows (with zero counts).
+ for (uint64_t i = 0; i < num_of_windows - 1; ++i) {
+ gc_count_rate_histogram_.AddValue(0);
+ blocking_gc_count_rate_histogram_.AddValue(0);
+ }
+ // Update the last update time and reset the counters.
+ last_update_time_gc_count_rate_histograms_ =
+ (now / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
+ gc_count_last_window_ = 1; // Include the current run.
+ blocking_gc_count_last_window_ = running_collection_is_blocking_ ? 1 : 0;
+ }
+ DCHECK_EQ(last_update_time_gc_count_rate_histograms_ % kGcCountRateHistogramWindowDuration, 0U);
+}
+
class RootMatchesObjectVisitor : public SingleRootVisitor {
public:
explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { }
@@ -3003,6 +3113,14 @@
collector::GcType last_gc_type = collector::kGcTypeNone;
uint64_t wait_start = NanoTime();
while (collector_type_running_ != kCollectorTypeNone) {
+ if (self != task_processor_->GetRunningThread()) {
+ // The current thread is about to wait for a currently running
+ // collection to finish. If the waiting thread is not the heap
+ // task daemon thread, the currently running collection is
+ // considered as a blocking GC.
+ running_collection_is_blocking_ = true;
+ VLOG(gc) << "Waiting for a blocking GC " << cause;
+ }
ATRACE_BEGIN("GC: Wait For Completion");
// We must wait, change thread state then sleep on gc_complete_cond_;
gc_complete_cond_->Wait(self);
@@ -3015,6 +3133,13 @@
LOG(INFO) << "WaitForGcToComplete blocked for " << PrettyDuration(wait_time)
<< " for cause " << cause;
}
+ if (self != task_processor_->GetRunningThread()) {
+ // The current thread is about to run a collection. If the thread
+ // is not the heap task daemon thread, it's considered as a
+ // blocking GC (i.e., blocking itself).
+ running_collection_is_blocking_ = true;
+ VLOG(gc) << "Starting a blocking GC " << cause;
+ }
return last_gc_type;
}
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 603cbfd..2f62798 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -672,6 +672,14 @@
min_interval_homogeneous_space_compaction_by_oom_ = interval;
}
+ // Helpers for android.os.Debug.getRuntimeStat().
+ uint64_t GetGcCount() const;
+ uint64_t GetGcTime() const;
+ uint64_t GetBlockingGcCount() const;
+ uint64_t GetBlockingGcTime() const;
+ void DumpGcCountRateHistogram(std::ostream& os) const;
+ void DumpBlockingGcCountRateHistogram(std::ostream& os) const;
+
private:
class ConcurrentGCTask;
class CollectorTransitionTask;
@@ -873,6 +881,8 @@
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+ void UpdateGcCountRateHistograms() EXCLUSIVE_LOCKS_REQUIRED(gc_complete_lock_);
+
// All-known continuous spaces, where objects lie within fixed bounds.
std::vector<space::ContinuousSpace*> continuous_spaces_;
@@ -1156,6 +1166,28 @@
// Whether or not we use homogeneous space compaction to avoid OOM errors.
bool use_homogeneous_space_compaction_for_oom_;
+ // True if the currently running collection has made some thread wait.
+ bool running_collection_is_blocking_ GUARDED_BY(gc_complete_lock_);
+ // The number of blocking GC runs.
+ uint64_t blocking_gc_count_;
+ // The total duration of blocking GC runs.
+ uint64_t blocking_gc_time_;
+ // The duration of the window for the GC count rate histograms.
+ static constexpr uint64_t kGcCountRateHistogramWindowDuration = MsToNs(10 * 1000); // 10s.
+ // The last time when the GC count rate histograms were updated.
+ // This is rounded by kGcCountRateHistogramWindowDuration (a multiple of 10s).
+ uint64_t last_update_time_gc_count_rate_histograms_;
+ // The running count of GC runs in the last window.
+ uint64_t gc_count_last_window_;
+ // The running count of blocking GC runs in the last window.
+ uint64_t blocking_gc_count_last_window_;
+ // The maximum number of buckets in the GC count rate histograms.
+ static constexpr size_t kGcCountRateMaxBucketCount = 200;
+ // The histogram of the number of GC invocations per window duration.
+ Histogram<uint64_t> gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);
+ // The histogram of the number of blocking GC invocations per window duration.
+ Histogram<uint64_t> blocking_gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);
+
friend class CollectorTransitionTask;
friend class collector::GarbageCollector;
friend class collector::MarkCompact;
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 5c8e4b9..a4a9d80 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -37,6 +37,15 @@
explicit ValgrindLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) {
}
+ ~ValgrindLargeObjectMapSpace() OVERRIDE {
+ // Keep valgrind happy if there are any large objects, such as dex cache arrays, which aren't
+ // freed since they are held live by the class linker.
+ MutexLock mu(Thread::Current(), lock_);
+ for (auto& m : mem_maps_) {
+ delete m.second;
+ }
+ }
+
virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated)
OVERRIDE {
diff --git a/runtime/gc/task_processor.cc b/runtime/gc/task_processor.cc
index 2ca4b3f..ef34c68 100644
--- a/runtime/gc/task_processor.cc
+++ b/runtime/gc/task_processor.cc
@@ -22,7 +22,8 @@
namespace gc {
TaskProcessor::TaskProcessor()
- : lock_(new Mutex("Task processor lock", kReferenceProcessorLock)), is_running_(false) {
+ : lock_(new Mutex("Task processor lock", kReferenceProcessorLock)), is_running_(false),
+ running_thread_(nullptr) {
// Piggyback off the reference processor lock level.
cond_.reset(new ConditionVariable("Task processor condition", *lock_));
}
@@ -96,15 +97,22 @@
return is_running_;
}
+Thread* TaskProcessor::GetRunningThread() const {
+ MutexLock mu(Thread::Current(), *lock_);
+ return running_thread_;
+}
+
void TaskProcessor::Stop(Thread* self) {
MutexLock mu(self, *lock_);
is_running_ = false;
+ running_thread_ = nullptr;
cond_->Broadcast(self);
}
void TaskProcessor::Start(Thread* self) {
MutexLock mu(self, *lock_);
is_running_ = true;
+ running_thread_ = self;
}
void TaskProcessor::RunAllTasks(Thread* self) {
diff --git a/runtime/gc/task_processor.h b/runtime/gc/task_processor.h
index 765f035..67e3a54 100644
--- a/runtime/gc/task_processor.h
+++ b/runtime/gc/task_processor.h
@@ -63,6 +63,7 @@
bool IsRunning() const LOCKS_EXCLUDED(lock_);
void UpdateTargetRunTime(Thread* self, HeapTask* target_time, uint64_t new_target_time)
LOCKS_EXCLUDED(lock_);
+ Thread* GetRunningThread() const LOCKS_EXCLUDED(lock_);
private:
class CompareByTargetRunTime {
@@ -76,6 +77,7 @@
bool is_running_ GUARDED_BY(lock_);
std::unique_ptr<ConditionVariable> cond_ GUARDED_BY(lock_);
std::multiset<HeapTask*, CompareByTargetRunTime> tasks_ GUARDED_BY(lock_);
+ Thread* running_thread_ GUARDED_BY(lock_);
};
} // namespace gc
diff --git a/runtime/gc_root.h b/runtime/gc_root.h
index 0d3c93b..b67e9c2 100644
--- a/runtime/gc_root.h
+++ b/runtime/gc_root.h
@@ -50,7 +50,7 @@
};
std::ostream& operator<<(std::ostream& os, const RootType& root_type);
-// Only used by hprof. tid and root_type are only used by hprof.
+// Only used by hprof. thread_id_ and type_ are only used by hprof.
class RootInfo {
public:
// Thread id 0 is for non thread roots.
@@ -85,12 +85,13 @@
public:
virtual ~RootVisitor() { }
- // Single root versions, not overridable.
+ // Single root version, not overridable.
ALWAYS_INLINE void VisitRoot(mirror::Object** roots, const RootInfo& info)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
VisitRoots(&roots, 1, info);
}
+ // Single root version, not overridable.
ALWAYS_INLINE void VisitRootIfNonNull(mirror::Object** roots, const RootInfo& info)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (*roots != nullptr) {
@@ -161,6 +162,9 @@
ALWAYS_INLINE GcRoot(MirrorType* ref = nullptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
+ // Root visitors take pointers to root_ and place them in CompressedReference** arrays. We use a
+ // CompressedReference<mirror::Object> here since it violates strict aliasing requirements to
+ // cast CompressedReference<MirrorType>* to CompressedReference<mirror::Object>*.
mutable mirror::CompressedReference<mirror::Object> root_;
template <size_t kBufferSize> friend class BufferedRootVisitor;
diff --git a/runtime/handle.h b/runtime/handle.h
index 3ebb2d5..d94d875 100644
--- a/runtime/handle.h
+++ b/runtime/handle.h
@@ -70,8 +70,8 @@
return reinterpret_cast<jobject>(reference_);
}
- StackReference<mirror::Object>* GetReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- ALWAYS_INLINE {
+ ALWAYS_INLINE StackReference<mirror::Object>* GetReference()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return reference_;
}
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 5012965..d6f9682 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -258,7 +258,10 @@
void IndirectReferenceTable::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
BufferedRootVisitor<kDefaultBufferedRootCount> root_visitor(visitor, root_info);
for (auto ref : *this) {
- root_visitor.VisitRootIfNonNull(*ref);
+ if (!ref->IsNull()) {
+ root_visitor.VisitRoot(*ref);
+ DCHECK(!ref->IsNull());
+ }
}
}
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 9039e3c..fbb07e8 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -124,7 +124,12 @@
static void UnstartedClassForNameLong(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::String* class_name = shadow_frame->GetVRegReference(arg_offset)->AsString();
+ mirror::Object* param = shadow_frame->GetVRegReference(arg_offset);
+ if (param == nullptr) {
+ AbortTransactionOrFail(self, "Null-pointer in Class.forName.");
+ return;
+ }
+ mirror::String* class_name = param->AsString();
bool initialize_class = shadow_frame->GetVReg(arg_offset + 1) != 0;
mirror::ClassLoader* class_loader =
down_cast<mirror::ClassLoader*>(shadow_frame->GetVRegReference(arg_offset + 2));
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
index a503b17..8dffee6 100644
--- a/runtime/jdwp/jdwp.h
+++ b/runtime/jdwp/jdwp.h
@@ -51,22 +51,24 @@
* Fundamental types.
*
* ObjectId and RefTypeId must be the same size.
+ * It's OK to change the MethodId and FieldId sizes as long as each is <= 8 bytes.
+ * Note that ArtFields are 64-bit pointers on 64-bit targets, so FieldId must remain 8 bytes.
*/
-typedef uint32_t FieldId; /* static or instance field */
-typedef uint32_t MethodId; /* any kind of method, including constructors */
+typedef uint64_t FieldId; /* static or instance field */
+typedef uint64_t MethodId; /* any kind of method, including constructors */
typedef uint64_t ObjectId; /* any object (threadID, stringID, arrayID, etc) */
typedef uint64_t RefTypeId; /* like ObjectID, but unique for Class objects */
typedef uint64_t FrameId; /* short-lived stack frame ID */
ObjectId ReadObjectId(const uint8_t** pBuf);
-static inline void SetFieldId(uint8_t* buf, FieldId val) { return Set4BE(buf, val); }
-static inline void SetMethodId(uint8_t* buf, MethodId val) { return Set4BE(buf, val); }
+static inline void SetFieldId(uint8_t* buf, FieldId val) { return Set8BE(buf, val); }
+static inline void SetMethodId(uint8_t* buf, MethodId val) { return Set8BE(buf, val); }
static inline void SetObjectId(uint8_t* buf, ObjectId val) { return Set8BE(buf, val); }
static inline void SetRefTypeId(uint8_t* buf, RefTypeId val) { return Set8BE(buf, val); }
static inline void SetFrameId(uint8_t* buf, FrameId val) { return Set8BE(buf, val); }
-static inline void expandBufAddFieldId(ExpandBuf* pReply, FieldId id) { expandBufAdd4BE(pReply, id); }
-static inline void expandBufAddMethodId(ExpandBuf* pReply, MethodId id) { expandBufAdd4BE(pReply, id); }
+static inline void expandBufAddFieldId(ExpandBuf* pReply, FieldId id) { expandBufAdd8BE(pReply, id); }
+static inline void expandBufAddMethodId(ExpandBuf* pReply, MethodId id) { expandBufAdd8BE(pReply, id); }
static inline void expandBufAddObjectId(ExpandBuf* pReply, ObjectId id) { expandBufAdd8BE(pReply, id); }
static inline void expandBufAddRefTypeId(ExpandBuf* pReply, RefTypeId id) { expandBufAdd8BE(pReply, id); }
static inline void expandBufAddFrameId(ExpandBuf* pReply, FrameId id) { expandBufAdd8BE(pReply, id); }
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index ccf8bff..1ec800f 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -957,7 +957,7 @@
VLOG(jdwp) << StringPrintf(" this=%#" PRIx64, instance_id);
VLOG(jdwp) << StringPrintf(" type=%#" PRIx64, field_type_id) << " "
<< Dbg::GetClassName(field_id);
- VLOG(jdwp) << StringPrintf(" field=%#" PRIx32, field_id) << " "
+ VLOG(jdwp) << StringPrintf(" field=%#" PRIx64, field_id) << " "
<< Dbg::GetFieldName(field_id);
VLOG(jdwp) << " suspend_policy=" << suspend_policy;
}
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index d0ca214..2457f14 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -38,11 +38,11 @@
namespace JDWP {
std::string DescribeField(const FieldId& field_id) {
- return StringPrintf("%#x (%s)", field_id, Dbg::GetFieldName(field_id).c_str());
+ return StringPrintf("%#" PRIx64 " (%s)", field_id, Dbg::GetFieldName(field_id).c_str());
}
std::string DescribeMethod(const MethodId& method_id) {
- return StringPrintf("%#x (%s)", method_id, Dbg::GetMethodName(method_id).c_str());
+ return StringPrintf("%#" PRIx64 " (%s)", method_id, Dbg::GetMethodName(method_id).c_str());
}
std::string DescribeRefTypeId(const RefTypeId& ref_type_id) {
@@ -101,8 +101,8 @@
VLOG(jdwp) << StringPrintf(" --> thread_id=%#" PRIx64 " object_id=%#" PRIx64,
thread_id, object_id);
- VLOG(jdwp) << StringPrintf(" class_id=%#" PRIx64 " method_id=%x %s.%s", class_id,
- method_id, Dbg::GetClassName(class_id).c_str(),
+ VLOG(jdwp) << StringPrintf(" class_id=%#" PRIx64 " method_id=%#" PRIx64 " %s.%s",
+ class_id, method_id, Dbg::GetClassName(class_id).c_str(),
Dbg::GetMethodName(method_id).c_str());
VLOG(jdwp) << StringPrintf(" %d args:", arg_count);
@@ -256,8 +256,6 @@
/*
* Respond with the sizes of the basic debugger types.
- *
- * All IDs are 8 bytes.
*/
static JdwpError VM_IDSizes(JdwpState*, Request*, ExpandBuf* pReply)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
diff --git a/runtime/jdwp/jdwp_request.cc b/runtime/jdwp/jdwp_request.cc
index 7b15d6d..18f40a1 100644
--- a/runtime/jdwp/jdwp_request.cc
+++ b/runtime/jdwp/jdwp_request.cc
@@ -87,13 +87,13 @@
}
FieldId Request::ReadFieldId() {
- FieldId id = Read4BE();
+ FieldId id = Read8BE();
VLOG(jdwp) << " field id " << DescribeField(id);
return id;
}
MethodId Request::ReadMethodId() {
- MethodId id = Read4BE();
+ MethodId id = Read8BE();
VLOG(jdwp) << " method id " << DescribeMethod(id);
return id;
}
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index f4656ec..aaa66f9 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -809,18 +809,24 @@
template<class Visitor>
void mirror::Class::VisitFieldRoots(Visitor& visitor) {
ArtField* const sfields = GetSFieldsUnchecked();
- for (size_t i = 0, count = NumStaticFields(); i < count; ++i) {
- if (kIsDebugBuild && GetStatus() != kStatusRetired) {
- CHECK_EQ(sfields[i].GetDeclaringClass(), this);
+ // Since we visit class roots while we may be writing these fields, check against null.
+ // TODO: Is this safe for concurrent compaction?
+ if (sfields != nullptr) {
+ for (size_t i = 0, count = NumStaticFields(); i < count; ++i) {
+ if (kIsDebugBuild && IsResolved()) {
+ CHECK_EQ(sfields[i].GetDeclaringClass(), this) << GetStatus();
+ }
+ visitor.VisitRoot(sfields[i].DeclaringClassRoot().AddressWithoutBarrier());
}
- visitor.VisitRoot(sfields[i].DeclaringClassRoot().AddressWithoutBarrier());
}
ArtField* const ifields = GetIFieldsUnchecked();
- for (size_t i = 0, count = NumInstanceFields(); i < count; ++i) {
- if (kIsDebugBuild && GetStatus() != kStatusRetired) {
- CHECK_EQ(ifields[i].GetDeclaringClass(), this);
+ if (ifields != nullptr) {
+ for (size_t i = 0, count = NumInstanceFields(); i < count; ++i) {
+ if (kIsDebugBuild && IsResolved()) {
+ CHECK_EQ(ifields[i].GetDeclaringClass(), this) << GetStatus();
+ }
+ visitor.VisitRoot(ifields[i].DeclaringClassRoot().AddressWithoutBarrier());
}
- visitor.VisitRoot(ifields[i].DeclaringClassRoot().AddressWithoutBarrier());
}
}
diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h
index 5edda8b..055be85 100644
--- a/runtime/mirror/object_reference.h
+++ b/runtime/mirror/object_reference.h
@@ -91,7 +91,7 @@
: ObjectReference<kPoisonHeapReferences, MirrorType>(mirror_ptr) {}
};
-// Standard compressed reference used in the runtime. Used for StackRefernce and GC roots.
+// Standard compressed reference used in the runtime. Used for StackReference and GC roots.
template<class MirrorType>
class MANAGED CompressedReference : public mirror::ObjectReference<false, MirrorType> {
public:
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 2724d91..876e29a 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -19,6 +19,9 @@
#include <string.h>
#include <unistd.h>
+#include <sstream>
+
+#include "base/histogram-inl.h"
#include "class_linker.h"
#include "common_throws.h"
#include "debugger.h"
@@ -329,6 +332,123 @@
env->ReleasePrimitiveArrayCritical(data, arr, 0);
}
+// The runtime stat names for VMDebug.getRuntimeStat().
+enum class VMDebugRuntimeStatId {
+ kArtGcGcCount = 0,
+ kArtGcGcTime,
+ kArtGcBytesAllocated,
+ kArtGcBytesFreed,
+ kArtGcBlockingGcCount,
+ kArtGcBlockingGcTime,
+ kArtGcGcCountRateHistogram,
+ kArtGcBlockingGcCountRateHistogram,
+ kNumRuntimeStats,
+};
+
+static jobject VMDebug_getRuntimeStatInternal(JNIEnv* env, jclass, jint statId) {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ switch (static_cast<VMDebugRuntimeStatId>(statId)) {
+ case VMDebugRuntimeStatId::kArtGcGcCount: {
+ std::string output = std::to_string(heap->GetGcCount());
+ return env->NewStringUTF(output.c_str());
+ }
+ case VMDebugRuntimeStatId::kArtGcGcTime: {
+ std::string output = std::to_string(NsToMs(heap->GetGcTime()));
+ return env->NewStringUTF(output.c_str());
+ }
+ case VMDebugRuntimeStatId::kArtGcBytesAllocated: {
+ std::string output = std::to_string(heap->GetBytesAllocatedEver());
+ return env->NewStringUTF(output.c_str());
+ }
+ case VMDebugRuntimeStatId::kArtGcBytesFreed: {
+ std::string output = std::to_string(heap->GetBytesFreedEver());
+ return env->NewStringUTF(output.c_str());
+ }
+ case VMDebugRuntimeStatId::kArtGcBlockingGcCount: {
+ std::string output = std::to_string(heap->GetBlockingGcCount());
+ return env->NewStringUTF(output.c_str());
+ }
+ case VMDebugRuntimeStatId::kArtGcBlockingGcTime: {
+ std::string output = std::to_string(NsToMs(heap->GetBlockingGcTime()));
+ return env->NewStringUTF(output.c_str());
+ }
+ case VMDebugRuntimeStatId::kArtGcGcCountRateHistogram: {
+ std::ostringstream output;
+ heap->DumpGcCountRateHistogram(output);
+ return env->NewStringUTF(output.str().c_str());
+ }
+ case VMDebugRuntimeStatId::kArtGcBlockingGcCountRateHistogram: {
+ std::ostringstream output;
+ heap->DumpBlockingGcCountRateHistogram(output);
+ return env->NewStringUTF(output.str().c_str());
+ }
+ default:
+ return nullptr;
+ }
+}
+
+static bool SetRuntimeStatValue(JNIEnv* env, jobjectArray result, VMDebugRuntimeStatId id,
+ std::string value) {
+ ScopedLocalRef<jstring> jvalue(env, env->NewStringUTF(value.c_str()));
+ if (jvalue.get() == nullptr) {
+ return false;
+ }
+ env->SetObjectArrayElement(result, static_cast<jint>(id), jvalue.get());
+ return true;
+}
+
+static jobjectArray VMDebug_getRuntimeStatsInternal(JNIEnv* env, jclass) {
+ jobjectArray result = env->NewObjectArray(
+ static_cast<jint>(VMDebugRuntimeStatId::kNumRuntimeStats),
+ WellKnownClasses::java_lang_String,
+ nullptr);
+ if (result == nullptr) {
+ return nullptr;
+ }
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ if (!SetRuntimeStatValue(env, result, VMDebugRuntimeStatId::kArtGcGcCount,
+ std::to_string(heap->GetGcCount()))) {
+ return nullptr;
+ }
+ if (!SetRuntimeStatValue(env, result, VMDebugRuntimeStatId::kArtGcGcTime,
+ std::to_string(NsToMs(heap->GetGcTime())))) {
+ return nullptr;
+ }
+ if (!SetRuntimeStatValue(env, result, VMDebugRuntimeStatId::kArtGcBytesAllocated,
+ std::to_string(heap->GetBytesAllocatedEver()))) {
+ return nullptr;
+ }
+ if (!SetRuntimeStatValue(env, result, VMDebugRuntimeStatId::kArtGcBytesFreed,
+ std::to_string(heap->GetBytesFreedEver()))) {
+ return nullptr;
+ }
+ if (!SetRuntimeStatValue(env, result, VMDebugRuntimeStatId::kArtGcBlockingGcCount,
+ std::to_string(heap->GetBlockingGcCount()))) {
+ return nullptr;
+ }
+ if (!SetRuntimeStatValue(env, result, VMDebugRuntimeStatId::kArtGcBlockingGcTime,
+ std::to_string(NsToMs(heap->GetBlockingGcTime())))) {
+ return nullptr;
+ }
+ {
+ std::ostringstream output;
+ heap->DumpGcCountRateHistogram(output);
+ if (!SetRuntimeStatValue(env, result, VMDebugRuntimeStatId::kArtGcGcCountRateHistogram,
+ output.str())) {
+ return nullptr;
+ }
+ }
+ {
+ std::ostringstream output;
+ heap->DumpBlockingGcCountRateHistogram(output);
+ if (!SetRuntimeStatValue(env, result, VMDebugRuntimeStatId::kArtGcBlockingGcCountRateHistogram,
+ output.str())) {
+ return nullptr;
+ }
+ }
+ return result;
+}
+
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(VMDebug, countInstancesOfClass, "(Ljava/lang/Class;Z)J"),
NATIVE_METHOD(VMDebug, crash, "()V"),
@@ -359,6 +479,8 @@
NATIVE_METHOD(VMDebug, stopInstructionCounting, "()V"),
NATIVE_METHOD(VMDebug, stopMethodTracing, "()V"),
NATIVE_METHOD(VMDebug, threadCpuTimeNanos, "!()J"),
+ NATIVE_METHOD(VMDebug, getRuntimeStatInternal, "(I)Ljava/lang/String;"),
+ NATIVE_METHOD(VMDebug, getRuntimeStatsInternal, "()[Ljava/lang/String;")
};
void register_dalvik_system_VMDebug(JNIEnv* env) {
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 1a6adf8..196a231 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -250,8 +250,7 @@
class PreloadDexCachesStringsVisitor : public SingleRootVisitor {
public:
- explicit PreloadDexCachesStringsVisitor(StringTable* table) : table_(table) {
- }
+ explicit PreloadDexCachesStringsVisitor(StringTable* table) : table_(table) { }
void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
diff --git a/runtime/native/java_lang_VMClassLoader.cc b/runtime/native/java_lang_VMClassLoader.cc
index 35932e0..0c39f2b 100644
--- a/runtime/native/java_lang_VMClassLoader.cc
+++ b/runtime/native/java_lang_VMClassLoader.cc
@@ -44,8 +44,8 @@
if (loader != nullptr) {
// Try the common case.
StackHandleScope<1> hs(soa.Self());
- c = cl->FindClassInPathClassLoader(soa, soa.Self(), descriptor.c_str(), descriptor_hash,
- hs.NewHandle(loader));
+ cl->FindClassInPathClassLoader(soa, soa.Self(), descriptor.c_str(), descriptor_hash,
+ hs.NewHandle(loader), &c);
if (c != nullptr) {
return soa.AddLocalReference<jclass>(c);
}
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 58b272b..5ca51fb 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1070,12 +1070,7 @@
// If we're currently in native code, dump that stack before dumping the managed stack.
if (dump_for_abort || ShouldShowNativeStack(this)) {
DumpKernelStack(os, GetTid(), " kernel: ", false);
- // b/20040863. Temporary workaround for x86 libunwind issue.
-#if defined(__i386__) && defined(HAVE_ANDROID_OS)
- os << "Cannot dump native stack. b/20040863.\n";
-#else
DumpNativeStack(os, GetTid(), " native: ", GetCurrentMethod(nullptr, !dump_for_abort));
-#endif
}
DumpJavaStack(os);
} else {
@@ -1391,6 +1386,8 @@
visitor, RootInfo(kRootNativeStack, thread_id));
for (HandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) {
for (size_t j = 0, count = cur->NumberOfReferences(); j < count; ++j) {
+ // GetReference returns a pointer to the stack reference within the handle scope. If this
+ // needs to be updated, it will be done by the root visitor.
buffered_visitor.VisitRootIfNonNull(cur->GetHandle(j).GetReference());
}
}
@@ -2317,6 +2314,7 @@
ReleaseLongJumpContext(context);
for (instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
visitor->VisitRootIfNonNull(&frame.this_object_, RootInfo(kRootVMInternal, thread_id));
+ DCHECK(frame.method_ != nullptr);
visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&frame.method_),
RootInfo(kRootVMInternal, thread_id));
}
diff --git a/test/099-vmdebug/src/Main.java b/test/099-vmdebug/src/Main.java
index 4d781c3..a8db069 100644
--- a/test/099-vmdebug/src/Main.java
+++ b/test/099-vmdebug/src/Main.java
@@ -17,6 +17,7 @@
import java.io.File;
import java.io.IOException;
import java.lang.reflect.Method;
+import java.util.Map;
public class Main {
public static void main(String[] args) throws Exception {
@@ -26,6 +27,7 @@
return;
}
testMethodTracing();
+ testRuntimeStat();
}
private static File createTempFile() throws Exception {
@@ -109,10 +111,108 @@
tempFile.delete();
}
+ private static void checkNumber(String s) throws Exception {
+ if (s == null) {
+ System.out.println("Got null string");
+ return;
+ }
+ long n = Long.valueOf(s);
+ if (n < 0) {
+ System.out.println("Got negative number " + n);
+ }
+ }
+
+ private static void checkHistogram(String s) throws Exception {
+ if (s == null || s.length() == 0) {
+ System.out.println("Got null or empty string");
+ return;
+ }
+ String[] buckets = s.split(",");
+ long last_key = 0;
+ for (int i = 0; i < buckets.length; ++i) {
+ String bucket = buckets[i];
+ if (bucket.length() == 0) {
+ System.out.println("Got empty bucket");
+ continue;
+ }
+ String[] kv = bucket.split(":");
+ if (kv.length != 2 || kv[0].length() == 0 || kv[1].length() == 0) {
+ System.out.println("Got bad bucket " + bucket);
+ continue;
+ }
+ long key = Long.valueOf(kv[0]);
+ long value = Long.valueOf(kv[1]);
+ if (key < 0 || value < 0) {
+ System.out.println("Got negative key or value " + bucket);
+ continue;
+ }
+ if (key < last_key) {
+ System.out.println("Got decreasing key " + bucket);
+ continue;
+ }
+ last_key = key;
+ }
+ }
+
+ private static void testRuntimeStat() throws Exception {
+ // Invoke at least one GC and wait for 20 seconds or so, so that we get at
+ // least one bucket in the histograms.
+ for (int i = 0; i < 20; ++i) {
+ Runtime.getRuntime().gc();
+ Thread.sleep(1000L);
+ }
+ String gc_count = VMDebug.getRuntimeStat("art.gc.gc-count");
+ String gc_time = VMDebug.getRuntimeStat("art.gc.gc-time");
+ String bytes_allocated = VMDebug.getRuntimeStat("art.gc.bytes-allocated");
+ String bytes_freed = VMDebug.getRuntimeStat("art.gc.bytes-freed");
+ String blocking_gc_count = VMDebug.getRuntimeStat("art.gc.blocking-gc-count");
+ String blocking_gc_time = VMDebug.getRuntimeStat("art.gc.blocking-gc-time");
+ String gc_count_rate_histogram = VMDebug.getRuntimeStat("art.gc.gc-count-rate-histogram");
+ String blocking_gc_count_rate_histogram =
+ VMDebug.getRuntimeStat("art.gc.blocking-gc-count-rate-histogram");
+ checkNumber(gc_count);
+ checkNumber(gc_time);
+ checkNumber(bytes_allocated);
+ checkNumber(bytes_freed);
+ checkNumber(blocking_gc_count);
+ checkNumber(blocking_gc_time);
+ checkHistogram(gc_count_rate_histogram);
+ checkHistogram(blocking_gc_count_rate_histogram);
+ }
+
+ private static void testRuntimeStats() throws Exception {
+ // Invoke at least one GC and wait for 20 seconds or so, so that we get at
+ // least one bucket in the histograms.
+ for (int i = 0; i < 20; ++i) {
+ Runtime.getRuntime().gc();
+ Thread.sleep(1000L);
+ }
+ Map<String, String> map = VMDebug.getRuntimeStats();
+ String gc_count = map.get("art.gc.gc-count");
+ String gc_time = map.get("art.gc.gc-time");
+ String bytes_allocated = map.get("art.gc.bytes-allocated");
+ String bytes_freed = map.get("art.gc.bytes-freed");
+ String blocking_gc_count = map.get("art.gc.blocking-gc-count");
+ String blocking_gc_time = map.get("art.gc.blocking-gc-time");
+ String gc_count_rate_histogram = map.get("art.gc.gc-count-rate-histogram");
+ String blocking_gc_count_rate_histogram =
+ map.get("art.gc.blocking-gc-count-rate-histogram");
+ checkNumber(gc_count);
+ checkNumber(gc_time);
+ checkNumber(bytes_allocated);
+ checkNumber(bytes_freed);
+ checkNumber(blocking_gc_count);
+ checkNumber(blocking_gc_time);
+ checkHistogram(gc_count_rate_histogram);
+ checkHistogram(blocking_gc_count_rate_histogram);
+ }
+
private static class VMDebug {
private static final Method startMethodTracingMethod;
private static final Method stopMethodTracingMethod;
private static final Method getMethodTracingModeMethod;
+ private static final Method getRuntimeStatMethod;
+ private static final Method getRuntimeStatsMethod;
static {
try {
Class c = Class.forName("dalvik.system.VMDebug");
@@ -120,6 +220,8 @@
Integer.TYPE, Integer.TYPE, Boolean.TYPE, Integer.TYPE);
stopMethodTracingMethod = c.getDeclaredMethod("stopMethodTracing");
getMethodTracingModeMethod = c.getDeclaredMethod("getMethodTracingMode");
+ getRuntimeStatMethod = c.getDeclaredMethod("getRuntimeStat", String.class);
+ getRuntimeStatsMethod = c.getDeclaredMethod("getRuntimeStats");
} catch (Exception e) {
throw new RuntimeException(e);
}
@@ -136,5 +238,11 @@
public static int getMethodTracingMode() throws Exception {
return (int) getMethodTracingModeMethod.invoke(null);
}
+ public static String getRuntimeStat(String statName) throws Exception {
+ return (String) getRuntimeStatMethod.invoke(null, statName);
+ }
+ public static Map<String, String> getRuntimeStats() throws Exception {
+ return (Map<String, String>) getRuntimeStatsMethod.invoke(null);
+ }
}
}
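
The checkHistogram helper added above assumes the rate-histogram stats come back as a comma-separated list of "bucket:count" pairs with non-decreasing bucket keys. A minimal standalone sketch of parsing that format, using a made-up sample string rather than real VMDebug output (the class name and sample values are illustrative, not part of the test):

import java.util.LinkedHashMap;
import java.util.Map;

public class HistogramParseSketch {
    // Parses "key:value,key:value,..." into an ordered map. The format mirrors what
    // checkHistogram validates; the sample input in main() is hypothetical.
    static Map<Long, Long> parse(String s) {
        Map<Long, Long> buckets = new LinkedHashMap<Long, Long>();
        for (String bucket : s.split(",")) {
            String[] kv = bucket.split(":");
            buckets.put(Long.valueOf(kv[0]), Long.valueOf(kv[1]));
        }
        return buckets;
    }

    public static void main(String[] args) {
        // Hypothetical histogram: 34 intervals saw 0 GCs, 12 saw one GC, 1 saw two GCs.
        System.out.println(parse("0:34,1:12,2:1"));
    }
}
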
diff --git a/test/104-growth-limit/src/Main.java b/test/104-growth-limit/src/Main.java
index 55469db..d666377 100644
--- a/test/104-growth-limit/src/Main.java
+++ b/test/104-growth-limit/src/Main.java
@@ -21,8 +21,14 @@
public class Main {
public static void main(String[] args) throws Exception {
-
int alloc1 = 1;
+ // Set up the reflection objects before allocating, to prevent OOME caused by allocations from
+ // Class.forName or getDeclaredMethod.
+ // Reflective equivalent of: dalvik.system.VMRuntime.getRuntime().clearGrowthLimit();
+ final Class<?> vm_runtime = Class.forName("dalvik.system.VMRuntime");
+ final Method get_runtime = vm_runtime.getDeclaredMethod("getRuntime");
+ final Object runtime = get_runtime.invoke(null);
+ final Method clear_growth_limit = vm_runtime.getDeclaredMethod("clearGrowthLimit");
try {
List<byte[]> l = new ArrayList<byte[]>();
while (true) {
@@ -33,13 +39,7 @@
} catch (OutOfMemoryError e) {
}
// Expand the heap to the maximum size.
- // Reflective equivalent of: dalvik.system.VMRuntime.getRuntime().clearGrowthLimit();
- Class<?> vm_runtime = Class.forName("dalvik.system.VMRuntime");
- Method get_runtime = vm_runtime.getDeclaredMethod("getRuntime");
- Object runtime = get_runtime.invoke(null);
- Method clear_growth_limit = vm_runtime.getDeclaredMethod("clearGrowthLimit");
clear_growth_limit.invoke(runtime);
-
int alloc2 = 1;
try {
List<byte[]> l = new ArrayList<byte[]>();
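
The reordering above resolves the VMRuntime reflection handles before the allocation loop, so Class.forName and getDeclaredMethod cannot themselves fail with OutOfMemoryError once the heap is saturated. A minimal sketch of the same pattern, using a harmless stand-in target (java.lang.Runtime.gc instead of VMRuntime.clearGrowthLimit); the class and method choices here are purely illustrative:

import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;

public class ReflectionBeforeAllocationSketch {
    public static void main(String[] args) throws Exception {
        // Resolve reflective handles first: Class.forName and getDeclaredMethod allocate,
        // and would likely fail with OOME if deferred until after the heap is full.
        final Class<?> runtimeClass = Class.forName("java.lang.Runtime");
        final Method gcMethod = runtimeClass.getDeclaredMethod("gc");
        final Object runtime = Runtime.getRuntime();

        // Exhaust memory, as the growth-limit test does, then drop the references.
        List<byte[]> list = new ArrayList<byte[]>();
        try {
            while (true) {
                list.add(new byte[1024 * 1024]);
            }
        } catch (OutOfMemoryError expected) {
            list.clear();
        }

        // The pre-resolved handle can still be invoked without any new reflective lookups.
        gcMethod.invoke(runtime);
        System.out.println("Invoked " + gcMethod.getName() + " after OOME");
    }
}
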
diff --git a/test/472-type-propagation/expected.txt b/test/472-type-propagation/expected.txt
new file mode 100644
index 0000000..0b29bb1
--- /dev/null
+++ b/test/472-type-propagation/expected.txt
@@ -0,0 +1,2 @@
+4.3
+1.2
diff --git a/test/472-type-propagation/info.txt b/test/472-type-propagation/info.txt
new file mode 100644
index 0000000..b86e5a2
--- /dev/null
+++ b/test/472-type-propagation/info.txt
@@ -0,0 +1,3 @@
+Regression test for optimizing's type propagation:
+If a phi requests its inputs to be of a certain type, the inputs need
+to propagate that type to their users, as those users might be phis.
diff --git a/test/472-type-propagation/src/Main.java b/test/472-type-propagation/src/Main.java
new file mode 100644
index 0000000..f9e302f
--- /dev/null
+++ b/test/472-type-propagation/src/Main.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Main {
+ public static void main(String[] args) {
+ ssaBuilderDouble(new double[] { 1.2, 4.3, 5.2 });
+ ssaBuilderDouble(new double[] { 1.2, 4.3, 5.2, 6.8 });
+ }
+
+ public static void ssaBuilderDouble(double[] array) {
+ double x;
+ if (array.length > 3) {
+ x = array[0];
+ } else {
+ x = array[1];
+ }
+ array[2] = x;
+ System.out.println(x);
+ }
+}