Merge "Revert "Use dlopen to load oat files.""
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 32bde8e..73e121f 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -110,10 +110,6 @@
CheckNoDexObjects();
}
- if (!AllocMemory()) {
- return false;
- }
-
if (kIsDebugBuild) {
ScopedObjectAccess soa(Thread::Current());
CheckNonImageClassesRemoved();
@@ -123,6 +119,12 @@
CalculateNewObjectOffsets();
Thread::Current()->TransitionFromRunnableToSuspended(kNative);
+ // This needs to happen after CalculateNewObjectOffsets since it relies on intern_table_bytes_ and
+ // bin size sums being calculated.
+ if (!AllocMemory()) {
+ return false;
+ }
+
return true;
}
@@ -205,7 +207,7 @@
}
// Write out the image bitmap at the page aligned start of the image end.
- const auto& bitmap_section = image_header->GetImageSection(ImageHeader::kSectionImageBitmap);
+ const ImageSection& bitmap_section = image_header->GetImageSection(ImageHeader::kSectionImageBitmap);
CHECK_ALIGNED(bitmap_section.Offset(), kPageSize);
if (!image_file->Write(reinterpret_cast<char*>(image_bitmap_->Begin()),
bitmap_section.Size(), bitmap_section.Offset())) {
@@ -222,26 +224,10 @@
return true;
}
-void ImageWriter::SetImageOffset(mirror::Object* object,
- ImageWriter::BinSlot bin_slot,
- size_t offset) {
+void ImageWriter::SetImageOffset(mirror::Object* object, size_t offset) {
DCHECK(object != nullptr);
DCHECK_NE(offset, 0U);
- mirror::Object* obj = reinterpret_cast<mirror::Object*>(image_->Begin() + offset);
- DCHECK_ALIGNED(obj, kObjectAlignment);
- static size_t max_offset = 0;
- max_offset = std::max(max_offset, offset);
- image_bitmap_->Set(obj); // Mark the obj as mutated, since we will end up changing it.
- {
- // Remember the object-inside-of-the-image's hash code so we can restore it after the copy.
- auto hash_it = saved_hashes_map_.find(bin_slot);
- if (hash_it != saved_hashes_map_.end()) {
- std::pair<BinSlot, uint32_t> slot_hash = *hash_it;
- saved_hashes_.push_back(std::make_pair(obj, slot_hash.second));
- saved_hashes_map_.erase(hash_it);
- }
- }
// The object is already deflated from when we set the bin slot. Just overwrite the lock word.
object->SetLockWord(LockWord::FromForwardingAddress(offset), false);
DCHECK_EQ(object->GetLockWord(false).ReadBarrierState(), 0u);
@@ -262,7 +248,7 @@
size_t new_offset = image_objects_offset_begin_ + previous_bin_sizes + bin_slot.GetIndex();
DCHECK_ALIGNED(new_offset, kObjectAlignment);
- SetImageOffset(object, bin_slot, new_offset);
+ SetImageOffset(object, new_offset);
DCHECK_LT(new_offset, image_end_);
}
@@ -302,14 +288,14 @@
// No hash, don't need to save it.
break;
case LockWord::kHashCode:
- saved_hashes_map_[bin_slot] = lw.GetHashCode();
+ DCHECK(saved_hashcode_map_.find(object) == saved_hashcode_map_.end());
+ saved_hashcode_map_.emplace(object, lw.GetHashCode());
break;
default:
LOG(FATAL) << "Unreachable.";
UNREACHABLE();
}
- object->SetLockWord(LockWord::FromForwardingAddress(static_cast<uint32_t>(bin_slot)),
- false);
+ object->SetLockWord(LockWord::FromForwardingAddress(bin_slot.Uint32Value()), false);
DCHECK_EQ(object->GetLockWord(false).ReadBarrierState(), 0u);
DCHECK(IsImageBinSlotAssigned(object));
}
@@ -487,11 +473,8 @@
++bin_slot_count_[bin];
- DCHECK_LT(GetBinSizeSum(), image_->Size());
-
// Grow the image closer to the end by the object we just assigned.
image_end_ += offset_delta;
- DCHECK_LT(image_end_, image_->Size());
}
bool ImageWriter::WillMethodBeDirty(ArtMethod* m) const {
@@ -535,10 +518,8 @@
}
bool ImageWriter::AllocMemory() {
- auto* runtime = Runtime::Current();
- const size_t heap_size = runtime->GetHeap()->GetTotalMemory();
- // Add linear alloc usage since we need to have room for the ArtFields.
- const size_t length = RoundUp(heap_size + runtime->GetLinearAlloc()->GetUsedMemory(), kPageSize);
+ const size_t length = RoundUp(image_objects_offset_begin_ + GetBinSizeSum() + intern_table_bytes_,
+ kPageSize);
std::string error_msg;
image_.reset(MemMap::MapAnonymous("image writer image", nullptr, length, PROT_READ | PROT_WRITE,
false, false, &error_msg));
@@ -547,9 +528,10 @@
return false;
}
- // Create the image bitmap.
- image_bitmap_.reset(gc::accounting::ContinuousSpaceBitmap::Create("image bitmap", image_->Begin(),
- RoundUp(length, kPageSize)));
+ // Create the image bitmap. It only needs to cover the mirror object section, up to image_end_.
+ CHECK_LE(image_end_, length);
+ image_bitmap_.reset(gc::accounting::ContinuousSpaceBitmap::Create(
+ "image bitmap", image_->Begin(), RoundUp(image_end_, kPageSize)));
if (image_bitmap_.get() == nullptr) {
LOG(ERROR) << "Failed to allocate memory for image bitmap";
return false;
@@ -569,42 +551,6 @@
return true;
}
-// Collect all the java.lang.String in the heap and put them in the output strings_ array.
-class StringCollector {
- public:
- StringCollector(Handle<mirror::ObjectArray<mirror::String>> strings, size_t index)
- : strings_(strings), index_(index) {
- }
- static void Callback(Object* obj, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- auto* collector = reinterpret_cast<StringCollector*>(arg);
- if (obj->GetClass()->IsStringClass()) {
- collector->strings_->SetWithoutChecks<false>(collector->index_++, obj->AsString());
- }
- }
- size_t GetIndex() const {
- return index_;
- }
-
- private:
- Handle<mirror::ObjectArray<mirror::String>> strings_;
- size_t index_;
-};
-
-// Compare strings based on length, used for sorting strings by length / reverse length.
-class LexicographicalStringComparator {
- public:
- bool operator()(const mirror::HeapReference<mirror::String>& lhs,
- const mirror::HeapReference<mirror::String>& rhs) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::String* lhs_s = lhs.AsMirrorPtr();
- mirror::String* rhs_s = rhs.AsMirrorPtr();
- uint16_t* lhs_begin = lhs_s->GetValue();
- uint16_t* rhs_begin = rhs_s->GetValue();
- return std::lexicographical_compare(lhs_begin, lhs_begin + lhs_s->GetLength(),
- rhs_begin, rhs_begin + rhs_s->GetLength());
- }
-};
-
void ImageWriter::ComputeEagerResolvedStringsCallback(Object* obj, void* arg ATTRIBUTE_UNUSED) {
if (!obj->GetClass()->IsStringClass()) {
return;
@@ -769,7 +715,8 @@
DCHECK_EQ(obj, obj->AsString()->Intern());
return;
}
- mirror::String* const interned = obj->AsString()->Intern();
+ mirror::String* const interned = Runtime::Current()->GetInternTable()->InternStrong(
+ obj->AsString()->Intern());
if (obj != interned) {
if (!IsImageBinSlotAssigned(interned)) {
// interned obj is after us, allocate its location early
@@ -965,7 +912,6 @@
// know where image_roots is going to end up
image_end_ += RoundUp(sizeof(ImageHeader), kObjectAlignment); // 64-bit-alignment
- DCHECK_LT(image_end_, image_->Size());
image_objects_offset_begin_ = image_end_;
// Prepare bin slots for dex cache arrays.
PrepareDexCacheArraySlots();
@@ -997,7 +943,6 @@
// Transform each object's bin slot into an offset which will be used to do the final copy.
heap->VisitObjects(UnbinObjectsIntoOffsetCallback, this);
- DCHECK(saved_hashes_map_.empty()); // All binslot hashes should've been put into vector by now.
DCHECK_EQ(image_end_, GetBinSizeSum(kBinMirrorCount) + image_objects_offset_begin_);
@@ -1010,6 +955,11 @@
bin_slot_previous_sizes_[native_reloc.bin_type];
}
+ // Calculate how big the intern table will be after being serialized.
+ auto* const intern_table = Runtime::Current()->GetInternTable();
+ CHECK_EQ(intern_table->WeakSize(), 0u) << " should have strong interned all the strings";
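+ // A null buffer makes WriteToMemory only return the required size; the table itself is
+ // written into the image later, in CopyAndFixupNativeData().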
+ intern_table_bytes_ = intern_table->WriteToMemory(nullptr);
+
// Note that image_end_ is left at the end of the used mirror object section.
}
@@ -1039,6 +989,10 @@
CHECK_EQ(image_objects_offset_begin_ + bin_slot_previous_sizes_[kBinArtMethodClean],
methods_section->Offset());
cur_pos = methods_section->End();
+ // Calculate the size of the interned strings.
+ auto* interned_strings_section = &sections[ImageHeader::kSectionInternedStrings];
+ *interned_strings_section = ImageSection(cur_pos, intern_table_bytes_);
+ cur_pos = interned_strings_section->End();
// Finally bitmap section.
const size_t bitmap_bytes = image_bitmap_->Size();
auto* bitmap_section = &sections[ImageHeader::kSectionImageBitmap];
@@ -1046,16 +1000,19 @@
cur_pos = bitmap_section->End();
if (kIsDebugBuild) {
size_t idx = 0;
- for (auto& section : sections) {
+ for (const ImageSection& section : sections) {
LOG(INFO) << static_cast<ImageHeader::ImageSections>(idx) << " " << section;
++idx;
}
LOG(INFO) << "Methods: clean=" << clean_methods_ << " dirty=" << dirty_methods_;
}
+ const size_t image_end = static_cast<uint32_t>(interned_strings_section->End());
+ CHECK_EQ(AlignUp(image_begin_ + image_end, kPageSize), oat_file_begin) <<
+ "Oat file should be right after the image.";
// Create the header.
new (image_->Begin()) ImageHeader(
- PointerToLowMemUInt32(image_begin_), static_cast<uint32_t>(methods_section->End()), sections,
- image_roots_address_, oat_file_->GetOatHeader().GetChecksum(),
+ PointerToLowMemUInt32(image_begin_), image_end,
+ sections, image_roots_address_, oat_file_->GetOatHeader().GetChecksum(),
PointerToLowMemUInt32(oat_file_begin), PointerToLowMemUInt32(oat_data_begin_),
PointerToLowMemUInt32(oat_data_end), PointerToLowMemUInt32(oat_file_end), target_ptr_size_,
compile_pic_);
@@ -1068,6 +1025,37 @@
return reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset);
}
+class FixupRootVisitor : public RootVisitor {
+ public:
+ explicit FixupRootVisitor(ImageWriter* image_writer) : image_writer_(image_writer) {
+ }
+
+ void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
+ OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ for (size_t i = 0; i < count; ++i) {
+ *roots[i] = ImageAddress(*roots[i]);
+ }
+ }
+
+ void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
+ const RootInfo& info ATTRIBUTE_UNUSED)
+ OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ for (size_t i = 0; i < count; ++i) {
+ roots[i]->Assign(ImageAddress(roots[i]->AsMirrorPtr()));
+ }
+ }
+
+ private:
+ ImageWriter* const image_writer_;
+
+ mirror::Object* ImageAddress(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const size_t offset = image_writer_->GetImageOffset(obj);
+ auto* const dest = reinterpret_cast<Object*>(image_writer_->image_begin_ + offset);
+ VLOG(compiler) << "Update root from " << obj << " to " << dest;
+ return dest;
+ }
+};
+
void ImageWriter::CopyAndFixupNativeData() {
// Copy ArtFields and methods to their locations and update the array for convenience.
for (auto& pair : native_object_reloc_) {
@@ -1088,7 +1076,7 @@
}
// Fixup the image method roots.
auto* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
- const auto& methods_section = image_header->GetMethodsSection();
+ const ImageSection& methods_section = image_header->GetMethodsSection();
for (size_t i = 0; i < ImageHeader::kImageMethodsCount; ++i) {
auto* m = image_methods_[i];
CHECK(m != nullptr);
@@ -1101,18 +1089,35 @@
auto* dest = reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset);
image_header->SetImageMethod(static_cast<ImageHeader::ImageMethod>(i), dest);
}
+ // Write the intern table into the image.
+ const ImageSection& intern_table_section = image_header->GetImageSection(
+ ImageHeader::kSectionInternedStrings);
+ InternTable* const intern_table = Runtime::Current()->GetInternTable();
+ uint8_t* const memory_ptr = image_->Begin() + intern_table_section.Offset();
+ const size_t intern_table_bytes = intern_table->WriteToMemory(memory_ptr);
+ // Fixup the pointers in the newly written intern table to contain image addresses.
+ InternTable temp_table;
+ // Note that we require that ReadFromMemory does not make an internal copy of the elements so that
+ // the VisitRoots() will update the memory directly rather than the copies.
+ // This also relies on visit roots not doing any verification which could fail after we update
+ // the roots to be the image addresses.
+ temp_table.ReadFromMemory(memory_ptr);
+ CHECK_EQ(temp_table.Size(), intern_table->Size());
+ FixupRootVisitor visitor(this);
+ temp_table.VisitRoots(&visitor, kVisitRootFlagAllRoots);
+ CHECK_EQ(intern_table_bytes, intern_table_bytes_);
}
void ImageWriter::CopyAndFixupObjects() {
gc::Heap* heap = Runtime::Current()->GetHeap();
heap->VisitObjects(CopyAndFixupObjectsCallback, this);
// Fix up the objects that previously had hash codes.
- for (const std::pair<mirror::Object*, uint32_t>& hash_pair : saved_hashes_) {
+ for (const auto& hash_pair : saved_hashcode_map_) {
Object* obj = hash_pair.first;
DCHECK_EQ(obj->GetLockWord<kVerifyNone>(false).ReadBarrierState(), 0U);
obj->SetLockWord<kVerifyNone>(LockWord::FromHashCode(hash_pair.second, 0U), false);
}
- saved_hashes_.clear();
+ saved_hashcode_map_.clear();
}
void ImageWriter::CopyAndFixupObjectsCallback(Object* obj, void* arg) {
@@ -1155,18 +1160,22 @@
}
void ImageWriter::CopyAndFixupObject(Object* obj) {
- // see GetLocalAddress for similar computation
size_t offset = GetImageOffset(obj);
auto* dst = reinterpret_cast<Object*>(image_->Begin() + offset);
- const uint8_t* src = reinterpret_cast<const uint8_t*>(obj);
+ DCHECK_LT(offset, image_end_);
+ const auto* src = reinterpret_cast<const uint8_t*>(obj);
- size_t n = obj->SizeOf();
+ image_bitmap_->Set(dst); // Mark the obj as live.
+
+ const size_t n = obj->SizeOf();
DCHECK_LE(offset + n, image_->Size());
memcpy(dst, src, n);
// Write in a hash code of objects which have inflated monitors or a hash code in their monitor
// word.
- dst->SetLockWord(LockWord::Default(), false);
+ const auto it = saved_hashcode_map_.find(obj);
+ dst->SetLockWord(it != saved_hashcode_map_.end() ?
+ LockWord::FromHashCode(it->second, 0u) : LockWord::Default(), false);
FixupObject(obj, dst);
}
@@ -1176,7 +1185,7 @@
FixupVisitor(ImageWriter* image_writer, Object* copy) : image_writer_(image_writer), copy_(copy) {
}
- void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
+ void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
Object* ref = obj->GetFieldObject<Object, kVerifyNone>(offset);
// Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
@@ -1186,7 +1195,7 @@
}
// java.lang.ref.Reference visitor.
- void operator()(mirror::Class* /*klass*/, mirror::Reference* ref) const
+ void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
@@ -1490,4 +1499,11 @@
return lockword_ & ~kBinMask;
}
+uint8_t* ImageWriter::GetOatFileBegin() const {
+ DCHECK_GT(intern_table_bytes_, 0u);
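+ // The oat file begins page-aligned after the image contents: mirror objects (up to image_end_),
+ // then ArtFields, ArtMethods and the serialized intern table.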
+ return image_begin_ + RoundUp(
+ image_end_ + bin_slot_sizes_[kBinArtField] + bin_slot_sizes_[kBinArtMethodDirty] +
+ bin_slot_sizes_[kBinArtMethodClean] + intern_table_bytes_, kPageSize);
+}
+
} // namespace art
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index a35d6ad..9d45ce2 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -54,7 +54,7 @@
quick_to_interpreter_bridge_offset_(0), compile_pic_(compile_pic),
target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())),
bin_slot_sizes_(), bin_slot_previous_sizes_(), bin_slot_count_(),
- dirty_methods_(0u), clean_methods_(0u) {
+ intern_table_bytes_(0u), dirty_methods_(0u), clean_methods_(0u) {
CHECK_NE(image_begin, 0U);
std::fill(image_methods_, image_methods_ + arraysize(image_methods_), nullptr);
}
@@ -84,11 +84,7 @@
image_begin_ + RoundUp(sizeof(ImageHeader), kObjectAlignment) + it->second + offset);
}
- uint8_t* GetOatFileBegin() const {
- return image_begin_ + RoundUp(
- image_end_ + bin_slot_sizes_[kBinArtField] + bin_slot_sizes_[kBinArtMethodDirty] +
- bin_slot_sizes_[kBinArtMethodClean], kPageSize);
- }
+ uint8_t* GetOatFileBegin() const;
bool Write(const std::string& image_filename, const std::string& oat_filename,
const std::string& oat_location)
@@ -158,7 +154,7 @@
// The offset in bytes from the beginning of the bin. Aligned to object size.
uint32_t GetIndex() const;
// Pack into a single uint32_t, for storing into a lock word.
- explicit operator uint32_t() const { return lockword_; }
+ uint32_t Uint32Value() const { return lockword_; }
// Comparison operator for map support
bool operator<(const BinSlot& other) const { return lockword_ < other.lockword_; }
@@ -170,7 +166,7 @@
// We use the lock word to store the offset of the object in the image.
void AssignImageOffset(mirror::Object* object, BinSlot bin_slot)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetImageOffset(mirror::Object* object, BinSlot bin_slot, size_t offset)
+ void SetImageOffset(mirror::Object* object, size_t offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsImageOffsetAssigned(mirror::Object* object) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -330,11 +326,9 @@
// The start offsets of the dex cache arrays.
SafeMap<const DexFile*, size_t> dex_cache_array_starts_;
- // Saved hashes (objects are inside of the image so that they don't move).
- std::vector<std::pair<mirror::Object*, uint32_t>> saved_hashes_;
-
- // Saved hashes (objects are bin slots to inside of the image, not yet allocated an address).
- std::map<BinSlot, uint32_t> saved_hashes_map_;
+ // Saved hash codes. We use these to restore lock words that temporarily held forwarding
+ // addresses, as well as to copy over the hash codes.
+ std::unordered_map<mirror::Object*, uint32_t> saved_hashcode_map_;
// Beginning target oat address for the pointers from the output image to its oat file.
const uint8_t* oat_data_begin_;
@@ -360,6 +354,9 @@
size_t bin_slot_previous_sizes_[kBinSize]; // Number of bytes in previous bins.
size_t bin_slot_count_[kBinSize]; // Number of objects in a bin
+ // Cached size of the intern table for when we allocate memory.
+ size_t intern_table_bytes_;
+
// ArtField, ArtMethod relocating map. These are allocated as array of structs but we want to
// have one entry per art field for convenience. ArtFields are placed right after the end of the
// image objects (aka sum of bin_slot_sizes_). ArtMethods are placed right after the ArtFields.
@@ -376,8 +373,9 @@
uint64_t dirty_methods_;
uint64_t clean_methods_;
- friend class FixupVisitor;
friend class FixupClassVisitor;
+ friend class FixupRootVisitor;
+ friend class FixupVisitor;
DISALLOW_COPY_AND_ASSIGN(ImageWriter);
};
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index e4680ff..1f9287c 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -723,10 +723,16 @@
}
}
- invoke = new (arena_) HInvokeStaticOrDirect(
- arena_, number_of_arguments, return_type, dex_pc, target_method.dex_method_index,
- is_recursive, string_init_offset, invoke_type, optimized_invoke_type,
- clinit_check_requirement);
+ invoke = new (arena_) HInvokeStaticOrDirect(arena_,
+ number_of_arguments,
+ return_type,
+ dex_pc,
+ target_method.dex_method_index,
+ is_recursive,
+ string_init_offset,
+ invoke_type,
+ optimized_invoke_type,
+ clinit_check_requirement);
}
size_t start_index = 0;
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 049b3e3..130f0e9 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -508,19 +508,14 @@
dex_compilation_unit.GetVerifiedMethod()->GetDexGcMap();
verifier::DexPcToReferenceMap dex_gc_map(&(gc_map_raw)[0]);
- uint32_t max_native_offset = 0;
- for (size_t i = 0; i < pc_infos_.Size(); i++) {
- uint32_t native_offset = pc_infos_.Get(i).native_pc;
- if (native_offset > max_native_offset) {
- max_native_offset = native_offset;
- }
- }
+ uint32_t max_native_offset = stack_map_stream_.ComputeMaxNativePcOffset();
- GcMapBuilder builder(data, pc_infos_.Size(), max_native_offset, dex_gc_map.RegWidth());
- for (size_t i = 0; i < pc_infos_.Size(); i++) {
- struct PcInfo pc_info = pc_infos_.Get(i);
- uint32_t native_offset = pc_info.native_pc;
- uint32_t dex_pc = pc_info.dex_pc;
+ size_t num_stack_maps = stack_map_stream_.GetNumberOfStackMaps();
+ GcMapBuilder builder(data, num_stack_maps, max_native_offset, dex_gc_map.RegWidth());
+ for (size_t i = 0; i != num_stack_maps; ++i) {
+ const StackMapStream::StackMapEntry& stack_map_entry = stack_map_stream_.GetStackMap(i);
+ uint32_t native_offset = stack_map_entry.native_pc_offset;
+ uint32_t dex_pc = stack_map_entry.dex_pc;
const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false);
CHECK(references != nullptr) << "Missing ref for dex pc 0x" << std::hex << dex_pc;
builder.AddEntry(native_offset, references);
@@ -528,17 +523,17 @@
}
void CodeGenerator::BuildSourceMap(DefaultSrcMap* src_map) const {
- for (size_t i = 0; i < pc_infos_.Size(); i++) {
- struct PcInfo pc_info = pc_infos_.Get(i);
- uint32_t pc2dex_offset = pc_info.native_pc;
- int32_t pc2dex_dalvik_offset = pc_info.dex_pc;
+ for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
+ const StackMapStream::StackMapEntry& stack_map_entry = stack_map_stream_.GetStackMap(i);
+ uint32_t pc2dex_offset = stack_map_entry.native_pc_offset;
+ int32_t pc2dex_dalvik_offset = stack_map_entry.dex_pc;
src_map->push_back(SrcMapElem({pc2dex_offset, pc2dex_dalvik_offset}));
}
}
void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data) const {
uint32_t pc2dex_data_size = 0u;
- uint32_t pc2dex_entries = pc_infos_.Size();
+ uint32_t pc2dex_entries = stack_map_stream_.GetNumberOfStackMaps();
uint32_t pc2dex_offset = 0u;
int32_t pc2dex_dalvik_offset = 0;
uint32_t dex2pc_data_size = 0u;
@@ -547,11 +542,11 @@
int32_t dex2pc_dalvik_offset = 0;
for (size_t i = 0; i < pc2dex_entries; i++) {
- struct PcInfo pc_info = pc_infos_.Get(i);
- pc2dex_data_size += UnsignedLeb128Size(pc_info.native_pc - pc2dex_offset);
- pc2dex_data_size += SignedLeb128Size(pc_info.dex_pc - pc2dex_dalvik_offset);
- pc2dex_offset = pc_info.native_pc;
- pc2dex_dalvik_offset = pc_info.dex_pc;
+ const StackMapStream::StackMapEntry& stack_map_entry = stack_map_stream_.GetStackMap(i);
+ pc2dex_data_size += UnsignedLeb128Size(stack_map_entry.native_pc_offset - pc2dex_offset);
+ pc2dex_data_size += SignedLeb128Size(stack_map_entry.dex_pc - pc2dex_dalvik_offset);
+ pc2dex_offset = stack_map_entry.native_pc_offset;
+ pc2dex_dalvik_offset = stack_map_entry.dex_pc;
}
// Walk over the blocks and find which ones correspond to catch block entries.
@@ -586,12 +581,12 @@
dex2pc_dalvik_offset = 0u;
for (size_t i = 0; i < pc2dex_entries; i++) {
- struct PcInfo pc_info = pc_infos_.Get(i);
- DCHECK(pc2dex_offset <= pc_info.native_pc);
- write_pos = EncodeUnsignedLeb128(write_pos, pc_info.native_pc - pc2dex_offset);
- write_pos = EncodeSignedLeb128(write_pos, pc_info.dex_pc - pc2dex_dalvik_offset);
- pc2dex_offset = pc_info.native_pc;
- pc2dex_dalvik_offset = pc_info.dex_pc;
+ const StackMapStream::StackMapEntry& stack_map_entry = stack_map_stream_.GetStackMap(i);
+ DCHECK(pc2dex_offset <= stack_map_entry.native_pc_offset);
+ write_pos = EncodeUnsignedLeb128(write_pos, stack_map_entry.native_pc_offset - pc2dex_offset);
+ write_pos = EncodeSignedLeb128(write_pos, stack_map_entry.dex_pc - pc2dex_dalvik_offset);
+ pc2dex_offset = stack_map_entry.native_pc_offset;
+ pc2dex_dalvik_offset = stack_map_entry.dex_pc;
}
for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
@@ -617,9 +612,9 @@
auto it = table.PcToDexBegin();
auto it2 = table.DexToPcBegin();
for (size_t i = 0; i < pc2dex_entries; i++) {
- struct PcInfo pc_info = pc_infos_.Get(i);
- CHECK_EQ(pc_info.native_pc, it.NativePcOffset());
- CHECK_EQ(pc_info.dex_pc, it.DexPc());
+ const StackMapStream::StackMapEntry& stack_map_entry = stack_map_stream_.GetStackMap(i);
+ CHECK_EQ(stack_map_entry.native_pc_offset, it.NativePcOffset());
+ CHECK_EQ(stack_map_entry.dex_pc, it.DexPc());
++it;
}
for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
@@ -695,14 +690,11 @@
}
// Collect PC infos for the mapping table.
- struct PcInfo pc_info;
- pc_info.dex_pc = outer_dex_pc;
- pc_info.native_pc = GetAssembler()->CodeSize();
- pc_infos_.Add(pc_info);
+ uint32_t native_pc = GetAssembler()->CodeSize();
if (instruction == nullptr) {
// For stack overflow checks.
- stack_map_stream_.BeginStackMapEntry(pc_info.dex_pc, pc_info.native_pc, 0, 0, 0, 0);
+ stack_map_stream_.BeginStackMapEntry(outer_dex_pc, native_pc, 0, 0, 0, 0);
stack_map_stream_.EndStackMapEntry();
return;
}
@@ -719,8 +711,8 @@
}
// The register mask must be a subset of callee-save registers.
DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);
- stack_map_stream_.BeginStackMapEntry(pc_info.dex_pc,
- pc_info.native_pc,
+ stack_map_stream_.BeginStackMapEntry(outer_dex_pc,
+ native_pc,
register_mask,
locations->GetStackMask(),
outer_environment_size,
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index c6ebf6d..e6b1f7c 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -64,11 +64,6 @@
DISALLOW_COPY_AND_ASSIGN(CodeAllocator);
};
-struct PcInfo {
- uint32_t dex_pc;
- uintptr_t native_pc;
-};
-
class SlowPathCode : public ArenaObject<kArenaAllocSlowPaths> {
public:
SlowPathCode() {
@@ -366,7 +361,6 @@
is_baseline_(false),
graph_(graph),
compiler_options_(compiler_options),
- pc_infos_(graph->GetArena(), 32),
slow_paths_(graph->GetArena(), 8),
block_order_(nullptr),
current_block_index_(0),
@@ -455,7 +449,6 @@
HGraph* const graph_;
const CompilerOptions& compiler_options_;
- GrowableArray<PcInfo> pc_infos_;
GrowableArray<SlowPathCode*> slow_paths_;
// The order to use for code generation.
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 5aeaad2..92ebf06 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -256,7 +256,7 @@
return false;
}
- if (!TryBuildAndInline(resolved_method, invoke_instruction, method_index, same_dex_file)) {
+ if (!TryBuildAndInline(resolved_method, invoke_instruction, same_dex_file)) {
return false;
}
@@ -267,11 +267,11 @@
bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
HInvoke* invoke_instruction,
- uint32_t method_index,
bool same_dex_file) const {
ScopedObjectAccess soa(Thread::Current());
const DexFile::CodeItem* code_item = resolved_method->GetCodeItem();
- const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
+ const DexFile& callee_dex_file = *resolved_method->GetDexFile();
+ uint32_t method_index = resolved_method->GetDexMethodIndex();
DexCompilationUnit dex_compilation_unit(
nullptr,
@@ -311,7 +311,7 @@
}
HGraph* callee_graph = new (graph_->GetArena()) HGraph(
graph_->GetArena(),
- caller_dex_file,
+ callee_dex_file,
method_index,
requires_ctor_barrier,
compiler_driver_->GetInstructionSet(),
@@ -328,7 +328,7 @@
&inline_stats);
if (!builder.BuildGraph(*code_item)) {
- VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, callee_dex_file)
<< " could not be built, so cannot be inlined";
// There could be multiple reasons why the graph could not be built, including
// unaccessible methods/fields due to using a different dex cache. We do not mark
@@ -338,14 +338,14 @@
if (!RegisterAllocator::CanAllocateRegistersFor(*callee_graph,
compiler_driver_->GetInstructionSet())) {
- VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, callee_dex_file)
<< " cannot be inlined because of the register allocator";
resolved_method->SetShouldNotInline();
return false;
}
if (!callee_graph->TryBuildingSsa()) {
- VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, callee_dex_file)
<< " could not be transformed to SSA";
resolved_method->SetShouldNotInline();
return false;
@@ -385,7 +385,7 @@
// a throw predecessor.
HBasicBlock* exit_block = callee_graph->GetExitBlock();
if (exit_block == nullptr) {
- VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, callee_dex_file)
<< " could not be inlined because it has an infinite loop";
resolved_method->SetShouldNotInline();
return false;
@@ -399,7 +399,7 @@
}
}
if (has_throw_predecessor) {
- VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, callee_dex_file)
<< " could not be inlined because one branch always throws";
resolved_method->SetShouldNotInline();
return false;
@@ -410,7 +410,7 @@
for (; !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
if (block->IsLoopHeader()) {
- VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, callee_dex_file)
<< " could not be inlined because it contains a loop";
resolved_method->SetShouldNotInline();
return false;
@@ -424,21 +424,21 @@
if (current->IsInvokeInterface()) {
// Disable inlining of interface calls. The cost in case of entering the
// resolution conflict is currently too high.
- VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, callee_dex_file)
<< " could not be inlined because it has an interface call.";
resolved_method->SetShouldNotInline();
return false;
}
if (!same_dex_file && current->NeedsEnvironment()) {
- VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, callee_dex_file)
<< " could not be inlined because " << current->DebugName()
<< " needs an environment and is in a different dex file";
return false;
}
if (!same_dex_file && current->NeedsDexCache()) {
- VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, callee_dex_file)
<< " could not be inlined because " << current->DebugName()
<< " it is in a different dex file and requires access to the dex cache";
// Do not flag the method as not-inlineable. A caller within the same
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 7465278..24044b7 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -52,7 +52,6 @@
bool TryInline(HInvoke* invoke_instruction, uint32_t method_index) const;
bool TryBuildAndInline(ArtMethod* resolved_method,
HInvoke* invoke_instruction,
- uint32_t method_index,
bool same_dex_file) const;
const DexCompilationUnit& outer_compilation_unit_;
diff --git a/compiler/optimizing/instruction_simplifier.h b/compiler/optimizing/instruction_simplifier.h
index 0244620..668956a 100644
--- a/compiler/optimizing/instruction_simplifier.h
+++ b/compiler/optimizing/instruction_simplifier.h
@@ -36,6 +36,9 @@
static constexpr const char* kInstructionSimplifierPassName = "instruction_simplifier";
void Run() OVERRIDE;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InstructionSimplifier);
};
} // namespace art
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 126b3b9..7ef6955 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -2483,7 +2483,7 @@
intrinsic_ = intrinsic;
}
- bool IsInlined() const {
+ bool IsFromInlinedInvoke() const {
return GetEnvironment()->GetParent() != nullptr;
}
@@ -3603,7 +3603,7 @@
bool CanThrow() const OVERRIDE {
// May call runtime and therefore can throw.
// TODO: finer grain decision.
- return !is_referrers_class_;
+ return CanCallRuntime();
}
ReferenceTypeInfo GetLoadedClassRTI() {
diff --git a/compiler/optimizing/optimization.h b/compiler/optimizing/optimization.h
index ccf8de9..2d1c0ba 100644
--- a/compiler/optimizing/optimization.h
+++ b/compiler/optimizing/optimization.h
@@ -17,6 +17,7 @@
#ifndef ART_COMPILER_OPTIMIZING_OPTIMIZATION_H_
#define ART_COMPILER_OPTIMIZING_OPTIMIZATION_H_
+#include "base/arena_object.h"
#include "nodes.h"
#include "optimizing_compiler_stats.h"
@@ -25,7 +26,7 @@
/**
* Abstraction to implement an optimization pass.
*/
-class HOptimization : public ValueObject {
+class HOptimization : public ArenaObject<kArenaAllocMisc> {
public:
HOptimization(HGraph* graph,
bool is_in_ssa_form,
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index bf0b9fa..303a7cb 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -318,49 +318,55 @@
const DexCompilationUnit& dex_compilation_unit,
PassInfoPrinter* pass_info_printer,
StackHandleScopeCollection* handles) {
- HDeadCodeElimination dce1(graph, stats,
- HDeadCodeElimination::kInitialDeadCodeEliminationPassName);
- HDeadCodeElimination dce2(graph, stats,
- HDeadCodeElimination::kFinalDeadCodeEliminationPassName);
- HConstantFolding fold1(graph);
- InstructionSimplifier simplify1(graph, stats);
- HBooleanSimplifier boolean_simplify(graph);
+ ArenaAllocator* arena = graph->GetArena();
+ HDeadCodeElimination* dce1 = new (arena) HDeadCodeElimination(
+ graph, stats, HDeadCodeElimination::kInitialDeadCodeEliminationPassName);
+ HDeadCodeElimination* dce2 = new (arena) HDeadCodeElimination(
+ graph, stats, HDeadCodeElimination::kFinalDeadCodeEliminationPassName);
+ HConstantFolding* fold1 = new (arena) HConstantFolding(graph);
+ InstructionSimplifier* simplify1 = new (arena) InstructionSimplifier(graph, stats);
+ HBooleanSimplifier* boolean_simplify = new (arena) HBooleanSimplifier(graph);
- HInliner inliner(graph, dex_compilation_unit, dex_compilation_unit, driver, handles, stats);
+ HInliner* inliner = new (arena) HInliner(
+ graph, dex_compilation_unit, dex_compilation_unit, driver, handles, stats);
- HConstantFolding fold2(graph, "constant_folding_after_inlining");
- SideEffectsAnalysis side_effects(graph);
- GVNOptimization gvn(graph, side_effects);
- LICM licm(graph, side_effects);
- BoundsCheckElimination bce(graph);
- ReferenceTypePropagation type_propagation(graph, handles);
- InstructionSimplifier simplify2(graph, stats, "instruction_simplifier_after_types");
- InstructionSimplifier simplify3(graph, stats, "last_instruction_simplifier");
- ReferenceTypePropagation type_propagation2(graph, handles);
+ HConstantFolding* fold2 = new (arena) HConstantFolding(graph, "constant_folding_after_inlining");
+ SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
+ GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects);
+ LICM* licm = new (arena) LICM(graph, *side_effects);
+ BoundsCheckElimination* bce = new (arena) BoundsCheckElimination(graph);
+ ReferenceTypePropagation* type_propagation =
+ new (arena) ReferenceTypePropagation(graph, handles);
+ InstructionSimplifier* simplify2 = new (arena) InstructionSimplifier(
+ graph, stats, "instruction_simplifier_after_types");
+ InstructionSimplifier* simplify3 = new (arena) InstructionSimplifier(
+ graph, stats, "last_instruction_simplifier");
+ ReferenceTypePropagation* type_propagation2 =
+ new (arena) ReferenceTypePropagation(graph, handles);
- IntrinsicsRecognizer intrinsics(graph, driver);
+ IntrinsicsRecognizer* intrinsics = new (arena) IntrinsicsRecognizer(graph, driver);
HOptimization* optimizations[] = {
- &intrinsics,
- &dce1,
- &fold1,
- &simplify1,
- &type_propagation,
- &simplify2,
- &inliner,
+ intrinsics,
+ dce1,
+ fold1,
+ simplify1,
+ type_propagation,
+ simplify2,
+ inliner,
// Run another type propagation phase: inlining will open up more opportunities
// to remove checkcast/instanceof and null checks.
- &type_propagation2,
+ type_propagation2,
// BooleanSimplifier depends on the InstructionSimplifier removing redundant
// suspend checks to recognize empty blocks.
- &boolean_simplify,
- &fold2,
- &side_effects,
- &gvn,
- &licm,
- &bce,
- &simplify3,
- &dce2,
+ boolean_simplify,
+ fold2,
+ side_effects,
+ gvn,
+ licm,
+ bce,
+ simplify3,
+ dce2,
};
RunOptimizations(optimizations, arraysize(optimizations), pass_info_printer);
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index a249aa9..ca928ae 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -86,16 +86,6 @@
DCHECK(last_input != nullptr)
<< "Last input is not HLoadClass. It is " << last_input->DebugName();
- // The static call will initialize the class so there's no need for a clinit check if
- // it's the first user.
- // There is one special case where we still need the clinit check, when inlining. Because
- // currently the callee is responsible for reporting parameters to the GC, the code
- // that walks the stack during `artQuickResolutionTrampoline` cannot be interrupted for GC.
- // Therefore we cannot allocate any object in that code, including loading a new class.
- if (last_input == invoke->GetPrevious() && !invoke->IsInlined()) {
- last_input->SetMustGenerateClinitCheck(false);
- }
-
// Remove a load class instruction as last input of a static
// invoke, which has been added (along with a clinit check,
// removed by PrepareForRegisterAllocation::VisitClinitCheck
@@ -104,10 +94,20 @@
// stage (i.e., after inlining has been performed).
invoke->RemoveLoadClassAsLastInput();
- // If the load class instruction is no longer used, remove it from
- // the graph.
- if (!last_input->HasUses() && !(last_input->MustGenerateClinitCheck() && invoke->IsInlined())) {
- last_input->GetBlock()->RemoveInstruction(last_input);
+ // The static call will initialize the class so there's no need for a clinit check if
+ // it's the first user.
+ // There is one special case where we still need the clinit check, when inlining. Because
+ // currently the callee is responsible for reporting parameters to the GC, the code
+ // that walks the stack during `artQuickResolutionTrampoline` cannot be interrupted for GC.
+ // Therefore we cannot allocate any object in that code, including loading a new class.
+ if (last_input == invoke->GetPrevious() && !invoke->IsFromInlinedInvoke()) {
+ last_input->SetMustGenerateClinitCheck(false);
+
+ // If the load class instruction is no longer used, remove it from
+ // the graph.
+ if (!last_input->HasUses()) {
+ last_input->GetBlock()->RemoveInstruction(last_input);
+ }
}
}
}
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 3d81c20..a048c85 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -23,6 +23,30 @@
namespace art {
+class RTPVisitor : public HGraphDelegateVisitor {
+ public:
+ RTPVisitor(HGraph* graph, StackHandleScopeCollection* handles)
+ : HGraphDelegateVisitor(graph),
+ handles_(handles) {}
+
+ void VisitNewInstance(HNewInstance* new_instance) OVERRIDE;
+ void VisitLoadClass(HLoadClass* load_class) OVERRIDE;
+ void VisitNewArray(HNewArray* instr) OVERRIDE;
+ void UpdateFieldAccessTypeInfo(HInstruction* instr, const FieldInfo& info);
+ void SetClassAsTypeInfo(HInstruction* instr, mirror::Class* klass, bool is_exact);
+ void VisitInstanceFieldGet(HInstanceFieldGet* instr) OVERRIDE;
+ void VisitStaticFieldGet(HStaticFieldGet* instr) OVERRIDE;
+ void VisitInvoke(HInvoke* instr) OVERRIDE;
+ void VisitArrayGet(HArrayGet* instr) OVERRIDE;
+ void UpdateReferenceTypeInfo(HInstruction* instr,
+ uint16_t type_idx,
+ const DexFile& dex_file,
+ bool is_exact);
+
+ private:
+ StackHandleScopeCollection* handles_;
+};
+
void ReferenceTypePropagation::Run() {
// To properly propagate type info we need to visit in the dominator-based order.
// Reverse post order guarantees a node's dominators are visited first.
@@ -35,23 +59,13 @@
void ReferenceTypePropagation::VisitBasicBlock(HBasicBlock* block) {
// TODO: handle other instructions that give type info
- // (Call/array accesses)
+ // (array accesses)
+ RTPVisitor visitor(graph_, handles_);
// Initialize exact types first for faster convergence.
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
- // TODO: Make ReferenceTypePropagation a visitor or create a new one.
- if (instr->IsNewInstance()) {
- VisitNewInstance(instr->AsNewInstance());
- } else if (instr->IsLoadClass()) {
- VisitLoadClass(instr->AsLoadClass());
- } else if (instr->IsNewArray()) {
- VisitNewArray(instr->AsNewArray());
- } else if (instr->IsInstanceFieldGet()) {
- VisitInstanceFieldGet(instr->AsInstanceFieldGet());
- } else if (instr->IsStaticFieldGet()) {
- VisitStaticFieldGet(instr->AsStaticFieldGet());
- }
+ instr->Accept(&visitor);
}
// Handle Phis.
@@ -166,9 +180,9 @@
}
}
-void ReferenceTypePropagation::SetClassAsTypeInfo(HInstruction* instr,
- mirror::Class* klass,
- bool is_exact) {
+void RTPVisitor::SetClassAsTypeInfo(HInstruction* instr,
+ mirror::Class* klass,
+ bool is_exact) {
if (klass != nullptr) {
ScopedObjectAccess soa(Thread::Current());
MutableHandle<mirror::Class> handle = handles_->NewHandle(klass);
@@ -177,10 +191,10 @@
}
}
-void ReferenceTypePropagation::UpdateReferenceTypeInfo(HInstruction* instr,
- uint16_t type_idx,
- const DexFile& dex_file,
- bool is_exact) {
+void RTPVisitor::UpdateReferenceTypeInfo(HInstruction* instr,
+ uint16_t type_idx,
+ const DexFile& dex_file,
+ bool is_exact) {
DCHECK_EQ(instr->GetType(), Primitive::kPrimNot);
ScopedObjectAccess soa(Thread::Current());
@@ -189,16 +203,16 @@
SetClassAsTypeInfo(instr, dex_cache->GetResolvedType(type_idx), is_exact);
}
-void ReferenceTypePropagation::VisitNewInstance(HNewInstance* instr) {
+void RTPVisitor::VisitNewInstance(HNewInstance* instr) {
UpdateReferenceTypeInfo(instr, instr->GetTypeIndex(), instr->GetDexFile(), /* is_exact */ true);
}
-void ReferenceTypePropagation::VisitNewArray(HNewArray* instr) {
+void RTPVisitor::VisitNewArray(HNewArray* instr) {
UpdateReferenceTypeInfo(instr, instr->GetTypeIndex(), instr->GetDexFile(), /* is_exact */ true);
}
-void ReferenceTypePropagation::UpdateFieldAccessTypeInfo(HInstruction* instr,
- const FieldInfo& info) {
+void RTPVisitor::UpdateFieldAccessTypeInfo(HInstruction* instr,
+ const FieldInfo& info) {
// The field index is unknown only during tests.
if (instr->GetType() != Primitive::kPrimNot || info.GetFieldIndex() == kUnknownFieldIndex) {
return;
@@ -213,15 +227,15 @@
SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
}
-void ReferenceTypePropagation::VisitInstanceFieldGet(HInstanceFieldGet* instr) {
+void RTPVisitor::VisitInstanceFieldGet(HInstanceFieldGet* instr) {
UpdateFieldAccessTypeInfo(instr, instr->GetFieldInfo());
}
-void ReferenceTypePropagation::VisitStaticFieldGet(HStaticFieldGet* instr) {
+void RTPVisitor::VisitStaticFieldGet(HStaticFieldGet* instr) {
UpdateFieldAccessTypeInfo(instr, instr->GetFieldInfo());
}
-void ReferenceTypePropagation::VisitLoadClass(HLoadClass* instr) {
+void RTPVisitor::VisitLoadClass(HLoadClass* instr) {
ScopedObjectAccess soa(Thread::Current());
mirror::DexCache* dex_cache =
Runtime::Current()->GetClassLinker()->FindDexCache(instr->GetDexFile());
@@ -299,6 +313,34 @@
return !previous_rti.IsEqual(instr->GetReferenceTypeInfo());
}
+void RTPVisitor::VisitInvoke(HInvoke* instr) {
+ if (instr->GetType() != Primitive::kPrimNot) {
+ return;
+ }
+
+ ScopedObjectAccess soa(Thread::Current());
+ ClassLinker* cl = Runtime::Current()->GetClassLinker();
+ mirror::DexCache* dex_cache = cl->FindDexCache(instr->GetDexFile());
+ ArtMethod* method = dex_cache->GetResolvedMethod(
+ instr->GetDexMethodIndex(), cl->GetImagePointerSize());
+ DCHECK(method != nullptr);
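+ // Use the resolved method's declared return type as the invoke's type (not exact).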
+ mirror::Class* klass = method->GetReturnType(false);
+ SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
+}
+
+void RTPVisitor::VisitArrayGet(HArrayGet* instr) {
+ if (instr->GetType() != Primitive::kPrimNot) {
+ return;
+ }
+
+ HInstruction* parent = instr->InputAt(0);
+ ScopedObjectAccess soa(Thread::Current());
+ Handle<mirror::Class> handle = parent->GetReferenceTypeInfo().GetTypeHandle();
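+ // For object arrays, the element's type is the array's component type (not exact).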
+ if (handle.GetReference() != nullptr && handle->IsObjectArrayClass()) {
+ SetClassAsTypeInfo(instr, handle->GetComponentType(), /* is_exact */ false);
+ }
+}
+
void ReferenceTypePropagation::UpdateBoundType(HBoundType* instr) {
ReferenceTypeInfo new_rti = instr->InputAt(0)->GetReferenceTypeInfo();
// Be sure that we don't go over the bounded type.
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index 0a1d4c4..0d687d2 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -40,26 +40,12 @@
static constexpr const char* kReferenceTypePropagationPassName = "reference_type_propagation";
private:
- void VisitNewInstance(HNewInstance* new_instance);
- void VisitLoadClass(HLoadClass* load_class);
- void VisitNewArray(HNewArray* instr);
void VisitPhi(HPhi* phi);
void VisitBasicBlock(HBasicBlock* block);
- void UpdateFieldAccessTypeInfo(HInstruction* instr, const FieldInfo& info);
- void SetClassAsTypeInfo(HInstruction* instr, mirror::Class* klass, bool is_exact);
-
void UpdateBoundType(HBoundType* bound_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void UpdatePhi(HPhi* phi) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
void BoundTypeForIfNotNull(HBasicBlock* block);
void BoundTypeForIfInstanceOf(HBasicBlock* block);
- void UpdateReferenceTypeInfo(HInstruction* instr,
- uint16_t type_idx,
- const DexFile& dex_file,
- bool is_exact);
- void VisitInstanceFieldGet(HInstanceFieldGet* instr);
- void VisitStaticFieldGet(HStaticFieldGet* instr);
-
void ProcessWorklist();
void AddToWorklist(HInstruction* instr);
void AddDependentInstructionsToWorklist(HInstruction* instr);
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index c4612af..2a86e60 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -184,22 +184,24 @@
}
HInstruction* left = equality_instr->InputAt(0);
HInstruction* right = equality_instr->InputAt(1);
- HInstruction* null_instr = nullptr;
+ HInstruction* int_operand = nullptr;
- if ((left->GetType() == Primitive::kPrimNot) && right->IsIntConstant()) {
- null_instr = right;
- } else if ((right->GetType() == Primitive::kPrimNot) && left->IsIntConstant()) {
- null_instr = left;
+ if ((left->GetType() == Primitive::kPrimNot) && (right->GetType() == Primitive::kPrimInt)) {
+ int_operand = right;
+ } else if ((right->GetType() == Primitive::kPrimNot)
+ && (left->GetType() == Primitive::kPrimInt)) {
+ int_operand = left;
} else {
continue;
}
// If we got here, we are comparing against a reference and the int constant
// should be replaced with a null constant.
- if (null_instr->IsIntConstant()) {
- DCHECK_EQ(0, null_instr->AsIntConstant()->GetValue());
- equality_instr->ReplaceInput(GetGraph()->GetNullConstant(), null_instr == right ? 1 : 0);
- }
+ // Both type propagation and redundant phi elimination ensure `int_operand`
+ // can only be the 0 constant.
+ DCHECK(int_operand->IsIntConstant());
+ DCHECK_EQ(0, int_operand->AsIntConstant()->GetValue());
+ equality_instr->ReplaceInput(GetGraph()->GetNullConstant(), int_operand == right ? 1 : 0);
}
}
}
@@ -255,21 +257,18 @@
PrimitiveTypePropagation type_propagation(GetGraph());
type_propagation.Run();
- // 5) Fix the type for null constants which are part of an equality comparison.
- FixNullConstantType();
-
- // 6) When creating equivalent phis we copy the inputs of the original phi which
- // may be improperly typed. This will be fixed during the type propagation but
+ // 5) When creating equivalent phis we copy the inputs of the original phi which
+ // may be improperly typed. This was fixed during the type propagation in 4) but
// as a result we may end up with two equivalent phis with the same type for
// the same dex register. This pass cleans them up.
EquivalentPhisCleanup();
- // 7) Mark dead phis again. Step 4) may have introduced new phis.
- // Step 6) might enable the death of new phis.
+ // 6) Mark dead phis again. Step 4) may have introduced new phis.
+ // Step 5) might enable the death of new phis.
SsaDeadPhiElimination dead_phis(GetGraph());
dead_phis.MarkDeadPhis();
- // 8) Now that the graph is correctly typed, we can get rid of redundant phis.
+ // 7) Now that the graph is correctly typed, we can get rid of redundant phis.
// Note that we cannot do this phase before type propagation, otherwise
// we could get rid of phi equivalents, whose presence is a requirement for the
// type propagation phase. Note that this is to satisfy statement (a) of the
@@ -277,6 +276,13 @@
SsaRedundantPhiElimination redundant_phi(GetGraph());
redundant_phi.Run();
+ // 8) Fix the type for null constants which are part of an equality comparison.
+ // We need to do this after redundant phi elimination, to ensure the only cases
+ // that we can see are reference comparisons against 0. The redundant phi
+ // elimination ensures we do not see a phi taking two 0 constants in a HEqual
+ // or HNotEqual.
+ FixNullConstantType();
+
// 9) Make sure environments use the right phi "equivalent": a phi marked dead
// can have a phi equivalent that is not dead. We must therefore update
// all environment uses of the dead phi to use its equivalent. Note that there
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 42b9182..65610d5 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -49,7 +49,6 @@
}
dex_pc_max_ = std::max(dex_pc_max_, dex_pc);
- native_pc_offset_max_ = std::max(native_pc_offset_max_, native_pc_offset);
register_mask_max_ = std::max(register_mask_max_, register_mask);
current_dex_register_ = 0;
}
@@ -128,16 +127,25 @@
current_inline_info_ = InlineInfoEntry();
}
+uint32_t StackMapStream::ComputeMaxNativePcOffset() const {
+ uint32_t max_native_pc_offset = 0u;
+ for (size_t i = 0, size = stack_maps_.Size(); i != size; ++i) {
+ max_native_pc_offset = std::max(max_native_pc_offset, stack_maps_.Get(i).native_pc_offset);
+ }
+ return max_native_pc_offset;
+}
+
size_t StackMapStream::PrepareForFillIn() {
int stack_mask_number_of_bits = stack_mask_max_ + 1; // Need room for max element too.
stack_mask_size_ = RoundUp(stack_mask_number_of_bits, kBitsPerByte) / kBitsPerByte;
inline_info_size_ = ComputeInlineInfoSize();
dex_register_maps_size_ = ComputeDexRegisterMapsSize();
+ uint32_t max_native_pc_offset = ComputeMaxNativePcOffset();
stack_map_encoding_ = StackMapEncoding::CreateFromSizes(stack_mask_size_,
inline_info_size_,
dex_register_maps_size_,
dex_pc_max_,
- native_pc_offset_max_,
+ max_native_pc_offset,
register_mask_max_);
stack_maps_size_ = stack_maps_.Size() * stack_map_encoding_.ComputeStackMapSize();
dex_register_location_catalog_size_ = ComputeDexRegisterLocationCatalogSize();
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 274d573..bc3653d 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -67,7 +67,6 @@
inline_infos_(allocator, 2),
stack_mask_max_(-1),
dex_pc_max_(0),
- native_pc_offset_max_(0),
register_mask_max_(0),
number_of_stack_maps_with_inline_info_(0),
dex_map_hash_to_stack_map_indices_(std::less<uint32_t>(), allocator->Adapter()),
@@ -126,6 +125,17 @@
uint32_t num_dex_registers);
void EndInlineInfoEntry();
+ size_t GetNumberOfStackMaps() const {
+ return stack_maps_.Size();
+ }
+
+ const StackMapEntry& GetStackMap(size_t i) const {
+ DCHECK_LT(i, stack_maps_.Size());
+ return stack_maps_.GetRawStorage()[i];
+ }
+
+ uint32_t ComputeMaxNativePcOffset() const;
+
// Prepares the stream to fill in a memory region. Must be called before FillIn.
// Returns the size (in bytes) needed to store this stream.
size_t PrepareForFillIn();
@@ -163,7 +173,6 @@
GrowableArray<InlineInfoEntry> inline_infos_;
int stack_mask_max_;
uint32_t dex_pc_max_;
- uint32_t native_pc_offset_max_;
uint32_t register_mask_max_;
size_t number_of_stack_maps_with_inline_info_;
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 96d5654..9e9dea6 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1610,6 +1610,8 @@
const auto& bitmap_section = image_header_.GetImageSection(ImageHeader::kSectionImageBitmap);
const auto& field_section = image_header_.GetImageSection(ImageHeader::kSectionArtFields);
const auto& method_section = image_header_.GetMethodsSection();
+ const auto& intern_section = image_header_.GetImageSection(
+ ImageHeader::kSectionInternedStrings);
stats_.header_bytes = header_bytes;
size_t alignment_bytes = RoundUp(header_bytes, kObjectAlignment) - header_bytes;
stats_.alignment_bytes += alignment_bytes;
@@ -1617,6 +1619,7 @@
stats_.bitmap_bytes += bitmap_section.Size();
stats_.art_field_bytes += field_section.Size();
stats_.art_method_bytes += method_section.Size();
+ stats_.interned_strings_bytes += intern_section.Size();
stats_.Dump(os);
os << "\n";
@@ -1945,6 +1948,7 @@
size_t object_bytes;
size_t art_field_bytes;
size_t art_method_bytes;
+ size_t interned_strings_bytes;
size_t bitmap_bytes;
size_t alignment_bytes;
@@ -1974,6 +1978,7 @@
object_bytes(0),
art_field_bytes(0),
art_method_bytes(0),
+ interned_strings_bytes(0),
bitmap_bytes(0),
alignment_bytes(0),
managed_code_bytes(0),
@@ -2131,21 +2136,24 @@
<< "art_file_bytes = header_bytes + object_bytes + alignment_bytes\n";
Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
std::ostream indent_os(&indent_filter);
- indent_os << StringPrintf("header_bytes = %8zd (%2.0f%% of art file bytes)\n"
- "object_bytes = %8zd (%2.0f%% of art file bytes)\n"
- "art_field_bytes = %8zd (%2.0f%% of art file bytes)\n"
- "art_method_bytes = %8zd (%2.0f%% of art file bytes)\n"
- "bitmap_bytes = %8zd (%2.0f%% of art file bytes)\n"
- "alignment_bytes = %8zd (%2.0f%% of art file bytes)\n\n",
+ indent_os << StringPrintf("header_bytes = %8zd (%2.0f%% of art file bytes)\n"
+ "object_bytes = %8zd (%2.0f%% of art file bytes)\n"
+ "art_field_bytes = %8zd (%2.0f%% of art file bytes)\n"
+ "art_method_bytes = %8zd (%2.0f%% of art file bytes)\n"
+ "interned_string_bytes = %8zd (%2.0f%% of art file bytes)\n"
+ "bitmap_bytes = %8zd (%2.0f%% of art file bytes)\n"
+ "alignment_bytes = %8zd (%2.0f%% of art file bytes)\n\n",
header_bytes, PercentOfFileBytes(header_bytes),
object_bytes, PercentOfFileBytes(object_bytes),
art_field_bytes, PercentOfFileBytes(art_field_bytes),
art_method_bytes, PercentOfFileBytes(art_method_bytes),
+ interned_strings_bytes,
+ PercentOfFileBytes(interned_strings_bytes),
bitmap_bytes, PercentOfFileBytes(bitmap_bytes),
alignment_bytes, PercentOfFileBytes(alignment_bytes))
<< std::flush;
CHECK_EQ(file_bytes, header_bytes + object_bytes + art_field_bytes + art_method_bytes +
- bitmap_bytes + alignment_bytes);
+ interned_strings_bytes + bitmap_bytes + alignment_bytes);
}
os << "object_bytes breakdown:\n";
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 007125c..0401727 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -437,6 +437,41 @@
}
}
+class FixupRootVisitor : public RootVisitor {
+ public:
+ explicit FixupRootVisitor(const PatchOat* patch_oat) : patch_oat_(patch_oat) {
+ }
+
+ void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
+ OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ for (size_t i = 0; i < count; ++i) {
+ *roots[i] = patch_oat_->RelocatedAddressOfPointer(*roots[i]);
+ }
+ }
+
+ void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
+ const RootInfo& info ATTRIBUTE_UNUSED)
+ OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ for (size_t i = 0; i < count; ++i) {
+ roots[i]->Assign(patch_oat_->RelocatedAddressOfPointer(roots[i]->AsMirrorPtr()));
+ }
+ }
+
+ private:
+ const PatchOat* const patch_oat_;
+};
+
+void PatchOat::PatchInternedStrings(const ImageHeader* image_header) {
+ const auto& section = image_header->GetImageSection(ImageHeader::kSectionInternedStrings);
+ InternTable temp_table;
+ // Note that we require that ReadFromMemory does not make an internal copy of the elements.
+ // This also relies on visit roots not doing any verification which could fail after we update
+ // the roots to be the image addresses.
+ temp_table.ReadFromMemory(image_->Begin() + section.Offset());
+ FixupRootVisitor visitor(this);
+ temp_table.VisitRoots(&visitor, kVisitRootFlagAllRoots);
+}
+
void PatchOat::PatchDexFileArrays(mirror::ObjectArray<mirror::Object>* img_roots) {
auto* dex_caches = down_cast<mirror::ObjectArray<mirror::DexCache>*>(
img_roots->Get(ImageHeader::kDexCaches));
@@ -483,12 +518,9 @@
auto* img_roots = image_header->GetImageRoots();
image_header->RelocateImage(delta_);
- // Patch and update ArtFields.
PatchArtFields(image_header);
-
- // Patch and update ArtMethods.
PatchArtMethods(image_header);
-
+ PatchInternedStrings(image_header);
// Patch dex file int/long arrays which point to ArtFields.
PatchDexFileArrays(img_roots);
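
PatchInternedStrings() above maps the serialized intern table directly out of the image and then walks its roots with FixupRootVisitor, rewriting each string reference through RelocatedAddressOfPointer(). The underlying relocation is just a constant delta between the address the image was compiled for and the address it got mapped at; a minimal sketch of that pattern (kImageDelta and RelocateImagePointer are illustrative names, not ART APIs):

    #include <cstddef>
    #include <cstdint>

    // Assumed example delta between the compiled-for and the mapped image base.
    static constexpr std::ptrdiff_t kImageDelta = 0x1000;

    template <typename T>
    T* RelocateImagePointer(T* ptr) {
      if (ptr == nullptr) {
        return nullptr;  // Null stays null, mirroring RelocatedAddressOfPointer().
      }
      return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(ptr) + kImageDelta);
    }

The visitor applies exactly this kind of adjustment to every interned-string root, which is why the read must not copy the elements: the fixups have to land in the bytes that will be written back out.
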
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index 7b9c8bd..23abca8 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -116,6 +116,8 @@
bool PatchImage() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void PatchArtFields(const ImageHeader* image_header) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void PatchArtMethods(const ImageHeader* image_header) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void PatchInternedStrings(const ImageHeader* image_header)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void PatchDexFileArrays(mirror::ObjectArray<mirror::Object>* img_roots)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -123,7 +125,7 @@
bool WriteImage(File* out);
template <typename T>
- T* RelocatedCopyOf(T* obj) {
+ T* RelocatedCopyOf(T* obj) const {
if (obj == nullptr) {
return nullptr;
}
@@ -136,7 +138,7 @@
}
template <typename T>
- T* RelocatedAddressOfPointer(T* obj) {
+ T* RelocatedAddressOfPointer(T* obj) const {
if (obj == nullptr) {
return obj;
}
@@ -149,7 +151,7 @@
}
template <typename T>
- T RelocatedAddressOfIntPointer(T obj) {
+ T RelocatedAddressOfIntPointer(T obj) const {
if (obj == 0) {
return obj;
}
@@ -199,6 +201,7 @@
TimingLogger* timings_;
+ friend class FixupRootVisitor;
DISALLOW_IMPLICIT_CONSTRUCTORS(PatchOat);
};
diff --git a/runtime/base/hash_set.h b/runtime/base/hash_set.h
index ab63ddd..8daf6d4 100644
--- a/runtime/base/hash_set.h
+++ b/runtime/base/hash_set.h
@@ -22,6 +22,7 @@
#include <stdint.h>
#include <utility>
+#include "bit_utils.h"
#include "logging.h"
namespace art {
@@ -121,6 +122,7 @@
typedef BaseIterator<T, HashSet> Iterator;
typedef BaseIterator<const T, const HashSet> ConstIterator;
+ // If we don't own the data, this will create a new array which owns the data.
void Clear() {
DeallocateStorage();
AllocateStorage(1);
@@ -128,19 +130,70 @@
elements_until_expand_ = 0;
}
- HashSet() : num_elements_(0), num_buckets_(0), data_(nullptr),
+ HashSet() : num_elements_(0), num_buckets_(0), owns_data_(false), data_(nullptr),
min_load_factor_(kDefaultMinLoadFactor), max_load_factor_(kDefaultMaxLoadFactor) {
Clear();
}
- HashSet(const HashSet& other) : num_elements_(0), num_buckets_(0), data_(nullptr) {
+ HashSet(const HashSet& other) : num_elements_(0), num_buckets_(0), owns_data_(false),
+ data_(nullptr) {
*this = other;
}
- HashSet(HashSet&& other) : num_elements_(0), num_buckets_(0), data_(nullptr) {
+ HashSet(HashSet&& other) : num_elements_(0), num_buckets_(0), owns_data_(false),
+ data_(nullptr) {
*this = std::move(other);
}
+ // Construct from existing data.
+ // Read from a block of memory. If make_copy_of_data is false, then data_ points to within the
+ // passed in ptr.
+ HashSet(const uint8_t* ptr, bool make_copy_of_data, size_t* read_count) {
+ uint64_t temp;
+ size_t offset = 0;
+ offset = ReadFromBytes(ptr, offset, &temp);
+ num_elements_ = static_cast<uint64_t>(temp);
+ offset = ReadFromBytes(ptr, offset, &temp);
+ num_buckets_ = static_cast<uint64_t>(temp);
+ CHECK_LE(num_elements_, num_buckets_);
+ offset = ReadFromBytes(ptr, offset, &temp);
+ elements_until_expand_ = static_cast<uint64_t>(temp);
+ offset = ReadFromBytes(ptr, offset, &min_load_factor_);
+ offset = ReadFromBytes(ptr, offset, &max_load_factor_);
+ if (!make_copy_of_data) {
+ owns_data_ = false;
+ data_ = const_cast<T*>(reinterpret_cast<const T*>(ptr + offset));
+ offset += sizeof(*data_) * num_buckets_;
+ } else {
+ AllocateStorage(num_buckets_);
+ // Read elements. Note that this may not be safe for cross compilation if the elements are
+ // pointer sized.
+ for (size_t i = 0; i < num_buckets_; ++i) {
+ offset = ReadFromBytes(ptr, offset, &data_[i]);
+ }
+ }
+ // Caller responsible for aligning.
+ *read_count = offset;
+ }
+
+ // Returns how large the table is after being written. If ptr is null, then no writing happens
+ // but the size is still returned. ptr must be 8 byte aligned.
+ size_t WriteToMemory(uint8_t* ptr) {
+ size_t offset = 0;
+ offset = WriteToBytes(ptr, offset, static_cast<uint64_t>(num_elements_));
+ offset = WriteToBytes(ptr, offset, static_cast<uint64_t>(num_buckets_));
+ offset = WriteToBytes(ptr, offset, static_cast<uint64_t>(elements_until_expand_));
+ offset = WriteToBytes(ptr, offset, min_load_factor_);
+ offset = WriteToBytes(ptr, offset, max_load_factor_);
+ // Write elements. Note that this may not be safe for cross compilation if the elements are
+ // pointer sized.
+ for (size_t i = 0; i < num_buckets_; ++i) {
+ offset = WriteToBytes(ptr, offset, data_[i]);
+ }
+ // Caller responsible for aligning.
+ return offset;
+ }
+
~HashSet() {
DeallocateStorage();
}
@@ -152,6 +205,7 @@
std::swap(elements_until_expand_, other.elements_until_expand_);
std::swap(min_load_factor_, other.min_load_factor_);
std::swap(max_load_factor_, other.max_load_factor_);
+ std::swap(owns_data_, other.owns_data_);
return *this;
}
@@ -386,6 +440,7 @@
void AllocateStorage(size_t num_buckets) {
num_buckets_ = num_buckets;
data_ = allocfn_.allocate(num_buckets_);
+ owns_data_ = true;
for (size_t i = 0; i < num_buckets_; ++i) {
allocfn_.construct(allocfn_.address(data_[i]));
emptyfn_.MakeEmpty(data_[i]);
@@ -394,10 +449,13 @@
void DeallocateStorage() {
if (num_buckets_ != 0) {
- for (size_t i = 0; i < NumBuckets(); ++i) {
- allocfn_.destroy(allocfn_.address(data_[i]));
+ if (owns_data_) {
+ for (size_t i = 0; i < NumBuckets(); ++i) {
+ allocfn_.destroy(allocfn_.address(data_[i]));
+ }
+ allocfn_.deallocate(data_, NumBuckets());
+ owns_data_ = false;
}
- allocfn_.deallocate(data_, NumBuckets());
data_ = nullptr;
num_buckets_ = 0;
}
@@ -418,18 +476,23 @@
// Expand / shrink the table to the new specified size.
void Resize(size_t new_size) {
DCHECK_GE(new_size, Size());
- T* old_data = data_;
+ T* const old_data = data_;
size_t old_num_buckets = num_buckets_;
// Reinsert all of the old elements.
+ const bool owned_data = owns_data_;
AllocateStorage(new_size);
for (size_t i = 0; i < old_num_buckets; ++i) {
T& element = old_data[i];
if (!emptyfn_.IsEmpty(element)) {
data_[FirstAvailableSlot(IndexForHash(hashfn_(element)))] = std::move(element);
}
- allocfn_.destroy(allocfn_.address(element));
+ if (owned_data) {
+ allocfn_.destroy(allocfn_.address(element));
+ }
}
- allocfn_.deallocate(old_data, old_num_buckets);
+ if (owned_data) {
+ allocfn_.deallocate(old_data, old_num_buckets);
+ }
}
ALWAYS_INLINE size_t FirstAvailableSlot(size_t index) const {
@@ -439,6 +502,24 @@
return index;
}
+ // Return new offset.
+ template <typename Elem>
+ static size_t WriteToBytes(uint8_t* ptr, size_t offset, Elem n) {
+ DCHECK_ALIGNED(ptr + offset, sizeof(n));
+ if (ptr != nullptr) {
+ *reinterpret_cast<Elem*>(ptr + offset) = n;
+ }
+ return offset + sizeof(n);
+ }
+
+ template <typename Elem>
+ static size_t ReadFromBytes(const uint8_t* ptr, size_t offset, Elem* out) {
+ DCHECK(ptr != nullptr);
+ DCHECK_ALIGNED(ptr + offset, sizeof(*out));
+ *out = *reinterpret_cast<const Elem*>(ptr + offset);
+ return offset + sizeof(*out);
+ }
+
Alloc allocfn_; // Allocator function.
HashFn hashfn_; // Hashing function.
EmptyFn emptyfn_; // IsEmpty/SetEmpty function.
@@ -446,6 +527,7 @@
size_t num_elements_; // Number of inserted elements.
size_t num_buckets_; // Number of hash table buckets.
size_t elements_until_expand_; // Maximum number of elements until we expand the table.
+ bool owns_data_; // If we own data_ and are responsible for freeing it.
T* data_; // Backing storage.
double min_load_factor_;
double max_load_factor_;
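
The serialization format introduced here is deliberately flat: three uint64_t counters (num_elements_, num_buckets_, elements_until_expand_), the two load factors, then the raw bucket array, with WriteToBytes() skipping the store when the destination pointer is null so a first pass can measure the section size. A standalone sketch of that measure-then-write pattern (SerializeHeader and Serialize are illustrative helpers, not part of HashSet):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    template <typename Elem>
    size_t WriteToBytes(uint8_t* ptr, size_t offset, Elem n) {
      if (ptr != nullptr) {
        std::memcpy(ptr + offset, &n, sizeof(n));  // A null ptr only measures.
      }
      return offset + sizeof(n);
    }

    size_t SerializeHeader(uint8_t* ptr, uint64_t num_elements, uint64_t num_buckets) {
      size_t offset = 0;
      offset = WriteToBytes(ptr, offset, num_elements);
      offset = WriteToBytes(ptr, offset, num_buckets);
      return offset;  // The bucket payload would follow at this offset.
    }

    std::vector<uint8_t> Serialize(uint64_t num_elements, uint64_t num_buckets) {
      std::vector<uint8_t> buffer(SerializeHeader(nullptr, num_elements, num_buckets));
      SerializeHeader(buffer.data(), num_elements, num_buckets);
      return buffer;
    }

Presumably the image writer sizes the kSectionInternedStrings section with the null-pointer pass and then writes the table into the reserved bytes.
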
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index f2be85e..0ab148e 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -94,7 +94,6 @@
kMonitorListLock,
kJniLoadLibraryLock,
kThreadListLock,
- kBreakpointInvokeLock,
kAllocTrackerLock,
kDeoptimizationLock,
kProfilerLock,
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 429fa5b..c4d978f 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -808,18 +808,11 @@
}
const OatFile* ClassLinker::GetBootOatFile() {
- // To grab the boot oat, look at the dex files in the boot classpath. Any of those is fine, as
- // they were all compiled into the same oat file. So grab the first one, which is guaranteed to
- // exist if the boot class-path isn't empty.
- if (boot_class_path_.empty()) {
+ gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
+ if (image_space == nullptr) {
return nullptr;
}
- const DexFile* boot_dex_file = boot_class_path_[0];
- // Is it from an oat file?
- if (boot_dex_file->GetOatDexFile() != nullptr) {
- return boot_dex_file->GetOatDexFile()->GetOatFile();
- }
- return nullptr;
+ return image_space->GetOatFile();
}
const OatFile* ClassLinker::GetPrimaryOatFile() {
@@ -1055,7 +1048,7 @@
static void SanityCheckObjectsCallback(mirror::Object* obj, void* arg ATTRIBUTE_UNUSED)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(obj != nullptr);
- CHECK(obj->GetClass() != nullptr) << "Null class " << obj;
+ CHECK(obj->GetClass() != nullptr) << "Null class in object " << obj;
CHECK(obj->GetClass()->GetClass() != nullptr) << "Null class class " << obj;
if (obj->IsClass()) {
auto klass = obj->AsClass();
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 3c75daf..5918c10 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -33,6 +33,7 @@
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "handle_scope.h"
+#include "jdwp/jdwp_priv.h"
#include "jdwp/object_registry.h"
#include "mirror/class.h"
#include "mirror/class-inl.h"
@@ -1658,6 +1659,51 @@
return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor());
}
+static JValue GetArtFieldValue(ArtField* f, mirror::Object* o)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Primitive::Type fieldType = f->GetTypeAsPrimitiveType();
+ JValue field_value;
+ switch (fieldType) {
+ case Primitive::kPrimBoolean:
+ field_value.SetZ(f->GetBoolean(o));
+ return field_value;
+
+ case Primitive::kPrimByte:
+ field_value.SetB(f->GetByte(o));
+ return field_value;
+
+ case Primitive::kPrimChar:
+ field_value.SetC(f->GetChar(o));
+ return field_value;
+
+ case Primitive::kPrimShort:
+ field_value.SetS(f->GetShort(o));
+ return field_value;
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ // Int and Float must be treated as 32-bit values in JDWP.
+ field_value.SetI(f->GetInt(o));
+ return field_value;
+
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ // Long and Double must be treated as 64-bit values in JDWP.
+ field_value.SetJ(f->GetLong(o));
+ return field_value;
+
+ case Primitive::kPrimNot:
+ field_value.SetL(f->GetObject(o));
+ return field_value;
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Attempt to read from field of type 'void'";
+ UNREACHABLE();
+ }
+ LOG(FATAL) << "Attempt to read from field of unknown type";
+ UNREACHABLE();
+}
+
static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::ObjectId object_id,
JDWP::FieldId field_id, JDWP::ExpandBuf* pReply,
bool is_static)
@@ -1692,27 +1738,17 @@
}
} else {
if (f->IsStatic()) {
- LOG(WARNING) << "Ignoring non-nullptr receiver for ObjectReference.SetValues on static field "
- << PrettyField(f);
+ LOG(WARNING) << "Ignoring non-nullptr receiver for ObjectReference.GetValues"
+ << " on static field " << PrettyField(f);
}
}
if (f->IsStatic()) {
o = f->GetDeclaringClass();
}
+ JValue field_value(GetArtFieldValue(f, o));
JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
- JValue field_value;
- if (tag == JDWP::JT_VOID) {
- LOG(FATAL) << "Unknown tag: " << tag;
- } else if (!IsPrimitiveTag(tag)) {
- field_value.SetL(f->GetObject(o));
- } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
- field_value.SetJ(f->Get64(o));
- } else {
- field_value.SetI(f->Get32(o));
- }
Dbg::OutputJValue(tag, &field_value, pReply);
-
return JDWP::ERR_NONE;
}
@@ -1726,6 +1762,76 @@
return GetFieldValueImpl(ref_type_id, 0, field_id, pReply, true);
}
+static JDWP::JdwpError SetArtFieldValue(ArtField* f, mirror::Object* o, uint64_t value, int width)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Primitive::Type fieldType = f->GetTypeAsPrimitiveType();
+ // Debugging only happens at runtime so we know we are not running in a transaction.
+ static constexpr bool kNoTransactionMode = false;
+ switch (fieldType) {
+ case Primitive::kPrimBoolean:
+ CHECK_EQ(width, 1);
+ f->SetBoolean<kNoTransactionMode>(o, static_cast<uint8_t>(value));
+ return JDWP::ERR_NONE;
+
+ case Primitive::kPrimByte:
+ CHECK_EQ(width, 1);
+ f->SetByte<kNoTransactionMode>(o, static_cast<uint8_t>(value));
+ return JDWP::ERR_NONE;
+
+ case Primitive::kPrimChar:
+ CHECK_EQ(width, 2);
+ f->SetChar<kNoTransactionMode>(o, static_cast<uint16_t>(value));
+ return JDWP::ERR_NONE;
+
+ case Primitive::kPrimShort:
+ CHECK_EQ(width, 2);
+ f->SetShort<kNoTransactionMode>(o, static_cast<int16_t>(value));
+ return JDWP::ERR_NONE;
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ CHECK_EQ(width, 4);
+ // Int and Float must be treated as 32-bit values in JDWP.
+ f->SetInt<kNoTransactionMode>(o, static_cast<int32_t>(value));
+ return JDWP::ERR_NONE;
+
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ CHECK_EQ(width, 8);
+ // Long and Double must be treated as 64-bit values in JDWP.
+ f->SetLong<kNoTransactionMode>(o, value);
+ return JDWP::ERR_NONE;
+
+ case Primitive::kPrimNot: {
+ JDWP::JdwpError error;
+ mirror::Object* v = Dbg::GetObjectRegistry()->Get<mirror::Object*>(value, &error);
+ if (error != JDWP::ERR_NONE) {
+ return JDWP::ERR_INVALID_OBJECT;
+ }
+ if (v != nullptr) {
+ mirror::Class* field_type;
+ {
+ StackHandleScope<2> hs(Thread::Current());
+ HandleWrapper<mirror::Object> h_v(hs.NewHandleWrapper(&v));
+ HandleWrapper<mirror::Object> h_o(hs.NewHandleWrapper(&o));
+ field_type = f->GetType<true>();
+ }
+ if (!field_type->IsAssignableFrom(v->GetClass())) {
+ return JDWP::ERR_INVALID_OBJECT;
+ }
+ }
+ f->SetObject<kNoTransactionMode>(o, v);
+ return JDWP::ERR_NONE;
+ }
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Attempt to write to field of type 'void'";
+ UNREACHABLE();
+ }
+ LOG(FATAL) << "Attempt to write to field of unknown type";
+ UNREACHABLE();
+}
+
static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId field_id,
uint64_t value, int width, bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -1744,47 +1850,14 @@
}
} else {
if (f->IsStatic()) {
- LOG(WARNING) << "Ignoring non-nullptr receiver for ObjectReference.SetValues on static field " << PrettyField(f);
+ LOG(WARNING) << "Ignoring non-nullptr receiver for ObjectReference.SetValues"
+ << " on static field " << PrettyField(f);
}
}
if (f->IsStatic()) {
o = f->GetDeclaringClass();
}
-
- JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
-
- if (IsPrimitiveTag(tag)) {
- if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
- CHECK_EQ(width, 8);
- // Debugging can't use transactional mode (runtime only).
- f->Set64<false>(o, value);
- } else {
- CHECK_LE(width, 4);
- // Debugging can't use transactional mode (runtime only).
- f->Set32<false>(o, value);
- }
- } else {
- mirror::Object* v = Dbg::GetObjectRegistry()->Get<mirror::Object*>(value, &error);
- if (error != JDWP::ERR_NONE) {
- return JDWP::ERR_INVALID_OBJECT;
- }
- if (v != nullptr) {
- mirror::Class* field_type;
- {
- StackHandleScope<2> hs(Thread::Current());
- HandleWrapper<mirror::Object> h_v(hs.NewHandleWrapper(&v));
- HandleWrapper<mirror::Object> h_o(hs.NewHandleWrapper(&o));
- field_type = f->GetType<true>();
- }
- if (!field_type->IsAssignableFrom(v->GetClass())) {
- return JDWP::ERR_INVALID_OBJECT;
- }
- }
- // Debugging can't use transactional mode (runtime only).
- f->SetObject<false>(o, v);
- }
-
- return JDWP::ERR_NONE;
+ return SetArtFieldValue(f, o, value, width);
}
JDWP::JdwpError Dbg::SetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id, uint64_t value,
@@ -3660,17 +3733,16 @@
}
}
-JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId object_id,
- JDWP::RefTypeId class_id, JDWP::MethodId method_id,
- uint32_t arg_count, uint64_t* arg_values,
- JDWP::JdwpTag* arg_types, uint32_t options,
- JDWP::JdwpTag* pResultTag, uint64_t* pResultValue,
- JDWP::ObjectId* pExceptionId) {
- ThreadList* thread_list = Runtime::Current()->GetThreadList();
+JDWP::JdwpError Dbg::PrepareInvokeMethod(uint32_t request_id, JDWP::ObjectId thread_id,
+ JDWP::ObjectId object_id, JDWP::RefTypeId class_id,
+ JDWP::MethodId method_id, uint32_t arg_count,
+ uint64_t arg_values[], JDWP::JdwpTag* arg_types,
+ uint32_t options) {
+ Thread* const self = Thread::Current();
+ CHECK_EQ(self, GetDebugThread()) << "This must be called by the JDWP thread";
+ ThreadList* thread_list = Runtime::Current()->GetThreadList();
Thread* targetThread = nullptr;
- std::unique_ptr<DebugInvokeReq> req;
- Thread* self = Thread::Current();
{
ScopedObjectAccessUnchecked soa(self);
JDWP::JdwpError error;
@@ -3780,99 +3852,82 @@
}
// Allocates a DebugInvokeReq.
- req.reset(new (std::nothrow) DebugInvokeReq(receiver, c, m, options, arg_values, arg_count));
- if (req.get() == nullptr) {
+ DebugInvokeReq* req = new (std::nothrow) DebugInvokeReq(request_id, thread_id, receiver, c, m,
+ options, arg_values, arg_count);
+ if (req == nullptr) {
LOG(ERROR) << "Failed to allocate DebugInvokeReq";
return JDWP::ERR_OUT_OF_MEMORY;
}
- // Attach the DebugInvokeReq to the target thread so it executes the method when
- // it is resumed. Once the invocation completes, it will detach it and signal us
- // before suspending itself.
- targetThread->SetDebugInvokeReq(req.get());
+ // Attaches the DebugInvokeReq to the target thread so it executes the method when
+ // it is resumed. Once the invocation completes, the target thread will delete it before
+ // suspending itself (see ThreadList::SuspendSelfForDebugger).
+ targetThread->SetDebugInvokeReq(req);
}
// The fact that we've released the thread list lock is a bit risky --- if the thread goes
- // away we're sitting high and dry -- but we must release this before the ResumeAllThreads
- // call, and it's unwise to hold it during WaitForSuspend.
+ // away we're sitting high and dry -- but we must release this before the UndoDebuggerSuspensions
+ // call.
- {
- /*
- * We change our (JDWP thread) status, which should be THREAD_RUNNING,
- * so we can suspend for a GC if the invoke request causes us to
- * run out of memory. It's also a good idea to change it before locking
- * the invokeReq mutex, although that should never be held for long.
- */
- self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSend);
-
- VLOG(jdwp) << " Transferring control to event thread";
- {
- MutexLock mu(self, req->lock);
-
- if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
- VLOG(jdwp) << " Resuming all threads";
- thread_list->UndoDebuggerSuspensions();
- } else {
- VLOG(jdwp) << " Resuming event thread only";
- thread_list->Resume(targetThread, true);
- }
-
- // The target thread is resumed but needs the JDWP token we're holding.
- // We release it now and will acquire it again when the invocation is
- // complete and the target thread suspends itself.
- gJdwpState->ReleaseJdwpTokenForCommand();
-
- // Wait for the request to finish executing.
- while (targetThread->GetInvokeReq() != nullptr) {
- req->cond.Wait(self);
- }
- }
- VLOG(jdwp) << " Control has returned from event thread";
-
- /* wait for thread to re-suspend itself */
- SuspendThread(thread_id, false /* request_suspension */);
-
- // Now the thread is suspended again, we can re-acquire the JDWP token.
- gJdwpState->AcquireJdwpTokenForCommand();
-
- self->TransitionFromSuspendedToRunnable();
- }
-
- /*
- * Suspend the threads. We waited for the target thread to suspend
- * itself, so all we need to do is suspend the others.
- *
- * The SuspendAllForDebugger() call will double-suspend the event thread,
- * so we want to resume the target thread once to keep the books straight.
- */
if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
- self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
- VLOG(jdwp) << " Suspending all threads";
- thread_list->SuspendAllForDebugger();
- self->TransitionFromSuspendedToRunnable();
- VLOG(jdwp) << " Resuming event thread to balance the count";
+ VLOG(jdwp) << " Resuming all threads";
+ thread_list->UndoDebuggerSuspensions();
+ } else {
+ VLOG(jdwp) << " Resuming event thread only";
thread_list->Resume(targetThread, true);
}
- // Copy the result.
- *pResultTag = req->result_tag;
- *pResultValue = req->result_value;
- *pExceptionId = req->exception;
- return req->error;
+ return JDWP::ERR_NONE;
}
void Dbg::ExecuteMethod(DebugInvokeReq* pReq) {
- ScopedObjectAccess soa(Thread::Current());
+ Thread* const self = Thread::Current();
+ CHECK_NE(self, GetDebugThread()) << "This must be called by the event thread";
+
+ ScopedObjectAccess soa(self);
// We can be called while an exception is pending. We need
// to preserve that across the method invocation.
- StackHandleScope<3> hs(soa.Self());
- auto old_exception = hs.NewHandle<mirror::Throwable>(soa.Self()->GetException());
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Throwable> old_exception = hs.NewHandle(soa.Self()->GetException());
soa.Self()->ClearException();
+ // Execute the method, then send the reply to the debugger.
+ ExecuteMethodWithoutPendingException(soa, pReq);
+
+ // If an exception was pending before the invoke, restore it now.
+ if (old_exception.Get() != nullptr) {
+ soa.Self()->SetException(old_exception.Get());
+ }
+}
+
+// Helper function: write a variable-width value into the output buffer.
+static void WriteValue(JDWP::ExpandBuf* pReply, int width, uint64_t value) {
+ switch (width) {
+ case 1:
+ expandBufAdd1(pReply, value);
+ break;
+ case 2:
+ expandBufAdd2BE(pReply, value);
+ break;
+ case 4:
+ expandBufAdd4BE(pReply, value);
+ break;
+ case 8:
+ expandBufAdd8BE(pReply, value);
+ break;
+ default:
+ LOG(FATAL) << width;
+ UNREACHABLE();
+ }
+}
+
+void Dbg::ExecuteMethodWithoutPendingException(ScopedObjectAccess& soa, DebugInvokeReq* pReq) {
+ soa.Self()->AssertNoPendingException();
+
// Translate the method through the vtable, unless the debugger wants to suppress it.
- auto* m = pReq->method;
- auto image_pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ ArtMethod* m = pReq->method;
+ size_t image_pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver.Read() != nullptr) {
ArtMethod* actual_method =
pReq->klass.Read()->FindVirtualMethodForVirtualOrInterface(m, image_pointer_size);
@@ -3889,39 +3944,133 @@
CHECK_EQ(sizeof(jvalue), sizeof(uint64_t));
+ // Invoke the method.
ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(pReq->receiver.Read()));
JValue result = InvokeWithJValues(soa, ref.get(), soa.EncodeMethod(m),
- reinterpret_cast<jvalue*>(pReq->arg_values));
+ reinterpret_cast<jvalue*>(pReq->arg_values.get()));
- pReq->result_tag = BasicTagFromDescriptor(m->GetShorty());
- const bool is_object_result = (pReq->result_tag == JDWP::JT_OBJECT);
+ // Prepare JDWP ids for the reply.
+ JDWP::JdwpTag result_tag = BasicTagFromDescriptor(m->GetShorty());
+ const bool is_object_result = (result_tag == JDWP::JT_OBJECT);
+ StackHandleScope<2> hs(soa.Self());
Handle<mirror::Object> object_result = hs.NewHandle(is_object_result ? result.GetL() : nullptr);
Handle<mirror::Throwable> exception = hs.NewHandle(soa.Self()->GetException());
soa.Self()->ClearException();
- pReq->exception = gRegistry->Add(exception);
- if (pReq->exception != 0) {
+
+ if (!IsDebuggerActive()) {
+ // The debugger detached: we must not re-suspend threads. We also don't need to fill the reply
+ // because it won't be sent either.
+ return;
+ }
+
+ JDWP::ObjectId exceptionObjectId = gRegistry->Add(exception);
+ uint64_t result_value = 0;
+ if (exceptionObjectId != 0) {
VLOG(jdwp) << " JDWP invocation returning with exception=" << exception.Get()
<< " " << exception->Dump();
- pReq->result_value = 0;
+ result_value = 0;
} else if (is_object_result) {
- /* if no exception thrown, examine object result more closely */
+ /* if no exception was thrown, examine object result more closely */
JDWP::JdwpTag new_tag = TagFromObject(soa, object_result.Get());
- if (new_tag != pReq->result_tag) {
- VLOG(jdwp) << " JDWP promoted result from " << pReq->result_tag << " to " << new_tag;
- pReq->result_tag = new_tag;
+ if (new_tag != result_tag) {
+ VLOG(jdwp) << " JDWP promoted result from " << result_tag << " to " << new_tag;
+ result_tag = new_tag;
}
// Register the object in the registry and reference its ObjectId. This ensures
// GC safety and prevents from accessing stale reference if the object is moved.
- pReq->result_value = gRegistry->Add(object_result.Get());
+ result_value = gRegistry->Add(object_result.Get());
} else {
// Primitive result.
- DCHECK(IsPrimitiveTag(pReq->result_tag));
- pReq->result_value = result.GetJ();
+ DCHECK(IsPrimitiveTag(result_tag));
+ result_value = result.GetJ();
+ }
+ const bool is_constructor = m->IsConstructor() && !m->IsStatic();
+ if (is_constructor) {
+ // If we invoked a constructor (which actually returns void), return the receiver,
+ // unless we threw, in which case we return null.
+ result_tag = JDWP::JT_OBJECT;
+ if (exceptionObjectId == 0) {
+ // TODO we could keep the receiver ObjectId in the DebugInvokeReq to avoid looking into the
+ // object registry.
+ result_value = GetObjectRegistry()->Add(pReq->receiver.Read());
+ } else {
+ result_value = 0;
+ }
}
- if (old_exception.Get() != nullptr) {
- soa.Self()->SetException(old_exception.Get());
+ // Suspend other threads if the invoke is not single-threaded.
+ if ((pReq->options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
+ soa.Self()->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
+ VLOG(jdwp) << " Suspending all threads";
+ Runtime::Current()->GetThreadList()->SuspendAllForDebugger();
+ soa.Self()->TransitionFromSuspendedToRunnable();
+ }
+
+ VLOG(jdwp) << " --> returned " << result_tag
+ << StringPrintf(" %#" PRIx64 " (except=%#" PRIx64 ")", result_value,
+ exceptionObjectId);
+
+ // Show detailed debug output.
+ if (result_tag == JDWP::JT_STRING && exceptionObjectId == 0) {
+ if (result_value != 0) {
+ if (VLOG_IS_ON(jdwp)) {
+ std::string result_string;
+ JDWP::JdwpError error = Dbg::StringToUtf8(result_value, &result_string);
+ CHECK_EQ(error, JDWP::ERR_NONE);
+ VLOG(jdwp) << " string '" << result_string << "'";
+ }
+ } else {
+ VLOG(jdwp) << " string (null)";
+ }
+ }
+
+ // Attach the reply to DebugInvokeReq so it can be sent to the debugger when the event thread
+ // is ready to suspend.
+ BuildInvokeReply(pReq->reply, pReq->request_id, result_tag, result_value, exceptionObjectId);
+}
+
+void Dbg::BuildInvokeReply(JDWP::ExpandBuf* pReply, uint32_t request_id, JDWP::JdwpTag result_tag,
+ uint64_t result_value, JDWP::ObjectId exception) {
+ // Make room for the JDWP header since we do not know the size of the reply yet.
+ JDWP::expandBufAddSpace(pReply, kJDWPHeaderLen);
+
+ size_t width = GetTagWidth(result_tag);
+ JDWP::expandBufAdd1(pReply, result_tag);
+ if (width != 0) {
+ WriteValue(pReply, width, result_value);
+ }
+ JDWP::expandBufAdd1(pReply, JDWP::JT_OBJECT);
+ JDWP::expandBufAddObjectId(pReply, exception);
+
+ // Now we know the size, we can complete the JDWP header.
+ uint8_t* buf = expandBufGetBuffer(pReply);
+ JDWP::Set4BE(buf + kJDWPHeaderSizeOffset, expandBufGetLength(pReply));
+ JDWP::Set4BE(buf + kJDWPHeaderIdOffset, request_id);
+ JDWP::Set1(buf + kJDWPHeaderFlagsOffset, kJDWPFlagReply); // flags
+ JDWP::Set2BE(buf + kJDWPHeaderErrorCodeOffset, JDWP::ERR_NONE);
+}
+
+void Dbg::FinishInvokeMethod(DebugInvokeReq* pReq) {
+ CHECK_NE(Thread::Current(), GetDebugThread()) << "This must be called by the event thread";
+
+ JDWP::ExpandBuf* const pReply = pReq->reply;
+ CHECK(pReply != nullptr) << "No reply attached to DebugInvokeReq";
+
+ // We need to prevent other threads (including JDWP thread) from interacting with the debugger
+ // while we send the reply but are not yet suspended. The JDWP token will be released just before
+ // we suspend ourself again (see ThreadList::SuspendSelfForDebugger).
+ gJdwpState->AcquireJdwpTokenForEvent(pReq->thread_id);
+
+ // Send the reply unless the debugger detached before the completion of the method.
+ if (IsDebuggerActive()) {
+ const size_t replyDataLength = expandBufGetLength(pReply) - kJDWPHeaderLen;
+ VLOG(jdwp) << StringPrintf("REPLY INVOKE id=0x%06x (length=%zu)",
+ pReq->request_id, replyDataLength);
+
+ gJdwpState->SendRequest(pReply);
+ } else {
+ VLOG(jdwp) << "Not sending invoke reply because debugger detached";
}
}
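
BuildInvokeReply() above leaves kJDWPHeaderLen bytes of space, appends the tagged result and the exception id, and then backfills the standard JDWP packet header: a 4-byte big-endian length, a 4-byte id echoing the original request, a flags byte with the reply bit set, and a 2-byte error code. A sketch of that header layout, assuming the usual 11-byte JDWP reply header (BuildJdwpReplyHeader is an illustrative helper, not an ART function):

    #include <cstdint>

    static void Set4BE(uint8_t* buf, uint32_t v) {
      buf[0] = static_cast<uint8_t>(v >> 24);
      buf[1] = static_cast<uint8_t>(v >> 16);
      buf[2] = static_cast<uint8_t>(v >> 8);
      buf[3] = static_cast<uint8_t>(v);
    }

    static void BuildJdwpReplyHeader(uint8_t* buf, uint32_t packet_length, uint32_t request_id,
                                     uint16_t error_code) {
      Set4BE(buf + 0, packet_length);                   // Total length, header included.
      Set4BE(buf + 4, request_id);                      // Id of the command being answered.
      buf[8] = 0x80;                                    // Flags: reply bit set.
      buf[9] = static_cast<uint8_t>(error_code >> 8);   // 2-byte big-endian error code.
      buf[10] = static_cast<uint8_t>(error_code);
    }

Filling in the length last is what allows the reply body to be built before its final size is known.
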
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 3b92d36..fd7d46c 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -42,6 +42,7 @@
class ArtField;
class ArtMethod;
class ObjectRegistry;
+class ScopedObjectAccess;
class ScopedObjectAccessUnchecked;
class StackVisitor;
class Thread;
@@ -50,33 +51,32 @@
* Invoke-during-breakpoint support.
*/
struct DebugInvokeReq {
- DebugInvokeReq(mirror::Object* invoke_receiver, mirror::Class* invoke_class,
+ DebugInvokeReq(uint32_t invoke_request_id, JDWP::ObjectId invoke_thread_id,
+ mirror::Object* invoke_receiver, mirror::Class* invoke_class,
ArtMethod* invoke_method, uint32_t invoke_options,
- uint64_t* args, uint32_t args_count)
- : receiver(invoke_receiver), klass(invoke_class), method(invoke_method),
- arg_count(args_count), arg_values(args), options(invoke_options),
- error(JDWP::ERR_NONE), result_tag(JDWP::JT_VOID), result_value(0), exception(0),
- lock("a DebugInvokeReq lock", kBreakpointInvokeLock),
- cond("a DebugInvokeReq condition variable", lock) {
+ uint64_t args[], uint32_t args_count)
+ : request_id(invoke_request_id), thread_id(invoke_thread_id), receiver(invoke_receiver),
+ klass(invoke_class), method(invoke_method), arg_count(args_count), arg_values(args),
+ options(invoke_options), reply(JDWP::expandBufAlloc()) {
}
- /* request */
- GcRoot<mirror::Object> receiver; // not used for ClassType.InvokeMethod
+ ~DebugInvokeReq() {
+ JDWP::expandBufFree(reply);
+ }
+
+ // Request
+ const uint32_t request_id;
+ const JDWP::ObjectId thread_id;
+ GcRoot<mirror::Object> receiver; // not used for ClassType.InvokeMethod.
GcRoot<mirror::Class> klass;
- ArtMethod* method;
+ ArtMethod* const method;
const uint32_t arg_count;
- uint64_t* const arg_values; // will be null if arg_count_ == 0
+ std::unique_ptr<uint64_t[]> arg_values; // will be null if arg_count_ == 0. We take ownership
+ // of this array so we must delete it upon destruction.
const uint32_t options;
- /* result */
- JDWP::JdwpError error;
- JDWP::JdwpTag result_tag;
- uint64_t result_value; // either a primitive value or an ObjectId
- JDWP::ObjectId exception;
-
- /* condition variable to wait on while the method executes */
- Mutex lock DEFAULT_MUTEX_ACQUIRED_AFTER;
- ConditionVariable cond GUARDED_BY(lock);
+ // Reply
+ JDWP::ExpandBuf* const reply;
void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -605,19 +605,39 @@
LOCKS_EXCLUDED(Locks::thread_list_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Invoke support for commands ClassType.InvokeMethod, ClassType.NewInstance and
- // ObjectReference.InvokeMethod.
- static JDWP::JdwpError InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId object_id,
- JDWP::RefTypeId class_id, JDWP::MethodId method_id,
- uint32_t arg_count, uint64_t* arg_values,
- JDWP::JdwpTag* arg_types, uint32_t options,
- JDWP::JdwpTag* pResultTag, uint64_t* pResultValue,
- JDWP::ObjectId* pExceptObj)
+ /*
+ * Invoke support
+ */
+
+ // Called by the JDWP thread to prepare invocation in the event thread (suspended on an event).
+ // If the information sent by the debugger is incorrect, it will send a reply with the
+ // appropriate error code. Otherwise, it will attach a DebugInvokeReq object to the event thread
+ // and resume it (and possibly other threads depending on the invoke options).
+ // Unlike other commands, the JDWP thread will not send the reply to the debugger (see
+ // JdwpState::ProcessRequest). The reply will be sent by the event thread itself after method
+ // invocation completes (see FinishInvokeMethod). This is required to allow the JDWP thread to
+ // process incoming commands from the debugger while the invocation is still in progress in the
+ // event thread, especially if it gets suspended by a debug event occurring in another thread.
+ static JDWP::JdwpError PrepareInvokeMethod(uint32_t request_id, JDWP::ObjectId thread_id,
+ JDWP::ObjectId object_id, JDWP::RefTypeId class_id,
+ JDWP::MethodId method_id, uint32_t arg_count,
+ uint64_t arg_values[], JDWP::JdwpTag* arg_types,
+ uint32_t options)
LOCKS_EXCLUDED(Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Called by the event thread to execute a method prepared by the JDWP thread in the given
+ // DebugInvokeReq object. Once the invocation completes, the event thread attaches a reply
+ // to that DebugInvokeReq object so it can be sent to the debugger only when the event thread
+ // is ready to suspend (see FinishInvokeMethod).
static void ExecuteMethod(DebugInvokeReq* pReq);
+ // Called by the event thread to send the reply of the invoke (created in ExecuteMethod)
+ // before suspending itself. This is to ensure the thread is ready to suspend before the
+ // debugger receives the reply.
+ static void FinishInvokeMethod(DebugInvokeReq* pReq);
+
/*
* DDM support.
*/
@@ -694,6 +714,14 @@
}
private:
+ static void ExecuteMethodWithoutPendingException(ScopedObjectAccess& soa, DebugInvokeReq* pReq)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static void BuildInvokeReply(JDWP::ExpandBuf* pReply, uint32_t request_id,
+ JDWP::JdwpTag result_tag, uint64_t result_value,
+ JDWP::ObjectId exception)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
static JDWP::JdwpError GetLocalValue(const StackVisitor& visitor,
ScopedObjectAccessUnchecked& soa, int slot,
JDWP::JdwpTag tag, uint8_t* buf, size_t width)
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index b0cbd02..de925b7 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -39,9 +39,12 @@
namespace art {
inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method,
- uint32_t method_index,
- InvokeType invoke_type)
+ const InlineInfo& inline_info,
+ uint8_t inlining_depth)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t method_index = inline_info.GetMethodIndexAtDepth(inlining_depth);
+ InvokeType invoke_type = static_cast<InvokeType>(
+ inline_info.GetInvokeTypeAtDepth(inlining_depth));
ArtMethod* caller = outer_method->GetDexCacheResolvedMethod(method_index, sizeof(void*));
if (!caller->IsRuntimeMethod()) {
return caller;
@@ -51,10 +54,19 @@
// the stub that will then update the dex cache. Therefore, we need to do the
// resolution ourselves.
+ // We first find the class loader of our caller. If the caller is the outer method, we can directly
+ // use its class loader. Otherwise, we also need to resolve our caller.
StackHandleScope<2> hs(Thread::Current());
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(outer_method->GetClassLoader()));
+ MutableHandle<mirror::ClassLoader> class_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
Handle<mirror::DexCache> dex_cache(hs.NewHandle(outer_method->GetDexCache()));
+ if (inlining_depth == 0) {
+ class_loader.Assign(outer_method->GetClassLoader());
+ } else {
+ caller = GetResolvedMethod(outer_method, inline_info, inlining_depth - 1);
+ class_loader.Assign(caller->GetClassLoader());
+ }
+
return class_linker->ResolveMethod(
*outer_method->GetDexFile(), method_index, dex_cache, class_loader, nullptr, invoke_type);
}
@@ -82,10 +94,7 @@
DCHECK(stack_map.IsValid());
if (stack_map.HasInlineInfo(encoding)) {
InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
- uint32_t method_index = inline_info.GetMethodIndexAtDepth(inline_info.GetDepth() - 1);
- InvokeType invoke_type = static_cast<InvokeType>(
- inline_info.GetInvokeTypeAtDepth(inline_info.GetDepth() - 1));
- caller = GetResolvedMethod(outer_method, method_index, invoke_type);
+ caller = GetResolvedMethod(outer_method, inline_info, inline_info.GetDepth() - 1);
}
}
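
The recursion added to GetResolvedMethod() encodes a simple rule: an inlined callee must be resolved with the class loader of its caller, and when that caller is itself inlined it first has to be resolved the same way, bottoming out at the outer compiled method. A toy sketch of that shape (Method, ClassLoader and Resolve are stand-in types, not the ART API):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct ClassLoader {};
    struct Method { uint32_t index; ClassLoader* loader; };

    // Stand-in for dex-cache/ClassLinker resolution: records which loader was used.
    Method* Resolve(uint32_t method_index, ClassLoader* loader) {
      return new Method{method_index, loader};
    }

    Method* ResolveInlined(Method* outer, const std::vector<uint32_t>& inline_chain,
                           size_t depth) {
      // Depth 0 is the first inlined callee; its caller is the outer compiled method.
      ClassLoader* loader = (depth == 0)
          ? outer->loader
          : ResolveInlined(outer, inline_chain, depth - 1)->loader;
      return Resolve(inline_chain[depth], loader);
    }
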
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index c16f5d3..006d2c7 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -159,6 +159,7 @@
inline bool SpaceBitmap<kAlignment>::Modify(const mirror::Object* obj) {
uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
DCHECK_GE(addr, heap_begin_);
+ DCHECK(HasAddress(obj)) << obj;
const uintptr_t offset = addr - heap_begin_;
const size_t index = OffsetToIndex(offset);
const uintptr_t mask = OffsetToMask(offset);
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index fe2b284..6546eb4 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -35,6 +35,11 @@
}
template<size_t kAlignment>
+size_t SpaceBitmap<kAlignment>::ComputeHeapSize(uint64_t bitmap_bytes) {
+ return bitmap_bytes * kBitsPerByte * kAlignment;
+}
+
+template<size_t kAlignment>
SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::CreateFromMemMap(
const std::string& name, MemMap* mem_map, uint8_t* heap_begin, size_t heap_capacity) {
CHECK(mem_map != nullptr);
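
ComputeHeapSize() above is just the inverse of the usual bitmap sizing: each bit covers kAlignment bytes of heap, so a bitmap of N bytes describes N * kBitsPerByte * kAlignment bytes. A quick check of that arithmetic, assuming 8-byte object alignment:

    #include <cstdint>

    constexpr uint64_t kBitsPerByte = 8;
    constexpr uint64_t kAlignment = 8;  // Assumed object alignment for this example.

    constexpr uint64_t ComputeHeapSize(uint64_t bitmap_bytes) {
      return bitmap_bytes * kBitsPerByte * kAlignment;
    }

    static_assert(ComputeHeapSize(4096) == 256 * 1024,
                  "a 4 KiB bitmap covers 256 KiB of heap");

This is what lets image_space.cc derive the bitmap's heap coverage from the section size recorded in the image header rather than from the mapped file size.
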
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index d6b3ed4..35faff3 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -188,15 +188,16 @@
std::string Dump() const;
+ // Helper function for computing bitmap size based on a 64 bit capacity.
+ static size_t ComputeBitmapSize(uint64_t capacity);
+ static size_t ComputeHeapSize(uint64_t bitmap_bytes);
+
private:
// TODO: heap_end_ is initialized so that the heap bitmap is empty, this doesn't require the -1,
// however, we document that this is expected on heap_end_
SpaceBitmap(const std::string& name, MemMap* mem_map, uintptr_t* bitmap_begin, size_t bitmap_size,
const void* heap_begin);
- // Helper function for computing bitmap size based on a 64 bit capacity.
- static size_t ComputeBitmapSize(uint64_t capacity);
-
template<bool kSetBit>
bool Modify(const mirror::Object* obj);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 14fa740..22207ee 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1001,6 +1001,27 @@
BaseMutex::DumpAll(os);
}
+void Heap::ResetGcPerformanceInfo() {
+ for (auto& collector : garbage_collectors_) {
+ collector->ResetMeasurements();
+ }
+ total_allocation_time_.StoreRelaxed(0);
+ total_bytes_freed_ever_ = 0;
+ total_objects_freed_ever_ = 0;
+ total_wait_time_ = 0;
+ blocking_gc_count_ = 0;
+ blocking_gc_time_ = 0;
+ gc_count_last_window_ = 0;
+ blocking_gc_count_last_window_ = 0;
+ last_update_time_gc_count_rate_histograms_ = // Round down by the window duration.
+ (NanoTime() / kGcCountRateHistogramWindowDuration) * kGcCountRateHistogramWindowDuration;
+ {
+ MutexLock mu(Thread::Current(), *gc_complete_lock_);
+ gc_count_rate_histogram_.Reset();
+ blocking_gc_count_rate_histogram_.Reset();
+ }
+}
+
uint64_t Heap::GetGcCount() const {
uint64_t gc_count = 0U;
for (auto& collector : garbage_collectors_) {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 09bd370..18244c8 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -598,6 +598,7 @@
// GC performance measuring
void DumpGcPerformanceInfo(std::ostream& os);
+ void ResetGcPerformanceInfo();
// Returns true if we currently care about pause times.
bool CareAboutPauseTimes() const {
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 437fd8c..1923d24 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -694,7 +694,7 @@
const auto section_idx = static_cast<ImageHeader::ImageSections>(i);
auto& section = image_header.GetImageSection(section_idx);
LOG(INFO) << section_idx << " start="
- << reinterpret_cast<void*>(image_header.GetImageBegin() + section.Offset())
+ << reinterpret_cast<void*>(image_header.GetImageBegin() + section.Offset()) << " "
<< section;
}
}
@@ -730,9 +730,9 @@
std::string bitmap_name(StringPrintf("imagespace %s live-bitmap %u", image_filename,
bitmap_index));
std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap(
- accounting::ContinuousSpaceBitmap::CreateFromMemMap(bitmap_name, image_map.release(),
- reinterpret_cast<uint8_t*>(map->Begin()),
- map->Size()));
+ accounting::ContinuousSpaceBitmap::CreateFromMemMap(
+ bitmap_name, image_map.release(), reinterpret_cast<uint8_t*>(map->Begin()),
+ accounting::ContinuousSpaceBitmap::ComputeHeapSize(bitmap_section.Size())));
if (bitmap.get() == nullptr) {
*error_msg = StringPrintf("Could not create bitmap '%s'", bitmap_name.c_str());
return nullptr;
@@ -755,6 +755,7 @@
DCHECK(!error_msg->empty());
return nullptr;
}
+ space->oat_file_non_owned_ = space->oat_file_.get();
if (validate_oat_file && !space->ValidateOatFile(error_msg)) {
DCHECK(!error_msg->empty());
@@ -838,10 +839,12 @@
return true;
}
+
const OatFile* ImageSpace::GetOatFile() const {
- return oat_file_.get();
+ return oat_file_non_owned_;
}
+
OatFile* ImageSpace::ReleaseOatFile() {
CHECK(oat_file_.get() != nullptr);
return oat_file_.release();
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index 54dc7a6..93ff8aa 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -152,6 +152,10 @@
// the ClassLinker during its initialization.
std::unique_ptr<OatFile> oat_file_;
+ // There are times when we need to find the boot image oat file. As
+ // we release ownership during startup, keep a non-owned reference.
+ const OatFile* oat_file_non_owned_;
+
const std::string image_location_;
DISALLOW_COPY_AND_ASSIGN(ImageSpace);
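
The new oat_file_non_owned_ field is the usual observer-next-to-owner pattern: the unique_ptr still manages the OatFile until the ClassLinker takes it over via ReleaseOatFile(), but a raw pointer is stashed first so GetOatFile() keeps working afterwards. A minimal sketch, with OatFile as a stand-in type:

    #include <memory>

    struct OatFile {};

    struct ImageSpaceLike {
      std::unique_ptr<OatFile> oat_file_ = std::make_unique<OatFile>();
      const OatFile* oat_file_non_owned_ = oat_file_.get();  // Survives the release below.

      OatFile* ReleaseOatFile() { return oat_file_.release(); }
      const OatFile* GetOatFile() const { return oat_file_non_owned_; }
    };

Whoever receives the released pointer then owns its lifetime, which is why GetBootOatFile() in class_linker.cc can now go through the image space instead of the boot class path.
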
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 85eefb0..f32d5a1 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -75,7 +75,7 @@
static constexpr size_t kMaxBytesPerSegment = 4096;
// The static field-name for the synthetic object generated to account for class static overhead.
-static constexpr const char* kStaticOverheadName = "$staticOverhead";
+static constexpr const char* kClassOverheadName = "$classOverhead";
enum HprofTag {
HPROF_TAG_STRING = 0x01,
@@ -1092,17 +1092,23 @@
// Class is allocated but not yet loaded: we cannot access its fields or super class.
return;
}
- size_t sFieldCount = klass->NumStaticFields();
- if (sFieldCount != 0) {
- int byteLength = sFieldCount * sizeof(JValue); // TODO bogus; fields are packed
+ const size_t num_static_fields = klass->NumStaticFields();
+ // Total class size including embedded IMT, embedded vtable, and static fields.
+ const size_t class_size = klass->GetClassSize();
+ // Class size excluding static fields (relies on reference fields being the first static fields).
+ const size_t class_size_without_overhead = sizeof(mirror::Class);
+ CHECK_LE(class_size_without_overhead, class_size);
+ const size_t overhead_size = class_size - class_size_without_overhead;
+
+ if (overhead_size != 0) {
// Create a byte array to reflect the allocation of the
// StaticField array at the end of this class.
__ AddU1(HPROF_PRIMITIVE_ARRAY_DUMP);
__ AddClassStaticsId(klass);
__ AddStackTraceSerialNumber(LookupStackTraceSerialNumber(klass));
- __ AddU4(byteLength);
+ __ AddU4(overhead_size);
__ AddU1(hprof_basic_byte);
- for (int i = 0; i < byteLength; ++i) {
+ for (size_t i = 0; i < overhead_size; ++i) {
__ AddU1(0);
}
}
@@ -1119,7 +1125,7 @@
if (klass->IsClassClass()) {
// ClassObjects have their static fields appended, so aren't all the same size.
// But they're at least this size.
- __ AddU4(sizeof(mirror::Class)); // instance size
+ __ AddU4(class_size_without_overhead); // instance size
} else if (klass->IsStringClass()) {
// Strings are variable length with character data at the end like arrays.
// This outputs the size of an empty string.
@@ -1133,15 +1139,15 @@
__ AddU2(0); // empty const pool
// Static fields
- if (sFieldCount == 0) {
- __ AddU2((uint16_t)0);
+ if (overhead_size == 0) {
+ __ AddU2(static_cast<uint16_t>(0));
} else {
- __ AddU2((uint16_t)(sFieldCount+1));
- __ AddStringId(LookupStringId(kStaticOverheadName));
+ __ AddU2(static_cast<uint16_t>(num_static_fields + 1));
+ __ AddStringId(LookupStringId(kClassOverheadName));
__ AddU1(hprof_basic_object);
__ AddClassStaticsId(klass);
- for (size_t i = 0; i < sFieldCount; ++i) {
+ for (size_t i = 0; i < num_static_fields; ++i) {
ArtField* f = klass->GetStaticField(i);
size_t size;
diff --git a/runtime/image.cc b/runtime/image.cc
index 947c914..44193da 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -24,7 +24,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '1', '6', '\0' };
+const uint8_t ImageHeader::kImageVersion[] = { '0', '1', '7', '\0' };
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/image.h b/runtime/image.h
index c6be7ef..d856f21 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -142,6 +142,7 @@
kSectionObjects,
kSectionArtFields,
kSectionArtMethods,
+ kSectionInternedStrings,
kSectionImageBitmap,
kSectionCount, // Number of elements in enum.
};
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 9abbca8..2a96278 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -152,20 +152,28 @@
CHECK(image_space != nullptr);
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
if (!image_added_to_intern_table_) {
- mirror::Object* root = image_space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
- mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
- for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
- mirror::DexCache* dex_cache = dex_caches->Get(i);
- const DexFile* dex_file = dex_cache->GetDexFile();
- const size_t num_strings = dex_file->NumStringIds();
- for (size_t j = 0; j < num_strings; ++j) {
- mirror::String* image_string = dex_cache->GetResolvedString(j);
- if (image_string != nullptr) {
- mirror::String* found = LookupStrong(image_string);
- if (found == nullptr) {
- InsertStrong(image_string);
- } else {
- DCHECK_EQ(found, image_string);
+ const ImageHeader* const header = &image_space->GetImageHeader();
+ // Check if we have the interned strings section.
+ const ImageSection& section = header->GetImageSection(ImageHeader::kSectionInternedStrings);
+ if (section.Size() > 0) {
+ ReadFromMemoryLocked(image_space->Begin() + section.Offset());
+ } else {
+ // TODO: Delete this logic?
+ mirror::Object* root = header->GetImageRoot(ImageHeader::kDexCaches);
+ mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
+ for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
+ mirror::DexCache* dex_cache = dex_caches->Get(i);
+ const DexFile* dex_file = dex_cache->GetDexFile();
+ const size_t num_strings = dex_file->NumStringIds();
+ for (size_t j = 0; j < num_strings; ++j) {
+ mirror::String* image_string = dex_cache->GetResolvedString(j);
+ if (image_string != nullptr) {
+ mirror::String* found = LookupStrong(image_string);
+ if (found == nullptr) {
+ InsertStrong(image_string);
+ } else {
+ DCHECK_EQ(found, image_string);
+ }
}
}
}
@@ -285,6 +293,29 @@
weak_interns_.SweepWeaks(callback, arg);
}
+void InternTable::AddImageInternTable(gc::space::ImageSpace* image_space) {
+ const ImageSection& intern_section = image_space->GetImageHeader().GetImageSection(
+ ImageHeader::kSectionInternedStrings);
+ // Read the string tables from the image.
+ const uint8_t* ptr = image_space->Begin() + intern_section.Offset();
+ const size_t offset = ReadFromMemory(ptr);
+ CHECK_LE(offset, intern_section.Size());
+}
+
+size_t InternTable::ReadFromMemory(const uint8_t* ptr) {
+ MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
+ return ReadFromMemoryLocked(ptr);
+}
+
+size_t InternTable::ReadFromMemoryLocked(const uint8_t* ptr) {
+ return strong_interns_.ReadIntoPreZygoteTable(ptr);
+}
+
+size_t InternTable::WriteToMemory(uint8_t* ptr) {
+ MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
+ return strong_interns_.WriteFromPostZygoteTable(ptr);
+}
+
std::size_t InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& root) const {
if (kIsDebugBuild) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
@@ -300,6 +331,17 @@
return a.Read()->Equals(b.Read());
}
+size_t InternTable::Table::ReadIntoPreZygoteTable(const uint8_t* ptr) {
+ CHECK_EQ(pre_zygote_table_.Size(), 0u);
+ size_t read_count = 0;
+ pre_zygote_table_ = UnorderedSet(ptr, false /* make copy */, &read_count);
+ return read_count;
+}
+
+size_t InternTable::Table::WriteFromPostZygoteTable(uint8_t* ptr) {
+ return post_zygote_table_.WriteToMemory(ptr);
+}
+
void InternTable::Table::Remove(mirror::String* s) {
auto it = post_zygote_table_.Find(GcRoot<mirror::String>(s));
if (it != post_zygote_table_.end()) {
@@ -325,9 +367,13 @@
}
void InternTable::Table::SwapPostZygoteWithPreZygote() {
- CHECK(pre_zygote_table_.Empty());
- std::swap(pre_zygote_table_, post_zygote_table_);
- VLOG(heap) << "Swapping " << pre_zygote_table_.Size() << " interns to the pre zygote table";
+ if (pre_zygote_table_.Empty()) {
+ std::swap(pre_zygote_table_, post_zygote_table_);
+ VLOG(heap) << "Swapping " << pre_zygote_table_.Size() << " interns to the pre zygote table";
+ } else {
+ // This case happens if we read the intern table from the image.
+ VLOG(heap) << "Not swapping due to non-empty pre_zygote_table_";
+ }
}
void InternTable::Table::Insert(mirror::String* s) {
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 1e5d3c2..97ce73c 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -97,6 +97,20 @@
void SwapPostZygoteWithPreZygote()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::intern_table_lock_);
+ // Add an intern table which was serialized to the image.
+ void AddImageInternTable(gc::space::ImageSpace* image_space)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::intern_table_lock_);
+
+ // Read the intern table from memory. The elements aren't copied; the intern hash set data will
+ // point to somewhere within ptr. Only reads the strong interns.
+ size_t ReadFromMemory(const uint8_t* ptr) LOCKS_EXCLUDED(Locks::intern_table_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Write the post zygote intern table to a pointer. Only writes the strong interns since it is
+ // expected that there are no weak interns when this is called from the image writer.
+ size_t WriteToMemory(uint8_t* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ LOCKS_EXCLUDED(Locks::intern_table_lock_);
+
private:
class StringHashEquals {
public:
@@ -133,6 +147,16 @@
EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
void SwapPostZygoteWithPreZygote() EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
size_t Size() const EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+ // ReadIntoPreZygoteTable is called from ReadFromMemory, which happens during runtime creation
+ // when we load the image intern table. Returns how many bytes were read.
+ size_t ReadIntoPreZygoteTable(const uint8_t* ptr)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // The image writer calls WriteFromPostZygoteTable through WriteToMemory; it writes the interns in
+ // the post zygote table. Returns how many bytes were written.
+ size_t WriteFromPostZygoteTable(uint8_t* ptr)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
typedef HashSet<GcRoot<mirror::String>, GcRootEmptyFn, StringHashEquals, StringHashEquals,
@@ -192,6 +216,10 @@
EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
friend class Transaction;
+ size_t ReadFromMemoryLocked(const uint8_t* ptr)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
bool image_added_to_intern_table_ GUARDED_BY(Locks::intern_table_lock_);
bool log_new_roots_ GUARDED_BY(Locks::intern_table_lock_);
bool allow_new_interns_ GUARDED_BY(Locks::intern_table_lock_);
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
index e18d10f..7c48985 100644
--- a/runtime/jdwp/jdwp.h
+++ b/runtime/jdwp/jdwp.h
@@ -297,7 +297,7 @@
private:
explicit JdwpState(const JdwpOptions* options);
- size_t ProcessRequest(Request* request, ExpandBuf* pReply);
+ size_t ProcessRequest(Request* request, ExpandBuf* pReply, bool* skip_reply);
bool InvokeInProgress();
bool IsConnected();
void SuspendByPolicy(JdwpSuspendPolicy suspend_policy, JDWP::ObjectId thread_self_id)
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 612af8b..14f097f 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -99,10 +99,6 @@
don't allow anyone else to interfere with us.
*/
-
-#define kJdwpEventCommandSet 64
-#define kJdwpCompositeCommand 100
-
namespace art {
namespace JDWP {
@@ -612,13 +608,10 @@
*/
DebugInvokeReq* const pReq = Dbg::GetInvokeReq();
if (pReq == nullptr) {
- /*LOGD("SuspendByPolicy: no invoke needed");*/
break;
}
- /* grab this before posting/suspending again */
- AcquireJdwpTokenForEvent(thread_self_id);
-
+ // Execute method.
Dbg::ExecuteMethod(pReq);
}
}
@@ -749,11 +742,11 @@
void JdwpState::EventFinish(ExpandBuf* pReq) {
uint8_t* buf = expandBufGetBuffer(pReq);
- Set4BE(buf, expandBufGetLength(pReq));
- Set4BE(buf + 4, NextRequestSerial());
- Set1(buf + 8, 0); /* flags */
- Set1(buf + 9, kJdwpEventCommandSet);
- Set1(buf + 10, kJdwpCompositeCommand);
+ Set4BE(buf + kJDWPHeaderSizeOffset, expandBufGetLength(pReq));
+ Set4BE(buf + kJDWPHeaderIdOffset, NextRequestSerial());
+ Set1(buf + kJDWPHeaderFlagsOffset, 0); /* flags */
+ Set1(buf + kJDWPHeaderCmdSetOffset, kJDWPEventCmdSet);
+ Set1(buf + kJDWPHeaderCmdOffset, kJDWPEventCompositeCmd);
SendRequest(pReq);
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index f7f70f6..d4e2656 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -52,17 +52,6 @@
return StringPrintf("%#" PRIx64 " (%s)", ref_type_id, signature.c_str());
}
-// Helper function: write a variable-width value into the output input buffer.
-static void WriteValue(ExpandBuf* pReply, int width, uint64_t value) {
- switch (width) {
- case 1: expandBufAdd1(pReply, value); break;
- case 2: expandBufAdd2BE(pReply, value); break;
- case 4: expandBufAdd4BE(pReply, value); break;
- case 8: expandBufAdd8BE(pReply, value); break;
- default: LOG(FATAL) << width; break;
- }
-}
-
static JdwpError WriteTaggedObject(ExpandBuf* reply, ObjectId object_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
uint8_t tag;
@@ -92,7 +81,7 @@
* If "is_constructor" is set, this returns "object_id" rather than the
* expected-to-be-void return value of the called function.
*/
-static JdwpError RequestInvoke(JdwpState*, Request* request, ExpandBuf* pReply,
+static JdwpError RequestInvoke(JdwpState*, Request* request,
ObjectId thread_id, ObjectId object_id,
RefTypeId class_id, MethodId method_id, bool is_constructor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -122,49 +111,15 @@
(options & INVOKE_SINGLE_THREADED) ? " (SINGLE_THREADED)" : "",
(options & INVOKE_NONVIRTUAL) ? " (NONVIRTUAL)" : "");
- JdwpTag resultTag;
- uint64_t resultValue;
- ObjectId exceptObjId;
- JdwpError err = Dbg::InvokeMethod(thread_id, object_id, class_id, method_id, arg_count,
- argValues.get(), argTypes.get(), options, &resultTag,
- &resultValue, &exceptObjId);
- if (err != ERR_NONE) {
- return err;
+ JDWP::JdwpError error = Dbg::PrepareInvokeMethod(request->GetId(), thread_id, object_id,
+ class_id, method_id, arg_count,
+ argValues.get(), argTypes.get(), options);
+ if (error == JDWP::ERR_NONE) {
+ // We successfully requested the invoke. The event thread now owns the arguments array in its
+ // DebugInvokeReq mailbox.
+ argValues.release();
}
-
- if (is_constructor) {
- // If we invoked a constructor (which actually returns void), return the receiver,
- // unless we threw, in which case we return null.
- resultTag = JT_OBJECT;
- resultValue = (exceptObjId == 0) ? object_id : 0;
- }
-
- size_t width = Dbg::GetTagWidth(resultTag);
- expandBufAdd1(pReply, resultTag);
- if (width != 0) {
- WriteValue(pReply, width, resultValue);
- }
- expandBufAdd1(pReply, JT_OBJECT);
- expandBufAddObjectId(pReply, exceptObjId);
-
- VLOG(jdwp) << " --> returned " << resultTag
- << StringPrintf(" %#" PRIx64 " (except=%#" PRIx64 ")", resultValue, exceptObjId);
-
- /* show detailed debug output */
- if (resultTag == JT_STRING && exceptObjId == 0) {
- if (resultValue != 0) {
- if (VLOG_IS_ON(jdwp)) {
- std::string result_string;
- JDWP::JdwpError error = Dbg::StringToUtf8(resultValue, &result_string);
- CHECK_EQ(error, JDWP::ERR_NONE);
- VLOG(jdwp) << " string '" << result_string << "'";
- }
- } else {
- VLOG(jdwp) << " string (null)";
- }
- }
-
- return err;
+ return error;
}
static JdwpError VM_Version(JdwpState*, Request*, ExpandBuf* pReply)
@@ -684,13 +639,14 @@
* Example: Eclipse sometimes uses java/lang/Class.forName(String s) on
* values in the "variables" display.
*/
-static JdwpError CT_InvokeMethod(JdwpState* state, Request* request, ExpandBuf* pReply)
+static JdwpError CT_InvokeMethod(JdwpState* state, Request* request,
+ ExpandBuf* pReply ATTRIBUTE_UNUSED)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
RefTypeId class_id = request->ReadRefTypeId();
ObjectId thread_id = request->ReadThreadId();
MethodId method_id = request->ReadMethodId();
- return RequestInvoke(state, request, pReply, thread_id, 0, class_id, method_id, false);
+ return RequestInvoke(state, request, thread_id, 0, class_id, method_id, false);
}
/*
@@ -700,7 +656,8 @@
* Example: in IntelliJ, create a watch on "new String(myByteArray)" to
* see the contents of a byte[] as a string.
*/
-static JdwpError CT_NewInstance(JdwpState* state, Request* request, ExpandBuf* pReply)
+static JdwpError CT_NewInstance(JdwpState* state, Request* request,
+ ExpandBuf* pReply ATTRIBUTE_UNUSED)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
RefTypeId class_id = request->ReadRefTypeId();
ObjectId thread_id = request->ReadThreadId();
@@ -711,7 +668,7 @@
if (status != ERR_NONE) {
return status;
}
- return RequestInvoke(state, request, pReply, thread_id, object_id, class_id, method_id, true);
+ return RequestInvoke(state, request, thread_id, object_id, class_id, method_id, true);
}
/*
@@ -863,14 +820,15 @@
* object), it will try to invoke the object's toString() function. This
* feature becomes crucial when examining ArrayLists with Eclipse.
*/
-static JdwpError OR_InvokeMethod(JdwpState* state, Request* request, ExpandBuf* pReply)
+static JdwpError OR_InvokeMethod(JdwpState* state, Request* request,
+ ExpandBuf* pReply ATTRIBUTE_UNUSED)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ObjectId object_id = request->ReadObjectId();
ObjectId thread_id = request->ReadThreadId();
RefTypeId class_id = request->ReadRefTypeId();
MethodId method_id = request->ReadMethodId();
- return RequestInvoke(state, request, pReply, thread_id, object_id, class_id, method_id, false);
+ return RequestInvoke(state, request, thread_id, object_id, class_id, method_id, false);
}
static JdwpError OR_DisableCollection(JdwpState*, Request* request, ExpandBuf*)
@@ -1602,13 +1560,27 @@
return result;
}
+// Returns true if the given command_set and command identify an "invoke" command.
+static bool IsInvokeCommand(uint8_t command_set, uint8_t command) {
+ if (command_set == kJDWPClassTypeCmdSet) {
+ return command == kJDWPClassTypeInvokeMethodCmd || command == kJDWPClassTypeNewInstanceCmd;
+ } else if (command_set == kJDWPObjectReferenceCmdSet) {
+ return command == kJDWPObjectReferenceInvokeCmd;
+ } else {
+ return false;
+ }
+}
+
/*
- * Process a request from the debugger.
+ * Process a request from the debugger. The skip_reply flag is set to true to indicate to the
+ * caller that the reply must not be sent to the debugger. This is used for invoke commands,
+ * whose reply is sent by the event thread after completing the invoke.
*
* On entry, the JDWP thread is in VMWAIT.
*/
-size_t JdwpState::ProcessRequest(Request* request, ExpandBuf* pReply) {
+size_t JdwpState::ProcessRequest(Request* request, ExpandBuf* pReply, bool* skip_reply) {
JdwpError result = ERR_NONE;
+ *skip_reply = false;
if (request->GetCommandSet() != kJDWPDdmCmdSet) {
/*
@@ -1661,24 +1633,31 @@
result = ERR_NOT_IMPLEMENTED;
}
- /*
- * Set up the reply header.
- *
- * If we encountered an error, only send the header back.
- */
- uint8_t* replyBuf = expandBufGetBuffer(pReply);
- size_t replyLength = (result == ERR_NONE) ? expandBufGetLength(pReply) : kJDWPHeaderLen;
- Set4BE(replyBuf + 0, replyLength);
- Set4BE(replyBuf + 4, request->GetId());
- Set1(replyBuf + 8, kJDWPFlagReply);
- Set2BE(replyBuf + 9, result);
+ size_t replyLength = 0U;
+ if (result == ERR_NONE && IsInvokeCommand(request->GetCommandSet(), request->GetCommand())) {
+ // We successfully requested an invoke in the event thread. It will send the reply once the
+ // invoke completes, so we must not send it now.
+ *skip_reply = true;
+ } else {
+ /*
+ * Set up the reply header.
+ *
+ * If we encountered an error, only send the header back.
+ */
+ uint8_t* replyBuf = expandBufGetBuffer(pReply);
+ replyLength = (result == ERR_NONE) ? expandBufGetLength(pReply) : kJDWPHeaderLen;
+ Set4BE(replyBuf + kJDWPHeaderSizeOffset, replyLength);
+ Set4BE(replyBuf + kJDWPHeaderIdOffset, request->GetId());
+ Set1(replyBuf + kJDWPHeaderFlagsOffset, kJDWPFlagReply);
+ Set2BE(replyBuf + kJDWPHeaderErrorCodeOffset, result);
- CHECK_GT(expandBufGetLength(pReply), 0U) << GetCommandName(request) << " " << request->GetId();
+ CHECK_GT(expandBufGetLength(pReply), 0U) << GetCommandName(request) << " " << request->GetId();
- size_t respLen = expandBufGetLength(pReply) - kJDWPHeaderLen;
- VLOG(jdwp) << "REPLY: " << GetCommandName(request) << " " << result << " (length=" << respLen << ")";
- if (false) {
- VLOG(jdwp) << HexDump(expandBufGetBuffer(pReply) + kJDWPHeaderLen, respLen, false, "");
+ size_t respLen = expandBufGetLength(pReply) - kJDWPHeaderLen;
+ VLOG(jdwp) << "REPLY: " << GetCommandName(request) << " " << result << " (length=" << respLen << ")";
+ if (false) {
+ VLOG(jdwp) << HexDump(expandBufGetBuffer(pReply) + kJDWPHeaderLen, respLen, false, "");
+ }
}
VLOG(jdwp) << "----------";
diff --git a/runtime/jdwp/jdwp_main.cc b/runtime/jdwp/jdwp_main.cc
index e6b97a2..6bc5e27 100644
--- a/runtime/jdwp/jdwp_main.cc
+++ b/runtime/jdwp/jdwp_main.cc
@@ -395,8 +395,15 @@
JDWP::Request request(netStateBase->input_buffer_, netStateBase->input_count_);
ExpandBuf* pReply = expandBufAlloc();
- size_t replyLength = ProcessRequest(&request, pReply);
- ssize_t cc = netStateBase->WritePacket(pReply, replyLength);
+ bool skip_reply = false;
+ size_t replyLength = ProcessRequest(&request, pReply, &skip_reply);
+ ssize_t cc = 0;
+ if (!skip_reply) {
+ cc = netStateBase->WritePacket(pReply, replyLength);
+ } else {
+ DCHECK_EQ(replyLength, 0U);
+ }
+ expandBufFree(pReply);
/*
* We processed this request and sent its reply so we can release the JDWP token.
@@ -405,10 +412,8 @@
if (cc != static_cast<ssize_t>(replyLength)) {
PLOG(ERROR) << "Failed sending reply to debugger";
- expandBufFree(pReply);
return false;
}
- expandBufFree(pReply);
netStateBase->ConsumeBytes(request.GetLength());
{
MutexLock mu(self, shutdown_lock_);
diff --git a/runtime/jdwp/jdwp_priv.h b/runtime/jdwp/jdwp_priv.h
index f290be0..d58467d 100644
--- a/runtime/jdwp/jdwp_priv.h
+++ b/runtime/jdwp/jdwp_priv.h
@@ -29,15 +29,32 @@
/*
* JDWP constants.
*/
-#define kJDWPHeaderLen 11
-#define kJDWPFlagReply 0x80
+static constexpr size_t kJDWPHeaderSizeOffset = 0U;
+static constexpr size_t kJDWPHeaderIdOffset = 4U;
+static constexpr size_t kJDWPHeaderFlagsOffset = 8U;
+static constexpr size_t kJDWPHeaderErrorCodeOffset = 9U;
+static constexpr size_t kJDWPHeaderCmdSetOffset = 9U;
+static constexpr size_t kJDWPHeaderCmdOffset = 10U;
+static constexpr size_t kJDWPHeaderLen = 11U;
+static constexpr uint8_t kJDWPFlagReply = 0x80;
-#define kMagicHandshake "JDWP-Handshake"
-#define kMagicHandshakeLen (sizeof(kMagicHandshake)-1)
+static constexpr const char kMagicHandshake[] = "JDWP-Handshake";
+static constexpr size_t kMagicHandshakeLen = sizeof(kMagicHandshake) - 1;
+
+/* Invoke commands */
+static constexpr uint8_t kJDWPClassTypeCmdSet = 3U;
+static constexpr uint8_t kJDWPClassTypeInvokeMethodCmd = 3U;
+static constexpr uint8_t kJDWPClassTypeNewInstanceCmd = 4U;
+static constexpr uint8_t kJDWPObjectReferenceCmdSet = 9U;
+static constexpr uint8_t kJDWPObjectReferenceInvokeCmd = 6U;
+
+/* Event command */
+static constexpr uint8_t kJDWPEventCmdSet = 64U;
+static constexpr uint8_t kJDWPEventCompositeCmd = 100U;
/* DDM support */
-#define kJDWPDdmCmdSet 199 /* 0xc7, or 'G'+128 */
-#define kJDWPDdmCmd 1
+static constexpr uint8_t kJDWPDdmCmdSet = 199U; // 0xc7, or 'G'+128
+static constexpr uint8_t kJDWPDdmCmd = 1U;
namespace art {
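The offsets above describe the standard 11-byte JDWP packet header: a 4-byte length, a 4-byte id, a 1-byte flags field, then either a 2-byte error code (replies) or a 1-byte command set plus a 1-byte command (commands). A small standalone sketch laying out both variants with the same offsets; Set4BE/Set2BE are local stand-ins for the ART helpers, and the sketch is not ART code.

// Standalone illustration of the 11-byte JDWP packet header implied by the constants above.
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr size_t kSizeOffset = 0, kIdOffset = 4, kFlagsOffset = 8;
constexpr size_t kErrorCodeOffset = 9, kCmdSetOffset = 9, kCmdOffset = 10, kHeaderLen = 11;
constexpr uint8_t kFlagReply = 0x80;

void Set4BE(uint8_t* p, uint32_t v) {
  p[0] = static_cast<uint8_t>(v >> 24);
  p[1] = static_cast<uint8_t>(v >> 16);
  p[2] = static_cast<uint8_t>(v >> 8);
  p[3] = static_cast<uint8_t>(v);
}
void Set2BE(uint8_t* p, uint16_t v) {
  p[0] = static_cast<uint8_t>(v >> 8);
  p[1] = static_cast<uint8_t>(v);
}
void Dump(const char* what, const uint8_t* p) {
  std::printf("%s:", what);
  for (size_t i = 0; i < kHeaderLen; ++i) std::printf(" %02x", p[i]);
  std::printf("\n");
}

int main() {
  // Reply packet: length, id, flags = 0x80, then a 2-byte error code.
  uint8_t reply[kHeaderLen] = {};
  Set4BE(reply + kSizeOffset, kHeaderLen);
  Set4BE(reply + kIdOffset, 42);           // Id of the request being answered.
  reply[kFlagsOffset] = kFlagReply;
  Set2BE(reply + kErrorCodeOffset, 0);     // ERR_NONE.
  Dump("reply  ", reply);

  // Command packet: length, id, flags = 0, then 1-byte command set and 1-byte command.
  uint8_t event[kHeaderLen] = {};
  Set4BE(event + kSizeOffset, kHeaderLen);
  Set4BE(event + kIdOffset, 43);
  event[kFlagsOffset] = 0;
  event[kCmdSetOffset] = 64;               // kJDWPEventCmdSet.
  event[kCmdOffset] = 100;                 // kJDWPEventCompositeCmd.
  Dump("command", event);
}

EventFinish above fills the command-side variant of this header with kJDWPEventCmdSet and kJDWPEventCompositeCmd, while ProcessRequest fills the reply-side variant.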
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 189306d..2e335dc 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -436,7 +436,7 @@
return nullptr;
}
return new MemMap(name, reinterpret_cast<uint8_t*>(actual), byte_count, actual,
- page_aligned_byte_count, prot, false);
+ page_aligned_byte_count, prot, reuse);
}
MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr, size_t byte_count, int prot, int flags,
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 94024ef..67dcc9c 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -380,8 +380,8 @@
static jobjectArray Class_getDeclaredMethodsUnchecked(JNIEnv* env, jobject javaThis,
jboolean publicOnly) {
ScopedFastNativeObjectAccess soa(env);
- StackHandleScope<3> hs(soa.Self());
- auto* klass = DecodeClass(soa, javaThis);
+ StackHandleScope<2> hs(soa.Self());
+ auto klass = hs.NewHandle(DecodeClass(soa, javaThis));
size_t num_methods = 0;
for (auto& m : klass->GetVirtualMethods(sizeof(void*))) {
auto modifiers = m.GetAccessFlags();
diff --git a/runtime/native/java_lang_Runtime.cc b/runtime/native/java_lang_Runtime.cc
index bd043a8..abac815 100644
--- a/runtime/native/java_lang_Runtime.cc
+++ b/runtime/native/java_lang_Runtime.cc
@@ -52,52 +52,29 @@
exit(status);
}
-static void SetLdLibraryPath(JNIEnv* env, jstring javaLdLibraryPathJstr, jstring javaDexPathJstr) {
+static void SetLdLibraryPath(JNIEnv* env, jstring javaLdLibraryPathJstr) {
#ifdef HAVE_ANDROID_OS
- std::stringstream ss;
if (javaLdLibraryPathJstr != nullptr) {
- ScopedUtfChars javaLdLibraryPath(env, javaLdLibraryPathJstr);
- if (javaLdLibraryPath.c_str() != nullptr) {
- ss << javaLdLibraryPath.c_str();
+ ScopedUtfChars ldLibraryPath(env, javaLdLibraryPathJstr);
+ if (ldLibraryPath.c_str() != nullptr) {
+ android_update_LD_LIBRARY_PATH(ldLibraryPath.c_str());
}
}
- if (javaDexPathJstr != nullptr) {
- ScopedUtfChars javaDexPath(env, javaDexPathJstr);
- if (javaDexPath.c_str() != nullptr) {
- std::vector<std::string> dexPathVector;
- Split(javaDexPath.c_str(), ':', &dexPathVector);
-
- for (auto abi : art::Runtime::Current()->GetCpuAbilist()) {
- for (auto zip_path : dexPathVector) {
- // Native libraries live under lib/<abi>/ inside .apk file.
- ss << ":" << zip_path << "!" << "lib/" << abi;
- }
- }
- }
- }
-
- std::string ldLibraryPathStr = ss.str();
- const char* ldLibraryPath = ldLibraryPathStr.c_str();
- if (*ldLibraryPath == ':') {
- ++ldLibraryPath;
- }
-
- android_update_LD_LIBRARY_PATH(ldLibraryPath);
#else
LOG(WARNING) << "android_update_LD_LIBRARY_PATH not found; .so dependencies will not work!";
- UNUSED(javaLdLibraryPathJstr, javaDexPathJstr, env);
+ UNUSED(javaLdLibraryPathJstr, env);
#endif
}
static jstring Runtime_nativeLoad(JNIEnv* env, jclass, jstring javaFilename, jobject javaLoader,
- jstring javaLdLibraryPathJstr, jstring javaDexPathJstr) {
+ jstring javaLdLibraryPathJstr) {
ScopedUtfChars filename(env, javaFilename);
if (filename.c_str() == nullptr) {
return nullptr;
}
- SetLdLibraryPath(env, javaLdLibraryPathJstr, javaDexPathJstr);
+ SetLdLibraryPath(env, javaLdLibraryPathJstr);
std::string error_msg;
{
@@ -130,7 +107,7 @@
NATIVE_METHOD(Runtime, gc, "()V"),
NATIVE_METHOD(Runtime, maxMemory, "!()J"),
NATIVE_METHOD(Runtime, nativeExit, "(I)V"),
- NATIVE_METHOD(Runtime, nativeLoad, "(Ljava/lang/String;Ljava/lang/ClassLoader;Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;"),
+ NATIVE_METHOD(Runtime, nativeLoad, "(Ljava/lang/String;Ljava/lang/ClassLoader;Ljava/lang/String;)Ljava/lang/String;"),
NATIVE_METHOD(Runtime, totalMemory, "!()J"),
};
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index cb7f5f3..66ec7cc 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -646,6 +646,10 @@
// Create the thread pools.
heap_->CreateThreadPool();
+ // Reset the gc performance data at zygote fork so that the GCs
+ // before fork aren't attributed to an app.
+ heap_->ResetGcPerformanceInfo();
+
if (jit_.get() == nullptr && jit_options_->UseJIT()) {
// Create the JIT if the flag is set and we haven't already create it (happens for run-tests).
CreateJit();
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 5aeca98..11c94db 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -126,10 +126,7 @@
if (IsInInlinedFrame()) {
size_t depth_in_stack_map = current_inlining_depth_ - 1;
InlineInfo inline_info = GetCurrentInlineInfo();
- uint32_t method_index = inline_info.GetMethodIndexAtDepth(depth_in_stack_map);
- InvokeType invoke_type =
- static_cast<InvokeType>(inline_info.GetInvokeTypeAtDepth(depth_in_stack_map));
- return GetResolvedMethod(*GetCurrentQuickFrame(), method_index, invoke_type);
+ return GetResolvedMethod(*GetCurrentQuickFrame(), inline_info, depth_in_stack_map);
} else {
return *cur_quick_frame_;
}
diff --git a/runtime/thread.cc b/runtime/thread.cc
index fe98b0a..fe8b0d8 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2578,12 +2578,11 @@
}
void Thread::ClearDebugInvokeReq() {
- CHECK(Dbg::IsDebuggerActive());
CHECK(GetInvokeReq() != nullptr) << "Debug invoke req not active in thread " << *this;
CHECK(Thread::Current() == this) << "Debug invoke must be finished by the thread itself";
- // We do not own the DebugInvokeReq* so we must not delete it, it is the responsibility of
- // the owner (the JDWP thread).
+ DebugInvokeReq* req = tlsPtr_.debug_invoke_req;
tlsPtr_.debug_invoke_req = nullptr;
+ delete req;
}
void Thread::PushVerifier(verifier::MethodVerifier* verifier) {
diff --git a/runtime/thread.h b/runtime/thread.h
index 9311bef..0e71c08 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -781,15 +781,14 @@
void DeactivateSingleStepControl();
// Sets debug invoke request for debugging. When the thread is resumed,
- // it executes the method described by this request then suspends itself.
- // The thread does not take ownership of the given DebugInvokeReq*, it is
- // owned by the JDWP thread which is waiting for the execution of the
- // method.
+ // it executes the method described by this request, then sends the reply
+ // before suspending itself. The thread takes ownership of the given
+ // DebugInvokeReq*, which is deleted by a call to ClearDebugInvokeReq.
void SetDebugInvokeReq(DebugInvokeReq* req);
// Clears debug invoke request for debugging. When the thread completes
- // method invocation, it clears its debug invoke request, signals the
- // JDWP thread and suspends itself.
+ // method invocation, it deletes its debug invoke request and suspends
+ // itself.
void ClearDebugInvokeReq();
// Returns the fake exception used to activate deoptimization.
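The updated comments describe an ownership handoff: the JDWP thread allocates the DebugInvokeReq, SetDebugInvokeReq transfers ownership to the event thread, and ClearDebugInvokeReq deletes it once the invoke and its reply are done. A minimal sketch of that lifecycle, using a hypothetical FakeThread class rather than the real art::Thread:

// Sketch of the ownership handoff only; names mirror the ART API but this is not ART code.
#include <cstdio>

struct DebugInvokeReq { int request_id; };

class FakeThread {
 public:
  // Takes ownership of req; the real SetDebugInvokeReq stores it in thread-local state.
  void SetDebugInvokeReq(DebugInvokeReq* req) { debug_invoke_req_ = req; }
  DebugInvokeReq* GetInvokeReq() const { return debug_invoke_req_; }
  // After the invoke completes and the reply is sent, the thread deletes its own request.
  void ClearDebugInvokeReq() {
    DebugInvokeReq* req = debug_invoke_req_;
    debug_invoke_req_ = nullptr;
    delete req;
  }
 private:
  DebugInvokeReq* debug_invoke_req_ = nullptr;
};

int main() {
  FakeThread event_thread;
  event_thread.SetDebugInvokeReq(new DebugInvokeReq{7});  // JDWP thread hands off ownership.
  std::printf("executing invoke %d\n", event_thread.GetInvokeReq()->request_id);
  event_thread.ClearDebugInvokeReq();                     // Event thread frees it afterwards.
}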
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index af9ba68..b697b43 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -875,31 +875,36 @@
// The debugger thread must not suspend itself due to debugger activity!
Thread* debug_thread = Dbg::GetDebugThread();
- CHECK(debug_thread != nullptr);
CHECK(self != debug_thread);
CHECK_NE(self->GetState(), kRunnable);
Locks::mutator_lock_->AssertNotHeld(self);
- {
+ // The debugger may have detached while we were executing an invoke request. In that case, we
+ // must not suspend ourselves.
+ DebugInvokeReq* pReq = self->GetInvokeReq();
+ const bool skip_thread_suspension = (pReq != nullptr && !Dbg::IsDebuggerActive());
+ if (!skip_thread_suspension) {
// Collisions with other suspends aren't really interesting. We want
// to ensure that we're the only one fiddling with the suspend count
// though.
MutexLock mu(self, *Locks::thread_suspend_count_lock_);
self->ModifySuspendCount(self, +1, true);
CHECK_GT(self->GetSuspendCount(), 0);
+
+ VLOG(threads) << *self << " self-suspending (debugger)";
+ } else {
+ // We must no longer be subject to debugger suspension.
+ MutexLock mu(self, *Locks::thread_suspend_count_lock_);
+ CHECK_EQ(self->GetDebugSuspendCount(), 0) << "Debugger detached without resuming us";
+
+ VLOG(threads) << *self << " not self-suspending because debugger detached during invoke";
}
- VLOG(threads) << *self << " self-suspending (debugger)";
-
- // Tell JDWP we've completed invocation and are ready to suspend.
- DebugInvokeReq* const pReq = self->GetInvokeReq();
+ // If the debugger requested an invoke, we need to send the reply and clear the request.
if (pReq != nullptr) {
- // Clear debug invoke request before signaling.
+ Dbg::FinishInvokeMethod(pReq);
self->ClearDebugInvokeReq();
-
- VLOG(jdwp) << "invoke complete, signaling";
- MutexLock mu(self, pReq->lock);
- pReq->cond.Signal(self);
+ pReq = nullptr; // The object has been deleted; clear the pointer for safety.
}
// Tell JDWP that we've completed suspension. The JDWP thread can't
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index e7857a0..0c7cce9 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -369,7 +369,7 @@
void WellKnownClasses::LateInit(JNIEnv* env) {
ScopedLocalRef<jclass> java_lang_Runtime(env, env->FindClass("java/lang/Runtime"));
- java_lang_Runtime_nativeLoad = CacheMethod(env, java_lang_Runtime.get(), true, "nativeLoad", "(Ljava/lang/String;Ljava/lang/ClassLoader;Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;");
+ java_lang_Runtime_nativeLoad = CacheMethod(env, java_lang_Runtime.get(), true, "nativeLoad", "(Ljava/lang/String;Ljava/lang/ClassLoader;Ljava/lang/String;)Ljava/lang/String;");
}
mirror::Class* WellKnownClasses::ToClass(jclass global_jclass) {
diff --git a/test/098-ddmc/src/Main.java b/test/098-ddmc/src/Main.java
index f74a17e..4914ba2 100644
--- a/test/098-ddmc/src/Main.java
+++ b/test/098-ddmc/src/Main.java
@@ -43,7 +43,9 @@
System.out.println("Confirm when we overflow, we don't roll over to zero. b/17392248");
final int overflowAllocations = 64 * 1024; // Won't fit in unsigned 16-bit value.
- // Keep the new objects live so they are not garbage collected.
+ // TODO: Temporary fix. Keep the new objects live so they are not garbage collected.
+ // This will cause an OOM exception for GC stress tests. The root cause is the changed behaviour
+ // of getRecentAllocations(). We are working on restoring its old behaviour. b/20037135
Object[] objects = new Object[overflowAllocations];
for (int i = 0; i < overflowAllocations; i++) {
objects[i] = new Object();
@@ -53,6 +55,12 @@
System.out.println("after > before=" + (after.numberOfEntries > before.numberOfEntries));
System.out.println("after.numberOfEntries=" + after.numberOfEntries);
+ // TODO: Temporary fix as above. b/20037135
+ objects = null;
+ Runtime.getRuntime().gc();
+ final int fillerStrings = 16 * 1024;
+ String[] strings = new String[fillerStrings];
+
System.out.println("Disable and confirm back to empty");
DdmVmInternal.enableRecentAllocations(false);
System.out.println("status=" + DdmVmInternal.getRecentAllocationStatus());
@@ -68,8 +76,8 @@
System.out.println("Confirm we can reenable twice in a row without losing allocations");
DdmVmInternal.enableRecentAllocations(true);
System.out.println("status=" + DdmVmInternal.getRecentAllocationStatus());
- for (int i = 0; i < 16 * 1024; i++) {
- objects[i] = new String("fnord");
+ for (int i = 0; i < fillerStrings; i++) {
+ strings[i] = new String("fnord");
}
Allocations first = new Allocations(DdmVmInternal.getRecentAllocations());
DdmVmInternal.enableRecentAllocations(true);
diff --git a/test/137-cfi/cfi.cc b/test/137-cfi/cfi.cc
index 83f7711..601fbaa 100644
--- a/test/137-cfi/cfi.cc
+++ b/test/137-cfi/cfi.cc
@@ -29,6 +29,9 @@
#include "base/logging.h"
#include "base/macros.h"
+#include "gc/heap.h"
+#include "gc/space/image_space.h"
+#include "oat_file.h"
#include "utils.h"
namespace art {
@@ -84,8 +87,26 @@
}
#endif
+// Currently we have to fall back to our own loader for the boot image when it's compiled PIC,
+// because its base is zero. Thus in-process unwinding through it won't work. This helper
+// detects that case.
+#if __linux__
+static bool IsPicImage() {
+ gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
+ CHECK(image_space != nullptr); // We should be running with an image.
+ const OatFile* oat_file = image_space->GetOatFile();
+ CHECK(oat_file != nullptr); // We should have an oat file to go with the image.
+ return oat_file->IsPic();
+}
+#endif
+
extern "C" JNIEXPORT jboolean JNICALL Java_Main_unwindInProcess(JNIEnv*, jobject, jint, jboolean) {
#if __linux__
+ if (IsPicImage()) {
+ LOG(INFO) << "Image is pic, in-process unwinding check bypassed.";
+ return JNI_TRUE;
+ }
+
// TODO: What to do on Valgrind?
std::unique_ptr<Backtrace> bt(Backtrace::Create(BACKTRACE_CURRENT_PROCESS, GetTid()));
diff --git a/test/450-checker-types/src/Main.java b/test/450-checker-types/src/Main.java
index 4056275..9070627 100644
--- a/test/450-checker-types/src/Main.java
+++ b/test/450-checker-types/src/Main.java
@@ -364,6 +364,37 @@
((SubclassA)b).$noinline$g();
}
+ public SubclassA $noinline$getSubclass() { throw new RuntimeException(); }
+
+ /// CHECK-START: void Main.testArraySimpleRemove() instruction_simplifier_after_types (before)
+ /// CHECK: CheckCast
+
+ /// CHECK-START: void Main.testArraySimpleRemove() instruction_simplifier_after_types (after)
+ /// CHECK-NOT: CheckCast
+ public void testArraySimpleRemove() {
+ Super[] b = new SubclassA[10];
+ SubclassA[] c = (SubclassA[])b;
+ }
+
+ /// CHECK-START: void Main.testInvokeSimpleRemove() instruction_simplifier_after_types (before)
+ /// CHECK: CheckCast
+
+ /// CHECK-START: void Main.testInvokeSimpleRemove() instruction_simplifier_after_types (after)
+ /// CHECK-NOT: CheckCast
+ public void testInvokeSimpleRemove() {
+ Super b = $noinline$getSubclass();
+ ((SubclassA)b).$noinline$g();
+ }
+ /// CHECK-START: void Main.testArrayGetSimpleRemove() instruction_simplifier_after_types (before)
+ /// CHECK: CheckCast
+
+ /// CHECK-START: void Main.testArrayGetSimpleRemove() instruction_simplifier_after_types (after)
+ /// CHECK-NOT: CheckCast
+ public void testArrayGetSimpleRemove() {
+ Super[] a = new SubclassA[10];
+ ((SubclassA)a[0]).$noinline$g();
+ }
+
public static void main(String[] args) {
}
}
diff --git a/test/482-checker-loop-back-edge-use/src/Main.java b/test/482-checker-loop-back-edge-use/src/Main.java
index 5754723..a4280de 100644
--- a/test/482-checker-loop-back-edge-use/src/Main.java
+++ b/test/482-checker-loop-back-edge-use/src/Main.java
@@ -36,8 +36,8 @@
}
/// CHECK-START: void Main.loop3(boolean) liveness (after)
- /// CHECK: ParameterValue liveness:4 ranges:{[4,62)} uses:[58,62]
- /// CHECK: Goto liveness:60
+ /// CHECK: ParameterValue liveness:4 ranges:{[4,64)} uses:[60,64]
+ /// CHECK: Goto liveness:62
/// CHECK-START: void Main.loop3(boolean) liveness (after)
/// CHECK-NOT: Goto liveness:56
@@ -63,9 +63,9 @@
}
/// CHECK-START: void Main.loop5(boolean) liveness (after)
- /// CHECK: ParameterValue liveness:4 ranges:{[4,52)} uses:[35,44,48,52]
- /// CHECK: Goto liveness:46
- /// CHECK: Goto liveness:50
+ /// CHECK: ParameterValue liveness:4 ranges:{[4,54)} uses:[37,46,50,54]
+ /// CHECK: Goto liveness:48
+ /// CHECK: Goto liveness:52
public static void loop5(boolean incoming) {
// 'incoming' must have a use at both back edges.
while (Runtime.getRuntime() != null) {
@@ -76,8 +76,8 @@
}
/// CHECK-START: void Main.loop6(boolean) liveness (after)
- /// CHECK: ParameterValue liveness:4 ranges:{[4,48)} uses:[26,48]
- /// CHECK: Goto liveness:46
+ /// CHECK: ParameterValue liveness:4 ranges:{[4,50)} uses:[26,50]
+ /// CHECK: Goto liveness:48
/// CHECK-START: void Main.loop6(boolean) liveness (after)
/// CHECK-NOT: Goto liveness:24
@@ -90,9 +90,9 @@
}
/// CHECK-START: void Main.loop7(boolean) liveness (after)
- /// CHECK: ParameterValue liveness:4 ranges:{[4,52)} uses:[34,43,48,52]
- /// CHECK: Goto liveness:46
- /// CHECK: Goto liveness:50
+ /// CHECK: ParameterValue liveness:4 ranges:{[4,54)} uses:[36,45,50,54]
+ /// CHECK: Goto liveness:48
+ /// CHECK: Goto liveness:52
public static void loop7(boolean incoming) {
// 'incoming' must have a use at both back edges.
while (Runtime.getRuntime() != null) {
@@ -102,9 +102,9 @@
}
/// CHECK-START: void Main.loop8() liveness (after)
- /// CHECK: StaticFieldGet liveness:14 ranges:{[14,46)} uses:[37,42,46]
- /// CHECK: Goto liveness:40
- /// CHECK: Goto liveness:44
+ /// CHECK: StaticFieldGet liveness:14 ranges:{[14,48)} uses:[39,44,48]
+ /// CHECK: Goto liveness:42
+ /// CHECK: Goto liveness:46
public static void loop8() {
// 'incoming' must have a use at both back edges.
boolean incoming = field;
@@ -114,8 +114,8 @@
}
/// CHECK-START: void Main.loop9() liveness (after)
- /// CHECK: StaticFieldGet liveness:24 ranges:{[24,38)} uses:[33,38]
- /// CHECK: Goto liveness:40
+ /// CHECK: StaticFieldGet liveness:26 ranges:{[26,40)} uses:[35,40]
+ /// CHECK: Goto liveness:42
public static void loop9() {
while (Runtime.getRuntime() != null) {
// 'incoming' must only have a use in the inner loop.
diff --git a/test/496-checker-inlining-and-class-loader/expected.txt b/test/496-checker-inlining-and-class-loader/expected.txt
new file mode 100644
index 0000000..c6fcb51
--- /dev/null
+++ b/test/496-checker-inlining-and-class-loader/expected.txt
@@ -0,0 +1,4 @@
+Request for LoadedByMyClassLoader
+Request for Main
+In between the two calls.
+In $noinline$bar
diff --git a/test/496-checker-inlining-and-class-loader/info.txt b/test/496-checker-inlining-and-class-loader/info.txt
new file mode 100644
index 0000000..aa4b256
--- /dev/null
+++ b/test/496-checker-inlining-and-class-loader/info.txt
@@ -0,0 +1,2 @@
+Regression test to ensure compilers preserve JLS
+semantics of class loading.
diff --git a/test/496-checker-inlining-and-class-loader/src/Main.java b/test/496-checker-inlining-and-class-loader/src/Main.java
new file mode 100644
index 0000000..f6d0b41
--- /dev/null
+++ b/test/496-checker-inlining-and-class-loader/src/Main.java
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.util.List;
+
+class MyClassLoader extends ClassLoader {
+ MyClassLoader() throws Exception {
+ super(MyClassLoader.class.getClassLoader());
+
+ // Some magic to get access to the pathList field of BaseDexClassLoader.
+ ClassLoader loader = getClass().getClassLoader();
+ Class<?> baseDexClassLoader = loader.getClass().getSuperclass();
+ Field f = baseDexClassLoader.getDeclaredField("pathList");
+ f.setAccessible(true);
+ Object pathList = f.get(loader);
+
+ // Some magic to get access to the dexElements field of pathList.
+ f = pathList.getClass().getDeclaredField("dexElements");
+ f.setAccessible(true);
+ dexElements = (Object[]) f.get(pathList);
+ dexFileField = dexElements[0].getClass().getDeclaredField("dexFile");
+ dexFileField.setAccessible(true);
+ }
+
+ Object[] dexElements;
+ Field dexFileField;
+
+ protected Class<?> loadClass(String className, boolean resolve) throws ClassNotFoundException {
+ System.out.println("Request for " + className);
+
+ // We're only going to handle LoadedByMyClassLoader.
+ if (className != "LoadedByMyClassLoader") {
+ return getParent().loadClass(className);
+ }
+
+ // Mimic what DexPathList.findClass is doing.
+ try {
+ for (Object element : dexElements) {
+ Object dex = dexFileField.get(element);
+ Method method = dex.getClass().getDeclaredMethod(
+ "loadClassBinaryName", String.class, ClassLoader.class, List.class);
+
+ if (dex != null) {
+ Class clazz = (Class)method.invoke(dex, className, this, null);
+ if (clazz != null) {
+ return clazz;
+ }
+ }
+ }
+ } catch (Exception e) { /* Ignore */ }
+ return null;
+ }
+}
+
+class LoadedByMyClassLoader {
+ /// CHECK-START: void LoadedByMyClassLoader.bar() inliner (before)
+ /// CHECK: LoadClass
+ /// CHECK-NEXT: ClinitCheck
+ /// CHECK-NEXT: InvokeStaticOrDirect
+ /// CHECK-NEXT: LoadClass
+ /// CHECK-NEXT: ClinitCheck
+ /// CHECK-NEXT: StaticFieldGet
+ /// CHECK-NEXT: LoadString
+ /// CHECK-NEXT: NullCheck
+ /// CHECK-NEXT: InvokeVirtual
+
+ /// CHECK-START: void LoadedByMyClassLoader.bar() inliner (after)
+ /// CHECK: LoadClass
+ /// CHECK-NEXT: ClinitCheck
+ /* We inlined Main.$inline$bar */
+ /// CHECK-NEXT: LoadClass
+ /// CHECK-NEXT: ClinitCheck
+ /// CHECK-NEXT: StaticFieldGet
+ /// CHECK-NEXT: LoadString
+ /// CHECK-NEXT: NullCheck
+ /// CHECK-NEXT: InvokeVirtual
+
+ /// CHECK-START: void LoadedByMyClassLoader.bar() register (before)
+ /* Load and initialize Main */
+ /// CHECK: LoadClass gen_clinit_check:true
+ /* Load and initialize System */
+ /// CHECK-NEXT: LoadClass gen_clinit_check:true
+ /// CHECK-NEXT: StaticFieldGet
+ /// CHECK-NEXT: LoadString
+ /// CHECK-NEXT: NullCheck
+ /// CHECK-NEXT: InvokeVirtual
+ public static void bar() {
+ Main.$inline$bar();
+ System.out.println("In between the two calls.");
+ Main.$noinline$bar();
+ }
+}
+
+class Main {
+ public static void main(String[] args) throws Exception {
+ MyClassLoader o = new MyClassLoader();
+ Class foo = o.loadClass("LoadedByMyClassLoader");
+ Method m = foo.getDeclaredMethod("bar");
+ m.invoke(null);
+ }
+
+ public static void $inline$bar() {
+ }
+
+ public static void $noinline$bar() {
+ try {
+ System.out.println("In $noinline$bar");
+ } catch (Throwable t) { /* Ignore */ }
+ }
+}
diff --git a/test/497-inlining-and-class-loader/clear_dex_cache.cc b/test/497-inlining-and-class-loader/clear_dex_cache.cc
new file mode 100644
index 0000000..f9b33a2
--- /dev/null
+++ b/test/497-inlining-and-class-loader/clear_dex_cache.cc
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "art_method-inl.h"
+#include "jni.h"
+#include "scoped_thread_state_change.h"
+#include "stack.h"
+#include "thread.h"
+
+namespace art {
+
+namespace {
+
+extern "C" JNIEXPORT jobject JNICALL Java_Main_cloneResolvedMethods(JNIEnv*, jclass, jclass cls) {
+ ScopedObjectAccess soa(Thread::Current());
+ return soa.Vm()->AddGlobalRef(
+ soa.Self(),
+ soa.Decode<mirror::Class*>(cls)->GetDexCache()->GetResolvedMethods()->Clone(soa.Self()));
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_restoreResolvedMethods(
+ JNIEnv*, jclass, jclass cls, jobject old_cache) {
+ ScopedObjectAccess soa(Thread::Current());
+ mirror::PointerArray* now = soa.Decode<mirror::Class*>(cls)->GetDexCache()->GetResolvedMethods();
+ mirror::PointerArray* old = soa.Decode<mirror::PointerArray*>(old_cache);
+ for (size_t i = 0, e = old->GetLength(); i < e; ++i) {
+ now->SetElementPtrSize(i, old->GetElementPtrSize<void*>(i, sizeof(void*)), sizeof(void*));
+ }
+}
+
+} // namespace
+
+} // namespace art
diff --git a/test/497-inlining-and-class-loader/expected.txt b/test/497-inlining-and-class-loader/expected.txt
new file mode 100644
index 0000000..3e1d85e
--- /dev/null
+++ b/test/497-inlining-and-class-loader/expected.txt
@@ -0,0 +1,7 @@
+java.lang.Exception
+ at Main.$noinline$bar(Main.java:127)
+ at Level2.$inline$bar(Level1.java:25)
+ at Level1.$inline$bar(Level1.java:19)
+ at LoadedByMyClassLoader.bar(Main.java:82)
+ at java.lang.reflect.Method.invoke(Native Method)
+ at Main.main(Main.java:101)
diff --git a/test/497-inlining-and-class-loader/info.txt b/test/497-inlining-and-class-loader/info.txt
new file mode 100644
index 0000000..e7f02aa
--- /dev/null
+++ b/test/497-inlining-and-class-loader/info.txt
@@ -0,0 +1,2 @@
+Regression test for optimizing to ensure it is using
+the correct class loader when walking inlined frames.
diff --git a/test/497-inlining-and-class-loader/src/Level1.java b/test/497-inlining-and-class-loader/src/Level1.java
new file mode 100644
index 0000000..977af83
--- /dev/null
+++ b/test/497-inlining-and-class-loader/src/Level1.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Level1 {
+ public static void $inline$bar() {
+ Level2.$inline$bar();
+ }
+}
+
+class Level2 {
+ public static void $inline$bar() {
+ Main.$noinline$bar();
+ }
+}
diff --git a/test/497-inlining-and-class-loader/src/Main.java b/test/497-inlining-and-class-loader/src/Main.java
new file mode 100644
index 0000000..0f7eb59
--- /dev/null
+++ b/test/497-inlining-and-class-loader/src/Main.java
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.util.List;
+
+class MyClassLoader extends ClassLoader {
+ MyClassLoader() throws Exception {
+ super(MyClassLoader.class.getClassLoader());
+
+ // Some magic to get access to the pathList field of BaseDexClassLoader.
+ ClassLoader loader = getClass().getClassLoader();
+ Class<?> baseDexClassLoader = loader.getClass().getSuperclass();
+ Field f = baseDexClassLoader.getDeclaredField("pathList");
+ f.setAccessible(true);
+ Object pathList = f.get(loader);
+
+ // Some magic to get access to the dexElements field of pathList.
+ f = pathList.getClass().getDeclaredField("dexElements");
+ f.setAccessible(true);
+ dexElements = (Object[]) f.get(pathList);
+ dexFileField = dexElements[0].getClass().getDeclaredField("dexFile");
+ dexFileField.setAccessible(true);
+ }
+
+ Object[] dexElements;
+ Field dexFileField;
+
+ static ClassLoader level1ClassLoader;
+
+ protected Class<?> loadClass(String className, boolean resolve) throws ClassNotFoundException {
+ if (this != level1ClassLoader) {
+ if (className.equals("Level1")) {
+ return level1ClassLoader.loadClass(className);
+ } else if (className.equals("Level2")) {
+ throw new ClassNotFoundException("None of my methods require Level2!");
+ } else if (!className.equals("LoadedByMyClassLoader")) {
+ // We're only going to handle LoadedByMyClassLoader.
+ return getParent().loadClass(className);
+ }
+ } else {
+ if (className != "Level1" && className != "Level2") {
+ return getParent().loadClass(className);
+ }
+ }
+
+ // Mimic what DexPathList.findClass is doing.
+ try {
+ for (Object element : dexElements) {
+ Object dex = dexFileField.get(element);
+ Method method = dex.getClass().getDeclaredMethod(
+ "loadClassBinaryName", String.class, ClassLoader.class, List.class);
+
+ if (dex != null) {
+ Class clazz = (Class)method.invoke(dex, className, this, null);
+ if (clazz != null) {
+ return clazz;
+ }
+ }
+ }
+ } catch (Exception e) { /* Ignore */ }
+ return null;
+ }
+}
+
+class LoadedByMyClassLoader {
+ public static void bar() {
+ Level1.$inline$bar();
+ }
+}
+
+class Main {
+ static {
+ System.loadLibrary("arttest");
+ }
+
+ public static void main(String[] args) throws Exception {
+ // Clone resolved methods, to restore the original version just
+ // before we walk the stack in $noinline$bar.
+ savedResolvedMethods = cloneResolvedMethods(Main.class);
+
+ MyClassLoader o = new MyClassLoader();
+ MyClassLoader.level1ClassLoader = new MyClassLoader();
+ Class foo = o.loadClass("LoadedByMyClassLoader");
+ Method m = foo.getDeclaredMethod("bar");
+ try {
+ m.invoke(null);
+ } catch (Error e) { /* Ignore */ }
+ }
+
+ public static void $inline$bar() {
+ }
+
+ public static void $noinline$bar() {
+ try {
+ // Be evil and clear all dex cache entries.
+ Field f = Class.class.getDeclaredField("dexCache");
+ f.setAccessible(true);
+ Object dexCache = f.get(Main.class);
+ f = dexCache.getClass().getDeclaredField("resolvedTypes");
+ f.setAccessible(true);
+ Object[] array = (Object[]) f.get(dexCache);
+ for (int i = 0; i < array.length; i++) {
+ array[i] = null;
+ }
+ restoreResolvedMethods(Main.class, savedResolvedMethods);
+ } catch (Throwable t) { /* Ignore */ }
+
+ // This will walk the stack, trying to resolve methods in it.
+ // Because we cleared dex cache entries, we will have to find
+ // classes again, which requires using the correct class loader
+ // in the presence of inlining.
+ new Exception().printStackTrace();
+ }
+ static Object savedResolvedMethods;
+
+ static native Object cloneResolvedMethods(Class<?> cls);
+ static native void restoreResolvedMethods(Class<?> cls, Object saved);
+}
diff --git a/test/498-type-propagation/expected.txt b/test/498-type-propagation/expected.txt
new file mode 100644
index 0000000..ccaf6f8
--- /dev/null
+++ b/test/498-type-propagation/expected.txt
@@ -0,0 +1 @@
+Enter
diff --git a/test/498-type-propagation/info.txt b/test/498-type-propagation/info.txt
new file mode 100644
index 0000000..b895e91
--- /dev/null
+++ b/test/498-type-propagation/info.txt
@@ -0,0 +1,2 @@
+Regression test for the SSA building of the optimizing
+compiler. See comment in smali file.
diff --git a/test/498-type-propagation/smali/TypePropagation.smali b/test/498-type-propagation/smali/TypePropagation.smali
new file mode 100644
index 0000000..088ca89
--- /dev/null
+++ b/test/498-type-propagation/smali/TypePropagation.smali
@@ -0,0 +1,30 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTypePropagation;
+
+.super Ljava/lang/Object;
+
+.method public static method([I)V
+ .registers 2
+ const/4 v0, 0
+ # When building the SSA graph, we will create a phi for v0, which will be of type
+ # integer. Only when we get rid of that phi in the redundant phi elimination will
+ # we realize it's just null.
+ :start
+ if-eq v1, v0, :end
+ if-eq v1, v0, :start
+ :end
+ return-void
+.end method
diff --git a/test/498-type-propagation/src/Main.java b/test/498-type-propagation/src/Main.java
new file mode 100644
index 0000000..7a14172
--- /dev/null
+++ b/test/498-type-propagation/src/Main.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ // Workaround for b/18051191.
+ System.out.println("Enter");
+ Class<?> c = Class.forName("TypePropagation");
+ Method m = c.getMethod("method", int[].class);
+ int[] array = new int[7];
+ Object[] arguments = { array };
+ m.invoke(null, arguments);
+ }
+}
diff --git a/test/Android.libarttest.mk b/test/Android.libarttest.mk
index 847ad0d..fcb9f8a 100644
--- a/test/Android.libarttest.mk
+++ b/test/Android.libarttest.mk
@@ -34,7 +34,8 @@
455-set-vreg/set_vreg_jni.cc \
457-regs/regs_jni.cc \
461-get-reference-vreg/get_reference_vreg_jni.cc \
- 466-get-live-vreg/get_live_vreg_jni.cc
+ 466-get-live-vreg/get_live_vreg_jni.cc \
+ 497-inlining-and-class-loader/clear_dex_cache.cc
ART_TARGET_LIBARTTEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttest.so
ifdef TARGET_2ND_ARCH
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index e95f147..469df1f 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -229,16 +229,10 @@
TEST_ART_BROKEN_NO_RELOCATE_TESTS :=
-# Tests that are broken with GC stress.
-TEST_ART_BROKEN_GCSTRESS_RUN_TESTS :=
-
-ifneq (,$(filter gcstress,$(GC_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),gcstress,$(JNI_TYPES), \
- $(IMAGE_TYPES), $(PICTEST_TYPES), $(DBEUGGABLE_TYPES), $(TEST_ART_BROKEN_GCSTRESS_RUN_TESTS), $(ALL_ADDRESS_SIZES))
-endif
-
-TEST_ART_BROKEN_GCSTRESS_RUN_TESTS :=
+# 098-ddmc is broken until we restore the old behavior of DDMS getRecentAllocations(). b/20037135
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+ $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), 098-ddmc, $(ALL_ADDRESS_SIZES))
# 115-native-bridge setup is complicated. Need to implement it correctly for the target.
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES),$(COMPILER_TYPES), \
@@ -344,6 +338,7 @@
457-regs \
461-get-reference-vreg \
466-get-live-vreg \
+ 497-inlining-and-class-loader \
ifneq (,$(filter ndebug,$(RUN_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),ndebug,$(PREBUILD_TYPES), \
@@ -382,7 +377,8 @@
# Known broken tests for the default compiler (Quick).
TEST_ART_BROKEN_DEFAULT_RUN_TESTS := \
- 457-regs
+ 457-regs \
+ 496-checker-inlining-and-class-loader
ifneq (,$(filter default,$(COMPILER_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index 77e6b1a..62fd67b 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -60,7 +60,7 @@
done
if [[ $mode == "host" ]]; then
- make_command="make $j_arg build-art-host-tests $common_targets"
+ make_command="make $j_arg build-art-host-tests $common_targets out/host/linux-x86/lib/libjavacoretests.so out/host/linux-x86/lib64/libjavacoretests.so"
echo "Executing $make_command"
$make_command
elif [[ $mode == "target" ]]; then
@@ -70,7 +70,7 @@
# Use '-e' to force the override of TARGET_GLOBAL_LDFLAGS.
# Also, we build extra tools that will be used by tests, so that
# they are compiled with our own linker.
- make_command="make -e $j_arg build-art-target-tests $common_targets libjavacrypto linker toybox toolbox sh"
+ make_command="make -e $j_arg build-art-target-tests $common_targets libjavacrypto libjavacoretests linker toybox toolbox sh"
echo "Executing env $env $make_command"
env $env $make_command
fi
diff --git a/tools/checker/match/line.py b/tools/checker/match/line.py
index 711d814..ce11e2a 100644
--- a/tools/checker/match/line.py
+++ b/tools/checker/match/line.py
@@ -41,7 +41,7 @@
if expression.name in variables:
pattern = re.escape(variables[expression.name])
else:
- Logger.testFailed("Multiple definitions of variable \"{}\"".format(expression.name),
+ Logger.testFailed("Missing definition of variable \"{}\"".format(expression.name),
pos.fileName, pos.lineNo)
else:
pattern = expression.pattern
diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh
index 344d2ded..4e76eb4 100755
--- a/tools/run-libcore-tests.sh
+++ b/tools/run-libcore-tests.sh
@@ -33,7 +33,8 @@
fi
# Packages that currently work correctly with the expectation files.
-working_packages=("libcore.icu"
+working_packages=("dalvik.system"
+ "libcore.icu"
"libcore.io"
"libcore.java.lang"
"libcore.java.math"