Merge "Revert "Remove DCHECK that does not work for gtests.""
diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h
index ad255b8..969f5b9 100644
--- a/runtime/base/allocator.h
+++ b/runtime/base/allocator.h
@@ -22,6 +22,7 @@
#include <unordered_map>
#include "atomic.h"
+#include "base/hash_map.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "base/type_static_if.h"
@@ -170,6 +171,14 @@
using AllocationTrackingUnorderedMap = std::unordered_map<
Key, T, Hash, Pred, TrackingAllocator<std::pair<const Key, T>, kTag>>;
+template<class Key,
+ class T,
+ class EmptyFn,
+ AllocatorTag kTag,
+ class Hash = std::hash<Key>,
+ class Pred = std::equal_to<Key>>
+using AllocationTrackingHashMap = HashMap<
+ Key, T, EmptyFn, Hash, Pred, TrackingAllocator<std::pair<Key, T>, kTag>>;
} // namespace art
#endif // ART_RUNTIME_BASE_ALLOCATOR_H_
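
Note on the alias above: it is the HashMap counterpart of AllocationTrackingUnorderedMap, with one deliberate difference: the tracked pair type is std::pair<Key, T> rather than std::pair<const Key, T>, because HashMap's EmptyFn must be able to overwrite the key to mark a slot empty. A minimal sketch of instantiating it, with a hypothetical EmptyFn that reserves key 0 (illustrative names, not part of this change):

    // Sketch only: reserves key 0 as the empty-slot marker, mirroring the
    // dex_file_verifier.h usage later in this change.
    struct IllustrativeEmptyFn {
      void MakeEmpty(std::pair<uint32_t, uint16_t>& pair) const { pair.first = 0u; }
      bool IsEmpty(const std::pair<uint32_t, uint16_t>& pair) const { return pair.first == 0u; }
    };
    using IllustrativeMap = AllocationTrackingHashMap<uint32_t,
                                                      uint16_t,
                                                      IllustrativeEmptyFn,
                                                      kAllocatorTagDexFileVerifier>;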
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index a5f9d09..440d696 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -1416,7 +1416,12 @@
}
if (IsDataSectionType(type)) {
- offset_to_type_map_.Put(aligned_offset, type);
+ if (aligned_offset == 0u) {
+ ErrorStringPrintf("Item %u offset is 0", i);
+ return false;
+ }
+ DCHECK(offset_to_type_map_.Find(aligned_offset) == offset_to_type_map_.end());
+ offset_to_type_map_.Insert(std::pair<uint32_t, uint16_t>(aligned_offset, type));
}
aligned_offset = ptr_ - begin_;
@@ -1589,7 +1594,8 @@
}
bool DexFileVerifier::CheckOffsetToTypeMap(size_t offset, uint16_t type) {
- auto it = offset_to_type_map_.find(offset);
+ DCHECK_NE(offset, 0u);
+ auto it = offset_to_type_map_.Find(offset);
if (UNLIKELY(it == offset_to_type_map_.end())) {
ErrorStringPrintf("No data map entry found @ %zx; expected %x", offset, type);
return false;
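
Two things change semantically in the hunk above. First, the old SafeMap::Put checked for duplicate keys internally, while HashMap::Insert leaves that to the caller, hence the explicit DCHECK. Second, offset 0 must now be rejected up front because the map's EmptyFn (defined in dex_file_verifier.h below) reserves 0 as the empty-slot marker. The general pattern, as an illustrative sketch rather than ART code:

    // When a hash map reserves a key value as its empty-slot marker, reject
    // that value before insertion. Hypothetical helper, for illustration only.
    bool TryRecordOffset(uint32_t offset, uint16_t type) {
      if (offset == 0u) {
        return false;  // 0 is reserved as the empty-slot marker.
      }
      DCHECK(offset_to_type_map_.Find(offset) == offset_to_type_map_.end());
      offset_to_type_map_.Insert(std::make_pair(offset, type));
      return true;
    }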
diff --git a/runtime/dex_file_verifier.h b/runtime/dex_file_verifier.h
index 4f15357..6c63749 100644
--- a/runtime/dex_file_verifier.h
+++ b/runtime/dex_file_verifier.h
@@ -175,7 +175,35 @@
const char* const location_;
const DexFile::Header* const header_;
- AllocationTrackingSafeMap<uint32_t, uint16_t, kAllocatorTagDexFileVerifier> offset_to_type_map_;
+ struct OffsetTypeMapEmptyFn {
+ // Make a hash map slot empty by setting its offset to 0. Offset 0 is a valid dex file offset
+ // (it is the offset of the dex file header), but only data section items are stored in this
+ // map, and those always lie after the header, so 0 can never be a real key.
+ void MakeEmpty(std::pair<uint32_t, uint16_t>& pair) const {
+ pair.first = 0u;
+ }
+ // Check if a hash map slot is empty.
+ bool IsEmpty(const std::pair<uint32_t, uint16_t>& pair) const {
+ return pair.first == 0u;
+ }
+ };
+ struct OffsetTypeMapHashCompareFn {
+ // Hash function for offset.
+ size_t operator()(const uint32_t key) const {
+ return key;
+ }
+ // Equality function for offsets (stands in for std::equal_to).
+ bool operator()(const uint32_t a, const uint32_t b) const {
+ return a == b;
+ }
+ };
+ // Map from offset to dex file type; a HashMap is used instead of the previous SafeMap for performance.
+ AllocationTrackingHashMap<uint32_t,
+ uint16_t,
+ OffsetTypeMapEmptyFn,
+ kAllocatorTagDexFileVerifier,
+ OffsetTypeMapHashCompareFn,
+ OffsetTypeMapHashCompareFn> offset_to_type_map_;
const uint8_t* ptr_;
const void* previous_item_;
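
The EmptyFn contract is what lets an open-addressing table represent emptiness without extra metadata. A simplified linear-probe lookup shows why IsEmpty must be reliable; this illustrates the general technique, not ART's actual HashMap implementation (which also handles erasure and resizing):

    #include <cstddef>

    // Simplified linear-probe lookup over an open-addressing table.
    template <typename Pair, typename EmptyFn, typename Hash, typename Pred>
    const Pair* SketchFind(const Pair* slots, size_t capacity, uint32_t key,
                           const EmptyFn& empty_fn, const Hash& hash, const Pred& pred) {
      size_t index = hash(key) % capacity;
      for (size_t probes = 0; probes < capacity; ++probes) {
        if (empty_fn.IsEmpty(slots[index])) {
          return nullptr;  // An empty slot ends the probe chain: key absent.
        }
        if (pred(slots[index].first, key)) {
          return &slots[index];  // Found the entry for this key.
        }
        index = (index + 1) % capacity;
      }
      return nullptr;  // Table full and key absent.
    }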
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index d2d12af..e433b8d 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -36,13 +36,16 @@
namespace gc {
namespace collector {
+static constexpr size_t kDefaultGcMarkStackSize = 2 * MB;
+
ConcurrentCopying::ConcurrentCopying(Heap* heap, const std::string& name_prefix)
: GarbageCollector(heap,
name_prefix + (name_prefix.empty() ? "" : " ") +
"concurrent copying + mark sweep"),
region_space_(nullptr), gc_barrier_(new Barrier(0)),
gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack",
- 2 * MB, 2 * MB)),
+ kDefaultGcMarkStackSize,
+ kDefaultGcMarkStackSize)),
mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock),
thread_running_gc_(nullptr),
is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
@@ -577,6 +580,21 @@
Locks::mutator_lock_->SharedLock(self);
}
+// Grow the GC mark stack to twice its current capacity, keeping its contents.
+void ConcurrentCopying::ExpandGcMarkStack() {
+  DCHECK(gc_mark_stack_->IsFull());
+  const size_t new_size = gc_mark_stack_->Capacity() * 2;
+  // Save the current entries, since Resize() replaces the backing storage.
+  std::vector<StackReference<mirror::Object>> temp(gc_mark_stack_->Begin(),
+                                                   gc_mark_stack_->End());
+  gc_mark_stack_->Resize(new_size);
+  // Re-push the saved references onto the enlarged stack.
+  for (auto& ref : temp) {
+    gc_mark_stack_->PushBack(ref.AsMirrorPtr());
+  }
+  DCHECK(!gc_mark_stack_->IsFull());
+}
+
void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
CHECK_EQ(is_mark_stack_push_disallowed_.LoadRelaxed(), 0)
<< " " << to_ref << " " << PrettyTypeOf(to_ref);
@@ -587,7 +602,9 @@
if (self == thread_running_gc_) {
// If GC-running thread, use the GC mark stack instead of a thread-local mark stack.
CHECK(self->GetThreadLocalMarkStack() == nullptr);
- CHECK(!gc_mark_stack_->IsFull());
+ if (UNLIKELY(gc_mark_stack_->IsFull())) {
+ ExpandGcMarkStack();
+ }
gc_mark_stack_->PushBack(to_ref);
} else {
// Otherwise, use a thread-local mark stack.
@@ -621,7 +638,9 @@
} else if (mark_stack_mode == kMarkStackModeShared) {
// Access the shared GC mark stack with a lock.
MutexLock mu(self, mark_stack_lock_);
- CHECK(!gc_mark_stack_->IsFull());
+ if (UNLIKELY(gc_mark_stack_->IsFull())) {
+ ExpandGcMarkStack();
+ }
gc_mark_stack_->PushBack(to_ref);
} else {
CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
@@ -633,7 +652,9 @@
<< "Only GC-running thread should access the mark stack "
<< "in the GC exclusive mark stack mode";
// Access the GC mark stack without a lock.
- CHECK(!gc_mark_stack_->IsFull());
+ if (UNLIKELY(gc_mark_stack_->IsFull())) {
+ ExpandGcMarkStack();
+ }
gc_mark_stack_->PushBack(to_ref);
}
}
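
The three push sites above previously aborted on overflow via CHECK(!gc_mark_stack_->IsFull()); they now grow the stack on demand, which is also why the fixed 2 * MB literal became kDefaultGcMarkStackSize: it is an initial size, not a hard cap. ExpandGcMarkStack copies the entries out, resizes, and re-pushes them because the underlying ObjectStack's Resize evidently replaces the backing storage rather than preserving it (otherwise the copy would be unnecessary). The same double-on-full pattern in miniature, assuming a stack type with that Resize behavior (illustrative, not ART code):

    #include <vector>

    // Double-on-full expansion for a stack whose Resize() discards contents.
    template <typename Stack, typename T>
    void ExpandByCopy(Stack* stack) {
      std::vector<T> saved(stack->Begin(), stack->End());  // Snapshot entries.
      stack->Resize(stack->Capacity() * 2);                // Replaces storage.
      for (const T& value : saved) {
        stack->PushBack(value);                            // Restore entries.
      }
    }

Expansion is guarded with UNLIKELY at every call site, and the shared-mode path already holds mark_stack_lock_ around the check and push, so two threads cannot expand concurrently.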
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 8efad73..c32b19e 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -182,6 +182,7 @@
void ReenableWeakRefAccess(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
void DisableMarking() SHARED_REQUIRES(Locks::mutator_lock_);
void IssueDisableMarkingCheckpoint() SHARED_REQUIRES(Locks::mutator_lock_);
+ void ExpandGcMarkStack() SHARED_REQUIRES(Locks::mutator_lock_);
space::RegionSpace* region_space_; // The underlying region space.
std::unique_ptr<Barrier> gc_barrier_;