Merge "Make nested signal more generic"
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 0746e0c..ad436d0 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1804,6 +1804,7 @@
if (dex_cache_image_class_lookup_required_) {
MoveImageClassesToClassTable();
}
+ // TODO: why isn't this a ReaderMutexLock?
WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
for (std::pair<const size_t, GcRoot<mirror::Class> >& it : class_table_) {
mirror::Class* c = it.second.Read();
@@ -1813,18 +1814,75 @@
}
}
-static bool GetClassesVisitor(mirror::Class* c, void* arg) {
+static bool GetClassesVisitorSet(mirror::Class* c, void* arg) {
std::set<mirror::Class*>* classes = reinterpret_cast<std::set<mirror::Class*>*>(arg);
classes->insert(c);
return true;
}
+struct GetClassesVisitorArrayArg {
+ Handle<mirror::ObjectArray<mirror::Class>>* classes;
+ int32_t index;
+ bool success;
+};
+
+static bool GetClassesVisitorArray(mirror::Class* c, void* varg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ GetClassesVisitorArrayArg* arg = reinterpret_cast<GetClassesVisitorArrayArg*>(varg);
+ if (arg->index < (*arg->classes)->GetLength()) {
+ (*arg->classes)->Set(arg->index, c);
+ arg->index++;
+ return true;
+ } else {
+ arg->success = false;
+ return false;
+ }
+}
+
void ClassLinker::VisitClassesWithoutClassesLock(ClassVisitor* visitor, void* arg) {
- std::set<mirror::Class*> classes;
- VisitClasses(GetClassesVisitor, &classes);
- for (mirror::Class* klass : classes) {
- if (!visitor(klass, arg)) {
- return;
+ // TODO: it may be possible to avoid secondary storage if we iterate over dex caches. The problem
+ // is avoiding duplicates.
+ if (!kMovingClasses) {
+ std::set<mirror::Class*> classes;
+ VisitClasses(GetClassesVisitorSet, &classes);
+ for (mirror::Class* klass : classes) {
+ if (!visitor(klass, arg)) {
+ return;
+ }
+ }
+ } else {
+ Thread* self = Thread::Current();
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ObjectArray<mirror::Class>> classes =
+ hs.NewHandle<mirror::ObjectArray<mirror::Class>>(nullptr);
+ GetClassesVisitorArrayArg local_arg;
+ local_arg.classes = &classes;
+ local_arg.success = false;
+ // We size the array assuming classes won't be added to the class table during the visit.
+ // If this assumption fails we iterate again.
+ while (!local_arg.success) {
+ size_t class_table_size;
+ {
+ ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ class_table_size = class_table_.size();
+ }
+ mirror::Class* class_type = mirror::Class::GetJavaLangClass();
+ mirror::Class* array_of_class = FindArrayClass(self, &class_type);
+ classes.Assign(
+ mirror::ObjectArray<mirror::Class>::Alloc(self, array_of_class, class_table_size));
+ CHECK(classes.Get() != nullptr); // OOME.
+ local_arg.index = 0;
+ local_arg.success = true;
+ VisitClasses(GetClassesVisitorArray, &local_arg);
+ }
+ for (int32_t i = 0; i < classes->GetLength(); ++i) {
+      // If the class table shrank while the classes array was being created we expect null
+      // elements. If the class table grew then the while loop above repeats. Classes created
+      // after that loop has finished are not visited.
+ mirror::Class* klass = classes->Get(i);
+ if (klass != nullptr && !visitor(klass, arg)) {
+ return;
+ }
}
}
}
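
For reference, the retry scheme added above can be modeled outside ART. The sketch below is a simplified stand-in, not ART code: a std::mutex and std::unordered_map play the role of classlinker_classes_lock_ and class_table_, a plain function-pointer Visitor replaces ClassVisitor, and the Table/Class names are hypothetical. It shows the same pattern of sizing the copy under the lock, filling it, retrying if the table grew, and tolerating null slots if it shrank.

// Minimal sketch of the "copy then visit" retry loop; illustrative only.
#include <cstddef>
#include <mutex>
#include <unordered_map>
#include <vector>

struct Class {};  // Stand-in for mirror::Class.
using Visitor = bool (*)(Class*, void* arg);

class Table {
 public:
  void VisitWithoutLock(Visitor visitor, void* arg) {
    std::vector<Class*> snapshot;
    bool success = false;
    while (!success) {
      std::size_t size;
      {
        std::lock_guard<std::mutex> lock(mutex_);
        size = table_.size();
      }
      // Size the copy assuming nothing is added while we fill it below.
      snapshot.assign(size, nullptr);
      success = true;
      std::lock_guard<std::mutex> lock(mutex_);
      std::size_t index = 0;
      for (auto& entry : table_) {
        if (index >= snapshot.size()) {
          success = false;  // The table grew: resize and try again.
          break;
        }
        snapshot[index++] = entry.second;
      }
    }
    // Null slots remain if the table shrank while we copied.
    for (Class* klass : snapshot) {
      if (klass != nullptr && !visitor(klass, arg)) {
        return;
      }
    }
  }

 private:
  std::mutex mutex_;
  std::unordered_map<std::size_t, Class*> table_;
};

The trade-off is the same as in the patch: secondary storage avoids holding the lock while the visitor runs, at the cost of possibly missing classes created after the copy.
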
@@ -2309,7 +2367,9 @@
size_t end = declaring_class->NumVirtualMethods();
bool found = false;
for (size_t i = 0; i < end; i++) {
- if (declaring_class->GetVirtualMethod(i) == method) {
+ // Check method index instead of identity in case of duplicate method definitions.
+ if (method->GetDexMethodIndex() ==
+ declaring_class->GetVirtualMethod(i)->GetDexMethodIndex()) {
found = true;
break;
}
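
The change above swaps pointer identity for a dex-method-index comparison so the lookup still succeeds when a dex file defines the same method twice and only one copy is installed. A rough standalone illustration, using a hypothetical Method struct and helper name rather than mirror::ArtMethod:

// Sketch of why index comparison succeeds where pointer identity fails for
// duplicate method definitions. Hypothetical types; not ART code.
#include <cstdint>
#include <vector>

struct Method {
  uint32_t dex_method_index;
};

// Returns true if 'method' corresponds to some installed virtual method. A
// pointer-identity check (m == method) would miss a duplicate definition that
// was never installed; its dex method index still matches.
bool IsDeclaredVirtual(const std::vector<Method*>& virtuals, const Method* method) {
  for (const Method* m : virtuals) {
    if (m->dex_method_index == method->dex_method_index) {
      return true;
    }
  }
  return false;
}
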
@@ -2716,6 +2776,8 @@
klass->SetVirtualMethods(virtuals);
}
size_t class_def_method_index = 0;
+ uint32_t last_dex_method_index = DexFile::kDexNoIndex;
+ size_t last_class_def_method_index = 0;
for (size_t i = 0; it.HasNextDirectMethod(); i++, it.Next()) {
StackHandleScope<1> hs(self);
Handle<mirror::ArtMethod> method(hs.NewHandle(LoadMethod(self, dex_file, it, klass)));
@@ -2725,7 +2787,15 @@
}
klass->SetDirectMethod(i, method.Get());
LinkCode(method, oat_class, dex_file, it.GetMemberIndex(), class_def_method_index);
- method->SetMethodIndex(class_def_method_index);
+ uint32_t it_method_index = it.GetMemberIndex();
+ if (last_dex_method_index == it_method_index) {
+        // Duplicate method: reuse the class_def_method_index assigned to its previous occurrence.
+ method->SetMethodIndex(last_class_def_method_index);
+ } else {
+ method->SetMethodIndex(class_def_method_index);
+ last_dex_method_index = it_method_index;
+ last_class_def_method_index = class_def_method_index;
+ }
class_def_method_index++;
}
for (size_t i = 0; it.HasNextVirtualMethod(); i++, it.Next()) {
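
The direct-method loop above gives consecutive entries that share a dex method index the same method index, while class_def_method_index keeps advancing. A minimal, self-contained sketch of that assignment pattern, with made-up example data standing in for the ClassDataItemIterator:

// Sketch of assigning the same slot to consecutive entries that share a dex
// method index. Hypothetical example data; not ART code.
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  const uint32_t kNoIndex = 0xFFFFFFFF;                // Stand-in for DexFile::kDexNoIndex.
  std::vector<uint32_t> dex_indices = {3, 7, 7, 9};    // 7 is a duplicate definition.
  uint32_t last_dex_index = kNoIndex;
  size_t last_slot = 0;
  size_t slot = 0;
  for (uint32_t dex_index : dex_indices) {
    size_t assigned;
    if (dex_index == last_dex_index) {
      assigned = last_slot;  // Duplicate: reuse the earlier slot.
    } else {
      assigned = slot;
      last_dex_index = dex_index;
      last_slot = slot;
    }
    std::printf("dex index %u -> slot %zu\n", dex_index, assigned);
    ++slot;  // Like class_def_method_index, this advances even for duplicates.
  }
  return 0;
}
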
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index a7a68b7..7750c8e 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -237,12 +237,14 @@
}
void VisitClasses(ClassVisitor* visitor, void* arg)
- LOCKS_EXCLUDED(dex_lock_)
+ LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Less efficient variant of VisitClasses that doesn't hold the classlinker_classes_lock_
- // when calling the visitor.
+
+  // Less efficient variant of VisitClasses that copies the class_table_ into secondary storage
+  // so that it can visit individual classes without holding Locks::classlinker_classes_lock_.
+  // As that lock isn't held, this code can race with insertion and deletion of classes while
+  // the visitor is being called.
void VisitClassesWithoutClassesLock(ClassVisitor* visitor, void* arg)
- LOCKS_EXCLUDED(dex_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void VisitClassRoots(RootCallback* callback, void* arg, VisitRootFlags flags)
@@ -623,6 +625,33 @@
ConstHandle<mirror::ArtMethod> prototype)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Class* LookupClassFromTableLocked(const char* descriptor,
+ const mirror::ClassLoader* class_loader,
+ size_t hash)
+ SHARED_LOCKS_REQUIRED(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
+
+ mirror::Class* UpdateClass(const char* descriptor, mirror::Class* klass, size_t hash)
+ LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  void MoveImageClassesToClassTable() LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::Class* LookupClassFromImage(const char* descriptor)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // EnsureResolved is called to make sure that a class in the class_table_ has been resolved
+  // before returning it to the caller. It's the responsibility of the thread that placed the class
+ // in the table to make it resolved. The thread doing resolution must notify on the class' lock
+ // when resolution has occurred. This happens in mirror::Class::SetStatus. As resolution may
+ // retire a class, the version of the class in the table is returned and this may differ from
+ // the class passed in.
+ mirror::Class* EnsureResolved(Thread* self, const char* descriptor, mirror::Class* klass)
+ WARN_UNUSED SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void FixupTemporaryDeclaringClass(mirror::Class* temp_class, mirror::Class* new_class)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
std::vector<const DexFile*> boot_class_path_;
mutable ReaderWriterMutex dex_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
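
The EnsureResolved comment above describes a wait/notify protocol on the class object's monitor. As a rough model only (std::mutex and std::condition_variable standing in for the object monitor, a hypothetical FakeClass type, and the class-retirement/re-lookup detail omitted), the shape of that protocol is:

// Simplified model of the resolution wait/notify; not ART code.
#include <condition_variable>
#include <mutex>

struct FakeClass {
  std::mutex monitor;             // Stand-in for the object monitor.
  std::condition_variable cond;
  bool resolved = false;
  bool erroneous = false;
};

// The thread that placed the class in the table performs resolution and
// notifies, mirroring the notify described for mirror::Class::SetStatus.
void MarkResolved(FakeClass* klass) {
  std::lock_guard<std::mutex> lock(klass->monitor);
  klass->resolved = true;
  klass->cond.notify_all();
}

// Any other thread that found the class waits until it is resolved or
// resolution failed. The real EnsureResolved may also return a different
// class object if the original was retired; that part is omitted here.
bool WaitUntilResolved(FakeClass* klass) {
  std::unique_lock<std::mutex> lock(klass->monitor);
  klass->cond.wait(lock, [klass] { return klass->resolved || klass->erroneous; });
  return !klass->erroneous;
}
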
@@ -646,32 +675,6 @@
// the classes into the class_table_ to avoid dex cache based searches.
Atomic<uint32_t> failed_dex_cache_class_lookups_;
- mirror::Class* LookupClassFromTableLocked(const char* descriptor,
- const mirror::ClassLoader* class_loader,
- size_t hash)
- SHARED_LOCKS_REQUIRED(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
-
- mirror::Class* UpdateClass(const char* descriptor, mirror::Class* klass, size_t hash)
- LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- void MoveImageClassesToClassTable() LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::Class* LookupClassFromImage(const char* descriptor)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- // EnsureResolved is called to make sure that a class in the class_table_ has been resolved
- // before returning it to the caller. Its the responsibility of the thread that placed the class
- // in the table to make it resolved. The thread doing resolution must notify on the class' lock
- // when resolution has occurred. This happens in mirror::Class::SetStatus. As resolution may
- // retire a class, the version of the class in the table is returned and this may differ from
- // the class passed in.
- mirror::Class* EnsureResolved(Thread* self, const char* descriptor, mirror::Class* klass)
- WARN_UNUSED SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- void FixupTemporaryDeclaringClass(mirror::Class* temp_class, mirror::Class* new_class)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
// indexes into class_roots_.
// needs to be kept in sync with class_roots_descriptors_.
enum ClassRoot {
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index e1c532e..d834d4d 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -114,11 +114,6 @@
// We don't fail here because SetStackEndForStackOverflow will print better diagnostics.
}
- if (Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()) {
- // Remove extra entry pushed onto second stack during method tracing.
- Runtime::Current()->GetInstrumentation()->PopMethodForUnwind(self, false);
- }
-
self->SetStackEndForStackOverflow(); // Allow space on the stack for constructor to execute.
JNIEnvExt* env = self->GetJniEnv();
std::string msg("stack size ");
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 5d0d8d2..494781a 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -120,7 +120,7 @@
desired_collector_type_(foreground_collector_type_),
heap_trim_request_lock_(nullptr),
last_trim_time_(0),
- heap_transition_or_trim_target_time_(0),
+ last_heap_transition_time_(0),
heap_trim_request_pending_(false),
parallel_gc_threads_(parallel_gc_threads),
conc_gc_threads_(conc_gc_threads),
@@ -916,35 +916,6 @@
}
void Heap::DoPendingTransitionOrTrim() {
- Thread* self = Thread::Current();
- CollectorType desired_collector_type;
- // Wait until we reach the desired transition time.
- while (true) {
- uint64_t wait_time;
- {
- MutexLock mu(self, *heap_trim_request_lock_);
- desired_collector_type = desired_collector_type_;
- uint64_t current_time = NanoTime();
- if (current_time >= heap_transition_or_trim_target_time_) {
- break;
- }
- wait_time = heap_transition_or_trim_target_time_ - current_time;
- }
- ScopedThreadStateChange tsc(self, kSleeping);
- usleep(wait_time / 1000); // Usleep takes microseconds.
- }
- // Launch homogeneous space compaction if it is desired.
- if (desired_collector_type == kCollectorTypeHomogeneousSpaceCompact) {
- if (!CareAboutPauseTimes()) {
- PerformHomogeneousSpaceCompact();
- }
- // No need to Trim(). Homogeneous space compaction may free more virtual and physical memory.
- desired_collector_type = collector_type_;
- return;
- }
- // Transition the collector if the desired collector type is not the same as the current
- // collector type.
- TransitionCollector(desired_collector_type);
if (!CareAboutPauseTimes()) {
// Deflate the monitors, this can cause a pause but shouldn't matter since we don't care
// about pauses.
@@ -956,7 +927,23 @@
<< PrettyDuration(NanoTime() - start_time);
runtime->GetThreadList()->ResumeAll();
}
- // Do a heap trim if it is needed.
+ if (NanoTime() - last_heap_transition_time_ > kCollectorTransitionWait) {
+ // Launch homogeneous space compaction if it is desired.
+ if (desired_collector_type_ == kCollectorTypeHomogeneousSpaceCompact) {
+ if (!CareAboutPauseTimes()) {
+ PerformHomogeneousSpaceCompact();
+ last_heap_transition_time_ = NanoTime();
+ }
+ desired_collector_type_ = collector_type_;
+ } else {
+ // Transition the collector if the desired collector type is not the same as the current
+ // collector type.
+ TransitionCollector(desired_collector_type_);
+ last_heap_transition_time_ = NanoTime();
+ }
+ }
+ // Do a heap trim if it is needed. This is good to do even with hspace compaction since it may
+ // trim the native heap and dlmalloc spaces.
Trim();
}
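
The rewritten DoPendingTransitionOrTrim above replaces the old target-time sleep with an elapsed-time check: the daemon acts as soon as it is signalled, skips the collector transition or hspace compaction unless kCollectorTransitionWait has passed since the last one, and always trims. A generic sketch of that throttle, with placeholder functions, a hypothetical signature, and an assumed wait value rather than ART's constant:

// Sketch of the elapsed-time throttle; not the Heap implementation.
#include <chrono>

using Clock = std::chrono::steady_clock;

// Assumed wait; ART's kCollectorTransitionWait value is not shown in this diff.
constexpr std::chrono::seconds kTransitionWait(5);

void Transition() {}  // Placeholder for TransitionCollector / hspace compaction.
void Trim() {}        // Placeholder for Heap::Trim.

void DoPendingTransitionOrTrim(Clock::time_point* last_transition_time) {
  const Clock::time_point now = Clock::now();
  if (now - *last_transition_time > kTransitionWait) {
    Transition();
    *last_transition_time = Clock::now();
  }
  // Trimming runs on every request, even right after a transition, since it
  // can still release native and dlmalloc memory.
  Trim();
}
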
@@ -2990,8 +2977,6 @@
if (desired_collector_type_ == desired_collector_type) {
return;
}
- heap_transition_or_trim_target_time_ =
- std::max(heap_transition_or_trim_target_time_, NanoTime() + delta_time);
desired_collector_type_ = desired_collector_type;
}
SignalHeapTrimDaemon(self);
@@ -3027,10 +3012,6 @@
return;
}
heap_trim_request_pending_ = true;
- uint64_t current_time = NanoTime();
- if (heap_transition_or_trim_target_time_ < current_time) {
- heap_transition_or_trim_target_time_ = current_time + kHeapTrimWait;
- }
}
// Notify the daemon thread which will actually do the heap trim.
SignalHeapTrimDaemon(self);
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 9742277..3bfa748 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -830,8 +830,8 @@
Mutex* heap_trim_request_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
// When we want to perform the next heap trim (nano seconds).
uint64_t last_trim_time_ GUARDED_BY(heap_trim_request_lock_);
- // When we want to perform the next heap transition (nano seconds) or heap trim.
- uint64_t heap_transition_or_trim_target_time_ GUARDED_BY(heap_trim_request_lock_);
+  // When we last performed a heap transition or hspace compaction.
+ uint64_t last_heap_transition_time_;
// If we have a heap trim request pending.
bool heap_trim_request_pending_ GUARDED_BY(heap_trim_request_lock_);
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 7da57dd..773a950 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -193,21 +193,6 @@
TEST_ART_BROKEN_NO_RELOCATE_TESTS :=
-# Tests that are broken with tracing.
-TEST_ART_BROKEN_TRACE_RUN_TESTS := \
- 004-SignalTest \
- 018-stack-overflow \
- 097-duplicate-method \
- 107-int-math2
-
-ifneq (,$(filter trace,$(TRACE_TYPES)))
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(PREBUILD_TYPES), \
- $(COMPILER_TYPES),$(RELOCATE_TYPES),trace,$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES), $(TEST_ART_BROKEN_TRACE_RUN_TESTS), $(ALL_ADDRESS_SIZES))
-endif
-
-TEST_ART_BROKEN_TRACE_RUN_TESTS :=
-
# Tests that are broken with GC stress.
TEST_ART_BROKEN_GCSTRESS_RUN_TESTS := \
004-SignalTest