Merge "Add DCHECK() for class loader in artResolveStringFromCode()."
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index d807fca..9711516 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -98,7 +98,7 @@
DCHECK(!resolved_field->IsStatic());
ObjPtr<mirror::Class> fields_class = resolved_field->GetDeclaringClass();
bool fast_get = referrer_class != nullptr &&
- referrer_class->CanAccessResolvedField(fields_class.Ptr(),
+ referrer_class->CanAccessResolvedField(fields_class,
resolved_field,
dex_cache,
field_idx);
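
The hunk above works because ObjPtr is built to be passed by value wherever a wrapped reference is expected, so the explicit .Ptr() unwrap becomes redundant once CanAccessResolvedField() takes an ObjPtr parameter. A minimal sketch of that conversion behavior, assuming a simplified wrapper rather than ART's real ObjPtr (which additionally carries debug-build validity cookies):

template <typename T>
class ObjPtrSketch {
 public:
  ObjPtrSketch(T* ptr) : ptr_(ptr) {}  // Implicit on purpose: raw pointers convert in.
  T* Ptr() const { return ptr_; }      // Explicit escape hatch back to a raw pointer.
 private:
  T* ptr_;
};

struct ClassSketch {};

// Once the callee accepts the wrapper, call sites can drop .Ptr() entirely.
bool CanAccessSketch(ObjPtrSketch<ClassSketch> fields_class) {
  return fields_class.Ptr() != nullptr;
}
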
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 13c73dc..83c7332 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -1492,10 +1492,15 @@
// Calculate how big the intern table will be after being serialized.
InternTable* const intern_table = image_info.intern_table_.get();
CHECK_EQ(intern_table->WeakSize(), 0u) << " should have strong interned all the strings";
- image_info.intern_table_bytes_ = intern_table->WriteToMemory(nullptr);
+ if (intern_table->StrongSize() != 0u) {
+ image_info.intern_table_bytes_ = intern_table->WriteToMemory(nullptr);
+ }
// Calculate the size of the class table.
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
- image_info.class_table_bytes_ += image_info.class_table_->WriteToMemory(nullptr);
+ DCHECK_EQ(image_info.class_table_->NumZygoteClasses(), 0u);
+ if (image_info.class_table_->NumNonZygoteClasses() != 0u) {
+ image_info.class_table_bytes_ += image_info.class_table_->WriteToMemory(nullptr);
+ }
}
// Calculate bin slot offsets.
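
Both guards rely on the same two-pass serialization idiom: WriteToMemory(nullptr) is a dry run that only returns the byte count, while passing a real buffer performs the write. Skipping the dry run for an empty table leaves intern_table_bytes_ / class_table_bytes_ at zero, so no empty table section is reserved in the image. A hedged sketch of the idiom with illustrative names, not ART's actual table layout:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

struct TableSketch {
  std::vector<uint32_t> entries;

  // Returns the bytes required; writes them only when dest is non-null.
  size_t WriteToMemory(uint8_t* dest) const {
    const size_t bytes = entries.size() * sizeof(uint32_t);
    if (dest != nullptr && bytes != 0) {
      std::memcpy(dest, entries.data(), bytes);
    }
    return bytes;
  }
};
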
diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h
index 3b24aab..24bbcfb 100644
--- a/runtime/art_field-inl.h
+++ b/runtime/art_field-inl.h
@@ -116,9 +116,9 @@
DCHECK(object != nullptr) << PrettyField(this);
DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
if (UNLIKELY(IsVolatile())) {
- object->SetFieldObjectVolatile<kTransactionActive>(GetOffset(), new_value.Ptr());
+ object->SetFieldObjectVolatile<kTransactionActive>(GetOffset(), new_value);
} else {
- object->SetFieldObject<kTransactionActive>(GetOffset(), new_value.Ptr());
+ object->SetFieldObject<kTransactionActive>(GetOffset(), new_value);
}
}
diff --git a/runtime/base/hash_set.h b/runtime/base/hash_set.h
index 12d3be7..f24a862 100644
--- a/runtime/base/hash_set.h
+++ b/runtime/base/hash_set.h
@@ -296,7 +296,7 @@
return const_iterator(this, NumBuckets());
}
- bool Empty() {
+ bool Empty() const {
return Size() == 0;
}
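
Marking Empty() const is what lets read-only code paths call it through a const HashSet reference; a non-const member function simply fails to compile there. A minimal illustration, assuming Size() is itself const as it is in hash_set.h:

#include <cstddef>

struct SetSketch {
  size_t size_ = 0;
  size_t Size() const { return size_; }
  bool Empty() const { return Size() == 0; }  // Must be const to be callable below.
};

bool IsEmpty(const SetSketch& s) { return s.Empty(); }  // Compiles only with const Empty().
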
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 1183dea..e77e6d7 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -64,6 +64,8 @@
Mutex* Locks::trace_lock_ = nullptr;
Mutex* Locks::unexpected_signal_lock_ = nullptr;
Uninterruptible Roles::uninterruptible_;
+ReaderWriterMutex* Locks::jni_globals_lock_ = nullptr;
+Mutex* Locks::jni_weak_globals_lock_ = nullptr;
struct AllMutexData {
// A guard for all_mutexes_ that's not a mutex (Mutexes must CAS to acquire and busy wait).
@@ -1088,6 +1090,15 @@
DCHECK(reference_queue_soft_references_lock_ == nullptr);
reference_queue_soft_references_lock_ = new Mutex("ReferenceQueue soft references lock", current_lock_level);
+ UPDATE_CURRENT_LOCK_LEVEL(kJniGlobalsLock);
+ DCHECK(jni_globals_lock_ == nullptr);
+ jni_globals_lock_ =
+ new ReaderWriterMutex("JNI global reference table lock", current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kJniWeakGlobalsLock);
+ DCHECK(jni_weak_globals_lock_ == nullptr);
+ jni_weak_globals_lock_ = new Mutex("JNI weak global reference table lock", current_lock_level);
+
UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
DCHECK(abort_lock_ == nullptr);
abort_lock_ = new Mutex("abort lock", current_lock_level, true);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index b3ff6c2..e0cca7b 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -68,6 +68,7 @@
kMarkSweepMarkStackLock,
kTransactionLogLock,
kJniWeakGlobalsLock,
+ kJniGlobalsLock,
kReferenceQueueSoftReferencesLock,
kReferenceQueuePhantomReferencesLock,
kReferenceQueueFinalizerReferencesLock,
@@ -678,8 +679,14 @@
// Guards soft references queue.
static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);
+ // Guard accesses to the JNI Global Reference table.
+ static ReaderWriterMutex* jni_globals_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);
+
+ // Guard accesses to the JNI Weak Global Reference table.
+ static Mutex* jni_weak_globals_lock_ ACQUIRED_AFTER(jni_globals_lock_);
+
// Have an exclusive aborting thread.
- static Mutex* abort_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);
+ static Mutex* abort_lock_ ACQUIRED_AFTER(jni_weak_globals_lock_);
// Allow mutual exclusion when manipulating Thread::suspend_count_.
// TODO: Does the trade-off of a per-thread lock make sense?
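
The two new locks slot into ART's leveled-lock hierarchy: UPDATE_CURRENT_LOCK_LEVEL hands out strictly ordered levels during Locks::Init(), and ACQUIRED_AFTER documents the same ordering for the static analysis annotations, so jni_globals_lock_ must be taken before jni_weak_globals_lock_, which in turn precedes abort_lock_. A hedged sketch of the runtime side of such a check; ART's real enforcement lives in BaseMutex with per-thread held-lock bookkeeping:

#include <cassert>
#include <mutex>

thread_local int g_lowest_held_level = 1 << 30;  // Sentinel: no lock held yet.

class LeveledMutex {
 public:
  explicit LeveledMutex(int level) : level_(level) {}

  void Lock() {
    // A thread may only acquire locks at strictly lower levels than it already holds.
    assert(level_ < g_lowest_held_level && "lock acquired out of order");
    mu_.lock();
    saved_level_ = g_lowest_held_level;
    g_lowest_held_level = level_;
  }

  void Unlock() {
    g_lowest_held_level = saved_level_;
    mu_.unlock();
  }

 private:
  const int level_;
  int saved_level_ = 0;  // Written only by the current holder, so no extra sync needed.
  std::mutex mu_;
};
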
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 14cbf24..239cdae 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -316,7 +316,6 @@
ClassLinker::ClassLinker(InternTable* intern_table)
// dex_lock_ is recursive as it may be used in stack dumping.
: dex_lock_("ClassLinker dex lock", kDexLock),
- dex_cache_boot_image_class_lookup_required_(false),
failed_dex_cache_class_lookups_(0),
class_roots_(nullptr),
array_iftable_(nullptr),
@@ -969,7 +968,6 @@
return false;
}
}
- dex_cache_boot_image_class_lookup_required_ = true;
std::vector<const OatFile*> oat_files =
runtime->GetOatFileManager().RegisterImageOatFiles(spaces);
DCHECK(!oat_files.empty());
@@ -1256,7 +1254,6 @@
// Add image classes into the class table for the class loader, and fixup the dex caches and
// class loader fields.
WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
- ClassTable* table = InsertClassTableForClassLoader(class_loader.Get());
// Dex cache array fixup is all or nothing; we must reject app images that have a mix, since we
// rely on clobbering the dex cache arrays in the image to forward to the .bss.
size_t num_dex_caches_with_bss_arrays = 0;
@@ -1392,103 +1389,42 @@
StackHandleScope<1> hs3(self);
RegisterDexFileLocked(*dex_file, hs3.NewHandle(dex_cache));
}
- GcRoot<mirror::Class>* const types = dex_cache->GetResolvedTypes();
- const size_t num_types = dex_cache->NumResolvedTypes();
- if (new_class_set == nullptr) {
- for (int32_t j = 0; j < static_cast<int32_t>(num_types); j++) {
- // The image space is not yet added to the heap, avoid read barriers.
- mirror::Class* klass = types[j].Read();
- // There may also be boot image classes,
- if (space->HasAddress(klass)) {
- DCHECK_NE(klass->GetStatus(), mirror::Class::kStatusError);
- // Update the class loader from the one in the image class loader to the one that loaded
- // the app image.
- klass->SetClassLoader(class_loader.Get());
- // The resolved type could be from another dex cache, go through the dex cache just in
- // case. May be null for array classes.
- if (klass->GetDexCacheStrings() != nullptr) {
- DCHECK(!klass->IsArrayClass());
- klass->SetDexCacheStrings(klass->GetDexCache()->GetStrings());
- }
- // If there are multiple dex caches, there may be the same class multiple times
- // in different dex caches. Check for this since inserting will add duplicates
- // otherwise.
- if (num_dex_caches > 1) {
- mirror::Class* existing = table->LookupByDescriptor(klass);
- if (existing != nullptr) {
- DCHECK_EQ(existing, klass) << PrettyClass(klass);
- } else {
- table->Insert(klass);
- }
- } else {
- table->Insert(klass);
- }
- // Double checked VLOG to avoid overhead.
- if (VLOG_IS_ON(image)) {
- VLOG(image) << PrettyClass(klass) << " " << klass->GetStatus();
- if (!klass->IsArrayClass()) {
- VLOG(image) << "From " << klass->GetDexCache()->GetDexFile()->GetBaseLocation();
- }
- VLOG(image) << "Direct methods";
- for (ArtMethod& m : klass->GetDirectMethods(kRuntimePointerSize)) {
- VLOG(image) << PrettyMethod(&m);
- }
- VLOG(image) << "Virtual methods";
- for (ArtMethod& m : klass->GetVirtualMethods(kRuntimePointerSize)) {
- VLOG(image) << PrettyMethod(&m);
- }
- }
- } else {
- DCHECK(klass == nullptr || heap->ObjectIsInBootImageSpace(klass))
- << klass << " " << PrettyClass(klass);
- }
- }
- }
if (kIsDebugBuild) {
+ CHECK(new_class_set != nullptr);
+ GcRoot<mirror::Class>* const types = dex_cache->GetResolvedTypes();
+ const size_t num_types = dex_cache->NumResolvedTypes();
for (int32_t j = 0; j < static_cast<int32_t>(num_types); j++) {
// The image space is not yet added to the heap, avoid read barriers.
mirror::Class* klass = types[j].Read();
if (space->HasAddress(klass)) {
DCHECK_NE(klass->GetStatus(), mirror::Class::kStatusError);
- if (kIsDebugBuild) {
- if (new_class_set != nullptr) {
- auto it = new_class_set->Find(GcRoot<mirror::Class>(klass));
- DCHECK(it != new_class_set->end());
- DCHECK_EQ(it->Read(), klass);
- mirror::Class* super_class = klass->GetSuperClass();
- if (super_class != nullptr && !heap->ObjectIsInBootImageSpace(super_class)) {
- auto it2 = new_class_set->Find(GcRoot<mirror::Class>(super_class));
- DCHECK(it2 != new_class_set->end());
- DCHECK_EQ(it2->Read(), super_class);
- }
- } else {
- DCHECK_EQ(table->LookupByDescriptor(klass), klass);
- mirror::Class* super_class = klass->GetSuperClass();
- if (super_class != nullptr && !heap->ObjectIsInBootImageSpace(super_class)) {
- CHECK_EQ(table->LookupByDescriptor(super_class), super_class);
- }
+ auto it = new_class_set->Find(GcRoot<mirror::Class>(klass));
+ DCHECK(it != new_class_set->end());
+ DCHECK_EQ(it->Read(), klass);
+ mirror::Class* super_class = klass->GetSuperClass();
+ if (super_class != nullptr && !heap->ObjectIsInBootImageSpace(super_class)) {
+ auto it2 = new_class_set->Find(GcRoot<mirror::Class>(super_class));
+ DCHECK(it2 != new_class_set->end());
+ DCHECK_EQ(it2->Read(), super_class);
+ }
+ for (ArtMethod& m : klass->GetDirectMethods(kRuntimePointerSize)) {
+ const void* code = m.GetEntryPointFromQuickCompiledCode();
+ const void* oat_code = m.IsInvokable() ? GetQuickOatCodeFor(&m) : code;
+ if (!IsQuickResolutionStub(code) &&
+ !IsQuickGenericJniStub(code) &&
+ !IsQuickToInterpreterBridge(code) &&
+ !m.IsNative()) {
+ DCHECK_EQ(code, oat_code) << PrettyMethod(&m);
}
}
- if (kIsDebugBuild) {
- for (ArtMethod& m : klass->GetDirectMethods(kRuntimePointerSize)) {
- const void* code = m.GetEntryPointFromQuickCompiledCode();
- const void* oat_code = m.IsInvokable() ? GetQuickOatCodeFor(&m) : code;
- if (!IsQuickResolutionStub(code) &&
- !IsQuickGenericJniStub(code) &&
- !IsQuickToInterpreterBridge(code) &&
- !m.IsNative()) {
- DCHECK_EQ(code, oat_code) << PrettyMethod(&m);
- }
- }
- for (ArtMethod& m : klass->GetVirtualMethods(kRuntimePointerSize)) {
- const void* code = m.GetEntryPointFromQuickCompiledCode();
- const void* oat_code = m.IsInvokable() ? GetQuickOatCodeFor(&m) : code;
- if (!IsQuickResolutionStub(code) &&
- !IsQuickGenericJniStub(code) &&
- !IsQuickToInterpreterBridge(code) &&
- !m.IsNative()) {
- DCHECK_EQ(code, oat_code) << PrettyMethod(&m);
- }
+ for (ArtMethod& m : klass->GetVirtualMethods(kRuntimePointerSize)) {
+ const void* code = m.GetEntryPointFromQuickCompiledCode();
+ const void* oat_code = m.IsInvokable() ? GetQuickOatCodeFor(&m) : code;
+ if (!IsQuickResolutionStub(code) &&
+ !IsQuickGenericJniStub(code) &&
+ !IsQuickToInterpreterBridge(code) &&
+ !m.IsNative()) {
+ DCHECK_EQ(code, oat_code) << PrettyMethod(&m);
}
}
}
@@ -1805,9 +1741,6 @@
temp_set = ClassTable::ClassSet(space->Begin() + class_table_section.Offset(),
/*make copy*/false,
&read_count);
- if (!app_image) {
- dex_cache_boot_image_class_lookup_required_ = false;
- }
VLOG(image) << "Adding class table classes took " << PrettyDuration(NanoTime() - start_time2);
}
if (app_image) {
@@ -1815,7 +1748,7 @@
if (!UpdateAppImageClassLoadersAndDexCaches(space,
class_loader,
dex_caches,
- added_class_table ? &temp_set : nullptr,
+ &temp_set,
/*out*/&forward_dex_cache_arrays,
/*out*/error_msg)) {
return false;
@@ -1825,10 +1758,8 @@
UpdateClassLoaderAndResolvedStringsVisitor visitor(space,
class_loader.Get(),
forward_dex_cache_arrays);
- if (added_class_table) {
- for (GcRoot<mirror::Class>& root : temp_set) {
- visitor(root.Read());
- }
+ for (GcRoot<mirror::Class>& root : temp_set) {
+ visitor(root.Read());
}
// forward_dex_cache_arrays is true iff we copied all of the dex cache arrays into the .bss.
// In this case, madvise away the dex cache arrays section of the image to reduce RAM usage and
@@ -1962,9 +1893,6 @@
}
void ClassLinker::VisitClasses(ClassVisitor* visitor) {
- if (dex_cache_boot_image_class_lookup_required_) {
- AddBootImageClassesToClassTable();
- }
Thread* const self = Thread::Current();
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
// Not safe to have thread suspension when we are holding a lock.
@@ -3608,17 +3536,6 @@
if (existing != nullptr) {
return existing;
}
- if (kIsDebugBuild &&
- !klass->IsTemp() &&
- class_loader == nullptr &&
- dex_cache_boot_image_class_lookup_required_) {
- // Check a class loaded with the system class loader matches one in the image if the class
- // is in the image.
- existing = LookupClassFromBootImage(descriptor);
- if (existing != nullptr) {
- CHECK_EQ(klass, existing);
- }
- }
VerifyObject(klass);
class_table->InsertWithHash(klass, hash);
if (class_loader != nullptr) {
@@ -3658,90 +3575,15 @@
const char* descriptor,
size_t hash,
mirror::ClassLoader* class_loader) {
- {
- ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
- ClassTable* const class_table = ClassTableForClassLoader(class_loader);
- if (class_table != nullptr) {
- mirror::Class* result = class_table->Lookup(descriptor, hash);
- if (result != nullptr) {
- return result;
- }
+ ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
+ ClassTable* const class_table = ClassTableForClassLoader(class_loader);
+ if (class_table != nullptr) {
+ mirror::Class* result = class_table->Lookup(descriptor, hash);
+ if (result != nullptr) {
+ return result;
}
}
- if (class_loader != nullptr || !dex_cache_boot_image_class_lookup_required_) {
- return nullptr;
- }
- // Lookup failed but need to search dex_caches_.
- mirror::Class* result = LookupClassFromBootImage(descriptor);
- if (result != nullptr) {
- result = InsertClass(descriptor, result, hash);
- } else {
- // Searching the image dex files/caches failed, we don't want to get into this situation
- // often as map searches are faster, so after kMaxFailedDexCacheLookups move all image
- // classes into the class table.
- constexpr uint32_t kMaxFailedDexCacheLookups = 1000;
- if (++failed_dex_cache_class_lookups_ > kMaxFailedDexCacheLookups) {
- AddBootImageClassesToClassTable();
- }
- }
- return result;
-}
-
-static std::vector<mirror::ObjectArray<mirror::DexCache>*> GetImageDexCaches(
- std::vector<gc::space::ImageSpace*> image_spaces) REQUIRES_SHARED(Locks::mutator_lock_) {
- CHECK(!image_spaces.empty());
- std::vector<mirror::ObjectArray<mirror::DexCache>*> dex_caches_vector;
- for (gc::space::ImageSpace* image_space : image_spaces) {
- mirror::Object* root = image_space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
- DCHECK(root != nullptr);
- dex_caches_vector.push_back(root->AsObjectArray<mirror::DexCache>());
- }
- return dex_caches_vector;
-}
-
-void ClassLinker::AddBootImageClassesToClassTable() {
- if (dex_cache_boot_image_class_lookup_required_) {
- AddImageClassesToClassTable(Runtime::Current()->GetHeap()->GetBootImageSpaces(),
- /*class_loader*/nullptr);
- dex_cache_boot_image_class_lookup_required_ = false;
- }
-}
-
-void ClassLinker::AddImageClassesToClassTable(std::vector<gc::space::ImageSpace*> image_spaces,
- mirror::ClassLoader* class_loader) {
- Thread* self = Thread::Current();
- WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
- ScopedAssertNoThreadSuspension ants("Moving image classes to class table");
-
- ClassTable* const class_table = InsertClassTableForClassLoader(class_loader);
-
- std::string temp;
- std::vector<mirror::ObjectArray<mirror::DexCache>*> dex_caches_vector =
- GetImageDexCaches(image_spaces);
- for (mirror::ObjectArray<mirror::DexCache>* dex_caches : dex_caches_vector) {
- for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
- mirror::DexCache* dex_cache = dex_caches->Get(i);
- GcRoot<mirror::Class>* types = dex_cache->GetResolvedTypes();
- for (int32_t j = 0, num_types = dex_cache->NumResolvedTypes(); j < num_types; j++) {
- mirror::Class* klass = types[j].Read();
- if (klass != nullptr) {
- DCHECK_EQ(klass->GetClassLoader(), class_loader);
- const char* descriptor = klass->GetDescriptor(&temp);
- size_t hash = ComputeModifiedUtf8Hash(descriptor);
- mirror::Class* existing = class_table->Lookup(descriptor, hash);
- if (existing != nullptr) {
- CHECK_EQ(existing, klass) << PrettyClassAndClassLoader(existing) << " != "
- << PrettyClassAndClassLoader(klass);
- } else {
- class_table->Insert(klass);
- if (log_new_class_table_roots_) {
- new_class_roots_.push_back(GcRoot<mirror::Class>(klass));
- }
- }
- }
- }
- }
- }
+ return nullptr;
}
class MoveClassTableToPreZygoteVisitor : public ClassLoaderVisitor {
@@ -3765,28 +3607,6 @@
VisitClassLoaders(&visitor);
}
-mirror::Class* ClassLinker::LookupClassFromBootImage(const char* descriptor) {
- ScopedAssertNoThreadSuspension ants("Image class lookup");
- std::vector<mirror::ObjectArray<mirror::DexCache>*> dex_caches_vector =
- GetImageDexCaches(Runtime::Current()->GetHeap()->GetBootImageSpaces());
- for (mirror::ObjectArray<mirror::DexCache>* dex_caches : dex_caches_vector) {
- for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
- mirror::DexCache* dex_cache = dex_caches->Get(i);
- const DexFile* dex_file = dex_cache->GetDexFile();
- // Try binary searching the type index by descriptor.
- const DexFile::TypeId* type_id = dex_file->FindTypeId(descriptor);
- if (type_id != nullptr) {
- uint16_t type_idx = dex_file->GetIndexForTypeId(*type_id);
- mirror::Class* klass = dex_cache->GetResolvedType(type_idx);
- if (klass != nullptr) {
- return klass;
- }
- }
- }
- }
- return nullptr;
-}
-
// Look up classes by hash and descriptor and put all matching ones in the result array.
class LookupClassesVisitor : public ClassLoaderVisitor {
public:
@@ -3812,9 +3632,6 @@
void ClassLinker::LookupClasses(const char* descriptor, std::vector<mirror::Class*>& result) {
result.clear();
- if (dex_cache_boot_image_class_lookup_required_) {
- AddBootImageClassesToClassTable();
- }
Thread* const self = Thread::Current();
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
const size_t hash = ComputeModifiedUtf8Hash(descriptor);
@@ -4202,9 +4019,9 @@
// Set the class access flags incl. VerificationAttempted, so we do not try to set the flag on
// the methods.
klass->SetAccessFlags(kAccClassIsProxy | kAccPublic | kAccFinal | kAccVerificationAttempted);
- klass->SetClassLoader(soa.Decode<mirror::ClassLoader>(loader).Ptr());
+ klass->SetClassLoader(soa.Decode<mirror::ClassLoader>(loader));
DCHECK_EQ(klass->GetPrimitiveType(), Primitive::kPrimNot);
- klass->SetName(soa.Decode<mirror::String>(name).Ptr());
+ klass->SetName(soa.Decode<mirror::String>(name));
klass->SetDexCache(GetClassRoot(kJavaLangReflectProxy)->GetDexCache());
mirror::Class::SetStatus(klass, mirror::Class::kStatusIdx, self);
std::string descriptor(GetDescriptorForProxy(klass.Get()));
@@ -5217,14 +5034,6 @@
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
}
CHECK_EQ(existing, klass.Get());
- if (kIsDebugBuild && class_loader == nullptr && dex_cache_boot_image_class_lookup_required_) {
- // Check a class loaded with the system class loader matches one in the image if the class
- // is in the image.
- mirror::Class* const image_class = LookupClassFromBootImage(descriptor);
- if (image_class != nullptr) {
- CHECK_EQ(klass.Get(), existing) << descriptor;
- }
- }
if (log_new_class_table_roots_) {
new_class_roots_.push_back(GcRoot<mirror::Class>(h_new_class.Get()));
}
@@ -8093,9 +7902,6 @@
void ClassLinker::DumpForSigQuit(std::ostream& os) {
ScopedObjectAccess soa(Thread::Current());
- if (dex_cache_boot_image_class_lookup_required_) {
- AddBootImageClassesToClassTable();
- }
ReaderMutexLock mu(soa.Self(), *Locks::classlinker_classes_lock_);
os << "Zygote loaded classes=" << NumZygoteClasses() << " post zygote classes="
<< NumNonZygoteClasses() << "\n";
@@ -8131,9 +7937,6 @@
}
size_t ClassLinker::NumLoadedClasses() {
- if (dex_cache_boot_image_class_lookup_required_) {
- AddBootImageClassesToClassTable();
- }
ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
// Only return non-zygote classes since these are the ones that apps care about.
return NumNonZygoteClasses();
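
With dex_cache_boot_image_class_lookup_required_ gone, LookupClass() reduces to a single per-class-loader table probe: boot image classes are assumed to always be present in a serialized class table, so a miss is final and there is no dex-cache scan to fall back to. A hedged sketch of what the remaining lookup amounts to (illustrative names; ART's ClassTable actually hashes modified-UTF-8 descriptors and stores GcRoots):

#include <string>
#include <unordered_map>

struct ClassSketch {};

struct ClassTableSketch {
  std::unordered_map<std::string, ClassSketch*> classes_;

  ClassSketch* Lookup(const std::string& descriptor) const {
    auto it = classes_.find(descriptor);
    return it != classes_.end() ? it->second : nullptr;  // Miss means "not loaded".
  }
};
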
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 43ffc8e..70cc768 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -561,17 +561,6 @@
return class_roots;
}
- // Move all of the boot image classes into the class table for faster lookups.
- void AddBootImageClassesToClassTable()
- REQUIRES(!Locks::classlinker_classes_lock_)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- // Add image classes to the class table.
- void AddImageClassesToClassTable(std::vector<gc::space::ImageSpace*> image_spaces,
- mirror::ClassLoader* class_loader)
- REQUIRES(!Locks::classlinker_classes_lock_)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Move the class table to the pre-zygote table to reduce memory usage. This works by ensuring
// that no more classes are ever added to the pre-zygote table, which ensures that the pages
// always remain shared dirty instead of private dirty.
@@ -1050,9 +1039,6 @@
void EnsureSkipAccessChecksMethods(Handle<mirror::Class> c)
REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::Class* LookupClassFromBootImage(const char* descriptor)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Register a class loader and create its class table and allocator. Should not be called if
// these are already created.
void RegisterClassLoader(mirror::ClassLoader* class_loader)
@@ -1157,8 +1143,6 @@
// New class roots, only used by CMS since the GC needs to mark these in the pause.
std::vector<GcRoot<mirror::Class>> new_class_roots_ GUARDED_BY(Locks::classlinker_classes_lock_);
- // Do we need to search dex caches to find boot image classes?
- bool dex_cache_boot_image_class_lookup_required_;
// Number of times we've searched dex caches for a class. After a certain number of misses we move
// the classes into the class_table_ to avoid dex cache based searches.
Atomic<uint32_t> failed_dex_cache_class_lookups_;
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 84752f0..193f6ee 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -514,9 +514,9 @@
soa.Decode<mirror::ClassLoader>(jclass_loader));
DCHECK_EQ(class_loader->GetClass(),
- soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_PathClassLoader).Ptr());
+ soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_PathClassLoader));
DCHECK_EQ(class_loader->GetParent()->GetClass(),
- soa.Decode<mirror::Class>(WellKnownClasses::java_lang_BootClassLoader).Ptr());
+ soa.Decode<mirror::Class>(WellKnownClasses::java_lang_BootClassLoader));
// The class loader is a PathClassLoader which inherits from BaseDexClassLoader.
// We need to get the DexPathList and loop through it.
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 20fa0d8..383cdd2 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -24,6 +24,15 @@
extern void ReadBarrierJni(mirror::CompressedReference<mirror::Object>* handle_on_stack,
Thread* self ATTRIBUTE_UNUSED) {
+ DCHECK(kUseReadBarrier);
+ if (kUseBakerReadBarrier) {
+ DCHECK(handle_on_stack->AsMirrorPtr() != nullptr)
+ << "The class of a static jni call must not be null";
+ // Check the mark bit and return early if it's already marked.
+ if (LIKELY(handle_on_stack->AsMirrorPtr()->GetMarkBit() != 0)) {
+ return;
+ }
+ }
// Call the read barrier and update the handle.
mirror::Object* to_ref = ReadBarrier::BarrierForRoot(handle_on_stack);
handle_on_stack->Assign(to_ref);
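
The new fast path is valid because, under a Baker-style read barrier, an object whose mark bit is already set has been processed by the current collection and will not move again, so the root on the stack needs no fixup. A hedged sketch of the control flow with stand-in types; GetMarkBit() and BarrierForRoot() exist in ART, but everything below is illustrative:

#include <cstdint>

struct ObjSketch {
  uint32_t flags = 0;
  uint32_t GetMarkBit() const { return flags & 1u; }
};

ObjSketch* SlowPathMark(ObjSketch* ref) { return ref; }  // Placeholder for the real barrier.

ObjSketch* ReadBarrierRootSketch(ObjSketch* ref) {
  if (ref->GetMarkBit() != 0) {
    return ref;  // Already marked: cannot move this cycle, no root update needed.
  }
  return SlowPathMark(ref);  // Slow path may return a forwarded (to-space) address.
}
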
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 126e26c..750efac 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -834,7 +834,7 @@
void BuildQuickArgumentVisitor::FixupReferences() {
// Fixup any references which may have changed.
for (const auto& pair : references_) {
- pair.second->Assign(soa_->Decode<mirror::Object>(pair.first).Ptr());
+ pair.second->Assign(soa_->Decode<mirror::Object>(pair.first));
soa_->Env()->DeleteLocalRef(pair.first);
}
}
@@ -926,7 +926,7 @@
void RememberForGcArgumentVisitor::FixupReferences() {
// Fixup any references which may have changed.
for (const auto& pair : references_) {
- pair.second->Assign(soa_->Decode<mirror::Object>(pair.first).Ptr());
+ pair.second->Assign(soa_->Decode<mirror::Object>(pair.first));
soa_->Env()->DeleteLocalRef(pair.first);
}
}
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 9694597..4b8f38d 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -60,12 +60,13 @@
condition_.Broadcast(self);
}
-mirror::Object* ReferenceProcessor::GetReferent(Thread* self, mirror::Reference* reference) {
+ObjPtr<mirror::Object> ReferenceProcessor::GetReferent(Thread* self,
+ ObjPtr<mirror::Reference> reference) {
if (!kUseReadBarrier || self->GetWeakRefAccessEnabled()) {
// Under read barrier / concurrent copying collector, it's not safe to call GetReferent() when
// weak ref access is disabled as the call includes a read barrier which may push a ref onto the
// mark stack and interfere with termination of marking.
- mirror::Object* const referent = reference->GetReferent();
+ ObjPtr<mirror::Object> const referent = reference->GetReferent();
// If the referent is null then it is already cleared, we can just return null since there is no
// scenario where it becomes non-null during the reference processing phase.
if (UNLIKELY(!SlowPathEnabled()) || referent == nullptr) {
@@ -116,7 +117,8 @@
}
// Process reference class instances and schedule finalizations.
-void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timings,
+void ReferenceProcessor::ProcessReferences(bool concurrent,
+ TimingLogger* timings,
bool clear_soft_references,
collector::GarbageCollector* collector) {
TimingLogger::ScopedTiming t(concurrent ? __FUNCTION__ : "(Paused)ProcessReferences", timings);
@@ -188,7 +190,8 @@
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
-void ReferenceProcessor::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
+void ReferenceProcessor::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> ref,
collector::GarbageCollector* collector) {
// klass can be the class of the old object if the visitor already updated the class of ref.
DCHECK(klass != nullptr);
@@ -260,7 +263,8 @@
}
}
-bool ReferenceProcessor::MakeCircularListIfUnenqueued(mirror::FinalizerReference* reference) {
+bool ReferenceProcessor::MakeCircularListIfUnenqueued(
+ ObjPtr<mirror::FinalizerReference> reference) {
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::reference_processor_lock_);
// Wait until we are done processing the reference.
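
The GetReferent() change above keeps the same gating logic under the new ObjPtr signature: while a concurrent collector has weak-ref access disabled, readers must block rather than race the mark stack. A hedged sketch of that gate using standard primitives; ART's real version uses reference_processor_lock_ and a runtime ConditionVariable:

#include <condition_variable>
#include <mutex>

class WeakAccessGateSketch {
 public:
  void Disable() { std::lock_guard<std::mutex> lg(mu_); enabled_ = false; }
  void Enable() {
    { std::lock_guard<std::mutex> lg(mu_); enabled_ = true; }
    cv_.notify_all();  // Wake every reader parked in WaitUntilEnabled().
  }
  void WaitUntilEnabled() {
    std::unique_lock<std::mutex> lk(mu_);
    cv_.wait(lk, [this] { return enabled_; });
  }
 private:
  std::mutex mu_;
  std::condition_variable cv_;
  bool enabled_ = true;
};
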
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
index 4788f8a..759b7e1 100644
--- a/runtime/gc/reference_processor.h
+++ b/runtime/gc/reference_processor.h
@@ -46,7 +46,9 @@
class ReferenceProcessor {
public:
explicit ReferenceProcessor();
- void ProcessReferences(bool concurrent, TimingLogger* timings, bool clear_soft_references,
+ void ProcessReferences(bool concurrent,
+ TimingLogger* timings,
+ bool clear_soft_references,
gc::collector::GarbageCollector* collector)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_)
@@ -57,16 +59,17 @@
void EnableSlowPath() REQUIRES_SHARED(Locks::mutator_lock_);
void BroadcastForSlowPath(Thread* self);
// Decode the referent, may block if references are being processed.
- mirror::Object* GetReferent(Thread* self, mirror::Reference* reference)
+ ObjPtr<mirror::Object> GetReferent(Thread* self, ObjPtr<mirror::Reference> reference)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::reference_processor_lock_);
void EnqueueClearedReferences(Thread* self) REQUIRES(!Locks::mutator_lock_);
- void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
+ void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> ref,
collector::GarbageCollector* collector)
REQUIRES_SHARED(Locks::mutator_lock_);
void UpdateRoots(IsMarkedVisitor* visitor)
REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
// Make a circular list with reference if it is not enqueued. Uses the finalizer queue lock.
- bool MakeCircularListIfUnenqueued(mirror::FinalizerReference* reference)
+ bool MakeCircularListIfUnenqueued(ObjPtr<mirror::FinalizerReference> reference)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::reference_processor_lock_,
!Locks::reference_queue_finalizer_references_lock_);
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 62625c4..4e6f7da 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -29,7 +29,7 @@
ReferenceQueue::ReferenceQueue(Mutex* lock) : lock_(lock), list_(nullptr) {
}
-void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref) {
+void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, ObjPtr<mirror::Reference> ref) {
DCHECK(ref != nullptr);
MutexLock mu(self, *lock_);
if (ref->IsUnprocessed()) {
@@ -37,16 +37,16 @@
}
}
-void ReferenceQueue::EnqueueReference(mirror::Reference* ref) {
+void ReferenceQueue::EnqueueReference(ObjPtr<mirror::Reference> ref) {
DCHECK(ref != nullptr);
CHECK(ref->IsUnprocessed());
if (IsEmpty()) {
// 1-element cyclic queue, i.e.: Reference ref = ..; ref.pendingNext = ref;
- list_ = ref;
+ list_ = ref.Ptr();
} else {
// The list is owned by the GC, everything that has been inserted must already be at least
// gray.
- mirror::Reference* head = list_->GetPendingNext<kWithoutReadBarrier>();
+ ObjPtr<mirror::Reference> head = list_->GetPendingNext<kWithoutReadBarrier>();
DCHECK(head != nullptr);
ref->SetPendingNext(head);
}
@@ -54,16 +54,16 @@
list_->SetPendingNext(ref);
}
-mirror::Reference* ReferenceQueue::DequeuePendingReference() {
+ObjPtr<mirror::Reference> ReferenceQueue::DequeuePendingReference() {
DCHECK(!IsEmpty());
- mirror::Reference* ref = list_->GetPendingNext<kWithoutReadBarrier>();
+ ObjPtr<mirror::Reference> ref = list_->GetPendingNext<kWithoutReadBarrier>();
DCHECK(ref != nullptr);
// Note: the following code is thread-safe because it is only called from ProcessReferences which
// is single threaded.
if (list_ == ref) {
list_ = nullptr;
} else {
- mirror::Reference* next = ref->GetPendingNext<kWithoutReadBarrier>();
+ ObjPtr<mirror::Reference> next = ref->GetPendingNext<kWithoutReadBarrier>();
list_->SetPendingNext(next);
}
ref->SetPendingNext(nullptr);
@@ -83,10 +83,10 @@
// In ConcurrentCopying::ProcessMarkStackRef() we may leave a white reference in the queue and
// find it here, which is OK.
CHECK_EQ(rb_ptr, ReadBarrier::WhitePtr()) << "ref=" << ref << " rb_ptr=" << rb_ptr;
- mirror::Object* referent = ref->GetReferent<kWithoutReadBarrier>();
+ ObjPtr<mirror::Object> referent = ref->GetReferent<kWithoutReadBarrier>();
// The referent could be null if it's cleared by a mutator (Reference.clear()).
if (referent != nullptr) {
- CHECK(concurrent_copying->IsInToSpace(referent))
+ CHECK(concurrent_copying->IsInToSpace(referent.Ptr()))
<< "ref=" << ref << " rb_ptr=" << ref->GetReadBarrierPointer()
<< " referent=" << referent;
}
@@ -96,13 +96,13 @@
}
void ReferenceQueue::Dump(std::ostream& os) const {
- mirror::Reference* cur = list_;
+ ObjPtr<mirror::Reference> cur = list_;
os << "Reference starting at list_=" << list_ << "\n";
if (cur == nullptr) {
return;
}
do {
- mirror::Reference* pending_next = cur->GetPendingNext();
+ ObjPtr<mirror::Reference> pending_next = cur->GetPendingNext();
os << "Reference= " << cur << " PendingNext=" << pending_next;
if (cur->IsFinalizerReferenceInstance()) {
os << " Zombie=" << cur->AsFinalizerReference()->GetZombie();
@@ -114,7 +114,7 @@
size_t ReferenceQueue::GetLength() const {
size_t count = 0;
- mirror::Reference* cur = list_;
+ ObjPtr<mirror::Reference> cur = list_;
if (cur != nullptr) {
do {
++count;
@@ -127,7 +127,7 @@
void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
collector::GarbageCollector* collector) {
while (!IsEmpty()) {
- mirror::Reference* ref = DequeuePendingReference();
+ ObjPtr<mirror::Reference> ref = DequeuePendingReference();
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
if (referent_addr->AsMirrorPtr() != nullptr &&
!collector->IsMarkedHeapReference(referent_addr)) {
@@ -145,11 +145,11 @@
void ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue* cleared_references,
collector::GarbageCollector* collector) {
while (!IsEmpty()) {
- mirror::FinalizerReference* ref = DequeuePendingReference()->AsFinalizerReference();
+ ObjPtr<mirror::FinalizerReference> ref = DequeuePendingReference()->AsFinalizerReference();
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
if (referent_addr->AsMirrorPtr() != nullptr &&
!collector->IsMarkedHeapReference(referent_addr)) {
- mirror::Object* forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
+ ObjPtr<mirror::Object> forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
// Move the updated referent to the zombie field.
if (Runtime::Current()->IsActiveTransaction()) {
ref->SetZombie<true>(forward_address);
@@ -167,8 +167,8 @@
if (UNLIKELY(IsEmpty())) {
return;
}
- mirror::Reference* const head = list_;
- mirror::Reference* ref = head;
+ ObjPtr<mirror::Reference> const head = list_;
+ ObjPtr<mirror::Reference> ref = head;
do {
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
if (referent_addr->AsMirrorPtr() != nullptr) {
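
All of these ObjPtr conversions sit on top of the same invariant: list_ is the tail of a singly linked cycle through pendingNext, the tail's pendingNext is the head, and a one-element queue is a node pointing at itself. A hedged sketch of the enqueue/dequeue pair the hunks above touch, with illustrative types and no read barriers or locking:

#include <cassert>

struct RefNode {
  RefNode* pending_next = nullptr;
};

struct QueueSketch {
  RefNode* list_ = nullptr;  // Tail of the cycle, or nullptr when empty.

  void Enqueue(RefNode* ref) {
    if (list_ == nullptr) {
      list_ = ref;  // One-element cycle: ref points at itself after the line below.
    } else {
      ref->pending_next = list_->pending_next;  // New node points at the old head.
    }
    list_->pending_next = ref;  // Tail stays fixed; ref becomes the new head.
  }

  RefNode* Dequeue() {
    assert(list_ != nullptr);
    RefNode* ref = list_->pending_next;  // Head of the cycle.
    if (list_ == ref) {
      list_ = nullptr;  // Removing the last element empties the queue.
    } else {
      list_->pending_next = ref->pending_next;  // Unlink the head.
    }
    ref->pending_next = nullptr;  // Null pendingNext marks the node as unenqueued.
    return ref;
  }
};
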
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index 1de1aa1..b5ec1e5 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -26,6 +26,7 @@
#include "base/timing_logger.h"
#include "globals.h"
#include "jni.h"
+#include "obj_ptr.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "thread_pool.h"
@@ -54,15 +55,15 @@
// Enqueue a reference if it is unprocessed. Thread safe to call from multiple
// threads since it uses a lock to avoid a race between checking for the reference's presence and
// adding it.
- void AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref)
+ void AtomicEnqueueIfNotEnqueued(Thread* self, ObjPtr<mirror::Reference> ref)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*lock_);
// Enqueue a reference. The reference must be unprocessed.
// Not thread safe, used when mutators are paused to minimize lock overhead.
- void EnqueueReference(mirror::Reference* ref) REQUIRES_SHARED(Locks::mutator_lock_);
+ void EnqueueReference(ObjPtr<mirror::Reference> ref) REQUIRES_SHARED(Locks::mutator_lock_);
// Dequeue a reference from the queue and return that dequeued reference.
- mirror::Reference* DequeuePendingReference() REQUIRES_SHARED(Locks::mutator_lock_);
+ ObjPtr<mirror::Reference> DequeuePendingReference() REQUIRES_SHARED(Locks::mutator_lock_);
// Enqueues finalizer references with white referents. White referents are blackened, moved to
// the zombie field, and the referent field is cleared.
@@ -104,7 +105,7 @@
// calling AtomicEnqueueIfNotEnqueued.
Mutex* const lock_;
// The actual reference list. Only a root for the mark compact GC since it will be null for other
- // GC types.
+ // GC types. Not an ObjPtr since it is accessed from multiple threads.
mirror::Reference* list_;
DISALLOW_IMPLICIT_CONSTRUCTORS(ReferenceQueue);
diff --git a/runtime/gc/reference_queue_test.cc b/runtime/gc/reference_queue_test.cc
index 5b8a3c2..3ca3353 100644
--- a/runtime/gc/reference_queue_test.cc
+++ b/runtime/gc/reference_queue_test.cc
@@ -52,10 +52,10 @@
std::set<mirror::Reference*> refs = {ref1.Get(), ref2.Get()};
std::set<mirror::Reference*> dequeued;
- dequeued.insert(queue.DequeuePendingReference());
+ dequeued.insert(queue.DequeuePendingReference().Ptr());
ASSERT_TRUE(!queue.IsEmpty());
ASSERT_EQ(queue.GetLength(), 1U);
- dequeued.insert(queue.DequeuePendingReference());
+ dequeued.insert(queue.DequeuePendingReference().Ptr());
ASSERT_EQ(queue.GetLength(), 0U);
ASSERT_TRUE(queue.IsEmpty());
ASSERT_EQ(refs, dequeued);
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index be061be..a61a187 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -33,8 +33,7 @@
namespace art {
InternTable::InternTable()
- : images_added_to_intern_table_(false),
- log_new_roots_(false),
+ : log_new_roots_(false),
weak_intern_condition_("New intern condition", *Locks::intern_table_lock_),
weak_root_state_(gc::kWeakRootStateNormal) {
}
@@ -181,57 +180,8 @@
const ImageSection& section = header->GetImageSection(ImageHeader::kSectionInternedStrings);
if (section.Size() > 0) {
AddTableFromMemoryLocked(image_space->Begin() + section.Offset());
- } else {
- // TODO: Delete this logic?
- mirror::Object* root = header->GetImageRoot(ImageHeader::kDexCaches);
- mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
- for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
- mirror::DexCache* dex_cache = dex_caches->Get(i);
- const size_t num_strings = dex_cache->NumStrings();
- for (size_t j = 0; j < num_strings; ++j) {
- mirror::String* image_string = dex_cache->GetResolvedString(j);
- if (image_string != nullptr) {
- mirror::String* found = LookupStrongLocked(image_string);
- if (found == nullptr) {
- InsertStrong(image_string);
- } else {
- DCHECK_EQ(found, image_string);
- }
- }
- }
- }
}
}
- images_added_to_intern_table_ = true;
-}
-
-mirror::String* InternTable::LookupStringFromImage(mirror::String* s) {
- DCHECK(!images_added_to_intern_table_);
- const std::vector<gc::space::ImageSpace*>& image_spaces =
- Runtime::Current()->GetHeap()->GetBootImageSpaces();
- if (image_spaces.empty()) {
- return nullptr; // No image present.
- }
- const std::string utf8 = s->ToModifiedUtf8();
- for (gc::space::ImageSpace* image_space : image_spaces) {
- mirror::Object* root = image_space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
- mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
- for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
- mirror::DexCache* dex_cache = dex_caches->Get(i);
- const DexFile* dex_file = dex_cache->GetDexFile();
- // Binary search the dex file for the string index.
- const DexFile::StringId* string_id = dex_file->FindStringId(utf8.c_str());
- if (string_id != nullptr) {
- uint32_t string_idx = dex_file->GetIndexForStringId(*string_id);
- // GetResolvedString() contains a RB.
- mirror::String* image_string = dex_cache->GetResolvedString(string_idx);
- if (image_string != nullptr) {
- return image_string;
- }
- }
- }
- }
- return nullptr;
}
void InternTable::BroadcastForNewInterns() {
@@ -303,13 +253,6 @@
}
return weak;
}
- // Check the image for a match.
- if (!images_added_to_intern_table_) {
- mirror::String* const image_string = LookupStringFromImage(s);
- if (image_string != nullptr) {
- return is_strong ? InsertStrong(image_string) : InsertWeak(image_string);
- }
- }
// No match in the strong table or the weak table. Insert into the strong / weak table.
return is_strong ? InsertStrong(s) : InsertWeak(s);
}
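
With images_added_to_intern_table_ and LookupStringFromImage() deleted, Insert() no longer has a dex-cache scan to fall back to: image strings are assumed to arrive through the serialized intern table section, so a lookup miss simply inserts. A hedged sketch of the remaining strong/weak flow, with std::string standing in for mirror::String:

#include <string>
#include <unordered_set>

struct InternSketch {
  std::unordered_set<std::string> strong_, weak_;

  const std::string* InsertStrong(const std::string& s) {
    if (auto it = strong_.find(s); it != strong_.end()) {
      return &*it;  // Already strongly interned.
    }
    if (auto it = weak_.find(s); it != weak_.end()) {
      strong_.insert(*it);  // Promote: a strong intern must keep the string alive.
      weak_.erase(it);
    } else {
      strong_.insert(s);  // No match anywhere: insert directly.
    }
    return &*strong_.find(s);
  }
};
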
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 184fbdc..30ff55d 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -238,8 +238,6 @@
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
// Transaction rollback access.
- mirror::String* LookupStringFromImage(mirror::String* s)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
mirror::String* InsertStrongFromTransaction(mirror::String* s)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
mirror::String* InsertWeakFromTransaction(mirror::String* s)
@@ -260,7 +258,6 @@
void WaitUntilAccessible(Thread* self)
REQUIRES(Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
- bool images_added_to_intern_table_ GUARDED_BY(Locks::intern_table_lock_);
bool log_new_roots_ GUARDED_BY(Locks::intern_table_lock_);
ConditionVariable weak_intern_condition_ GUARDED_BY(Locks::intern_table_lock_);
// Since this contains (strong) roots, they need a read barrier to
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 0d3af93..f3cd25c 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -52,7 +52,7 @@
ScopedThreadStateChange tsc(self, kNative);
jresult = fn(soa.Env(), klass.get());
}
- result->SetL(soa.Decode<Object>(jresult).Ptr());
+ result->SetL(soa.Decode<Object>(jresult));
} else if (shorty == "V") {
typedef void (fntype)(JNIEnv*, jclass);
fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
@@ -94,7 +94,7 @@
ScopedThreadStateChange tsc(self, kNative);
jresult = fn(soa.Env(), klass.get(), arg0.get());
}
- result->SetL(soa.Decode<Object>(jresult).Ptr());
+ result->SetL(soa.Decode<Object>(jresult));
} else if (shorty == "IIZ") {
typedef jint (fntype)(JNIEnv*, jclass, jint, jboolean);
fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
@@ -192,7 +192,7 @@
ScopedThreadStateChange tsc(self, kNative);
jresult = fn(soa.Env(), rcvr.get());
}
- result->SetL(soa.Decode<Object>(jresult).Ptr());
+ result->SetL(soa.Decode<Object>(jresult));
} else if (shorty == "V") {
typedef void (fntype)(JNIEnv*, jobject);
fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
@@ -213,7 +213,7 @@
ScopedThreadStateChange tsc(self, kNative);
jresult = fn(soa.Env(), rcvr.get(), arg0.get());
}
- result->SetL(soa.Decode<Object>(jresult).Ptr());
+ result->SetL(soa.Decode<Object>(jresult));
ScopedThreadStateChange tsc(self, kNative);
} else if (shorty == "III") {
typedef jint (fntype)(JNIEnv*, jobject, jint, jint);
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index e12a699..4a3654b 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -953,7 +953,7 @@
ObjPtr<mirror::Object> dex = GetDexFromDexCache(self, src->AsDexCache());
if (dex != nullptr) {
have_dex = true;
- result->SetL(dex.Ptr());
+ result->SetL(dex);
}
}
if (!have_dex) {
@@ -1186,13 +1186,13 @@
// This allows statically initializing ConcurrentHashMap and SynchronousQueue.
void UnstartedRuntime::UnstartedReferenceGetReferent(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
- mirror::Reference* const ref = down_cast<mirror::Reference*>(
+ ObjPtr<mirror::Reference> const ref = down_cast<mirror::Reference*>(
shadow_frame->GetVRegReference(arg_offset));
if (ref == nullptr) {
AbortTransactionOrFail(self, "Reference.getReferent() with null object");
return;
}
- mirror::Object* const referent =
+ ObjPtr<mirror::Object> const referent =
Runtime::Current()->GetHeap()->GetReferenceProcessor()->GetReferent(self, ref);
result->SetL(referent);
}
@@ -1457,7 +1457,7 @@
ScopedLocalRef<jobject> result_jobj(env,
InvokeMethod(soa, java_method.get(), java_receiver.get(), java_args.get()));
- result->SetL(self->DecodeJObject(result_jobj.get()).Ptr());
+ result->SetL(self->DecodeJObject(result_jobj.get()));
// Conservatively flag all exceptions as transaction aborts. This way we don't need to unwrap
// InvocationTargetExceptions.
@@ -1620,9 +1620,9 @@
uint32_t* args ATTRIBUTE_UNUSED, JValue* result) {
ScopedObjectAccessUnchecked soa(self);
if (Runtime::Current()->IsActiveTransaction()) {
- result->SetL(soa.Decode<mirror::Object>(self->CreateInternalStackTrace<true>(soa)).Ptr());
+ result->SetL(soa.Decode<mirror::Object>(self->CreateInternalStackTrace<true>(soa)));
} else {
- result->SetL(soa.Decode<mirror::Object>(self->CreateInternalStackTrace<false>(soa)).Ptr());
+ result->SetL(soa.Decode<mirror::Object>(self->CreateInternalStackTrace<false>(soa)));
}
}
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index c5f95eb..f2bda05 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -422,14 +422,14 @@
tracing_enabled_(runtime_options.Exists(RuntimeArgumentMap::JniTrace)
|| VLOG_IS_ON(third_party_jni)),
trace_(runtime_options.GetOrDefault(RuntimeArgumentMap::JniTrace)),
- globals_lock_("JNI global reference table lock"),
globals_(gGlobalsInitial, gGlobalsMax, kGlobal),
libraries_(new Libraries),
unchecked_functions_(&gJniInvokeInterface),
- weak_globals_lock_("JNI weak global reference table lock", kJniWeakGlobalsLock),
weak_globals_(kWeakGlobalsInitial, kWeakGlobalsMax, kWeakGlobal),
allow_accessing_weak_globals_(true),
- weak_globals_add_condition_("weak globals add condition", weak_globals_lock_),
+ weak_globals_add_condition_("weak globals add condition",
+ (CHECK(Locks::jni_weak_globals_lock_ != nullptr),
+ *Locks::jni_weak_globals_lock_)),
env_hooks_() {
functions = unchecked_functions_;
SetCheckJniEnabled(runtime_options.Exists(RuntimeArgumentMap::CheckJni));
@@ -537,7 +537,7 @@
if (obj == nullptr) {
return nullptr;
}
- WriterMutexLock mu(self, globals_lock_);
+ WriterMutexLock mu(self, *Locks::jni_globals_lock_);
IndirectRef ref = globals_.Add(IRT_FIRST_SEGMENT, obj);
return reinterpret_cast<jobject>(ref);
}
@@ -546,7 +546,7 @@
if (obj == nullptr) {
return nullptr;
}
- MutexLock mu(self, weak_globals_lock_);
+ MutexLock mu(self, *Locks::jni_weak_globals_lock_);
while (UNLIKELY(!MayAccessWeakGlobals(self))) {
weak_globals_add_condition_.WaitHoldingLocks(self);
}
@@ -558,7 +558,7 @@
if (obj == nullptr) {
return;
}
- WriterMutexLock mu(self, globals_lock_);
+ WriterMutexLock mu(self, *Locks::jni_globals_lock_);
if (!globals_.Remove(IRT_FIRST_SEGMENT, obj)) {
LOG(WARNING) << "JNI WARNING: DeleteGlobalRef(" << obj << ") "
<< "failed to find entry";
@@ -569,7 +569,7 @@
if (obj == nullptr) {
return;
}
- MutexLock mu(self, weak_globals_lock_);
+ MutexLock mu(self, *Locks::jni_weak_globals_lock_);
if (!weak_globals_.Remove(IRT_FIRST_SEGMENT, obj)) {
LOG(WARNING) << "JNI WARNING: DeleteWeakGlobalRef(" << obj << ") "
<< "failed to find entry";
@@ -597,11 +597,11 @@
}
Thread* self = Thread::Current();
{
- ReaderMutexLock mu(self, globals_lock_);
+ ReaderMutexLock mu(self, *Locks::jni_globals_lock_);
os << "; globals=" << globals_.Capacity();
}
{
- MutexLock mu(self, weak_globals_lock_);
+ MutexLock mu(self, *Locks::jni_weak_globals_lock_);
if (weak_globals_.Capacity() > 0) {
os << " (plus " << weak_globals_.Capacity() << " weak)";
}
@@ -617,7 +617,7 @@
void JavaVMExt::DisallowNewWeakGlobals() {
CHECK(!kUseReadBarrier);
Thread* const self = Thread::Current();
- MutexLock mu(self, weak_globals_lock_);
+ MutexLock mu(self, *Locks::jni_weak_globals_lock_);
// DisallowNewWeakGlobals is only called by CMS during the pause. It is required to have the
// mutator lock exclusively held so that we don't have any threads in the middle of
// DecodeWeakGlobal.
@@ -628,7 +628,7 @@
void JavaVMExt::AllowNewWeakGlobals() {
CHECK(!kUseReadBarrier);
Thread* self = Thread::Current();
- MutexLock mu(self, weak_globals_lock_);
+ MutexLock mu(self, *Locks::jni_weak_globals_lock_);
allow_accessing_weak_globals_.StoreSequentiallyConsistent(true);
weak_globals_add_condition_.Broadcast(self);
}
@@ -636,7 +636,7 @@
void JavaVMExt::BroadcastForNewWeakGlobals() {
CHECK(kUseReadBarrier);
Thread* self = Thread::Current();
- MutexLock mu(self, weak_globals_lock_);
+ MutexLock mu(self, *Locks::jni_weak_globals_lock_);
weak_globals_add_condition_.Broadcast(self);
}
@@ -645,7 +645,7 @@
}
void JavaVMExt::UpdateGlobal(Thread* self, IndirectRef ref, ObjPtr<mirror::Object> result) {
- WriterMutexLock mu(self, globals_lock_);
+ WriterMutexLock mu(self, *Locks::jni_globals_lock_);
globals_.Update(ref, result);
}
@@ -671,13 +671,13 @@
if (LIKELY(MayAccessWeakGlobalsUnlocked(self))) {
return weak_globals_.SynchronizedGet(ref);
}
- MutexLock mu(self, weak_globals_lock_);
+ MutexLock mu(self, *Locks::jni_weak_globals_lock_);
return DecodeWeakGlobalLocked(self, ref);
}
ObjPtr<mirror::Object> JavaVMExt::DecodeWeakGlobalLocked(Thread* self, IndirectRef ref) {
if (kDebugLocking) {
- weak_globals_lock_.AssertHeld(self);
+ Locks::jni_weak_globals_lock_->AssertHeld(self);
}
while (UNLIKELY(!MayAccessWeakGlobals(self))) {
weak_globals_add_condition_.WaitHoldingLocks(self);
@@ -700,7 +700,7 @@
bool JavaVMExt::IsWeakGlobalCleared(Thread* self, IndirectRef ref) {
DCHECK_EQ(GetIndirectRefKind(ref), kWeakGlobal);
- MutexLock mu(self, weak_globals_lock_);
+ MutexLock mu(self, *Locks::jni_weak_globals_lock_);
while (UNLIKELY(!MayAccessWeakGlobals(self))) {
weak_globals_add_condition_.WaitHoldingLocks(self);
}
@@ -712,18 +712,18 @@
}
void JavaVMExt::UpdateWeakGlobal(Thread* self, IndirectRef ref, ObjPtr<mirror::Object> result) {
- MutexLock mu(self, weak_globals_lock_);
+ MutexLock mu(self, *Locks::jni_weak_globals_lock_);
weak_globals_.Update(ref, result);
}
void JavaVMExt::DumpReferenceTables(std::ostream& os) {
Thread* self = Thread::Current();
{
- ReaderMutexLock mu(self, globals_lock_);
+ ReaderMutexLock mu(self, *Locks::jni_globals_lock_);
globals_.Dump(os);
}
{
- MutexLock mu(self, weak_globals_lock_);
+ MutexLock mu(self, *Locks::jni_weak_globals_lock_);
weak_globals_.Dump(os);
}
}
@@ -920,7 +920,7 @@
}
void JavaVMExt::SweepJniWeakGlobals(IsMarkedVisitor* visitor) {
- MutexLock mu(Thread::Current(), weak_globals_lock_);
+ MutexLock mu(Thread::Current(), *Locks::jni_weak_globals_lock_);
Runtime* const runtime = Runtime::Current();
for (auto* entry : weak_globals_) {
// Need to skip null here to distinguish between null entries and cleared weak ref entries.
@@ -937,13 +937,13 @@
}
void JavaVMExt::TrimGlobals() {
- WriterMutexLock mu(Thread::Current(), globals_lock_);
+ WriterMutexLock mu(Thread::Current(), *Locks::jni_globals_lock_);
globals_.Trim();
}
void JavaVMExt::VisitRoots(RootVisitor* visitor) {
Thread* self = Thread::Current();
- ReaderMutexLock mu(self, globals_lock_);
+ ReaderMutexLock mu(self, *Locks::jni_globals_lock_);
globals_.VisitRoots(visitor, RootInfo(kRootJNIGlobal));
// The weak_globals table is visited by the GC itself (because it mutates the table).
}
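
One subtlety above: weak_globals_add_condition_ must be constructed from a lock reference, so the initializer uses the comma operator to run a CHECK before dereferencing the now-global lock pointer, guaranteeing Locks::Init() has already populated it. A minimal sketch of the idiom, assuming simplified stand-in types:

#include <cassert>

struct LockSketch {};
LockSketch* g_lock = nullptr;  // Populated during early init, as Locks::Init() does.

struct CondSketch {
  explicit CondSketch(LockSketch& lock) : lock_(lock) {}
  LockSketch& lock_;
};

struct VmSketch {
  // The left operand of the comma runs the check; the right yields the reference.
  VmSketch() : cond_((assert(g_lock != nullptr), *g_lock)) {}
  CondSketch cond_;
};
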
diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h
index 2e59a9d..05717f4 100644
--- a/runtime/java_vm_ext.h
+++ b/runtime/java_vm_ext.h
@@ -109,72 +109,81 @@
REQUIRES_SHARED(Locks::mutator_lock_);
void DumpForSigQuit(std::ostream& os)
- REQUIRES(!Locks::jni_libraries_lock_, !globals_lock_, !weak_globals_lock_);
+ REQUIRES(!Locks::jni_libraries_lock_,
+ !Locks::jni_globals_lock_,
+ !Locks::jni_weak_globals_lock_);
void DumpReferenceTables(std::ostream& os)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!globals_lock_, !weak_globals_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::jni_globals_lock_, !Locks::jni_weak_globals_lock_);
bool SetCheckJniEnabled(bool enabled);
void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!globals_lock_);
+ REQUIRES(!Locks::jni_globals_lock_);
- void DisallowNewWeakGlobals() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
- void AllowNewWeakGlobals() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
- void BroadcastForNewWeakGlobals() REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!weak_globals_lock_);
+ void DisallowNewWeakGlobals()
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::jni_weak_globals_lock_);
+ void AllowNewWeakGlobals()
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::jni_weak_globals_lock_);
+ void BroadcastForNewWeakGlobals()
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::jni_weak_globals_lock_);
jobject AddGlobalRef(Thread* self, ObjPtr<mirror::Object> obj)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!globals_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::jni_globals_lock_);
jweak AddWeakGlobalRef(Thread* self, ObjPtr<mirror::Object> obj)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::jni_weak_globals_lock_);
- void DeleteGlobalRef(Thread* self, jobject obj) REQUIRES(!globals_lock_);
+ void DeleteGlobalRef(Thread* self, jobject obj) REQUIRES(!Locks::jni_globals_lock_);
- void DeleteWeakGlobalRef(Thread* self, jweak obj) REQUIRES(!weak_globals_lock_);
+ void DeleteWeakGlobalRef(Thread* self, jweak obj) REQUIRES(!Locks::jni_weak_globals_lock_);
void SweepJniWeakGlobals(IsMarkedVisitor* visitor)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::jni_weak_globals_lock_);
ObjPtr<mirror::Object> DecodeGlobal(IndirectRef ref)
REQUIRES_SHARED(Locks::mutator_lock_);
void UpdateGlobal(Thread* self, IndirectRef ref, ObjPtr<mirror::Object> result)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!globals_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::jni_globals_lock_);
ObjPtr<mirror::Object> DecodeWeakGlobal(Thread* self, IndirectRef ref)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!weak_globals_lock_);
+ REQUIRES(!Locks::jni_weak_globals_lock_);
ObjPtr<mirror::Object> DecodeWeakGlobalLocked(Thread* self, IndirectRef ref)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(weak_globals_lock_);
+ REQUIRES(Locks::jni_weak_globals_lock_);
// Like DecodeWeakGlobal() but to be used only during a runtime shutdown where self may be
// null.
ObjPtr<mirror::Object> DecodeWeakGlobalDuringShutdown(Thread* self, IndirectRef ref)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!weak_globals_lock_);
+ REQUIRES(!Locks::jni_weak_globals_lock_);
// Checks if the weak global ref has been cleared by the GC without decode (read barrier.)
bool IsWeakGlobalCleared(Thread* self, IndirectRef ref)
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!weak_globals_lock_);
-
- Mutex& WeakGlobalsLock() RETURN_CAPABILITY(weak_globals_lock_) {
- return weak_globals_lock_;
- }
+ REQUIRES(!Locks::jni_weak_globals_lock_);
void UpdateWeakGlobal(Thread* self, IndirectRef ref, ObjPtr<mirror::Object> result)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::jni_weak_globals_lock_);
const JNIInvokeInterface* GetUncheckedFunctions() const {
return unchecked_functions_;
}
void TrimGlobals() REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!globals_lock_);
+ REQUIRES(!Locks::jni_globals_lock_);
jint HandleGetEnv(/*out*/void** env, jint version);
@@ -187,7 +196,7 @@
bool MayAccessWeakGlobalsUnlocked(Thread* self) const REQUIRES_SHARED(Locks::mutator_lock_);
bool MayAccessWeakGlobals(Thread* self) const
REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(weak_globals_lock_);
+ REQUIRES(Locks::jni_weak_globals_lock_);
Runtime* const runtime_;
@@ -203,8 +212,6 @@
// Extra diagnostics.
const std::string trace_;
- // JNI global references.
- ReaderWriterMutex globals_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
// Not guarded by globals_lock since we sometimes use SynchronizedGet in Thread::DecodeJObject.
IndirectReferenceTable globals_;
@@ -215,8 +222,6 @@
// Used by -Xcheck:jni.
const JNIInvokeInterface* const unchecked_functions_;
- // JNI weak global references.
- Mutex weak_globals_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
// Since weak_globals_ contain weak roots, be careful not to
// directly access the object references in it. Use Get() with the
// read barrier enabled.
@@ -224,7 +229,7 @@
IndirectReferenceTable weak_globals_;
// Not guarded by weak_globals_lock since we may use SynchronizedGet in DecodeWeakGlobal.
Atomic<bool> allow_accessing_weak_globals_;
- ConditionVariable weak_globals_add_condition_ GUARDED_BY(weak_globals_lock_);
+ ConditionVariable weak_globals_add_condition_ GUARDED_BY(Locks::jni_weak_globals_lock_);
// TODO Maybe move this to Runtime.
std::vector<GetEnvHook> env_hooks_;
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 6828124..8eebe56 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -379,7 +379,7 @@
// Not even a java.lang.reflect.Field, return null. TODO, is this check necessary?
return nullptr;
}
- ObjPtr<mirror::Field> field = down_cast<mirror::Field*>(obj_field.Ptr());
+ ObjPtr<mirror::Field> field = ObjPtr<mirror::Field>::DownCast(obj_field);
return soa.EncodeField(field->GetArtField());
}
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 7cbcac8..d18781a 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -25,6 +25,7 @@
#include "base/stringprintf.h"
#include "class-inl.h"
#include "gc/heap-inl.h"
+#include "obj_ptr-inl.h"
#include "thread.h"
namespace art {
diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc
index aee48cc..1aa38dd 100644
--- a/runtime/mirror/array.cc
+++ b/runtime/mirror/array.cc
@@ -121,7 +121,7 @@
art::ThrowArrayIndexOutOfBoundsException(index, GetLength());
}
-void Array::ThrowArrayStoreException(Object* object) {
+void Array::ThrowArrayStoreException(ObjPtr<Object> object) {
art::ThrowArrayStoreException(object->GetClass(), this->GetClass());
}
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 6c82eb9..04d02f7 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -20,6 +20,7 @@
#include "base/enums.h"
#include "gc_root.h"
#include "gc/allocator_type.h"
+#include "obj_ptr.h"
#include "object.h"
#include "object_callbacks.h"
@@ -89,7 +90,7 @@
REQUIRES(!Roles::uninterruptible_);
protected:
- void ThrowArrayStoreException(Object* object) REQUIRES_SHARED(Locks::mutator_lock_)
+ void ThrowArrayStoreException(ObjPtr<Object> object) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
private:
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 62c583b..14bd243 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -655,7 +655,7 @@
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(Class, name_));
}
-inline void Class::SetName(String* name) {
+inline void Class::SetName(ObjPtr<String> name) {
if (Runtime::Current()->IsActiveTransaction()) {
SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Class, name_), name);
} else {
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 12ce014..a5b61fd 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -347,7 +347,7 @@
}
String* GetName() REQUIRES_SHARED(Locks::mutator_lock_); // Returns the cached name.
- void SetName(String* name) REQUIRES_SHARED(Locks::mutator_lock_); // Sets the cached name.
+ void SetName(ObjPtr<String> name) REQUIRES_SHARED(Locks::mutator_lock_); // Sets the cached name.
// Computes the name, then sets the cached value.
static String* ComputeName(Handle<Class> h_this) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
diff --git a/runtime/mirror/field-inl.h b/runtime/mirror/field-inl.h
index 940e824..adc5107 100644
--- a/runtime/mirror/field-inl.h
+++ b/runtime/mirror/field-inl.h
@@ -48,7 +48,7 @@
self->ClearException();
}
}
- auto ret = hs.NewHandle(static_cast<Field*>(StaticClass()->AllocObject(self).Ptr()));
+ auto ret = hs.NewHandle(ObjPtr<Field>::DownCast(StaticClass()->AllocObject(self)));
if (UNLIKELY(ret.Get() == nullptr)) {
self->AssertPendingOOMException();
return nullptr;
@@ -80,7 +80,7 @@
template<bool kTransactionActive>
void Field::SetDeclaringClass(ObjPtr<mirror::Class> c) {
- SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_), c.Ptr());
+ SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_), c);
}
} // namespace mirror
diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h
index d5bc256..3c2390b 100644
--- a/runtime/mirror/object_array-inl.h
+++ b/runtime/mirror/object_array-inl.h
@@ -25,6 +25,7 @@
#include "base/stringprintf.h"
#include "gc/heap.h"
#include "mirror/class.h"
+#include "obj_ptr-inl.h"
#include "runtime.h"
#include "handle_scope-inl.h"
#include "thread.h"
@@ -34,24 +35,29 @@
namespace mirror {
template<class T>
-inline ObjectArray<T>* ObjectArray<T>::Alloc(Thread* self, Class* object_array_class,
+inline ObjectArray<T>* ObjectArray<T>::Alloc(Thread* self,
+ ObjPtr<Class> object_array_class,
int32_t length, gc::AllocatorType allocator_type) {
- Array* array = Array::Alloc<true>(self, object_array_class, length,
- ComponentSizeShiftWidth(sizeof(HeapReference<Object>)),
+ Array* array = Array::Alloc<true>(self,
+ object_array_class.Ptr(),
+ length,
+ ComponentSizeShiftWidth(kHeapReferenceSize),
allocator_type);
if (UNLIKELY(array == nullptr)) {
return nullptr;
- } else {
- DCHECK_EQ(array->GetClass()->GetComponentSizeShift(),
- ComponentSizeShiftWidth(sizeof(HeapReference<Object>)));
- return array->AsObjectArray<T>();
}
+ DCHECK_EQ(array->GetClass()->GetComponentSizeShift(),
+ ComponentSizeShiftWidth(kHeapReferenceSize));
+ return array->AsObjectArray<T>();
}
template<class T>
-inline ObjectArray<T>* ObjectArray<T>::Alloc(Thread* self, Class* object_array_class,
+inline ObjectArray<T>* ObjectArray<T>::Alloc(Thread* self,
+ ObjPtr<Class> object_array_class,
int32_t length) {
- return Alloc(self, object_array_class, length,
+ return Alloc(self,
+ object_array_class,
+ length,
Runtime::Current()->GetHeap()->GetCurrentAllocator());
}
@@ -65,7 +71,7 @@
}
template<class T> template<VerifyObjectFlags kVerifyFlags>
-inline bool ObjectArray<T>::CheckAssignable(T* object) {
+inline bool ObjectArray<T>::CheckAssignable(ObjPtr<T> object) {
if (object != nullptr) {
Class* element_class = GetClass<kVerifyFlags>()->GetComponentType();
if (UNLIKELY(!object->InstanceOf(element_class))) {
@@ -77,7 +83,7 @@
}
template<class T>
-inline void ObjectArray<T>::Set(int32_t i, T* object) {
+inline void ObjectArray<T>::Set(int32_t i, ObjPtr<T> object) {
if (Runtime::Current()->IsActiveTransaction()) {
Set<true>(i, object);
} else {
@@ -87,7 +93,7 @@
template<class T>
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
-inline void ObjectArray<T>::Set(int32_t i, T* object) {
+inline void ObjectArray<T>::Set(int32_t i, ObjPtr<T> object) {
if (CheckIsValidIndex(i) && CheckAssignable<kVerifyFlags>(object)) {
SetFieldObject<kTransactionActive, kCheckTransaction, kVerifyFlags>(OffsetOfElement(i), object);
} else {
@@ -97,7 +103,7 @@
template<class T>
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
-inline void ObjectArray<T>::SetWithoutChecks(int32_t i, T* object) {
+inline void ObjectArray<T>::SetWithoutChecks(int32_t i, ObjPtr<T> object) {
DCHECK(CheckIsValidIndex<kVerifyFlags>(i));
DCHECK(CheckAssignable<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>(object));
SetFieldObject<kTransactionActive, kCheckTransaction, kVerifyFlags>(OffsetOfElement(i), object);
@@ -105,7 +111,7 @@
template<class T>
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
-inline void ObjectArray<T>::SetWithoutChecksAndWriteBarrier(int32_t i, T* object) {
+inline void ObjectArray<T>::SetWithoutChecksAndWriteBarrier(int32_t i, ObjPtr<T> object) {
DCHECK(CheckIsValidIndex<kVerifyFlags>(i));
// TODO: enable this check. It fails when writing the image in ImageWriter::FixupObjectArray.
// DCHECK(CheckAssignable(object));
@@ -120,8 +126,10 @@
}
template<class T>
-inline void ObjectArray<T>::AssignableMemmove(int32_t dst_pos, ObjectArray<T>* src,
- int32_t src_pos, int32_t count) {
+inline void ObjectArray<T>::AssignableMemmove(int32_t dst_pos,
+ ObjPtr<ObjectArray<T>> src,
+ int32_t src_pos,
+ int32_t count) {
if (kIsDebugBuild) {
for (int i = 0; i < count; ++i) {
// The get will perform the VerifyObject.
@@ -160,8 +168,10 @@
}
template<class T>
-inline void ObjectArray<T>::AssignableMemcpy(int32_t dst_pos, ObjectArray<T>* src,
- int32_t src_pos, int32_t count) {
+inline void ObjectArray<T>::AssignableMemcpy(int32_t dst_pos,
+ ObjPtr<ObjectArray<T>> src,
+ int32_t src_pos,
+ int32_t count) {
if (kIsDebugBuild) {
for (int i = 0; i < count; ++i) {
// The get will perform the VerifyObject.
@@ -190,8 +200,10 @@
template<class T>
template<bool kTransactionActive>
-inline void ObjectArray<T>::AssignableCheckingMemcpy(int32_t dst_pos, ObjectArray<T>* src,
- int32_t src_pos, int32_t count,
+inline void ObjectArray<T>::AssignableCheckingMemcpy(int32_t dst_pos,
+ ObjPtr<ObjectArray<T>> src,
+ int32_t src_pos,
+ int32_t count,
bool throw_exception) {
DCHECK_NE(this, src)
<< "This case should be handled with memmove that handles overlaps correctly";
@@ -258,8 +270,7 @@
template<class T>
inline MemberOffset ObjectArray<T>::OffsetOfElement(int32_t i) {
- return MemberOffset(DataOffset(sizeof(HeapReference<Object>)).Int32Value() +
- (i * sizeof(HeapReference<Object>)));
+ return MemberOffset(DataOffset(kHeapReferenceSize).Int32Value() + (i * kHeapReferenceSize));
}
template<class T> template<typename Visitor>
diff --git a/runtime/mirror/object_array.h b/runtime/mirror/object_array.h
index 19b9d87..e4e954e 100644
--- a/runtime/mirror/object_array.h
+++ b/runtime/mirror/object_array.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_MIRROR_OBJECT_ARRAY_H_
#include "array.h"
+#include "obj_ptr.h"
namespace art {
namespace mirror {
@@ -30,11 +31,15 @@
return Array::ClassSize(pointer_size);
}
- static ObjectArray<T>* Alloc(Thread* self, Class* object_array_class, int32_t length,
+ static ObjectArray<T>* Alloc(Thread* self,
+ ObjPtr<Class> object_array_class,
+ int32_t length,
gc::AllocatorType allocator_type)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
- static ObjectArray<T>* Alloc(Thread* self, Class* object_array_class, int32_t length)
+ static ObjectArray<T>* Alloc(Thread* self,
+ ObjPtr<Class> object_array_class,
+ int32_t length)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -45,13 +50,13 @@
// an ArrayStoreException and returns false.
// TODO fix thread safety analysis: should be REQUIRES_SHARED(Locks::mutator_lock_).
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool CheckAssignable(T* object) NO_THREAD_SAFETY_ANALYSIS;
+ bool CheckAssignable(ObjPtr<T> object) NO_THREAD_SAFETY_ANALYSIS;
- ALWAYS_INLINE void Set(int32_t i, T* object) REQUIRES_SHARED(Locks::mutator_lock_);
+ ALWAYS_INLINE void Set(int32_t i, ObjPtr<T> object) REQUIRES_SHARED(Locks::mutator_lock_);
// TODO fix thread safety analysis: should be REQUIRES_SHARED(Locks::mutator_lock_).
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE void Set(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS;
+ ALWAYS_INLINE void Set(int32_t i, ObjPtr<T> object) NO_THREAD_SAFETY_ANALYSIS;
// Set element without bound and element type checks, to be used in limited
// circumstances, such as during boot image writing.
@@ -59,32 +64,42 @@
// REQUIRES_SHARED(Locks::mutator_lock_).
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE void SetWithoutChecks(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS;
+ ALWAYS_INLINE void SetWithoutChecks(int32_t i, ObjPtr<T> object) NO_THREAD_SAFETY_ANALYSIS;
// TODO fix thread safety analysis broken by the use of template. This should be
// REQUIRES_SHARED(Locks::mutator_lock_).
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE void SetWithoutChecksAndWriteBarrier(int32_t i, T* object)
+ ALWAYS_INLINE void SetWithoutChecksAndWriteBarrier(int32_t i, ObjPtr<T> object)
NO_THREAD_SAFETY_ANALYSIS;
ALWAYS_INLINE T* GetWithoutChecks(int32_t i) REQUIRES_SHARED(Locks::mutator_lock_);
// Copy src into this array (dealing with overlaps as memmove does) without assignability checks.
- void AssignableMemmove(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos,
- int32_t count) REQUIRES_SHARED(Locks::mutator_lock_);
+ void AssignableMemmove(int32_t dst_pos,
+ ObjPtr<ObjectArray<T>> src,
+ int32_t src_pos,
+ int32_t count)
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Copy src into this array assuming no overlap and without assignability checks.
- void AssignableMemcpy(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos,
- int32_t count) REQUIRES_SHARED(Locks::mutator_lock_);
+ void AssignableMemcpy(int32_t dst_pos,
+ ObjPtr<ObjectArray<T>> src,
+ int32_t src_pos,
+ int32_t count)
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Copy src into this array with assignability checks.
template<bool kTransactionActive>
- void AssignableCheckingMemcpy(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos,
- int32_t count, bool throw_exception)
+ void AssignableCheckingMemcpy(int32_t dst_pos,
+ ObjPtr<ObjectArray<T>> src,
+ int32_t src_pos,
+ int32_t count,
+ bool throw_exception)
REQUIRES_SHARED(Locks::mutator_lock_);
ObjectArray<T>* CopyOf(Thread* self, int32_t new_length)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
static MemberOffset OffsetOfElement(int32_t i);
diff --git a/runtime/mirror/object_reference-inl.h b/runtime/mirror/object_reference-inl.h
index 60955d6..e70b936 100644
--- a/runtime/mirror/object_reference-inl.h
+++ b/runtime/mirror/object_reference-inl.h
@@ -24,7 +24,11 @@
namespace art {
namespace mirror {
-// References between objects within the managed heap.
+template<bool kPoisonReferences, class MirrorType>
+void ObjectReference<kPoisonReferences, MirrorType>::Assign(ObjPtr<MirrorType> ptr) {
+ Assign(ptr.Ptr());
+}
+
template<class MirrorType>
HeapReference<MirrorType> HeapReference<MirrorType>::FromObjPtr(ObjPtr<MirrorType> ptr) {
return HeapReference<MirrorType>(ptr.Ptr());
diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h
index 573cb30..71f34c6 100644
--- a/runtime/mirror/object_reference.h
+++ b/runtime/mirror/object_reference.h
@@ -42,6 +42,9 @@
reference_ = Compress(other);
}
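+  // Overload taking an ObjPtr; defined in object_reference-inl.h.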
+ void Assign(ObjPtr<MirrorType> ptr)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
void Clear() {
reference_ = 0;
DCHECK(IsNull());
diff --git a/runtime/mirror/reference-inl.h b/runtime/mirror/reference-inl.h
index 039989b..a449b41 100644
--- a/runtime/mirror/reference-inl.h
+++ b/runtime/mirror/reference-inl.h
@@ -19,6 +19,8 @@
#include "reference.h"
+#include "obj_ptr-inl.h"
+
namespace art {
namespace mirror {
@@ -27,6 +29,24 @@
return Class::ComputeClassSize(false, vtable_entries, 2, 0, 0, 0, 0, pointer_size);
}
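+// The referent field is accessed with volatile semantics: the GC's reference
+// processor may read and clear it concurrently.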
+template<bool kTransactionActive>
+inline void Reference::SetReferent(ObjPtr<Object> referent) {
+ SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), referent);
+}
+
+inline void Reference::SetPendingNext(ObjPtr<Reference> pending_next) {
+ if (Runtime::Current()->IsActiveTransaction()) {
+ SetFieldObject<true>(PendingNextOffset(), pending_next);
+ } else {
+ SetFieldObject<false>(PendingNextOffset(), pending_next);
+ }
+}
+
+template<bool kTransactionActive>
+inline void FinalizerReference::SetZombie(ObjPtr<Object> zombie) {
+ return SetFieldObjectVolatile<kTransactionActive>(ZombieOffset(), zombie);
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/reference.cc b/runtime/mirror/reference.cc
index 3c7f8c8..1d0b4c5 100644
--- a/runtime/mirror/reference.cc
+++ b/runtime/mirror/reference.cc
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "reference.h"
+#include "reference-inl.h"
#include "art_method.h"
#include "gc_root-inl.h"
@@ -24,7 +24,7 @@
GcRoot<Class> Reference::java_lang_ref_Reference_;
-void Reference::SetClass(Class* java_lang_ref_Reference) {
+void Reference::SetClass(ObjPtr<Class> java_lang_ref_Reference) {
CHECK(java_lang_ref_Reference_.IsNull());
CHECK(java_lang_ref_Reference != nullptr);
java_lang_ref_Reference_ = GcRoot<Class>(java_lang_ref_Reference);
diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h
index 6a8b32b..f2fa589 100644
--- a/runtime/mirror/reference.h
+++ b/runtime/mirror/reference.h
@@ -20,6 +20,7 @@
#include "base/enums.h"
#include "class.h"
#include "gc_root.h"
+#include "obj_ptr.h"
#include "object.h"
#include "object_callbacks.h"
#include "read_barrier_option.h"
@@ -69,9 +70,7 @@
ReferentOffset());
}
template<bool kTransactionActive>
- void SetReferent(Object* referent) REQUIRES_SHARED(Locks::mutator_lock_) {
- SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), referent);
- }
+ void SetReferent(ObjPtr<Object> referent) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
void ClearReferent() REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), nullptr);
@@ -82,14 +81,7 @@
return GetFieldObject<Reference, kDefaultVerifyFlags, kReadBarrierOption>(PendingNextOffset());
}
- void SetPendingNext(Reference* pending_next)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- if (Runtime::Current()->IsActiveTransaction()) {
- SetFieldObject<true>(PendingNextOffset(), pending_next);
- } else {
- SetFieldObject<false>(PendingNextOffset(), pending_next);
- }
- }
+ void SetPendingNext(ObjPtr<Reference> pending_next) REQUIRES_SHARED(Locks::mutator_lock_);
// Returns true if the reference's pendingNext is null, indicating it is
// okay to process this reference.
@@ -112,7 +104,7 @@
DCHECK(!java_lang_ref_Reference_.IsNull());
return java_lang_ref_Reference_.Read<kReadBarrierOption>();
}
- static void SetClass(Class* klass);
+ static void SetClass(ObjPtr<Class> klass);
static void ResetClass();
static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -144,9 +136,8 @@
}
template<bool kTransactionActive>
- void SetZombie(Object* zombie) REQUIRES_SHARED(Locks::mutator_lock_) {
- return SetFieldObjectVolatile<kTransactionActive>(ZombieOffset(), zombie);
- }
+ void SetZombie(ObjPtr<Object> zombie) REQUIRES_SHARED(Locks::mutator_lock_);
+
Object* GetZombie() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObjectVolatile<Object>(ZombieOffset());
}
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 7563161..ceb37c4 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -354,8 +354,8 @@
ObjPtr<mirror::Constructor> result =
mirror::Class::GetDeclaredConstructorInternal<kRuntimePointerSize, false>(
soa.Self(),
- DecodeClass(soa, javaThis).Ptr(),
- soa.Decode<mirror::ObjectArray<mirror::Class>>(args).Ptr());
+ DecodeClass(soa, javaThis),
+ soa.Decode<mirror::ObjectArray<mirror::Class>>(args));
return soa.AddLocalReference<jobject>(result);
}
diff --git a/runtime/native/java_lang_ref_Reference.cc b/runtime/native/java_lang_ref_Reference.cc
index 95f6d51..bedca10 100644
--- a/runtime/native/java_lang_ref_Reference.cc
+++ b/runtime/native/java_lang_ref_Reference.cc
@@ -28,8 +28,8 @@
static jobject Reference_getReferent(JNIEnv* env, jobject javaThis) {
ScopedFastNativeObjectAccess soa(env);
ObjPtr<mirror::Reference> ref = soa.Decode<mirror::Reference>(javaThis);
- mirror::Object* const referent =
- Runtime::Current()->GetHeap()->GetReferenceProcessor()->GetReferent(soa.Self(), ref.Ptr());
+ ObjPtr<mirror::Object> const referent =
+ Runtime::Current()->GetHeap()->GetReferenceProcessor()->GetReferent(soa.Self(), ref);
return soa.AddLocalReference<jobject>(referent);
}
diff --git a/runtime/native/java_lang_reflect_Executable.cc b/runtime/native/java_lang_reflect_Executable.cc
index e317c25..a0a6a12 100644
--- a/runtime/native/java_lang_reflect_Executable.cc
+++ b/runtime/native/java_lang_reflect_Executable.cc
@@ -38,7 +38,7 @@
ObjPtr<mirror::Class> annotation_array_class =
soa.Decode<mirror::Class>(WellKnownClasses::java_lang_annotation_Annotation__array);
ObjPtr<mirror::ObjectArray<mirror::Object>> empty_array =
- mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), annotation_array_class.Ptr(), 0);
+ mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), annotation_array_class, 0);
return soa.AddLocalReference<jobjectArray>(empty_array);
}
return soa.AddLocalReference<jobjectArray>(annotations::GetAnnotationsForMethod(method));
diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc
index 0bdb5a4..670c4ac 100644
--- a/runtime/native/sun_misc_Unsafe.cc
+++ b/runtime/native/sun_misc_Unsafe.cc
@@ -71,8 +71,8 @@
field_addr);
}
bool success = obj->CasFieldStrongSequentiallyConsistentObject<false>(MemberOffset(offset),
- expectedValue.Ptr(),
- newValue.Ptr());
+ expectedValue,
+ newValue);
return success ? JNI_TRUE : JNI_FALSE;
}
@@ -168,7 +168,7 @@
ObjPtr<mirror::Object> obj = soa.Decode<mirror::Object>(javaObj);
ObjPtr<mirror::Object> newValue = soa.Decode<mirror::Object>(javaNewValue);
// JNI must use non transactional mode.
- obj->SetFieldObject<false>(MemberOffset(offset), newValue.Ptr());
+ obj->SetFieldObject<false>(MemberOffset(offset), newValue);
}
static void Unsafe_putObjectVolatile(JNIEnv* env, jobject, jobject javaObj, jlong offset,
@@ -177,7 +177,7 @@
ObjPtr<mirror::Object> obj = soa.Decode<mirror::Object>(javaObj);
ObjPtr<mirror::Object> newValue = soa.Decode<mirror::Object>(javaNewValue);
// JNI must use non transactional mode.
- obj->SetFieldObjectVolatile<false>(MemberOffset(offset), newValue.Ptr());
+ obj->SetFieldObjectVolatile<false>(MemberOffset(offset), newValue);
}
static void Unsafe_putOrderedObject(JNIEnv* env, jobject, jobject javaObj, jlong offset,
@@ -187,7 +187,7 @@
ObjPtr<mirror::Object> newValue = soa.Decode<mirror::Object>(javaNewValue);
QuasiAtomic::ThreadFenceRelease();
// JNI must use non transactional mode.
- obj->SetFieldObject<false>(MemberOffset(offset), newValue.Ptr());
+ obj->SetFieldObject<false>(MemberOffset(offset), newValue);
}
static jint Unsafe_getArrayBaseOffsetForComponentType(JNIEnv* env, jclass, jobject component_class) {
diff --git a/runtime/openjdkjvmti/OpenjdkJvmTi.cc b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
index 05da585..ac8d5e1 100644
--- a/runtime/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
@@ -476,7 +476,8 @@
}
static jvmtiError GetLoadedClasses(jvmtiEnv* env, jint* class_count_ptr, jclass** classes_ptr) {
- return ERR(NOT_IMPLEMENTED);
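+  // Delegate to HeapUtil, which walks the class linker's class tables.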
+ HeapUtil heap_util(&gObjectTagTable);
+ return heap_util.GetLoadedClasses(env, class_count_ptr, classes_ptr);
}
static jvmtiError GetClassLoaderClasses(jvmtiEnv* env,
diff --git a/runtime/openjdkjvmti/heap.cc b/runtime/openjdkjvmti/heap.cc
index 95d9a1d..859941c 100644
--- a/runtime/openjdkjvmti/heap.cc
+++ b/runtime/openjdkjvmti/heap.cc
@@ -19,7 +19,10 @@
#include "art_jvmti.h"
#include "base/macros.h"
#include "base/mutex.h"
+#include "class_linker.h"
#include "gc/heap.h"
+#include "java_vm_ext.h"
+#include "jni_env_ext.h"
#include "mirror/class.h"
#include "object_callbacks.h"
#include "object_tagging.h"
@@ -163,4 +166,49 @@
return ERR(NONE);
}
+jvmtiError HeapUtil::GetLoadedClasses(jvmtiEnv* env,
+ jint* class_count_ptr,
+ jclass** classes_ptr) {
+ if (class_count_ptr == nullptr || classes_ptr == nullptr) {
+ return ERR(NULL_POINTER);
+ }
+
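+  // Visit all classes known to the class linker. Each class is promoted to a JNI
+  // global reference so that it remains valid after the mutator lock is released;
+  // the caller is responsible for releasing these references.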
+ class ReportClassVisitor : public art::ClassVisitor {
+ public:
+ explicit ReportClassVisitor(art::Thread* self) : self_(self) {}
+
+ bool operator()(art::mirror::Class* klass) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::JNIEnvExt* jni_env = self_->GetJniEnv();
+ classes_.push_back(reinterpret_cast<jclass>(jni_env->vm->AddGlobalRef(self_, klass)));
+ return true;
+ }
+
+ art::Thread* self_;
+ std::vector<jclass> classes_;
+ };
+
+ art::Thread* self = art::Thread::Current();
+ ReportClassVisitor rcv(self);
+ {
+ art::ScopedObjectAccess soa(self);
+ art::Runtime::Current()->GetClassLinker()->VisitClasses(&rcv);
+ }
+
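+  // Return the array through env->Allocate() so the caller can release it with
+  // Deallocate(), following the usual JVMTI memory-management convention.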
+ size_t size = rcv.classes_.size();
+ jclass* classes = nullptr;
+ jvmtiError alloc_ret = env->Allocate(static_cast<jlong>(size * sizeof(jclass)),
+ reinterpret_cast<unsigned char**>(&classes));
+ if (alloc_ret != ERR(NONE)) {
+ return alloc_ret;
+ }
+
+ for (size_t i = 0; i < size; ++i) {
+ classes[i] = rcv.classes_[i];
+ }
+ *classes_ptr = classes;
+ *class_count_ptr = static_cast<jint>(size);
+
+ return ERR(NONE);
+}
+
} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/heap.h b/runtime/openjdkjvmti/heap.h
index fb9a216..b6becb9 100644
--- a/runtime/openjdkjvmti/heap.h
+++ b/runtime/openjdkjvmti/heap.h
@@ -28,6 +28,8 @@
explicit HeapUtil(ObjectTagTable* tags) : tags_(tags) {
}
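+  // Fills an env-allocated array with references to every class currently known
+  // to the class linker.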
+ jvmtiError GetLoadedClasses(jvmtiEnv* env, jint* class_count_ptr, jclass** classes_ptr);
+
jvmtiError IterateThroughHeap(jvmtiEnv* env,
jint heap_filter,
jclass klass,
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 098eb03..72db827 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -772,7 +772,7 @@
}
return false;
}
- unboxed_value->SetL(o.Ptr());
+ unboxed_value->SetL(o);
return true;
}
if (UNLIKELY(dst_class->GetPrimitiveType() == Primitive::kPrimVoid)) {
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 9c0d2db..fdd8c4a 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1165,10 +1165,6 @@
ScopedTrace trace2("AddImageStringsToTable");
GetInternTable()->AddImagesStringsToTable(heap_->GetBootImageSpaces());
}
- {
- ScopedTrace trace2("MoveImageClassesToClassTable");
- GetClassLinker()->AddBootImageClassesToClassTable();
- }
} else {
std::vector<std::string> dex_filenames;
Split(boot_class_path_string_, ':', &dex_filenames);
diff --git a/runtime/scoped_thread_state_change-inl.h b/runtime/scoped_thread_state_change-inl.h
index 1ebfd30..bde23c8 100644
--- a/runtime/scoped_thread_state_change-inl.h
+++ b/runtime/scoped_thread_state_change-inl.h
@@ -83,7 +83,7 @@
inline ObjPtr<T, kPoison> ScopedObjectAccessAlreadyRunnable::Decode(jobject obj) const {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
- return down_cast<T*>(Self()->DecodeJObject(obj).Ptr());
+ return ObjPtr<T, kPoison>::DownCast(Self()->DecodeJObject(obj));
}
inline ArtField* ScopedObjectAccessAlreadyRunnable::DecodeField(jfieldID fid) const {
diff --git a/test/907-get-loaded-classes/build b/test/907-get-loaded-classes/build
new file mode 100755
index 0000000..898e2e5
--- /dev/null
+++ b/test/907-get-loaded-classes/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/907-get-loaded-classes/expected.txt b/test/907-get-loaded-classes/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/907-get-loaded-classes/expected.txt
diff --git a/test/907-get-loaded-classes/get_loaded_classes.cc b/test/907-get-loaded-classes/get_loaded_classes.cc
new file mode 100644
index 0000000..e752bcb
--- /dev/null
+++ b/test/907-get-loaded-classes/get_loaded_classes.cc
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "get_loaded_classes.h"
+
+#include <iostream>
+#include <pthread.h>
+#include <stdio.h>
+#include <vector>
+
+#include "base/macros.h"
+#include "jni.h"
+#include "openjdkjvmti/jvmti.h"
+#include "ScopedLocalRef.h"
+#include "ScopedUtfChars.h"
+
+#include "ti-agent/common_load.h"
+
+namespace art {
+namespace Test907GetLoadedClasses {
+
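+// Calls java.lang.Class.getName() through JNI to get a printable name for a class.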
+static jstring GetClassName(JNIEnv* jni_env, jclass cls) {
+ ScopedLocalRef<jclass> class_class(jni_env, jni_env->GetObjectClass(cls));
+ jmethodID mid = jni_env->GetMethodID(class_class.get(), "getName", "()Ljava/lang/String;");
+ return reinterpret_cast<jstring>(jni_env->CallObjectMethod(cls, mid));
+}
+
+extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getLoadedClasses(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED) {
+ jint count = -1;
+ jclass* classes = nullptr;
+ jvmtiError result = jvmti_env->GetLoadedClasses(&count, &classes);
+ if (result != JVMTI_ERROR_NONE) {
+ char* err;
+ jvmti_env->GetErrorName(result, &err);
+    printf("Failure running GetLoadedClasses: %s\n", err);
+    jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err));
+    return nullptr;
+ }
+
+ ScopedLocalRef<jclass> obj_class(env, env->FindClass("java/lang/String"));
+ if (obj_class.get() == nullptr) {
+ return nullptr;
+ }
+
+ jobjectArray ret = env->NewObjectArray(count, obj_class.get(), nullptr);
+ if (ret == nullptr) {
+ return ret;
+ }
+
+ for (size_t i = 0; i < static_cast<size_t>(count); ++i) {
+ jstring class_name = GetClassName(env, classes[i]);
+ env->SetObjectArrayElement(ret, static_cast<jint>(i), class_name);
+ env->DeleteLocalRef(class_name);
+ }
+
+ // Need to:
+  // 1) Free the global references.
+ // 2) Deallocate.
+ for (size_t i = 0; i < static_cast<size_t>(count); ++i) {
+ env->DeleteGlobalRef(classes[i]);
+ }
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(classes));
+
+ return ret;
+}
+
+} // namespace Test907GetLoadedClasses
+} // namespace art
diff --git a/test/907-get-loaded-classes/get_loaded_classes.h b/test/907-get-loaded-classes/get_loaded_classes.h
new file mode 100644
index 0000000..4d27f89
--- /dev/null
+++ b/test/907-get-loaded-classes/get_loaded_classes.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_TEST_907_GET_LOADED_CLASSES_GET_LOADED_CLASSES_H_
+#define ART_TEST_907_GET_LOADED_CLASSES_GET_LOADED_CLASSES_H_
+
+#include <jni.h>
+
+namespace art {
+namespace Test907GetLoadedClasses {
+
+jint OnLoad(JavaVM* vm, char* options, void* reserved);
+
+} // namespace Test907GetLoadedClasses
+} // namespace art
+
+#endif // ART_TEST_907_GET_LOADED_CLASSES_GET_LOADED_CLASSES_H_
diff --git a/test/907-get-loaded-classes/info.txt b/test/907-get-loaded-classes/info.txt
new file mode 100644
index 0000000..875a5f6
--- /dev/null
+++ b/test/907-get-loaded-classes/info.txt
@@ -0,0 +1 @@
+Tests the GetLoadedClasses function in the jvmti plugin.
diff --git a/test/907-get-loaded-classes/run b/test/907-get-loaded-classes/run
new file mode 100755
index 0000000..3e135a3
--- /dev/null
+++ b/test/907-get-loaded-classes/run
@@ -0,0 +1,43 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
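+# libtiagentd and libopenjdkjvmtid are the debug builds of the agent and jvmti
+# plugin; when the test runs with -O (optimized), use the release builds instead.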
+plugin=libopenjdkjvmtid.so
+agent=libtiagentd.so
+lib=tiagentd
+if [[ "$@" == *"-O"* ]]; then
+ agent=libtiagent.so
+ plugin=libopenjdkjvmti.so
+ lib=tiagent
+fi
+
+if [[ "$@" == *"--jvm"* ]]; then
+ arg="jvm"
+else
+ arg="art"
+fi
+
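+# Run with a debuggable runtime: add -Xcompiler-option --debuggable unless the
+# caller already passed --debuggable.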
+if [[ "$@" != *"--debuggable"* ]]; then
+ other_args=" -Xcompiler-option --debuggable "
+else
+ other_args=""
+fi
+
+./default-run "$@" --experimental agents \
+ --experimental runtime-plugins \
+                   --runtime-option -agentpath:${agent}=907-get-loaded-classes,${arg} \
+ --android-runtime-option -Xplugin:${plugin} \
+ ${other_args} \
+ --args ${lib}
diff --git a/test/907-get-loaded-classes/src/Main.java b/test/907-get-loaded-classes/src/Main.java
new file mode 100644
index 0000000..468d037
--- /dev/null
+++ b/test/907-get-loaded-classes/src/Main.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[1]);
+
+ doTest();
+ }
+
+ public static void doTest() throws Exception {
+ // Ensure some classes are loaded.
+ A a = new A();
+ B b = new B();
+ A[] aArray = new A[5];
+
+ String[] classes = getLoadedClasses();
+ HashSet<String> classesSet = new HashSet<>(Arrays.asList(classes));
+
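+    // Note: array classes are reported in binary-name form, e.g. "[LMain$A;".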
+ String[] shouldBeLoaded = new String[] {
+ "java.lang.Object", "java.lang.Class", "java.lang.String", "Main$A", "Main$B", "[LMain$A;"
+ };
+
+ boolean error = false;
+ for (String s : shouldBeLoaded) {
+ if (!classesSet.contains(s)) {
+ System.out.println("Did not find " + s);
+ error = true;
+ }
+ }
+
+ if (error) {
+ System.out.println(Arrays.toString(classes));
+ }
+ }
+
+ static class A {
+ }
+
+ static class B {
+ }
+
+ private static native String[] getLoadedClasses();
+}
diff --git a/test/Android.bp b/test/Android.bp
index 45673f5..8496ffd 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -249,6 +249,7 @@
"904-object-allocation/tracking.cc",
"905-object-free/tracking_free.cc",
"906-iterate-heap/iterate_heap.cc",
+ "907-get-loaded-classes/get_loaded_classes.cc",
],
shared_libs: [
"libbase",
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 64ff5ba..7a5dab0 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -263,7 +263,7 @@
# 147-stripped-dex-fallback isn't supported on device because --strip-dex
# requires the zip command.
# 569-checker-pattern-replacement tests behaviour present only on host.
-# 90{2,3,4,5,6} are not supported in current form due to linker
+# 90{2,3,4,5,6,7} are not supported in current form due to linker
# restrictions. See b/31681198
TEST_ART_BROKEN_TARGET_TESTS := \
147-stripped-dex-fallback \
@@ -273,6 +273,7 @@
904-object-allocation \
905-object-free \
906-iterate-heap \
+ 907-get-loaded-classes \
ifneq (,$(filter target,$(TARGET_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \