Replace NULL with nullptr
Also fixed some lines that were too long, and a few other minor
details.
Change-Id: I6efba5fb6e03eb5d0a300fddb2a75bf8e2f175cb
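For background on the change itself: NULL is an integer constant (typically 0 or 0L), so it behaves as an integer during overload resolution, while nullptr has its own type, std::nullptr_t, which converts only to pointer types. A minimal standalone illustration (the Log overloads are invented for this sketch):

#include <cstddef>

void Log(int) {}          // Overload taking a priority.
void Log(const char*) {}  // Overload taking a message.

int main() {
  // A plain 0 -- which is what NULL usually expands to -- quietly selects
  // the integer overload:
  Log(0);        // Calls Log(int).
  Log(nullptr);  // Unambiguously calls Log(const char*).
  return 0;
}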
diff --git a/runtime/base/hex_dump.cc b/runtime/base/hex_dump.cc
index 5423ff0..bce6b53 100644
--- a/runtime/base/hex_dump.cc
+++ b/runtime/base/hex_dump.cc
@@ -27,7 +27,7 @@
return;
}
- if (address_ == NULL) {
+ if (address_ == nullptr) {
os << "00000000:";
return;
}
diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc
index 0764b87..0ae7863 100644
--- a/runtime/base/logging.cc
+++ b/runtime/base/logging.cc
@@ -91,7 +91,7 @@
gProgramInvocationShortName.reset(new std::string((last_slash != nullptr) ? last_slash + 1
: argv[0]));
} else {
- // TODO: fall back to /proc/self/cmdline when argv is NULL on Linux.
+ // TODO: fall back to /proc/self/cmdline when argv is null on Linux.
gCmdLine.reset(new std::string("<unset>"));
}
const char* tags = getenv("ANDROID_LOG_TAGS");
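A hedged sketch of the TODO above (not part of this patch; the helper name is invented): on Linux, /proc/self/cmdline holds the process arguments separated by NUL bytes, so a fallback could read and re-delimit it.

#include <fstream>
#include <iterator>
#include <string>

static std::string ReadCmdLineFromProc() {
  std::ifstream file("/proc/self/cmdline", std::ios::binary);
  std::string cmdline((std::istreambuf_iterator<char>(file)),
                      std::istreambuf_iterator<char>());
  for (char& c : cmdline) {
    if (c == '\0') {
      c = ' ';  // Replace the NUL separators so the result prints cleanly.
    }
  }
  return cmdline;
}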
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index 014f4ab..8b34374 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -72,7 +72,7 @@
// This can be used to reveal or conceal logs with specific tags.
extern void InitLogging(char* argv[]);
-// Returns the command line used to invoke the current tool or nullptr if InitLogging hasn't been
+// Returns the command line used to invoke the current tool or null if InitLogging hasn't been
// performed.
extern const char* GetCmdLine();
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index cb69817..a727992 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -39,13 +39,14 @@
namespace art {
#if ART_USE_FUTEXES
-static inline int futex(volatile int *uaddr, int op, int val, const struct timespec *timeout, volatile int *uaddr2, int val3) {
+static inline int futex(volatile int *uaddr, int op, int val, const struct timespec *timeout,
+ volatile int *uaddr2, int val3) {
return syscall(SYS_futex, uaddr, op, val, timeout, uaddr2, val3);
}
#endif // ART_USE_FUTEXES
static inline uint64_t SafeGetTid(const Thread* self) {
- if (self != NULL) {
+ if (self != nullptr) {
return static_cast<uint64_t>(self->GetTid());
} else {
return static_cast<uint64_t>(GetTid());
@@ -77,7 +78,7 @@
}
inline void BaseMutex::RegisterAsLocked(Thread* self) {
- if (UNLIKELY(self == NULL)) {
+ if (UNLIKELY(self == nullptr)) {
CheckUnattachedThread(level_);
return;
}
@@ -86,7 +87,7 @@
bool bad_mutexes_held = false;
for (int i = level_; i >= 0; --i) {
BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
- if (UNLIKELY(held_mutex != NULL)) {
+ if (UNLIKELY(held_mutex != nullptr)) {
LOG(ERROR) << "Lock level violation: holding \"" << held_mutex->name_ << "\" "
<< "(level " << LockLevel(i) << " - " << i
<< ") while locking \"" << name_ << "\" "
@@ -109,7 +110,7 @@
}
inline void BaseMutex::RegisterAsUnlocked(Thread* self) {
- if (UNLIKELY(self == NULL)) {
+ if (UNLIKELY(self == nullptr)) {
CheckUnattachedThread(level_);
return;
}
@@ -117,12 +118,12 @@
if (kDebugLocking && gAborting == 0) { // Avoid recursive aborts.
CHECK(self->GetHeldMutex(level_) == this) << "Unlocking on unacquired mutex: " << name_;
}
- self->SetHeldMutex(level_, NULL);
+ self->SetHeldMutex(level_, nullptr);
}
}
inline void ReaderWriterMutex::SharedLock(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
#if ART_USE_FUTEXES
bool done = false;
do {
@@ -143,7 +144,7 @@
}
inline void ReaderWriterMutex::SharedUnlock(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
DCHECK(exclusive_owner_ == 0U || exclusive_owner_ == -1U);
AssertSharedHeld(self);
RegisterAsUnlocked(self);
@@ -161,7 +162,7 @@
if (num_pending_writers_.LoadRelaxed() > 0 ||
num_pending_readers_.LoadRelaxed() > 0) {
// Wake any exclusive waiters as there are now no readers.
- futex(state_.Address(), FUTEX_WAKE, -1, NULL, NULL, 0);
+ futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
}
}
} else {
@@ -174,11 +175,11 @@
}
inline bool Mutex::IsExclusiveHeld(const Thread* self) const {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
if (kDebugLocking) {
// Sanity debug check that if we think it is locked, we have it in our held mutexes.
- if (result && self != NULL && level_ != kMonitorLock && !gAborting) {
+ if (result && self != nullptr && level_ != kMonitorLock && !gAborting) {
CHECK_EQ(self->GetHeldMutex(level_), this);
}
}
@@ -190,11 +191,11 @@
}
inline bool ReaderWriterMutex::IsExclusiveHeld(const Thread* self) const {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
if (kDebugLocking) {
// Sanity check that if the pthread thinks we own the lock, the Thread agrees.
- if (self != NULL && result) {
+ if (self != nullptr && result) {
CHECK_EQ(self->GetHeldMutex(level_), this);
}
}
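For context on the futex() wrapper reformatted above, a standalone sketch (illustrative, not ART code) of the basic wait/wake protocol it serves: FUTEX_WAIT puts the caller to sleep only if the word still holds the expected value, so a wakeup racing with the check cannot be lost.

#include <ctime>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static int futex(volatile int* uaddr, int op, int val, const struct timespec* timeout,
                 volatile int* uaddr2, int val3) {
  return syscall(SYS_futex, uaddr, op, val, timeout, uaddr2, val3);
}

static volatile int gate = 1;  // 1 = closed, 0 = open.

static void WaitForGate() {
  while (gate != 0) {
    // Sleeps only while gate is still 1; if another thread already stored 0,
    // this returns immediately instead of missing the wakeup.
    futex(&gate, FUTEX_WAIT, 1, nullptr, nullptr, 0);
  }
}

static void OpenGate() {
  gate = 0;
  futex(&gate, FUTEX_WAKE, -1, nullptr, nullptr, 0);  // Wake waiters, as the code above does.
}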
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 13dcb8c..99c7246 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -67,7 +67,7 @@
Atomic<const BaseMutex*> all_mutexes_guard;
// All created mutexes guarded by all_mutexes_guard.
std::set<BaseMutex*>* all_mutexes;
- AllMutexData() : all_mutexes(NULL) {}
+ AllMutexData() : all_mutexes(nullptr) {}
};
static struct AllMutexData gAllMutexData[kAllMutexDataSize];
@@ -114,7 +114,7 @@
class ScopedContentionRecorder FINAL : public ValueObject {
public:
ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
- : mutex_(kLogLockContentions ? mutex : NULL),
+ : mutex_(kLogLockContentions ? mutex : nullptr),
blocked_tid_(kLogLockContentions ? blocked_tid : 0),
owner_tid_(kLogLockContentions ? owner_tid : 0),
start_nano_time_(kLogLockContentions ? NanoTime() : 0) {
@@ -144,7 +144,7 @@
if (kLogLockContentions) {
ScopedAllMutexesLock mu(this);
std::set<BaseMutex*>** all_mutexes_ptr = &gAllMutexData->all_mutexes;
- if (*all_mutexes_ptr == NULL) {
+ if (*all_mutexes_ptr == nullptr) {
// We leak the global set of all mutexes to avoid ordering issues in global variable
// construction/destruction.
*all_mutexes_ptr = new std::set<BaseMutex*>();
@@ -165,7 +165,7 @@
os << "Mutex logging:\n";
ScopedAllMutexesLock mu(reinterpret_cast<const BaseMutex*>(-1));
std::set<BaseMutex*>* all_mutexes = gAllMutexData->all_mutexes;
- if (all_mutexes == NULL) {
+ if (all_mutexes == nullptr) {
// No mutexes have been created yet at startup.
return;
}
@@ -190,7 +190,7 @@
}
void BaseMutex::CheckSafeToWait(Thread* self) {
- if (self == NULL) {
+ if (self == nullptr) {
CheckUnattachedThread(level_);
return;
}
@@ -202,7 +202,7 @@
if (i != level_) {
BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
// We expect waits to happen while holding the thread list suspend thread lock.
- if (held_mutex != NULL) {
+ if (held_mutex != nullptr) {
LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
<< "(level " << LockLevel(i) << ") while performing wait on "
<< "\"" << name_ << "\" (level " << level_ << ")";
@@ -354,7 +354,7 @@
}
void Mutex::ExclusiveLock(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
if (kDebugLocking && !recursive_) {
AssertNotHeld(self);
}
@@ -370,7 +370,7 @@
// Failed to acquire, hang up.
ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
num_contenders_++;
- if (futex(state_.Address(), FUTEX_WAIT, 1, NULL, NULL, 0) != 0) {
+ if (futex(state_.Address(), FUTEX_WAIT, 1, nullptr, nullptr, 0) != 0) {
// EAGAIN and EINTR both indicate a spurious failure; try again from the beginning.
// We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
if ((errno != EAGAIN) && (errno != EINTR)) {
@@ -397,7 +397,7 @@
}
bool Mutex::ExclusiveTryLock(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
if (kDebugLocking && !recursive_) {
AssertNotHeld(self);
}
@@ -474,7 +474,7 @@
if (LIKELY(done)) { // Spurious fail?
// Wake a contender.
if (UNLIKELY(num_contenders_.LoadRelaxed() > 0)) {
- futex(state_.Address(), FUTEX_WAKE, 1, NULL, NULL, 0);
+ futex(state_.Address(), FUTEX_WAKE, 1, nullptr, nullptr, 0);
}
}
} else {
@@ -537,14 +537,14 @@
// TODO: should we just not log at all if shutting down? this could be the logging mutex!
MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
Runtime* runtime = Runtime::Current();
- bool shutting_down = runtime == NULL || runtime->IsShuttingDownLocked();
+ bool shutting_down = runtime == nullptr || runtime->IsShuttingDownLocked();
PLOG(shutting_down ? WARNING : FATAL) << "pthread_rwlock_destroy failed for " << name_;
}
#endif
}
void ReaderWriterMutex::ExclusiveLock(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
AssertNotExclusiveHeld(self);
#if ART_USE_FUTEXES
bool done = false;
@@ -557,7 +557,7 @@
// Failed to acquire, hang up.
ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
++num_pending_writers_;
- if (futex(state_.Address(), FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
+ if (futex(state_.Address(), FUTEX_WAIT, cur_state, nullptr, nullptr, 0) != 0) {
// EAGAIN and EINTR both indicate a spurious failure; try again from the beginning.
// We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
if ((errno != EAGAIN) && (errno != EINTR)) {
@@ -578,7 +578,7 @@
}
void ReaderWriterMutex::ExclusiveUnlock(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
AssertExclusiveHeld(self);
RegisterAsUnlocked(self);
DCHECK_NE(exclusive_owner_, 0U);
@@ -598,7 +598,7 @@
// Wake any waiters.
if (UNLIKELY(num_pending_readers_.LoadRelaxed() > 0 ||
num_pending_writers_.LoadRelaxed() > 0)) {
- futex(state_.Address(), FUTEX_WAKE, -1, NULL, NULL, 0);
+ futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
}
}
} else {
@@ -613,7 +613,7 @@
#if HAVE_TIMED_RWLOCK
bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
#if ART_USE_FUTEXES
bool done = false;
timespec end_abs_ts;
@@ -633,7 +633,7 @@
}
ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
++num_pending_writers_;
- if (futex(state_.Address(), FUTEX_WAIT, cur_state, &rel_ts, NULL, 0) != 0) {
+ if (futex(state_.Address(), FUTEX_WAIT, cur_state, &rel_ts, nullptr, 0) != 0) {
if (errno == ETIMEDOUT) {
--num_pending_writers_;
return false; // Timed out.
@@ -671,7 +671,7 @@
// Owner holds it exclusively, hang up.
ScopedContentionRecorder scr(this, GetExclusiveOwnerTid(), SafeGetTid(self));
++num_pending_readers_;
- if (futex(state_.Address(), FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
+ if (futex(state_.Address(), FUTEX_WAIT, cur_state, nullptr, nullptr, 0) != 0) {
if (errno != EAGAIN) {
PLOG(FATAL) << "futex wait failed for " << name_;
}
@@ -681,7 +681,7 @@
#endif
bool ReaderWriterMutex::SharedTryLock(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
#if ART_USE_FUTEXES
bool done = false;
do {
@@ -710,9 +710,9 @@
}
bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
bool result;
- if (UNLIKELY(self == NULL)) { // Handle unattached threads.
+ if (UNLIKELY(self == nullptr)) { // Handle unattached threads.
result = IsExclusiveHeld(self); // TODO: a better best effort here.
} else {
result = (self->GetHeldMutex(level_) == this);
@@ -770,14 +770,14 @@
errno = rc;
MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
Runtime* runtime = Runtime::Current();
- bool shutting_down = (runtime == NULL) || runtime->IsShuttingDownLocked();
+ bool shutting_down = (runtime == nullptr) || runtime->IsShuttingDownLocked();
PLOG(shutting_down ? WARNING : FATAL) << "pthread_cond_destroy failed for " << name_;
}
#endif
}
void ConditionVariable::Broadcast(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
// TODO: enable below, there's a race in thread creation that causes false failures currently.
// guard_.AssertExclusiveHeld(self);
DCHECK_EQ(guard_.GetExclusiveOwnerTid(), SafeGetTid(self));
@@ -805,14 +805,14 @@
}
void ConditionVariable::Signal(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
guard_.AssertExclusiveHeld(self);
#if ART_USE_FUTEXES
if (num_waiters_ > 0) {
sequence_++; // Indicate a signal occurred.
// Futex wake one waiter, who will then come in and contend on the mutex. It'd be nice to requeue
// them to avoid this; however, requeueing can only move all waiters.
- int num_woken = futex(sequence_.Address(), FUTEX_WAKE, 1, NULL, NULL, 0);
+ int num_woken = futex(sequence_.Address(), FUTEX_WAKE, 1, nullptr, nullptr, 0);
// Check that something was woken, or else we changed sequence_ before they had a chance to wait.
CHECK((num_woken == 0) || (num_woken == 1));
}
@@ -827,7 +827,7 @@
}
void ConditionVariable::WaitHoldingLocks(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
guard_.AssertExclusiveHeld(self);
unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
@@ -837,7 +837,7 @@
guard_.recursion_count_ = 1;
int32_t cur_sequence = sequence_.LoadRelaxed();
guard_.ExclusiveUnlock(self);
- if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, NULL, NULL, 0) != 0) {
+ if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, nullptr, nullptr, 0) != 0) {
// Futex failed, check it is an expected error.
// EAGAIN == EWOULDBLOCK, so we let the caller try again.
// EINTR implies a signal was sent to this thread.
@@ -862,7 +862,7 @@
}
bool ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
- DCHECK(self == NULL || self == Thread::Current());
+ DCHECK(self == nullptr || self == Thread::Current());
bool timed_out = false;
guard_.AssertExclusiveHeld(self);
guard_.CheckSafeToWait(self);
@@ -876,7 +876,7 @@
guard_.recursion_count_ = 1;
int32_t cur_sequence = sequence_.LoadRelaxed();
guard_.ExclusiveUnlock(self);
- if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, NULL, 0) != 0) {
+ if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, nullptr, 0) != 0) {
if (errno == ETIMEDOUT) {
// Timed out; we're done.
timed_out = true;
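The ExclusiveLock/ExclusiveUnlock pairs above reduce to a compare-and-swap loop with a futex sleep on contention. A condensed, hedged sketch of that pattern (standalone, using std::atomic rather than ART's Atomic, and omitting the contention bookkeeping):

#include <atomic>
#include <cerrno>
#include <cstdint>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static std::atomic<int32_t> state(0);  // 0 = free, 1 = held.

static void LockSketch() {
  bool done = false;
  do {
    int32_t expected = 0;
    if (state.compare_exchange_weak(expected, 1)) {
      done = true;  // Acquired.
    } else {
      // Contended: sleep until the state word stops being 1. The cast is the
      // conventional futex idiom for an atomic with plain-int representation.
      if (syscall(SYS_futex, reinterpret_cast<int32_t*>(&state), FUTEX_WAIT, 1,
                  nullptr, nullptr, 0) != 0 &&
          errno != EAGAIN && errno != EINTR) {
        // Unexpected failure; the real code PLOG(FATAL)s here.
      }
    }
  } while (!done);
}

static void UnlockSketch() {
  state.store(0);
  // Wake one contender, mirroring the FUTEX_WAKE in Mutex::ExclusiveUnlock.
  syscall(SYS_futex, reinterpret_cast<int32_t*>(&state), FUTEX_WAKE, 1,
          nullptr, nullptr, 0);
}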
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 6e4b96c..f2be85e 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -344,8 +344,8 @@
// Assert the current thread has shared access to the ReaderWriterMutex.
void AssertSharedHeld(const Thread* self) {
if (kDebugLocking && (gAborting == 0)) {
- // TODO: we can only assert this well when self != NULL.
- CHECK(IsSharedHeld(self) || self == NULL) << *this;
+ // TODO: we can only assert this well when self != null.
+ CHECK(IsSharedHeld(self) || self == nullptr) << *this;
}
}
void AssertReaderHeld(const Thread* self) { AssertSharedHeld(self); }
diff --git a/runtime/base/mutex_test.cc b/runtime/base/mutex_test.cc
index 289d3ef..3750c81 100644
--- a/runtime/base/mutex_test.cc
+++ b/runtime/base/mutex_test.cc
@@ -106,7 +106,7 @@
state->mu.Lock(Thread::Current());
state->cv.Signal(Thread::Current());
state->mu.Unlock(Thread::Current());
- return NULL;
+ return nullptr;
}
Mutex mu;
@@ -120,14 +120,15 @@
state.mu.Lock(Thread::Current());
pthread_t pthread;
- int pthread_create_result = pthread_create(&pthread, NULL, RecursiveLockWait::Callback, &state);
+ int pthread_create_result = pthread_create(&pthread, nullptr, RecursiveLockWait::Callback,
+ &state);
ASSERT_EQ(0, pthread_create_result);
state.cv.Wait(Thread::Current());
state.mu.Unlock(Thread::Current());
state.mu.Unlock(Thread::Current());
- EXPECT_EQ(pthread_join(pthread, NULL), 0);
+ EXPECT_EQ(pthread_join(pthread, nullptr), 0);
}
// This ensures we don't hang when waiting on a recursively locked mutex,
diff --git a/runtime/base/scoped_flock.cc b/runtime/base/scoped_flock.cc
index 0e93eee..71e0590 100644
--- a/runtime/base/scoped_flock.cc
+++ b/runtime/base/scoped_flock.cc
@@ -31,7 +31,7 @@
UNUSED(file_->FlushCloseOrErase()); // Ignore result.
}
file_.reset(OS::OpenFileWithFlags(filename, O_CREAT | O_RDWR));
- if (file_.get() == NULL) {
+ if (file_.get() == nullptr) {
*error_msg = StringPrintf("Failed to open file '%s': %s", filename, strerror(errno));
return false;
}
@@ -71,14 +71,15 @@
}
if (0 != TEMP_FAILURE_RETRY(flock(file_->Fd(), LOCK_EX))) {
file_.reset();
- *error_msg = StringPrintf("Failed to lock file '%s': %s", file->GetPath().c_str(), strerror(errno));
+ *error_msg = StringPrintf(
+ "Failed to lock file '%s': %s", file->GetPath().c_str(), strerror(errno));
return false;
}
return true;
}
File* ScopedFlock::GetFile() {
- CHECK(file_.get() != NULL);
+ CHECK(file_.get() != nullptr);
return file_.get();
}
@@ -89,7 +90,7 @@
ScopedFlock::ScopedFlock() { }
ScopedFlock::~ScopedFlock() {
- if (file_.get() != NULL) {
+ if (file_.get() != nullptr) {
int flock_result = TEMP_FAILURE_RETRY(flock(file_->Fd(), LOCK_UN));
CHECK_EQ(0, flock_result);
if (file_->FlushCloseOrErase() != 0) {
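ScopedFlock is a RAII wrapper around the flock() primitive used above. For reference, a bare-bones sketch of that primitive on its own (illustrative; it omits ScopedFlock's retry and reopen logic):

#include <fcntl.h>
#include <sys/file.h>
#include <unistd.h>

static bool WithExclusiveFileLock(const char* path) {
  int fd = open(path, O_CREAT | O_RDWR, 0644);
  if (fd == -1) {
    return false;
  }
  if (flock(fd, LOCK_EX) != 0) {  // Blocks until the exclusive lock is held.
    close(fd);
    return false;
  }
  // ... critical section: the file is exclusively locked here ...
  flock(fd, LOCK_UN);  // Explicit unlock; close(fd) would also release it.
  close(fd);
  return true;
}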
diff --git a/runtime/base/stl_util.h b/runtime/base/stl_util.h
index 3c5565c..901f25f 100644
--- a/runtime/base/stl_util.h
+++ b/runtime/base/stl_util.h
@@ -54,28 +54,30 @@
// hash_set, or any other STL container which defines sensible begin(), end(),
// and clear() methods.
//
-// If container is NULL, this function is a no-op.
+// If container is null, this function is a no-op.
//
// As an alternative to calling STLDeleteElements() directly, consider
// using a container of std::unique_ptr, which ensures that your container's
// elements are deleted when the container goes out of scope.
template <class T>
void STLDeleteElements(T *container) {
- if (!container) return;
- STLDeleteContainerPointers(container->begin(), container->end());
- container->clear();
+ if (container != nullptr) {
+ STLDeleteContainerPointers(container->begin(), container->end());
+ container->clear();
+ }
}
// Given an STL container consisting of (key, value) pairs, STLDeleteValues
// deletes all the "value" components and clears the container. Does nothing
-// in the case it's given a NULL pointer.
+// in the case it's given a null pointer.
template <class T>
void STLDeleteValues(T *v) {
- if (!v) return;
- for (typename T::iterator i = v->begin(); i != v->end(); ++i) {
- delete i->second;
+ if (v != nullptr) {
+ for (typename T::iterator i = v->begin(); i != v->end(); ++i) {
+ delete i->second;
+ }
+ v->clear();
}
- v->clear();
}
template <class T>
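To make the two contracts above concrete, a short usage sketch (assuming these helpers are in scope via stl_util.h):

#include <map>
#include <vector>

static void StlUtilExample() {
  std::vector<int*> owned;
  owned.push_back(new int(1));
  owned.push_back(new int(2));
  STLDeleteElements(&owned);  // Deletes both ints, then clears the vector.

  std::map<int, int*> table;
  table[7] = new int(3);
  STLDeleteValues(&table);    // Deletes the mapped values, then clears.

  std::vector<int*>* none = nullptr;
  STLDeleteElements(none);    // Null container: documented no-op.
}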
diff --git a/runtime/base/variant_map.h b/runtime/base/variant_map.h
index 8655a9e..1d7596a 100644
--- a/runtime/base/variant_map.h
+++ b/runtime/base/variant_map.h
@@ -31,7 +31,7 @@
//
// struct VariantMap {
// template <typename TValue>
-// TValue* Get(Key<T> key); // nullptr if the value was never set, otherwise the value.
+// TValue* Get(Key<T> key); // null if the value was never set, otherwise the value.
//
// template <typename TValue>
// void Set(Key<T> key, TValue value);
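A hedged usage sketch based only on the interface documented above; the concrete map and key names are invented for illustration:

// FruitMap fruit_map;                          // Some concrete VariantMap.
// fruit_map.Set(FruitMap::Apple, 123);         // Typed key, typed value.
// int* apple = fruit_map.Get(FruitMap::Apple); // null if Apple was never set.
// if (apple != nullptr) {
//   // *apple == 123 here.
// }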
diff --git a/runtime/base/variant_map_test.cc b/runtime/base/variant_map_test.cc
index f306a48..ccb22eb 100644
--- a/runtime/base/variant_map_test.cc
+++ b/runtime/base/variant_map_test.cc
@@ -18,7 +18,7 @@
#include "gtest/gtest.h"
#define EXPECT_NULL(expected) EXPECT_EQ(reinterpret_cast<const void*>(expected), \
- reinterpret_cast<void*>(NULL));
+ static_cast<void*>(nullptr));
namespace art {
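A small usage sketch of the EXPECT_NULL macro defined above (the test body is invented for illustration):

TEST(ExpectNullExample, NullPointerPasses) {
  const char* name = nullptr;
  EXPECT_NULL(name);  // Both sides compare equal as null void pointers.
}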