/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_INL_H_
#define ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_INL_H_

#include "rosalloc.h"

namespace art {
namespace gc {
namespace allocator {

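// Whether freshly allocated memory should be debug-checked to be fully
// zeroed. The check is disabled when running on a memory tool (e.g. ASan or
// Valgrind), since such tools may pad or poison allocations.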
inline ALWAYS_INLINE bool RosAlloc::ShouldCheckZeroMemory() {
  return kCheckZeroMemory && !is_running_on_memory_tool_;
}

template<bool kThreadSafe>
inline ALWAYS_INLINE void* RosAlloc::Alloc(Thread* self, size_t size, size_t* bytes_allocated,
                                           size_t* usable_size,
                                           size_t* bytes_tl_bulk_allocated) {
  if (UNLIKELY(size > kLargeSizeThreshold)) {
    return AllocLargeObject(self, size, bytes_allocated, usable_size,
                            bytes_tl_bulk_allocated);
  }
  void* m;
  if (kThreadSafe) {
    m = AllocFromRun(self, size, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
  } else {
    m = AllocFromRunThreadUnsafe(self, size, bytes_allocated, usable_size,
                                 bytes_tl_bulk_allocated);
  }
  // Check if the returned memory is really all zero.
  if (ShouldCheckZeroMemory() && m != nullptr) {
    uint8_t* bytes = reinterpret_cast<uint8_t*>(m);
    for (size_t i = 0; i < size; ++i) {
      DCHECK_EQ(bytes[i], 0);
    }
  }
  return m;
}

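// Usage sketch (hypothetical caller, not part of this header; `rosalloc` and
// `num_bytes` are illustrative names): the thread-safe path reports the
// granted sizes back through the out-parameters, and the returned memory is
// zeroed.
//
//   size_t bytes_allocated = 0u;
//   size_t usable_size = 0u;
//   size_t bytes_tl_bulk_allocated = 0u;
//   void* obj = rosalloc->Alloc<true>(Thread::Current(), num_bytes,
//                                     &bytes_allocated, &usable_size,
//                                     &bytes_tl_bulk_allocated);
//   if (obj != nullptr) {
//     // Success: at least `num_bytes` usable bytes, all zero.
//   }
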
inline bool RosAlloc::Run::IsFull() {
  return free_list_.Size() == 0;
}

inline bool RosAlloc::CanAllocFromThreadLocalRun(Thread* self, size_t size) {
  if (UNLIKELY(!IsSizeForThreadLocal(size))) {
    return false;
  }
  size_t bracket_size;
  size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
  DCHECK_LT(idx, kNumThreadLocalSizeBrackets);
  Run* thread_local_run = reinterpret_cast<Run*>(self->GetRosAllocRun(idx));
  if (kIsDebugBuild) {
    // Need the lock to prevent race conditions.
    MutexLock mu(self, *size_bracket_locks_[idx]);
    CHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
    CHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
  }
  DCHECK(thread_local_run != nullptr);
  DCHECK(thread_local_run->IsThreadLocal() || thread_local_run == dedicated_full_run_);
  return !thread_local_run->IsFull();
}

inline void* RosAlloc::AllocFromThreadLocalRun(Thread* self, size_t size,
                                               size_t* bytes_allocated) {
  DCHECK(bytes_allocated != nullptr);
  if (UNLIKELY(!IsSizeForThreadLocal(size))) {
    return nullptr;
  }
  size_t bracket_size;
  size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
  Run* thread_local_run = reinterpret_cast<Run*>(self->GetRosAllocRun(idx));
  if (kIsDebugBuild) {
    // Need the lock to prevent race conditions.
    MutexLock mu(self, *size_bracket_locks_[idx]);
    CHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
    CHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
  }
  DCHECK(thread_local_run != nullptr);
  DCHECK(thread_local_run->IsThreadLocal() || thread_local_run == dedicated_full_run_);
  void* slot_addr = thread_local_run->AllocSlot();
  if (LIKELY(slot_addr != nullptr)) {
    *bytes_allocated = bracket_size;
  }
  return slot_addr;
}

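// Usage sketch (hypothetical, illustrating how the two functions above pair
// up on a fast path; `rosalloc` and `num_bytes` are illustrative names):
//
//   if (rosalloc->CanAllocFromThreadLocalRun(self, num_bytes)) {
//     size_t bytes_allocated = 0u;
//     void* obj = rosalloc->AllocFromThreadLocalRun(self, num_bytes,
//                                                   &bytes_allocated);
//     DCHECK(obj != nullptr);  // Holds while only `self` uses its runs.
//   }
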
inline size_t RosAlloc::MaxBytesBulkAllocatedFor(size_t size) {
  if (UNLIKELY(!IsSizeForThreadLocal(size))) {
    return size;
  }
  size_t bracket_size;
  size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
  return numOfSlots[idx] * bracket_size;
}

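// The product above is the worst case for a small allocation: a miss on the
// thread-local run may cause an entire new run to be bulk-allocated, i.e. up
// to numOfSlots[idx] * bracket_size bytes rather than just `size`. (Purely
// illustrative numbers: if a 16-byte bracket's runs held 64 slots, a 16-byte
// request would report a 1024-byte worst case.)
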
inline void* RosAlloc::Run::AllocSlot() {
  Slot* slot = free_list_.Remove();
  if (kTraceRosAlloc && slot != nullptr) {
    const uint8_t idx = size_bracket_idx_;
    LOG(INFO) << "RosAlloc::Run::AllocSlot() : " << slot
              << ", bracket_size=" << std::dec << bracketSizes[idx]
              << ", slot_idx=" << SlotIndex(slot);
  }
  return slot;
}

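// Note: AllocSlot() takes no lock. A thread-local run is only allocated from
// by its owning thread, and for shared runs the caller is expected to hold
// the corresponding size bracket lock. The dedicated_full_run_ sentinel keeps
// an empty free list, so AllocSlot() on it returns nullptr and callers fall
// back to the slow path.
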
}  // namespace allocator
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_INL_H_