/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
#ifndef ART_RUNTIME_GC_ACCOUNTING_ATOMIC_STACK_H_
#define ART_RUNTIME_GC_ACCOUNTING_ATOMIC_STACK_H_

#include <sys/mman.h>

#include <algorithm>
#include <string>

#include "atomic.h"
#include "base/logging.h"
#include "base/macros.h"
#include "UniquePtr.h"
#include "mem_map.h"
#include "utils.h"
28
29namespace art {
Ian Rogers1d54e732013-05-02 21:10:01 -070030namespace gc {
31namespace accounting {
Mathieu Chartierd8195f12012-10-05 12:21:28 -070032
33template <typename T>
34class AtomicStack {
35 public:
36 // Capacity is how many elements we can store in the stack.
37 static AtomicStack* Create(const std::string& name, size_t capacity) {
38 UniquePtr<AtomicStack> mark_stack(new AtomicStack(name, capacity));
39 mark_stack->Init();
40 return mark_stack.release();
41 }
42
Ian Rogers1d54e732013-05-02 21:10:01 -070043 ~AtomicStack() {}
Mathieu Chartierd8195f12012-10-05 12:21:28 -070044
45 void Reset() {
46 DCHECK(mem_map_.get() != NULL);
47 DCHECK(begin_ != NULL);
48 front_index_ = 0;
49 back_index_ = 0;
Mathieu Chartierf082d3c2013-07-29 17:04:07 -070050 debug_is_sorted_ = true;
Mathieu Chartierd8195f12012-10-05 12:21:28 -070051 int result = madvise(begin_, sizeof(T) * capacity_, MADV_DONTNEED);
52 if (result == -1) {
53 PLOG(WARNING) << "madvise failed";
54 }
55 }
56
57 // Beware: Mixing atomic pushes and atomic pops will cause ABA problem.
58
59 // Returns false if we overflowed the stack.
60 bool AtomicPushBack(const T& value) {
Mathieu Chartierf082d3c2013-07-29 17:04:07 -070061 if (kIsDebugBuild) {
62 debug_is_sorted_ = false;
63 }
Ian Rogers56edc432013-01-18 16:51:51 -080064 int32_t index;
65 do {
66 index = back_index_;
67 if (UNLIKELY(static_cast<size_t>(index) >= capacity_)) {
68 // Stack overflow.
69 return false;
70 }
Ian Rogersb122a4b2013-11-19 18:00:50 -080071 } while (!back_index_.CompareAndSwap(index, index + 1));
Mathieu Chartierd8195f12012-10-05 12:21:28 -070072 begin_[index] = value;
73 return true;
74 }
75
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -080076 // Atomically bump the back index by the given number of
77 // slots. Returns false if we overflowed the stack.
78 bool AtomicBumpBack(size_t num_slots, T** start_address, T** end_address) {
79 if (kIsDebugBuild) {
80 debug_is_sorted_ = false;
81 }
82 int32_t index;
83 int32_t new_index;
84 do {
85 index = back_index_;
86 new_index = index + num_slots;
87 if (UNLIKELY(static_cast<size_t>(new_index) >= capacity_)) {
88 // Stack overflow.
89 return false;
90 }
91 } while (!back_index_.CompareAndSwap(index, new_index));
92 *start_address = &begin_[index];
93 *end_address = &begin_[new_index];
94 if (kIsDebugBuild) {
95 // Sanity check that the memory is zero.
96 for (int32_t i = index; i < new_index; ++i) {
97 DCHECK_EQ(begin_[i], static_cast<T>(0)) << "i=" << i << " index=" << index << " new_index=" << new_index;
98 }
99 }
100 return true;
101 }
102
103 void AssertAllZero() {
104 if (kIsDebugBuild) {
105 for (size_t i = 0; i < capacity_; ++i) {
106 DCHECK_EQ(begin_[i], static_cast<T>(0)) << "i=" << i;
107 }
108 }
109 }
110
Mathieu Chartierd8195f12012-10-05 12:21:28 -0700111 void PushBack(const T& value) {
Mathieu Chartierf082d3c2013-07-29 17:04:07 -0700112 if (kIsDebugBuild) {
113 debug_is_sorted_ = false;
114 }
Mathieu Chartierd8195f12012-10-05 12:21:28 -0700115 int32_t index = back_index_;
116 DCHECK_LT(static_cast<size_t>(index), capacity_);
117 back_index_ = index + 1;
118 begin_[index] = value;
119 }
120
121 T PopBack() {
122 DCHECK_GT(back_index_, front_index_);
123 // Decrement the back index non atomically.
124 back_index_ = back_index_ - 1;
125 return begin_[back_index_];
126 }
127
Mathieu Chartierd8195f12012-10-05 12:21:28 -0700128 // Take an item from the front of the stack.
129 T PopFront() {
130 int32_t index = front_index_;
Ian Rogersb122a4b2013-11-19 18:00:50 -0800131 DCHECK_LT(index, back_index_.Load());
Mathieu Chartierd8195f12012-10-05 12:21:28 -0700132 front_index_ = front_index_ + 1;
133 return begin_[index];
134 }
135
Mathieu Chartier94c32c52013-08-09 11:14:04 -0700136 // Pop a number of elements.
137 void PopBackCount(int32_t n) {
138 DCHECK_GE(Size(), static_cast<size_t>(n));
Ian Rogersb122a4b2013-11-19 18:00:50 -0800139 back_index_.FetchAndSub(n);
Mathieu Chartier94c32c52013-08-09 11:14:04 -0700140 }
141
Mathieu Chartierd8195f12012-10-05 12:21:28 -0700142 bool IsEmpty() const {
143 return Size() == 0;
144 }
145
146 size_t Size() const {
147 DCHECK_LE(front_index_, back_index_);
148 return back_index_ - front_index_;
149 }
150
Ian Rogers1d54e732013-05-02 21:10:01 -0700151 T* Begin() const {
Mathieu Chartier94c32c52013-08-09 11:14:04 -0700152 return const_cast<T*>(begin_ + front_index_);
Mathieu Chartierd8195f12012-10-05 12:21:28 -0700153 }
154
Ian Rogers1d54e732013-05-02 21:10:01 -0700155 T* End() const {
Mathieu Chartier94c32c52013-08-09 11:14:04 -0700156 return const_cast<T*>(begin_ + back_index_);
Mathieu Chartierd8195f12012-10-05 12:21:28 -0700157 }
158
159 size_t Capacity() const {
160 return capacity_;
161 }
162
163 // Will clear the stack.
164 void Resize(size_t new_capacity) {
165 capacity_ = new_capacity;
166 Init();
167 }
168
Ian Rogers1d54e732013-05-02 21:10:01 -0700169 void Sort() {
Ian Rogersb122a4b2013-11-19 18:00:50 -0800170 int32_t start_back_index = back_index_.Load();
171 int32_t start_front_index = front_index_.Load();
Mathieu Chartierf082d3c2013-07-29 17:04:07 -0700172 std::sort(Begin(), End());
Ian Rogersb122a4b2013-11-19 18:00:50 -0800173 CHECK_EQ(start_back_index, back_index_.Load());
174 CHECK_EQ(start_front_index, front_index_.Load());
Mathieu Chartierf082d3c2013-07-29 17:04:07 -0700175 if (kIsDebugBuild) {
176 debug_is_sorted_ = true;
Ian Rogers1d54e732013-05-02 21:10:01 -0700177 }
178 }
179
Mathieu Chartierf082d3c2013-07-29 17:04:07 -0700180 bool ContainsSorted(const T& value) const {
181 DCHECK(debug_is_sorted_);
182 return std::binary_search(Begin(), End(), value);
183 }
184
Ian Rogers1d54e732013-05-02 21:10:01 -0700185 bool Contains(const T& value) const {
Mathieu Chartierf082d3c2013-07-29 17:04:07 -0700186 return std::find(Begin(), End(), value) != End();
Ian Rogers1d54e732013-05-02 21:10:01 -0700187 }
188
Mathieu Chartierd8195f12012-10-05 12:21:28 -0700189 private:
190 AtomicStack(const std::string& name, const size_t capacity)
191 : name_(name),
192 back_index_(0),
193 front_index_(0),
194 begin_(NULL),
Ian Rogers1d54e732013-05-02 21:10:01 -0700195 capacity_(capacity),
Mathieu Chartierf082d3c2013-07-29 17:04:07 -0700196 debug_is_sorted_(true) {
Mathieu Chartierd8195f12012-10-05 12:21:28 -0700197 }
198
199 // Size in number of elements.
200 void Init() {
Ian Rogers8d31bbd2013-10-13 10:44:14 -0700201 std::string error_msg;
202 mem_map_.reset(MemMap::MapAnonymous(name_.c_str(), NULL, capacity_ * sizeof(T),
Ian Rogersef7d42f2014-01-06 12:55:46 -0800203 PROT_READ | PROT_WRITE, false, &error_msg));
Ian Rogers8d31bbd2013-10-13 10:44:14 -0700204 CHECK(mem_map_.get() != NULL) << "couldn't allocate mark stack.\n" << error_msg;
Mathieu Chartierd8195f12012-10-05 12:21:28 -0700205 byte* addr = mem_map_->Begin();
206 CHECK(addr != NULL);
Mathieu Chartierf082d3c2013-07-29 17:04:07 -0700207 debug_is_sorted_ = true;
Mathieu Chartierd8195f12012-10-05 12:21:28 -0700208 begin_ = reinterpret_cast<T*>(addr);
209 Reset();
210 }
211
212 // Name of the mark stack.
213 std::string name_;
214
215 // Memory mapping of the atomic stack.
216 UniquePtr<MemMap> mem_map_;
217
218 // Back index (index after the last element pushed).
219 AtomicInteger back_index_;
220
221 // Front index, used for implementing PopFront.
222 AtomicInteger front_index_;
223
224 // Base of the atomic stack.
225 T* begin_;
226
227 // Maximum number of elements.
228 size_t capacity_;
229
Mathieu Chartierf082d3c2013-07-29 17:04:07 -0700230 // Whether or not the stack is sorted, only updated in debug mode to avoid performance overhead.
231 bool debug_is_sorted_;
Ian Rogers1d54e732013-05-02 21:10:01 -0700232
Mathieu Chartierd8195f12012-10-05 12:21:28 -0700233 DISALLOW_COPY_AND_ASSIGN(AtomicStack);
234};
235
Ian Rogers2dd0e2c2013-01-24 12:42:14 -0800236typedef AtomicStack<mirror::Object*> ObjectStack;
237
Ian Rogers1d54e732013-05-02 21:10:01 -0700238} // namespace accounting
239} // namespace gc
Mathieu Chartierd8195f12012-10-05 12:21:28 -0700240} // namespace art
241
Brian Carlstromfc0e3212013-07-17 14:40:12 -0700242#endif // ART_RUNTIME_GC_ACCOUNTING_ATOMIC_STACK_H_