/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_INL_H_
#define ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_INL_H_

#include "bump_pointer_space.h"

namespace art {
namespace gc {
namespace space {

inline mirror::Object* BumpPointerSpace::Alloc(Thread*, size_t num_bytes, size_t* bytes_allocated,
                                               size_t* usable_size,
                                               size_t* bytes_tl_bulk_allocated) {
  // Every allocation is padded up to the space's alignment granularity.
  num_bytes = RoundUp(num_bytes, kAlignment);
  mirror::Object* ret = AllocNonvirtual(num_bytes);
  if (LIKELY(ret != nullptr)) {
    *bytes_allocated = num_bytes;
    if (usable_size != nullptr) {
      *usable_size = num_bytes;
    }
    *bytes_tl_bulk_allocated = num_bytes;
  }
  return ret;
}

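// Illustrative caller-side sketch only (hypothetical; real callers are the
// heap's allocator entry points, and `space`, `self`, and `byte_count` are
// assumed names), showing how the out-parameters are consumed:
//
//   size_t bytes_allocated, usable_size, bytes_tl_bulk_allocated;
//   mirror::Object* obj = space->Alloc(self, byte_count, &bytes_allocated,
//                                      &usable_size, &bytes_tl_bulk_allocated);
//   if (obj != nullptr) {
//     // For a bump pointer space all three sizes equal the rounded-up request.
//   }
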
inline mirror::Object* BumpPointerSpace::AllocThreadUnsafe(Thread* self, size_t num_bytes,
                                                           size_t* bytes_allocated,
                                                           size_t* usable_size,
                                                           size_t* bytes_tl_bulk_allocated) {
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  num_bytes = RoundUp(num_bytes, kAlignment);
  uint8_t* end = end_.LoadRelaxed();
  if (end + num_bytes > growth_end_) {
    return nullptr;
  }
  mirror::Object* obj = reinterpret_cast<mirror::Object*>(end);
  end_.StoreRelaxed(end + num_bytes);
  *bytes_allocated = num_bytes;
  // Use the CAS-free versions as an optimization: holding the mutator lock
  // exclusively guarantees no other thread races on these counters.
  objects_allocated_.StoreRelaxed(objects_allocated_.LoadRelaxed() + 1);
  bytes_allocated_.StoreRelaxed(bytes_allocated_.LoadRelaxed() + num_bytes);
  if (UNLIKELY(usable_size != nullptr)) {
    *usable_size = num_bytes;
  }
  *bytes_tl_bulk_allocated = num_bytes;
  return obj;
}

inline mirror::Object* BumpPointerSpace::AllocNonvirtualWithoutAccounting(size_t num_bytes) {
  DCHECK(IsAligned<kAlignment>(num_bytes));
  uint8_t* old_end;
  uint8_t* new_end;
  do {
    old_end = end_.LoadRelaxed();
    new_end = old_end + num_bytes;
    // If there is no more room in the region, we are out of memory.
    if (UNLIKELY(new_end > growth_end_)) {
      return nullptr;
    }
  } while (!end_.CompareExchangeWeakSequentiallyConsistent(old_end, new_end));
  return reinterpret_cast<mirror::Object*>(old_end);
}

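// The loop above is the classic lock-free bump-pointer reservation. A minimal
// standalone sketch of the same technique (illustrative only, not ART code),
// assuming a hypothetical std::atomic<uint8_t*> cursor and a fixed limit:
//
//   uint8_t* BumpAlloc(std::atomic<uint8_t*>& cursor, uint8_t* limit, size_t n) {
//     uint8_t* old_end = cursor.load(std::memory_order_relaxed);
//     uint8_t* new_end;
//     do {
//       new_end = old_end + n;
//       if (new_end > limit) {
//         return nullptr;  // Out of space.
//       }
//       // On failure, compare_exchange_weak refreshes old_end with the
//       // current cursor value, so the loop recomputes new_end and retries.
//     } while (!cursor.compare_exchange_weak(old_end, new_end));
//     return old_end;  // Start of the freshly reserved n bytes.
//   }
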
inline mirror::Object* BumpPointerSpace::AllocNonvirtual(size_t num_bytes) {
  mirror::Object* ret = AllocNonvirtualWithoutAccounting(num_bytes);
  if (ret != nullptr) {
    objects_allocated_.FetchAndAddSequentiallyConsistent(1);
    bytes_allocated_.FetchAndAddSequentiallyConsistent(num_bytes);
  }
  return ret;
}

inline size_t BumpPointerSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  size_t num_bytes = obj->SizeOf();
  if (usable_size != nullptr) {
    // The usable size extends to the next aligned boundary past the object.
    *usable_size = RoundUp(num_bytes, kAlignment);
  }
  return num_bytes;
}

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_INL_H_