Ian Rogers2dd0e2c2013-01-24 12:42:14 -08001/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_SRC_MIRROR_OBJECT_H_
18#define ART_SRC_MIRROR_OBJECT_H_
19
20#include "base/casts.h"
21#include "base/logging.h"
22#include "base/macros.h"
23#include "cutils/atomic-inline.h"
24#include "offsets.h"
25
namespace art {

class ImageWriter;
struct ObjectOffsets;
class Thread;

namespace mirror {

// Forward declarations of the mirror types that Object can be downcast to
// (see the Is*/As* helpers on Object below).
class AbstractMethod;
class Array;
class Class;
class Field;
template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
// Aliases mapping each Java primitive array type onto a PrimitiveArray instantiation.
typedef PrimitiveArray<uint8_t> BooleanArray;
typedef PrimitiveArray<int8_t> ByteArray;
typedef PrimitiveArray<uint16_t> CharArray;
typedef PrimitiveArray<double> DoubleArray;
typedef PrimitiveArray<float> FloatArray;
typedef PrimitiveArray<int32_t> IntArray;
typedef PrimitiveArray<int64_t> LongArray;
typedef PrimitiveArray<int16_t> ShortArray;
class String;
class Throwable;

// Classes shared with the managed side of the world need to be packed so that they don't have
// extra platform specific padding.
#define MANAGED PACKED(4)

// Fields within mirror objects aren't accessed directly so that the appropriate amount of
// handshaking is done with GC (for example, read and write barriers). This macro is used to
// compute an offset for the Set/Get methods defined in Object that can safely access fields.
#define OFFSET_OF_OBJECT_MEMBER(type, field) \
    MemberOffset(OFFSETOF_MEMBER(type, field))

// When true, reference field stores are type-checked via Object::CheckFieldAssignment
// (compiled out of the fast path when false).
const bool kCheckFieldAssignments = false;
62
Ian Rogers2dd0e2c2013-01-24 12:42:14 -080063// C++ mirror of java.lang.Object
64class MANAGED Object {
65 public:
66 static MemberOffset ClassOffset() {
67 return OFFSET_OF_OBJECT_MEMBER(Object, klass_);
68 }
69
70 Class* GetClass() const;
71
72 void SetClass(Class* new_klass);
73
74 bool InstanceOf(const Class* klass) const
75 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
76
77 size_t SizeOf() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
78
79 Object* Clone(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
80
81 int32_t IdentityHashCode() const {
82 #ifdef MOVING_GARBAGE_COLLECTOR
83 // TODO: we'll need to use the Object's internal concept of identity
84 UNIMPLEMENTED(FATAL);
85 #endif
86 return reinterpret_cast<int32_t>(this);
87 }
88
89 static MemberOffset MonitorOffset() {
90 return OFFSET_OF_OBJECT_MEMBER(Object, monitor_);
91 }
92
93 volatile int32_t* GetRawLockWordAddress() {
94 byte* raw_addr = reinterpret_cast<byte*>(this) +
95 OFFSET_OF_OBJECT_MEMBER(Object, monitor_).Int32Value();
96 int32_t* word_addr = reinterpret_cast<int32_t*>(raw_addr);
97 return const_cast<volatile int32_t*>(word_addr);
98 }
99
100 uint32_t GetThinLockId();
101
Ian Rogers05f30572013-02-20 12:13:11 -0800102 void MonitorEnter(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
Ian Rogers2dd0e2c2013-01-24 12:42:14 -0800103 EXCLUSIVE_LOCK_FUNCTION(monitor_lock_);
104
Ian Rogers05f30572013-02-20 12:13:11 -0800105 bool MonitorExit(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
Ian Rogers2dd0e2c2013-01-24 12:42:14 -0800106 UNLOCK_FUNCTION(monitor_lock_);
107
Ian Rogers05f30572013-02-20 12:13:11 -0800108 void Notify(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Ian Rogers2dd0e2c2013-01-24 12:42:14 -0800109
Ian Rogers05f30572013-02-20 12:13:11 -0800110 void NotifyAll(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Ian Rogers2dd0e2c2013-01-24 12:42:14 -0800111
Ian Rogers05f30572013-02-20 12:13:11 -0800112 void Wait(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Ian Rogers2dd0e2c2013-01-24 12:42:14 -0800113
Ian Rogers05f30572013-02-20 12:13:11 -0800114 void Wait(Thread* self, int64_t timeout, int32_t nanos) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Ian Rogers2dd0e2c2013-01-24 12:42:14 -0800115
116 bool IsClass() const;
117
118 Class* AsClass();
119
120 const Class* AsClass() const;
121
122 bool IsObjectArray() const;
123
124 template<class T>
125 ObjectArray<T>* AsObjectArray();
126
127 template<class T>
128 const ObjectArray<T>* AsObjectArray() const;
129
130 bool IsArrayInstance() const;
131
132 Array* AsArray();
133
134 const Array* AsArray() const;
135
136 BooleanArray* AsBooleanArray();
137 ByteArray* AsByteArray();
138 CharArray* AsCharArray();
139 ShortArray* AsShortArray();
140 IntArray* AsIntArray();
141 LongArray* AsLongArray();
142
143 String* AsString();
144
145 Throwable* AsThrowable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
146
147 bool IsMethod() const;
148
149 AbstractMethod* AsMethod();
150
151 const AbstractMethod* AsMethod() const;
152
153 bool IsField() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
154
155 Field* AsField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
156
157 const Field* AsField() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
158
159 bool IsReferenceInstance() const;
160
161 bool IsWeakReferenceInstance() const;
162
163 bool IsSoftReferenceInstance() const;
164
165 bool IsFinalizerReferenceInstance() const;
166
167 bool IsPhantomReferenceInstance() const;
168
169 // Accessors for Java type fields
170 template<class T>
171 T GetFieldObject(MemberOffset field_offset, bool is_volatile) const {
172 T result = reinterpret_cast<T>(GetField32(field_offset, is_volatile));
173 VerifyObject(result);
174 return result;
175 }
176
177 void SetFieldObject(MemberOffset field_offset, const Object* new_value, bool is_volatile,
178 bool this_is_valid = true) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
179 VerifyObject(new_value);
180 SetField32(field_offset, reinterpret_cast<uint32_t>(new_value), is_volatile, this_is_valid);
181 if (new_value != NULL) {
182 CheckFieldAssignment(field_offset, new_value);
183 WriteBarrierField(this, field_offset, new_value);
184 }
185 }
186
187 uint32_t GetField32(MemberOffset field_offset, bool is_volatile) const {
188 VerifyObject(this);
189 const byte* raw_addr = reinterpret_cast<const byte*>(this) + field_offset.Int32Value();
190 const int32_t* word_addr = reinterpret_cast<const int32_t*>(raw_addr);
191 if (UNLIKELY(is_volatile)) {
192 return android_atomic_acquire_load(word_addr);
193 } else {
194 return *word_addr;
195 }
196 }
197
198 void SetField32(MemberOffset field_offset, uint32_t new_value, bool is_volatile,
199 bool this_is_valid = true) {
200 if (this_is_valid) {
201 VerifyObject(this);
202 }
203 byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
204 uint32_t* word_addr = reinterpret_cast<uint32_t*>(raw_addr);
205 if (UNLIKELY(is_volatile)) {
206 /*
207 * TODO: add an android_atomic_synchronization_store() function and
208 * use it in the 32-bit volatile set handlers. On some platforms we
209 * can use a fast atomic instruction and avoid the barriers.
210 */
211 ANDROID_MEMBAR_STORE();
212 *word_addr = new_value;
213 ANDROID_MEMBAR_FULL();
214 } else {
215 *word_addr = new_value;
216 }
217 }
218
219 uint64_t GetField64(MemberOffset field_offset, bool is_volatile) const;
220
221 void SetField64(MemberOffset field_offset, uint64_t new_value, bool is_volatile);
222
223 protected:
224 // Accessors for non-Java type fields
225 template<class T>
226 T GetFieldPtr(MemberOffset field_offset, bool is_volatile) const {
227 return reinterpret_cast<T>(GetField32(field_offset, is_volatile));
228 }
229
230 template<typename T>
231 void SetFieldPtr(MemberOffset field_offset, T new_value, bool is_volatile, bool this_is_valid = true) {
232 SetField32(field_offset, reinterpret_cast<uint32_t>(new_value), is_volatile, this_is_valid);
233 }
234
235 private:
Ian Rogers4f6ad8a2013-03-18 15:27:28 -0700236 static void VerifyObject(const Object* obj) ALWAYS_INLINE;
Ian Rogers04d7aa92013-03-16 14:29:17 -0700237
238 // Verify the type correctness of stores to fields.
239 void CheckFieldAssignmentImpl(MemberOffset field_offset, const Object* new_value)
Ian Rogers2dd0e2c2013-01-24 12:42:14 -0800240 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Ian Rogers04d7aa92013-03-16 14:29:17 -0700241 void CheckFieldAssignment(MemberOffset field_offset, const Object* new_value)
242 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
243 if (kCheckFieldAssignments) {
244 CheckFieldAssignmentImpl(field_offset, new_value);
245 }
246 }
Ian Rogers2dd0e2c2013-01-24 12:42:14 -0800247
248 // Write barrier called post update to a reference bearing field.
249 static void WriteBarrierField(const Object* dst, MemberOffset offset, const Object* new_value);
250
251 Class* klass_;
252
253 uint32_t monitor_;
254
255 friend class art::ImageWriter;
256 friend struct art::ObjectOffsets; // for verifying offset information
257 DISALLOW_IMPLICIT_CONSTRUCTORS(Object);
258};
259
260} // namespace mirror
261} // namespace art
262
263#endif // ART_SRC_MIRROR_OBJECT_H_