/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_SRC_MIRROR_OBJECT_H_
#define ART_SRC_MIRROR_OBJECT_H_

#include "base/casts.h"
#include "base/logging.h"
#include "base/macros.h"
#include "cutils/atomic-inline.h"
#include "offsets.h"

namespace art {

class ImageWriter;
struct ObjectOffsets;
class Thread;

namespace mirror {

class AbstractMethod;
class Array;
class Class;
class Field;
template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
typedef PrimitiveArray<uint8_t> BooleanArray;
typedef PrimitiveArray<int8_t> ByteArray;
typedef PrimitiveArray<uint16_t> CharArray;
typedef PrimitiveArray<double> DoubleArray;
typedef PrimitiveArray<float> FloatArray;
typedef PrimitiveArray<int32_t> IntArray;
typedef PrimitiveArray<int64_t> LongArray;
typedef PrimitiveArray<int16_t> ShortArray;
class String;
class Throwable;

// Classes shared with the managed side of the world need to be packed so that they don't have
// extra platform-specific padding.
#define MANAGED PACKED(4)

// Fields within mirror objects aren't accessed directly so that the appropriate amount of
// handshaking is done with GC (for example, read and write barriers). This macro is used to
// compute an offset for the Set/Get methods defined in Object that can safely access fields.
#define OFFSET_OF_OBJECT_MEMBER(type, field) \
    MemberOffset(OFFSETOF_MEMBER(type, field))
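
// For example, the accessors below combine OFFSET_OF_OBJECT_MEMBER with the Get/Set helpers
// defined in Object:
//   uint32_t lock_word = GetField32(OFFSET_OF_OBJECT_MEMBER(Object, monitor_), false);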

// C++ mirror of java.lang.Object
class MANAGED Object {
 public:
  static MemberOffset ClassOffset() {
    return OFFSET_OF_OBJECT_MEMBER(Object, klass_);
  }

  Class* GetClass() const;

  void SetClass(Class* new_klass);

  bool InstanceOf(const Class* klass) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  size_t SizeOf() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  Object* Clone(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
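
  // With a non-moving collector the identity hash code is simply the object's address; the
  // TODO below covers the moving-collector case.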
  int32_t IdentityHashCode() const {
  #ifdef MOVING_GARBAGE_COLLECTOR
    // TODO: we'll need to use the Object's internal concept of identity
    UNIMPLEMENTED(FATAL);
  #endif
    return reinterpret_cast<int32_t>(this);
  }

  static MemberOffset MonitorOffset() {
    return OFFSET_OF_OBJECT_MEMBER(Object, monitor_);
  }
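
  // Raw address of the monitor word, for use by the monitor/locking code.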
  volatile int32_t* GetRawLockWordAddress() {
    byte* raw_addr = reinterpret_cast<byte*>(this) +
        OFFSET_OF_OBJECT_MEMBER(Object, monitor_).Int32Value();
    int32_t* word_addr = reinterpret_cast<int32_t*>(raw_addr);
    return const_cast<volatile int32_t*>(word_addr);
  }

  uint32_t GetThinLockId();
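
  // Standard java.lang.Object monitor operations: lock, unlock, notify and wait.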
  void MonitorEnter(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCK_FUNCTION(monitor_lock_);

  bool MonitorExit(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      UNLOCK_FUNCTION(monitor_lock_);

  void Notify(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void NotifyAll(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void Wait(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void Wait(Thread* self, int64_t timeout, int32_t nanos) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
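
  // Type tests and down-casts to the more specific mirror types.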
  bool IsClass() const;

  Class* AsClass();

  const Class* AsClass() const;

  bool IsObjectArray() const;

  template<class T>
  ObjectArray<T>* AsObjectArray();

  template<class T>
  const ObjectArray<T>* AsObjectArray() const;

  bool IsArrayInstance() const;

  Array* AsArray();

  const Array* AsArray() const;

  BooleanArray* AsBooleanArray();
  ByteArray* AsByteArray();
  CharArray* AsCharArray();
  ShortArray* AsShortArray();
  IntArray* AsIntArray();
  LongArray* AsLongArray();

  String* AsString();

  Throwable* AsThrowable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool IsMethod() const;

  AbstractMethod* AsMethod();

  const AbstractMethod* AsMethod() const;

  bool IsField() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  Field* AsField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  const Field* AsField() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
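
  // Tests for the java.lang.ref reference types.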
  bool IsReferenceInstance() const;

  bool IsWeakReferenceInstance() const;

  bool IsSoftReferenceInstance() const;

  bool IsFinalizerReferenceInstance() const;

  bool IsPhantomReferenceInstance() const;

  // Accessors for Java type fields
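  // Reference fields are stored as 32-bit words; e.g. (Foo and bar_ are hypothetical
  // placeholders for a mirror class and one of its reference fields):
  //   Object* value = GetFieldObject<Object*>(OFFSET_OF_OBJECT_MEMBER(Foo, bar_), false);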
  template<class T>
  T GetFieldObject(MemberOffset field_offset, bool is_volatile) const {
    T result = reinterpret_cast<T>(GetField32(field_offset, is_volatile));
    VerifyObject(result);
    return result;
  }
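
  // Stores a reference field and, for non-null values, performs the post-write barrier so the
  // GC sees the new reference.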
  void SetFieldObject(MemberOffset field_offset, const Object* new_value, bool is_volatile,
                      bool this_is_valid = true) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    VerifyObject(new_value);
    SetField32(field_offset, reinterpret_cast<uint32_t>(new_value), is_volatile, this_is_valid);
    if (new_value != NULL) {
      CheckFieldAssignment(field_offset, new_value);
      WriteBarrierField(this, field_offset, new_value);
    }
  }
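
  // 32-bit primitive getter; volatile reads use an acquire load.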
  uint32_t GetField32(MemberOffset field_offset, bool is_volatile) const {
    VerifyObject(this);
    const byte* raw_addr = reinterpret_cast<const byte*>(this) + field_offset.Int32Value();
    const int32_t* word_addr = reinterpret_cast<const int32_t*>(raw_addr);
    if (UNLIKELY(is_volatile)) {
      return android_atomic_acquire_load(word_addr);
    } else {
      return *word_addr;
    }
  }
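
  // 32-bit primitive setter; volatile writes are bracketed by store/full memory barriers.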
  void SetField32(MemberOffset field_offset, uint32_t new_value, bool is_volatile,
                  bool this_is_valid = true) {
    if (this_is_valid) {
      VerifyObject(this);
    }
    byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
    uint32_t* word_addr = reinterpret_cast<uint32_t*>(raw_addr);
    if (UNLIKELY(is_volatile)) {
      /*
       * TODO: add an android_atomic_synchronization_store() function and
       * use it in the 32-bit volatile set handlers. On some platforms we
       * can use a fast atomic instruction and avoid the barriers.
       */
      ANDROID_MEMBAR_STORE();
      *word_addr = new_value;
      ANDROID_MEMBAR_FULL();
    } else {
      *word_addr = new_value;
    }
  }

  uint64_t GetField64(MemberOffset field_offset, bool is_volatile) const;

  void SetField64(MemberOffset field_offset, uint64_t new_value, bool is_volatile);

 protected:
  // Accessors for non-Java type fields
  template<class T>
  T GetFieldPtr(MemberOffset field_offset, bool is_volatile) const {
    return reinterpret_cast<T>(GetField32(field_offset, is_volatile));
  }

  template<typename T>
  void SetFieldPtr(MemberOffset field_offset, T new_value, bool is_volatile, bool this_is_valid = true) {
    SetField32(field_offset, reinterpret_cast<uint32_t>(new_value), is_volatile, this_is_valid);
  }

 private:
#if VERIFY_OBJECT_ENABLED
  static void VerifyObject(const Object* obj);
  void CheckFieldAssignment(MemberOffset field_offset, const Object* new_value)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
#else
  static void VerifyObject(const Object*) {}
  void CheckFieldAssignment(MemberOffset, const Object*)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {}
#endif

  // Write barrier called post update to a reference-bearing field.
  static void WriteBarrierField(const Object* dst, MemberOffset offset, const Object* new_value);
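
  // The Class representing the type of this object (java.lang.Object.getClass()).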
  Class* klass_;
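
  // Monitor and thin-lock word used for object synchronization (see GetThinLockId and
  // GetRawLockWordAddress above).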
  uint32_t monitor_;

  friend class art::ImageWriter;
  friend struct art::ObjectOffsets;  // for verifying offset information
  DISALLOW_IMPLICIT_CONSTRUCTORS(Object);
};

}  // namespace mirror
}  // namespace art

#endif  // ART_SRC_MIRROR_OBJECT_H_