Reduced memory usage of primitive fields smaller than 4-bytes

Reduced the memory used by byte and boolean fields from 4 bytes down to
a single byte, and by short and char fields from 4 bytes down to two
bytes. Fields are now laid out as references first, followed by
primitives in decreasing component size, with smaller fields shuffled
forward into alignment gaps as needed.

Bug: 8135266
Change-Id: I65eaf31ed27e5bd5ba0c7d4606454b720b074752
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 2c0ea36..213dbc2 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -29,7 +29,7 @@
 
 inline uint32_t Array::ClassSize() {
   uint32_t vtable_entries = Object::kVTableLength;
-  return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0);
+  return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0);
 }
 
 template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
diff --git a/runtime/mirror/art_field-inl.h b/runtime/mirror/art_field-inl.h
index 00bed92..d37fa41 100644
--- a/runtime/mirror/art_field-inl.h
+++ b/runtime/mirror/art_field-inl.h
@@ -31,7 +31,7 @@
 
 inline uint32_t ArtField::ClassSize() {
   uint32_t vtable_entries = Object::kVTableLength + 6;
-  return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0);
+  return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0);
 }
 
 inline Class* ArtField::GetDeclaringClass() {
@@ -120,50 +120,64 @@
   }
 }
 
-inline bool ArtField::GetBoolean(Object* object) {
-  DCHECK_EQ(Primitive::kPrimBoolean, GetTypeAsPrimitiveType()) << PrettyField(this);
-  return Get32(object);
+#define FIELD_GET(object, type) \
+  DCHECK_EQ(Primitive::kPrim ## type, GetTypeAsPrimitiveType()) << PrettyField(this); \
+  DCHECK(object != nullptr) << PrettyField(this); \
+  DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted()); \
+  if (UNLIKELY(IsVolatile())) { \
+    return object->GetField ## type ## Volatile(GetOffset()); \
+  } \
+  return object->GetField ## type(GetOffset());
+
+#define FIELD_SET(object, type, value) \
+  DCHECK_EQ(Primitive::kPrim ## type, GetTypeAsPrimitiveType()) << PrettyField(this); \
+  DCHECK(object != nullptr) << PrettyField(this); \
+  DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted()); \
+  if (UNLIKELY(IsVolatile())) { \
+    object->SetField ## type ## Volatile<kTransactionActive>(GetOffset(), value); \
+  } else { \
+    object->SetField ## type<kTransactionActive>(GetOffset(), value); \
+  }
+
+inline uint8_t ArtField::GetBoolean(Object* object) {
+  FIELD_GET(object, Boolean);
 }
 
 template<bool kTransactionActive>
-inline void ArtField::SetBoolean(Object* object, bool z) {
-  DCHECK_EQ(Primitive::kPrimBoolean, GetTypeAsPrimitiveType()) << PrettyField(this);
-  Set32<kTransactionActive>(object, z);
+inline void ArtField::SetBoolean(Object* object, uint8_t z) {
+  FIELD_SET(object, Boolean, z);
 }
 
 inline int8_t ArtField::GetByte(Object* object) {
-  DCHECK_EQ(Primitive::kPrimByte, GetTypeAsPrimitiveType()) << PrettyField(this);
-  return Get32(object);
+  FIELD_GET(object, Byte);
 }
 
 template<bool kTransactionActive>
 inline void ArtField::SetByte(Object* object, int8_t b) {
-  DCHECK_EQ(Primitive::kPrimByte, GetTypeAsPrimitiveType()) << PrettyField(this);
-  Set32<kTransactionActive>(object, b);
+  FIELD_SET(object, Byte, b);
 }
 
 inline uint16_t ArtField::GetChar(Object* object) {
-  DCHECK_EQ(Primitive::kPrimChar, GetTypeAsPrimitiveType()) << PrettyField(this);
-  return Get32(object);
+  FIELD_GET(object, Char);
 }
 
 template<bool kTransactionActive>
 inline void ArtField::SetChar(Object* object, uint16_t c) {
-  DCHECK_EQ(Primitive::kPrimChar, GetTypeAsPrimitiveType()) << PrettyField(this);
-  Set32<kTransactionActive>(object, c);
+  FIELD_SET(object, Char, c);
 }
 
 inline int16_t ArtField::GetShort(Object* object) {
-  DCHECK_EQ(Primitive::kPrimShort, GetTypeAsPrimitiveType()) << PrettyField(this);
-  return Get32(object);
+  FIELD_GET(object, Short);
 }
 
 template<bool kTransactionActive>
 inline void ArtField::SetShort(Object* object, int16_t s) {
-  DCHECK_EQ(Primitive::kPrimShort, GetTypeAsPrimitiveType()) << PrettyField(this);
-  Set32<kTransactionActive>(object, s);
+  FIELD_SET(object, Short, s);
 }
 
+#undef FIELD_GET
+#undef FIELD_SET
+
 inline int32_t ArtField::GetInt(Object* object) {
   if (kIsDebugBuild) {
     Primitive::Type type = GetTypeAsPrimitiveType();
@@ -273,7 +287,7 @@
 }
 
 inline size_t ArtField::FieldSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  return Primitive::FieldSize(GetTypeAsPrimitiveType());
+  return Primitive::ComponentSize(GetTypeAsPrimitiveType());
 }
 
 inline mirror::DexCache* ArtField::GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
diff --git a/runtime/mirror/art_field.h b/runtime/mirror/art_field.h
index f3dfa15..885bcb0 100644
--- a/runtime/mirror/art_field.h
+++ b/runtime/mirror/art_field.h
@@ -95,9 +95,9 @@
   void SetOffset(MemberOffset num_bytes) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // field access, null object for static fields
-  bool GetBoolean(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uint8_t GetBoolean(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   template<bool kTransactionActive>
-  void SetBoolean(Object* object, bool z) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetBoolean(Object* object, uint8_t z) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   int8_t GetByte(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   template<bool kTransactionActive>
   void SetByte(Object* object, int8_t b) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index 06700e6..98ca64b 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -38,7 +38,7 @@
 
 inline uint32_t ArtMethod::ClassSize() {
   uint32_t vtable_entries = Object::kVTableLength + 8;
-  return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0);
+  return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0);
 }
 
 template<ReadBarrierOption kReadBarrierOption>
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index b0ff7ea..52dd0ee 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -556,6 +556,8 @@
 
 inline uint32_t Class::ComputeClassSize(bool has_embedded_tables,
                                         uint32_t num_vtable_entries,
+                                        uint32_t num_8bit_static_fields,
+                                        uint32_t num_16bit_static_fields,
                                         uint32_t num_32bit_static_fields,
                                         uint32_t num_64bit_static_fields,
                                         uint32_t num_ref_static_fields) {
@@ -569,19 +571,39 @@
             sizeof(int32_t) /* vtable len */ +
             embedded_vtable_size;
   }
+
   // Space used by reference statics.
   size +=  num_ref_static_fields * sizeof(HeapReference<Object>);
-  // Possible pad for alignment.
-  if (((size & 7) != 0) && (num_64bit_static_fields > 0)) {
-    size += sizeof(uint32_t);
-    if (num_32bit_static_fields != 0) {
-      // Shuffle one 32 bit static field forward.
-      num_32bit_static_fields--;
+  if (!IsAligned<8>(size) && num_64bit_static_fields > 0) {
+    uint32_t gap = 8 - (size & 0x7);
+    size += gap;  // will be padded
+    // Shuffle 4-byte fields forward.
+    while (gap >= sizeof(uint32_t) && num_32bit_static_fields != 0) {
+      --num_32bit_static_fields;
+      gap -= sizeof(uint32_t);
+    }
+    // Shuffle 2-byte fields forward.
+    while (gap >= sizeof(uint16_t) && num_16bit_static_fields != 0) {
+      --num_16bit_static_fields;
+      gap -= sizeof(uint16_t);
+    }
+    // Shuffle byte fields forward.
+    while (gap >= sizeof(uint8_t) && num_8bit_static_fields != 0) {
+      --num_8bit_static_fields;
+      gap -= sizeof(uint8_t);
     }
   }
+  // Guaranteed to be at least 4 byte aligned. No need for further alignments.
   // Space used for primitive static fields.
-  size += (num_32bit_static_fields * sizeof(uint32_t)) +
+  size += (num_8bit_static_fields * sizeof(uint8_t)) +
+      (num_16bit_static_fields * sizeof(uint16_t)) +
+      (num_32bit_static_fields * sizeof(uint32_t)) +
       (num_64bit_static_fields * sizeof(uint64_t));
+  // For now, the start of a subclass is expected to be 4-byte aligned; pad the end of the
+  // object to ensure alignment.
+  if (!IsAligned<4>(size)) {
+    size = RoundUp(size, 4);
+  }
   return size;
 }
 
@@ -705,11 +727,11 @@
 }
 
 inline bool Class::GetSlowPathEnabled() {
-  return GetField32(GetSlowPathFlagOffset());
+  return GetFieldBoolean(GetSlowPathFlagOffset());
 }
 
 inline void Class::SetSlowPath(bool enabled) {
-  SetField32<false>(GetSlowPathFlagOffset(), enabled);
+  SetFieldBoolean<false>(GetSlowPathFlagOffset(), enabled);
 }
 
 inline void Class::InitializeClassVisitor::operator()(
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 81fbcab..0d30bc6 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -502,6 +502,8 @@
   // Compute how many bytes would be used a class with the given elements.
   static uint32_t ComputeClassSize(bool has_embedded_tables,
                                    uint32_t num_vtable_entries,
+                                   uint32_t num_8bit_static_fields,
+                                   uint32_t num_16bit_static_fields,
                                    uint32_t num_32bit_static_fields,
                                    uint32_t num_64bit_static_fields,
                                    uint32_t num_ref_static_fields);
@@ -510,12 +512,12 @@
   static uint32_t ClassClassSize() {
     // The number of vtable entries in java.lang.Class.
     uint32_t vtable_entries = Object::kVTableLength + 64;
-    return ComputeClassSize(true, vtable_entries, 0, 1, 0);
+    return ComputeClassSize(true, vtable_entries, 0, 0, 0, 1, 0);
   }
 
   // The size of a java.lang.Class representing a primitive such as int.class.
   static uint32_t PrimitiveClassSize() {
-    return ComputeClassSize(false, 0, 0, 0, 0);
+    return ComputeClassSize(false, 0, 0, 0, 0, 0, 0);
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index d3fcb55..288e88e 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -28,7 +28,7 @@
 
 inline uint32_t DexCache::ClassSize() {
   uint32_t vtable_entries = Object::kVTableLength + 1;
-  return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0);
+  return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0);
 }
 
 inline ArtMethod* DexCache::GetResolvedMethod(uint32_t method_idx)
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 9dbfb56..8c1dc7d 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -37,7 +37,7 @@
 
 inline uint32_t Object::ClassSize() {
   uint32_t vtable_entries = kVTableLength;
-  return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0);
+  return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0);
 }
 
 template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
@@ -408,17 +408,157 @@
 }
 
 template<VerifyObjectFlags kVerifyFlags, bool kIsVolatile>
+inline uint8_t Object::GetFieldBoolean(MemberOffset field_offset) {
+  if (kVerifyFlags & kVerifyThis) {
+    VerifyObject(this);
+  }
+  return GetField<uint8_t, kIsVolatile>(field_offset);
+}
+
+template<VerifyObjectFlags kVerifyFlags, bool kIsVolatile>
+inline int8_t Object::GetFieldByte(MemberOffset field_offset) {
+  if (kVerifyFlags & kVerifyThis) {
+    VerifyObject(this);
+  }
+  return GetField<int8_t, kIsVolatile>(field_offset);
+}
+
+template<VerifyObjectFlags kVerifyFlags>
+inline uint8_t Object::GetFieldBooleanVolatile(MemberOffset field_offset) {
+  return GetFieldBoolean<kVerifyFlags, true>(field_offset);
+}
+
+template<VerifyObjectFlags kVerifyFlags>
+inline int8_t Object::GetFieldByteVolatile(MemberOffset field_offset) {
+  return GetFieldByte<kVerifyFlags, true>(field_offset);
+}
+
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
+    bool kIsVolatile>
+inline void Object::SetFieldBoolean(MemberOffset field_offset, uint8_t new_value)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  if (kCheckTransaction) {
+    DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
+  }
+  if (kTransactionActive) {
+    Runtime::Current()->RecordWriteFieldBoolean(this, field_offset,
+                                           GetFieldBoolean<kVerifyFlags, kIsVolatile>(field_offset),
+                                           kIsVolatile);
+  }
+  if (kVerifyFlags & kVerifyThis) {
+    VerifyObject(this);
+  }
+  SetField<uint8_t, kIsVolatile>(field_offset, new_value);
+}
+
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
+    bool kIsVolatile>
+inline void Object::SetFieldByte(MemberOffset field_offset, int8_t new_value)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  if (kCheckTransaction) {
+    DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
+  }
+  if (kTransactionActive) {
+    Runtime::Current()->RecordWriteFieldByte(this, field_offset,
+                                           GetFieldByte<kVerifyFlags, kIsVolatile>(field_offset),
+                                           kIsVolatile);
+  }
+  if (kVerifyFlags & kVerifyThis) {
+    VerifyObject(this);
+  }
+  SetField<int8_t, kIsVolatile>(field_offset, new_value);
+}
+
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline void Object::SetFieldBooleanVolatile(MemberOffset field_offset, uint8_t new_value) {
+  return SetFieldBoolean<kTransactionActive, kCheckTransaction, kVerifyFlags, true>(
+      field_offset, new_value);
+}
+
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline void Object::SetFieldByteVolatile(MemberOffset field_offset, int8_t new_value) {
+  return SetFieldByte<kTransactionActive, kCheckTransaction, kVerifyFlags, true>(
+      field_offset, new_value);
+}
+
+template<VerifyObjectFlags kVerifyFlags, bool kIsVolatile>
+inline uint16_t Object::GetFieldChar(MemberOffset field_offset) {
+  if (kVerifyFlags & kVerifyThis) {
+    VerifyObject(this);
+  }
+  return GetField<uint16_t, kIsVolatile>(field_offset);
+}
+
+template<VerifyObjectFlags kVerifyFlags, bool kIsVolatile>
+inline int16_t Object::GetFieldShort(MemberOffset field_offset) {
+  if (kVerifyFlags & kVerifyThis) {
+    VerifyObject(this);
+  }
+  return GetField<int16_t, kIsVolatile>(field_offset);
+}
+
+template<VerifyObjectFlags kVerifyFlags>
+inline uint16_t Object::GetFieldCharVolatile(MemberOffset field_offset) {
+  return GetFieldChar<kVerifyFlags, true>(field_offset);
+}
+
+template<VerifyObjectFlags kVerifyFlags>
+inline int16_t Object::GetFieldShortVolatile(MemberOffset field_offset) {
+  return GetFieldShort<kVerifyFlags, true>(field_offset);
+}
+
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
+    bool kIsVolatile>
+inline void Object::SetFieldChar(MemberOffset field_offset, uint16_t new_value) {
+  if (kCheckTransaction) {
+    DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
+  }
+  if (kTransactionActive) {
+    Runtime::Current()->RecordWriteFieldChar(this, field_offset,
+                                           GetFieldChar<kVerifyFlags, kIsVolatile>(field_offset),
+                                           kIsVolatile);
+  }
+  if (kVerifyFlags & kVerifyThis) {
+    VerifyObject(this);
+  }
+  SetField<uint16_t, kIsVolatile>(field_offset, new_value);
+}
+
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
+    bool kIsVolatile>
+inline void Object::SetFieldShort(MemberOffset field_offset, int16_t new_value) {
+  if (kCheckTransaction) {
+    DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
+  }
+  if (kTransactionActive) {
+    Runtime::Current()->RecordWriteFieldShort(this, field_offset,
+                                           GetFieldShort<kVerifyFlags, kIsVolatile>(field_offset),
+                                           kIsVolatile);
+  }
+  if (kVerifyFlags & kVerifyThis) {
+    VerifyObject(this);
+  }
+  SetField<int16_t, kIsVolatile>(field_offset, new_value);
+}
+
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline void Object::SetFieldCharVolatile(MemberOffset field_offset, uint16_t new_value) {
+  return SetFieldChar<kTransactionActive, kCheckTransaction, kVerifyFlags, true>(
+      field_offset, new_value);
+}
+
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline void Object::SetFieldShortVolatile(MemberOffset field_offset, int16_t new_value) {
+  return SetFieldShort<kTransactionActive, kCheckTransaction, kVerifyFlags, true>(
+      field_offset, new_value);
+}
+
+template<VerifyObjectFlags kVerifyFlags, bool kIsVolatile>
 inline int32_t Object::GetField32(MemberOffset field_offset) {
   if (kVerifyFlags & kVerifyThis) {
     VerifyObject(this);
   }
-  const byte* raw_addr = reinterpret_cast<const byte*>(this) + field_offset.Int32Value();
-  const int32_t* word_addr = reinterpret_cast<const int32_t*>(raw_addr);
-  if (UNLIKELY(kIsVolatile)) {
-    return reinterpret_cast<const Atomic<int32_t>*>(word_addr)->LoadSequentiallyConsistent();
-  } else {
-    return reinterpret_cast<const Atomic<int32_t>*>(word_addr)->LoadJavaData();
-  }
+  return GetField<int32_t, kIsVolatile>(field_offset);
 }
 
 template<VerifyObjectFlags kVerifyFlags>
@@ -440,13 +580,7 @@
   if (kVerifyFlags & kVerifyThis) {
     VerifyObject(this);
   }
-  byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
-  int32_t* word_addr = reinterpret_cast<int32_t*>(raw_addr);
-  if (kIsVolatile) {
-    reinterpret_cast<Atomic<int32_t>*>(word_addr)->StoreSequentiallyConsistent(new_value);
-  } else {
-    reinterpret_cast<Atomic<int32_t>*>(word_addr)->StoreJavaData(new_value);
-  }
+  SetField<int32_t, kIsVolatile>(field_offset, new_value);
 }
 
 template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
@@ -515,13 +649,7 @@
   if (kVerifyFlags & kVerifyThis) {
     VerifyObject(this);
   }
-  const byte* raw_addr = reinterpret_cast<const byte*>(this) + field_offset.Int32Value();
-  const int64_t* addr = reinterpret_cast<const int64_t*>(raw_addr);
-  if (kIsVolatile) {
-    return reinterpret_cast<const Atomic<int64_t>*>(addr)->LoadSequentiallyConsistent();
-  } else {
-    return reinterpret_cast<const Atomic<int64_t>*>(addr)->LoadJavaData();
-  }
+  return GetField<int64_t, kIsVolatile>(field_offset);
 }
 
 template<VerifyObjectFlags kVerifyFlags>
@@ -543,13 +671,7 @@
   if (kVerifyFlags & kVerifyThis) {
     VerifyObject(this);
   }
-  byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
-  int64_t* addr = reinterpret_cast<int64_t*>(raw_addr);
-  if (kIsVolatile) {
-    reinterpret_cast<Atomic<int64_t>*>(addr)->StoreSequentiallyConsistent(new_value);
-  } else {
-    reinterpret_cast<Atomic<int64_t>*>(addr)->StoreJavaData(new_value);
-  }
+  SetField<int64_t, kIsVolatile>(field_offset, new_value);
 }
 
 template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
@@ -558,6 +680,28 @@
                                                                                new_value);
 }
 
+template<typename kSize, bool kIsVolatile>
+inline void Object::SetField(MemberOffset field_offset, kSize new_value) {
+  byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+  kSize* addr = reinterpret_cast<kSize*>(raw_addr);
+  if (kIsVolatile) {
+    reinterpret_cast<Atomic<kSize>*>(addr)->StoreSequentiallyConsistent(new_value);
+  } else {
+    reinterpret_cast<Atomic<kSize>*>(addr)->StoreJavaData(new_value);
+  }
+}
+
+template<typename kSize, bool kIsVolatile>
+inline kSize Object::GetField(MemberOffset field_offset) {
+  const byte* raw_addr = reinterpret_cast<const byte*>(this) + field_offset.Int32Value();
+  const kSize* addr = reinterpret_cast<const kSize*>(raw_addr);
+  if (kIsVolatile) {
+    return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadSequentiallyConsistent();
+  } else {
+    return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadJavaData();
+  }
+}
+
 template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
 inline bool Object::CasFieldWeakSequentiallyConsistent64(MemberOffset field_offset,
                                                          int64_t old_value, int64_t new_value) {
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index a6b6227..6cd230b 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -19,6 +19,7 @@
 
 #include "object_reference.h"
 #include "offsets.h"
+#include "runtime.h"
 #include "verify_object.h"
 
 namespace art {
@@ -247,6 +248,78 @@
   HeapReference<Object>* GetFieldObjectReferenceAddr(MemberOffset field_offset);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
+  ALWAYS_INLINE uint8_t GetFieldBoolean(MemberOffset field_offset)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
+  ALWAYS_INLINE int8_t GetFieldByte(MemberOffset field_offset)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  ALWAYS_INLINE uint8_t GetFieldBooleanVolatile(MemberOffset field_offset)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  ALWAYS_INLINE int8_t GetFieldByteVolatile(MemberOffset field_offset)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template<bool kTransactionActive, bool kCheckTransaction = true,
+      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
+  ALWAYS_INLINE void SetFieldBoolean(MemberOffset field_offset, uint8_t new_value)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template<bool kTransactionActive, bool kCheckTransaction = true,
+      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
+  ALWAYS_INLINE void SetFieldByte(MemberOffset field_offset, int8_t new_value)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template<bool kTransactionActive, bool kCheckTransaction = true,
+      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  ALWAYS_INLINE void SetFieldBooleanVolatile(MemberOffset field_offset, uint8_t new_value)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template<bool kTransactionActive, bool kCheckTransaction = true,
+      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  ALWAYS_INLINE void SetFieldByteVolatile(MemberOffset field_offset, int8_t new_value)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
+  ALWAYS_INLINE uint16_t GetFieldChar(MemberOffset field_offset)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
+  ALWAYS_INLINE int16_t GetFieldShort(MemberOffset field_offset)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  ALWAYS_INLINE uint16_t GetFieldCharVolatile(MemberOffset field_offset)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  ALWAYS_INLINE int16_t GetFieldShortVolatile(MemberOffset field_offset)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template<bool kTransactionActive, bool kCheckTransaction = true,
+      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
+  ALWAYS_INLINE void SetFieldChar(MemberOffset field_offset, uint16_t new_value)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template<bool kTransactionActive, bool kCheckTransaction = true,
+      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
+  ALWAYS_INLINE void SetFieldShort(MemberOffset field_offset, int16_t new_value)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template<bool kTransactionActive, bool kCheckTransaction = true,
+      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  ALWAYS_INLINE void SetFieldCharVolatile(MemberOffset field_offset, uint16_t new_value)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template<bool kTransactionActive, bool kCheckTransaction = true,
+      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  ALWAYS_INLINE void SetFieldShortVolatile(MemberOffset field_offset, int16_t new_value)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE int32_t GetField32(MemberOffset field_offset)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -356,6 +429,13 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  private:
+  template<typename kSize, bool kIsVolatile>
+  ALWAYS_INLINE void SetField(MemberOffset field_offset, kSize new_value)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  template<typename kSize, bool kIsVolatile>
+  ALWAYS_INLINE kSize GetField(MemberOffset field_offset)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   // Verify the type correctness of stores to fields.
   // TODO: This can cause thread suspension and isn't moving GC safe.
   void CheckFieldAssignmentImpl(MemberOffset field_offset, Object* new_value)
diff --git a/runtime/mirror/reference-inl.h b/runtime/mirror/reference-inl.h
index b353402..d1d2a3a 100644
--- a/runtime/mirror/reference-inl.h
+++ b/runtime/mirror/reference-inl.h
@@ -24,7 +24,7 @@
 
 inline uint32_t Reference::ClassSize() {
   uint32_t vtable_entries = Object::kVTableLength + 5;
-  return Class::ComputeClassSize(false, vtable_entries, 2, 0, 0);
+  return Class::ComputeClassSize(false, vtable_entries, 2, 0, 0, 0, 0);
 }
 
 inline bool Reference::IsEnqueuable() {
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index 6736497..f98407b 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -29,7 +29,7 @@
 
 inline uint32_t String::ClassSize() {
   uint32_t vtable_entries = Object::kVTableLength + 51;
-  return Class::ComputeClassSize(true, vtable_entries, 1, 1, 2);
+  return Class::ComputeClassSize(true, vtable_entries, 0, 1, 0, 1, 2);
 }
 
 inline CharArray* String::GetCharArray() {