ART: Convert pointer size to enum
Move away from size_t to a dedicated enum class, so that pointer sizes are a distinct type rather than plain byte counts and mismatches become compile errors.
Bug: 30373134
Bug: 30419309
Test: m test-art-host
Change-Id: Id453c330f1065012e7d4f9fc24ac477cc9bb9269
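Note: base/enums.h itself is not part of this diff. As a minimal sketch (an assumption, with the enumerator values inferred from the literal 4 and 8 they replace in the headers below), the type the new code relies on looks like:

    // Scoped enum backed by size_t: a pointer size can no longer be passed
    // where an arbitrary byte count is expected, or vice versa.
    enum class PointerSize : size_t {
      k32 = 4,
      k64 = 8
    };

Code that still needs the raw byte count must now say so explicitly, e.g. static_cast<size_t>(PointerSize::k32), which is why that cast recurs throughout the diff.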
diff --git a/compiler/jni/quick/arm/calling_convention_arm.cc b/compiler/jni/quick/arm/calling_convention_arm.cc
index 29411f0..0d16260 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.cc
+++ b/compiler/jni/quick/arm/calling_convention_arm.cc
@@ -22,6 +22,8 @@
namespace art {
namespace arm {
+static_assert(kArmPointerSize == PointerSize::k32, "Unexpected ARM pointer size");
+
// Used by hard float.
static const Register kHFCoreArgumentRegisters[] = {
R0, R1, R2, R3
@@ -255,7 +257,7 @@
ArmJniCallingConvention::ArmJniCallingConvention(bool is_static, bool is_synchronized,
const char* shorty)
- : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
+ : JniCallingConvention(is_static, is_synchronized, shorty, kArmPointerSize) {
// Compute padding to ensure longs and doubles are not split in AAPCS. Ignore the 'this' jobject
// or jclass for static methods and the JNIEnv. We start at the aligned register r2.
size_t padding = 0;
@@ -287,9 +289,10 @@
size_t ArmJniCallingConvention::FrameSize() {
// Method*, LR and callee save area size, local reference segment state
- size_t frame_data_size = kArmPointerSize + (2 + CalleeSaveRegisters().size()) * kFramePointerSize;
+ size_t frame_data_size = static_cast<size_t>(kArmPointerSize)
+ + (2 + CalleeSaveRegisters().size()) * kFramePointerSize;
// References plus 2 words for HandleScope header
- size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
+ size_t handle_scope_size = HandleScope::SizeOf(kArmPointerSize, ReferenceCount());
// Plus return value spill area size
return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
@@ -343,7 +346,8 @@
FrameOffset ArmJniCallingConvention::CurrentParamStackOffset() {
CHECK_GE(itr_slots_, 4u);
- size_t offset = displacement_.Int32Value() - OutArgSize() + ((itr_slots_ - 4) * kFramePointerSize);
+ size_t offset =
+ displacement_.Int32Value() - OutArgSize() + ((itr_slots_ - 4) * kFramePointerSize);
CHECK_LT(offset, OutArgSize());
return FrameOffset(offset);
}
diff --git a/compiler/jni/quick/arm/calling_convention_arm.h b/compiler/jni/quick/arm/calling_convention_arm.h
index 157880b..7c717cc 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.h
+++ b/compiler/jni/quick/arm/calling_convention_arm.h
@@ -17,17 +17,21 @@
#ifndef ART_COMPILER_JNI_QUICK_ARM_CALLING_CONVENTION_ARM_H_
#define ART_COMPILER_JNI_QUICK_ARM_CALLING_CONVENTION_ARM_H_
+#include "base/enums.h"
#include "jni/quick/calling_convention.h"
namespace art {
namespace arm {
-constexpr size_t kFramePointerSize = 4;
+constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k32);
class ArmManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
public:
ArmManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
- : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {}
+ : ManagedRuntimeCallingConvention(is_static,
+ is_synchronized,
+ shorty,
+ PointerSize::k32) {}
~ArmManagedRuntimeCallingConvention() OVERRIDE {}
// Calling convention
ManagedRegister ReturnRegister() OVERRIDE;
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
index ab56c1c..afa707d 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.cc
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -22,6 +22,8 @@
namespace art {
namespace arm64 {
+static_assert(kArm64PointerSize == PointerSize::k64, "Unexpected ARM64 pointer size");
+
static const XRegister kXArgumentRegisters[] = {
X0, X1, X2, X3, X4, X5, X6, X7
};
@@ -211,7 +213,7 @@
// JNI calling convention
Arm64JniCallingConvention::Arm64JniCallingConvention(bool is_static, bool is_synchronized,
const char* shorty)
- : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
+ : JniCallingConvention(is_static, is_synchronized, shorty, kArm64PointerSize) {
}
uint32_t Arm64JniCallingConvention::CoreSpillMask() const {
@@ -231,7 +233,7 @@
size_t frame_data_size = kFramePointerSize +
CalleeSaveRegisters().size() * kFramePointerSize + sizeof(uint32_t);
// References plus 2 words for HandleScope header
- size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
+ size_t handle_scope_size = HandleScope::SizeOf(kArm64PointerSize, ReferenceCount());
// Plus return value spill area size
return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.h b/compiler/jni/quick/arm64/calling_convention_arm64.h
index 337e881..90b12e5 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.h
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.h
@@ -17,17 +17,21 @@
#ifndef ART_COMPILER_JNI_QUICK_ARM64_CALLING_CONVENTION_ARM64_H_
#define ART_COMPILER_JNI_QUICK_ARM64_CALLING_CONVENTION_ARM64_H_
+#include "base/enums.h"
#include "jni/quick/calling_convention.h"
namespace art {
namespace arm64 {
-constexpr size_t kFramePointerSize = 8;
+constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k64);
class Arm64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
public:
Arm64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
- : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {}
+ : ManagedRuntimeCallingConvention(is_static,
+ is_synchronized,
+ shorty,
+ PointerSize::k64) {}
~Arm64ManagedRuntimeCallingConvention() OVERRIDE {}
// Calling convention
ManagedRegister ReturnRegister() OVERRIDE;
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index e21f554..c7ed9c9 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -299,7 +299,7 @@
size_t JniCallingConvention::CurrentParamSize() {
if (itr_args_ <= kObjectOrClass) {
- return frame_pointer_size_; // JNIEnv or jobject/jclass
+ return static_cast<size_t>(frame_pointer_size_); // JNIEnv or jobject/jclass
} else {
int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
return ParamSize(arg_pos);
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
index e8f738d..995fa51 100644
--- a/compiler/jni/quick/calling_convention.h
+++ b/compiler/jni/quick/calling_convention.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_JNI_QUICK_CALLING_CONVENTION_H_
#include "base/arena_object.h"
+#include "base/enums.h"
#include "handle_scope.h"
#include "primitive.h"
#include "thread.h"
@@ -70,8 +71,10 @@
virtual ~CallingConvention() {}
protected:
- CallingConvention(bool is_static, bool is_synchronized, const char* shorty,
- size_t frame_pointer_size)
+ CallingConvention(bool is_static,
+ bool is_synchronized,
+ const char* shorty,
+ PointerSize frame_pointer_size)
: itr_slots_(0), itr_refs_(0), itr_args_(0), itr_longs_and_doubles_(0),
itr_float_and_doubles_(0), displacement_(0),
frame_pointer_size_(frame_pointer_size),
@@ -198,7 +201,7 @@
// Space for frames below this on the stack.
FrameOffset displacement_;
// The size of a pointer.
- const size_t frame_pointer_size_;
+ const PointerSize frame_pointer_size_;
// The size of a reference entry within the handle scope.
const size_t handle_scope_pointer_size_;
@@ -255,7 +258,7 @@
ManagedRuntimeCallingConvention(bool is_static,
bool is_synchronized,
const char* shorty,
- size_t frame_pointer_size)
+ PointerSize frame_pointer_size)
: CallingConvention(is_static, is_synchronized, shorty, frame_pointer_size) {}
};
@@ -328,7 +331,7 @@
// Position of handle scope and interior fields
FrameOffset HandleScopeOffset() const {
- return FrameOffset(this->displacement_.Int32Value() + frame_pointer_size_);
+ return FrameOffset(this->displacement_.Int32Value() + static_cast<size_t>(frame_pointer_size_));
// above Method reference
}
@@ -356,8 +359,10 @@
kObjectOrClass = 1
};
- JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty,
- size_t frame_pointer_size)
+ JniCallingConvention(bool is_static,
+ bool is_synchronized,
+ const char* shorty,
+ PointerSize frame_pointer_size)
: CallingConvention(is_static, is_synchronized, shorty, frame_pointer_size) {}
// Number of stack slots for outgoing arguments, above which the handle scope is
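(Not part of the patch: a hypothetical illustration of what the PointerSize parameter buys in these constructors. With the old size_t parameter any integer was silently accepted; with the enum, only the two named widths convert.)

    // Hypothetical call sites; the other arguments are as declared above.
    JniCallingConvention(is_static, is_synchronized, shorty, 2u);
    //   before: compiled, treating 2 as a (bogus) pointer size
    //   after:  error, no implicit conversion from unsigned int to PointerSize
    JniCallingConvention(is_static, is_synchronized, shorty, PointerSize::k32);  // OK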
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 4311a34..277b794 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -23,6 +23,7 @@
#include "art_method.h"
#include "base/arena_allocator.h"
+#include "base/enums.h"
#include "base/logging.h"
#include "base/macros.h"
#include "calling_convention.h"
@@ -125,16 +126,16 @@
if (is_64_bit_target) {
__ CopyRawPtrFromThread64(main_jni_conv->HandleScopeLinkOffset(),
- Thread::TopHandleScopeOffset<8>(),
+ Thread::TopHandleScopeOffset<PointerSize::k64>(),
mr_conv->InterproceduralScratchRegister());
- __ StoreStackOffsetToThread64(Thread::TopHandleScopeOffset<8>(),
+ __ StoreStackOffsetToThread64(Thread::TopHandleScopeOffset<PointerSize::k64>(),
main_jni_conv->HandleScopeOffset(),
mr_conv->InterproceduralScratchRegister());
} else {
__ CopyRawPtrFromThread32(main_jni_conv->HandleScopeLinkOffset(),
- Thread::TopHandleScopeOffset<4>(),
+ Thread::TopHandleScopeOffset<PointerSize::k32>(),
mr_conv->InterproceduralScratchRegister());
- __ StoreStackOffsetToThread32(Thread::TopHandleScopeOffset<4>(),
+ __ StoreStackOffsetToThread32(Thread::TopHandleScopeOffset<PointerSize::k32>(),
main_jni_conv->HandleScopeOffset(),
mr_conv->InterproceduralScratchRegister());
}
@@ -188,9 +189,9 @@
// 4. Write out the end of the quick frames.
if (is_64_bit_target) {
- __ StoreStackPointerToThread64(Thread::TopOfManagedStackOffset<8>());
+ __ StoreStackPointerToThread64(Thread::TopOfManagedStackOffset<PointerSize::k64>());
} else {
- __ StoreStackPointerToThread32(Thread::TopOfManagedStackOffset<4>());
+ __ StoreStackPointerToThread32(Thread::TopOfManagedStackOffset<PointerSize::k32>());
}
// 5. Move frame down to allow space for out going args.
@@ -201,8 +202,10 @@
// Call the read barrier for the declaring class loaded from the method for a static call.
// Note that we always have outgoing param space available for at least two params.
if (kUseReadBarrier && is_static) {
- ThreadOffset<4> read_barrier32 = QUICK_ENTRYPOINT_OFFSET(4, pReadBarrierJni);
- ThreadOffset<8> read_barrier64 = QUICK_ENTRYPOINT_OFFSET(8, pReadBarrierJni);
+ ThreadOffset32 read_barrier32 =
+ QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pReadBarrierJni);
+ ThreadOffset64 read_barrier64 =
+ QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pReadBarrierJni);
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
main_jni_conv->Next(); // Skip JNIEnv.
FrameOffset class_handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
@@ -245,10 +248,14 @@
// can occur. The result is the saved JNI local state that is restored by the exit call. We
// abuse the JNI calling convention here, that is guaranteed to support passing 2 pointer
// arguments.
- ThreadOffset<4> jni_start32 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(4, pJniMethodStartSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(4, pJniMethodStart);
- ThreadOffset<8> jni_start64 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(8, pJniMethodStartSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(8, pJniMethodStart);
+ ThreadOffset32 jni_start32 =
+ is_synchronized
+ ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pJniMethodStartSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pJniMethodStart);
+ ThreadOffset64 jni_start64 =
+ is_synchronized
+ ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pJniMethodStartSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pJniMethodStart);
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
FrameOffset locked_object_handle_scope_offset(0);
if (is_synchronized) {
@@ -346,17 +353,17 @@
ManagedRegister jni_env = main_jni_conv->CurrentParamRegister();
DCHECK(!jni_env.Equals(main_jni_conv->InterproceduralScratchRegister()));
if (is_64_bit_target) {
- __ LoadRawPtrFromThread64(jni_env, Thread::JniEnvOffset<8>());
+ __ LoadRawPtrFromThread64(jni_env, Thread::JniEnvOffset<PointerSize::k64>());
} else {
- __ LoadRawPtrFromThread32(jni_env, Thread::JniEnvOffset<4>());
+ __ LoadRawPtrFromThread32(jni_env, Thread::JniEnvOffset<PointerSize::k32>());
}
} else {
FrameOffset jni_env = main_jni_conv->CurrentParamStackOffset();
if (is_64_bit_target) {
- __ CopyRawPtrFromThread64(jni_env, Thread::JniEnvOffset<8>(),
+ __ CopyRawPtrFromThread64(jni_env, Thread::JniEnvOffset<PointerSize::k64>(),
main_jni_conv->InterproceduralScratchRegister());
} else {
- __ CopyRawPtrFromThread32(jni_env, Thread::JniEnvOffset<4>(),
+ __ CopyRawPtrFromThread32(jni_env, Thread::JniEnvOffset<PointerSize::k32>(),
main_jni_conv->InterproceduralScratchRegister());
}
}
@@ -387,7 +394,8 @@
main_jni_conv->GetReturnType() == Primitive::kPrimDouble &&
return_save_location.Uint32Value() % 8 != 0) {
// Ensure doubles are 8-byte aligned for MIPS
- return_save_location = FrameOffset(return_save_location.Uint32Value() + kMipsPointerSize);
+ return_save_location = FrameOffset(return_save_location.Uint32Value()
+ + static_cast<size_t>(kMipsPointerSize));
}
CHECK_LT(return_save_location.Uint32Value(), frame_size + main_out_arg_size);
__ Store(return_save_location, main_jni_conv->ReturnRegister(), main_jni_conv->SizeOfReturnValue());
@@ -406,21 +414,27 @@
}
// thread.
end_jni_conv->ResetIterator(FrameOffset(end_out_arg_size));
- ThreadOffset<4> jni_end32(-1);
- ThreadOffset<8> jni_end64(-1);
+ ThreadOffset32 jni_end32(-1);
+ ThreadOffset64 jni_end64(-1);
if (reference_return) {
// Pass result.
- jni_end32 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(4, pJniMethodEndWithReferenceSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(4, pJniMethodEndWithReference);
- jni_end64 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(8, pJniMethodEndWithReferenceSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(8, pJniMethodEndWithReference);
+ jni_end32 = is_synchronized
+ ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k32,
+ pJniMethodEndWithReferenceSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pJniMethodEndWithReference);
+ jni_end64 = is_synchronized
+ ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k64,
+ pJniMethodEndWithReferenceSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pJniMethodEndWithReference);
SetNativeParameter(jni_asm.get(), end_jni_conv.get(), end_jni_conv->ReturnRegister());
end_jni_conv->Next();
} else {
- jni_end32 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(4, pJniMethodEndSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(4, pJniMethodEnd);
- jni_end64 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(8, pJniMethodEndSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(8, pJniMethodEnd);
+ jni_end32 = is_synchronized
+ ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pJniMethodEndSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pJniMethodEnd);
+ jni_end64 = is_synchronized
+ ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pJniMethodEndSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pJniMethodEnd);
}
// Pass saved local reference state.
if (end_jni_conv->IsCurrentParamOnStack()) {
@@ -458,9 +472,11 @@
__ GetCurrentThread(end_jni_conv->CurrentParamStackOffset(),
end_jni_conv->InterproceduralScratchRegister());
if (is_64_bit_target) {
- __ CallFromThread64(ThreadOffset<8>(jni_end64), end_jni_conv->InterproceduralScratchRegister());
+ __ CallFromThread64(ThreadOffset64(jni_end64),
+ end_jni_conv->InterproceduralScratchRegister());
} else {
- __ CallFromThread32(ThreadOffset<4>(jni_end32), end_jni_conv->InterproceduralScratchRegister());
+ __ CallFromThread32(ThreadOffset32(jni_end32),
+ end_jni_conv->InterproceduralScratchRegister());
}
}
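ThreadOffset32 and ThreadOffset64 are used above but not defined in this diff; presumably (an assumption consistent with the call sites) the companion change in thread.h keys the offset machinery on PointerSize instead of an integer width:

    // Sketch of the assumed thread.h counterpart; not part of this diff.
    template <PointerSize pointer_size>
    class ThreadOffset;  // byte offset into Thread, tagged with pointer width

    using ThreadOffset32 = ThreadOffset<PointerSize::k32>;
    using ThreadOffset64 = ThreadOffset<PointerSize::k64>;

The same presumably holds for QUICK_ENTRYPOINT_OFFSET and the Thread::*Offset<> getters, which above are now instantiated with PointerSize::k32/k64 rather than 4/8.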
diff --git a/compiler/jni/quick/mips/calling_convention_mips.cc b/compiler/jni/quick/mips/calling_convention_mips.cc
index 3d4d140..f5ab5f7 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.cc
+++ b/compiler/jni/quick/mips/calling_convention_mips.cc
@@ -172,7 +172,7 @@
MipsJniCallingConvention::MipsJniCallingConvention(bool is_static, bool is_synchronized,
const char* shorty)
- : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
+ : JniCallingConvention(is_static, is_synchronized, shorty, kMipsPointerSize) {
// Compute padding to ensure longs and doubles are not split in AAPCS. Ignore the 'this' jobject
// or jclass for static methods and the JNIEnv. We start at the aligned register A2.
size_t padding = 0;
@@ -203,10 +203,10 @@
size_t MipsJniCallingConvention::FrameSize() {
// ArtMethod*, RA and callee save area size, local reference segment state
- size_t frame_data_size = kMipsPointerSize +
+ size_t frame_data_size = static_cast<size_t>(kMipsPointerSize) +
(2 + CalleeSaveRegisters().size()) * kFramePointerSize;
// References plus 2 words for HandleScope header
- size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
+ size_t handle_scope_size = HandleScope::SizeOf(kMipsPointerSize, ReferenceCount());
// Plus return value spill area size
return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
diff --git a/compiler/jni/quick/mips/calling_convention_mips.h b/compiler/jni/quick/mips/calling_convention_mips.h
index 5c128b0..e95a738 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.h
+++ b/compiler/jni/quick/mips/calling_convention_mips.h
@@ -17,17 +17,23 @@
#ifndef ART_COMPILER_JNI_QUICK_MIPS_CALLING_CONVENTION_MIPS_H_
#define ART_COMPILER_JNI_QUICK_MIPS_CALLING_CONVENTION_MIPS_H_
+#include "base/enums.h"
#include "jni/quick/calling_convention.h"
namespace art {
namespace mips {
constexpr size_t kFramePointerSize = 4;
+static_assert(kFramePointerSize == static_cast<size_t>(PointerSize::k32),
+ "Invalid frame pointer size");
class MipsManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
public:
MipsManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
- : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {}
+ : ManagedRuntimeCallingConvention(is_static,
+ is_synchronized,
+ shorty,
+ PointerSize::k32) {}
~MipsManagedRuntimeCallingConvention() OVERRIDE {}
// Calling convention
ManagedRegister ReturnRegister() OVERRIDE;
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.cc b/compiler/jni/quick/mips64/calling_convention_mips64.cc
index f2e1da8..8341e8e 100644
--- a/compiler/jni/quick/mips64/calling_convention_mips64.cc
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.cc
@@ -152,7 +152,7 @@
Mips64JniCallingConvention::Mips64JniCallingConvention(bool is_static, bool is_synchronized,
const char* shorty)
- : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
+ : JniCallingConvention(is_static, is_synchronized, shorty, kMips64PointerSize) {
}
uint32_t Mips64JniCallingConvention::CoreSpillMask() const {
@@ -172,7 +172,7 @@
size_t frame_data_size = kFramePointerSize +
(CalleeSaveRegisters().size() + 1) * kFramePointerSize + sizeof(uint32_t);
// References plus 2 words for HandleScope header
- size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
+ size_t handle_scope_size = HandleScope::SizeOf(kMips64PointerSize, ReferenceCount());
// Plus return value spill area size
return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.h b/compiler/jni/quick/mips64/calling_convention_mips64.h
index 99ea3cd..a5fd111 100644
--- a/compiler/jni/quick/mips64/calling_convention_mips64.h
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.h
@@ -17,17 +17,23 @@
#ifndef ART_COMPILER_JNI_QUICK_MIPS64_CALLING_CONVENTION_MIPS64_H_
#define ART_COMPILER_JNI_QUICK_MIPS64_CALLING_CONVENTION_MIPS64_H_
+#include "base/enums.h"
#include "jni/quick/calling_convention.h"
namespace art {
namespace mips64 {
constexpr size_t kFramePointerSize = 8;
+static_assert(kFramePointerSize == static_cast<size_t>(PointerSize::k64),
+ "Invalid frame pointer size");
class Mips64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
public:
Mips64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
- : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {}
+ : ManagedRuntimeCallingConvention(is_static,
+ is_synchronized,
+ shorty,
+ PointerSize::k64) {}
~Mips64ManagedRuntimeCallingConvention() OVERRIDE {}
// Calling convention
ManagedRegister ReturnRegister() OVERRIDE;
diff --git a/compiler/jni/quick/x86/calling_convention_x86.cc b/compiler/jni/quick/x86/calling_convention_x86.cc
index 22c7cd0..1d06f26 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.cc
+++ b/compiler/jni/quick/x86/calling_convention_x86.cc
@@ -23,6 +23,8 @@
namespace art {
namespace x86 {
+static_assert(kX86PointerSize == PointerSize::k32, "Unexpected x86 pointer size");
+
static constexpr ManagedRegister kCalleeSaveRegisters[] = {
// Core registers.
X86ManagedRegister::FromCpuRegister(EBP),
@@ -190,7 +192,7 @@
X86JniCallingConvention::X86JniCallingConvention(bool is_static, bool is_synchronized,
const char* shorty)
- : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
+ : JniCallingConvention(is_static, is_synchronized, shorty, kX86PointerSize) {
}
uint32_t X86JniCallingConvention::CoreSpillMask() const {
@@ -203,10 +205,10 @@
size_t X86JniCallingConvention::FrameSize() {
// Method*, return address and callee save area size, local reference segment state
- size_t frame_data_size = kX86PointerSize +
+ size_t frame_data_size = static_cast<size_t>(kX86PointerSize) +
(2 + CalleeSaveRegisters().size()) * kFramePointerSize;
// References plus 2 words for HandleScope header
- size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
+ size_t handle_scope_size = HandleScope::SizeOf(kX86PointerSize, ReferenceCount());
// Plus return value spill area size
return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
diff --git a/compiler/jni/quick/x86/calling_convention_x86.h b/compiler/jni/quick/x86/calling_convention_x86.h
index 9d678b7..ff92fc9 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.h
+++ b/compiler/jni/quick/x86/calling_convention_x86.h
@@ -17,17 +17,21 @@
#ifndef ART_COMPILER_JNI_QUICK_X86_CALLING_CONVENTION_X86_H_
#define ART_COMPILER_JNI_QUICK_X86_CALLING_CONVENTION_X86_H_
+#include "base/enums.h"
#include "jni/quick/calling_convention.h"
namespace art {
namespace x86 {
-constexpr size_t kFramePointerSize = 4;
+constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k32);
class X86ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
public:
X86ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
- : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize),
+ : ManagedRuntimeCallingConvention(is_static,
+ is_synchronized,
+ shorty,
+ PointerSize::k32),
gpr_arg_count_(0) {}
~X86ManagedRuntimeCallingConvention() OVERRIDE {}
// Calling convention
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
index cc4d232..cbf10bd 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
@@ -24,6 +24,10 @@
namespace art {
namespace x86_64 {
+constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k64);
+
+static_assert(kX86_64PointerSize == PointerSize::k64, "Unexpected x86_64 pointer size");
+
static constexpr ManagedRegister kCalleeSaveRegisters[] = {
// Core registers.
X86_64ManagedRegister::FromCpuRegister(RBX),
@@ -136,7 +140,7 @@
FrameOffset X86_64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
return FrameOffset(displacement_.Int32Value() + // displacement
- kX86_64PointerSize + // Method ref
+ static_cast<size_t>(kX86_64PointerSize) + // Method ref
itr_slots_ * sizeof(uint32_t)); // offset into in args
}
@@ -163,7 +167,7 @@
X86_64JniCallingConvention::X86_64JniCallingConvention(bool is_static, bool is_synchronized,
const char* shorty)
- : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
+ : JniCallingConvention(is_static, is_synchronized, shorty, kX86_64PointerSize) {
}
uint32_t X86_64JniCallingConvention::CoreSpillMask() const {
@@ -176,10 +180,10 @@
size_t X86_64JniCallingConvention::FrameSize() {
// Method*, return address and callee save area size, local reference segment state
- size_t frame_data_size = kX86_64PointerSize +
+ size_t frame_data_size = static_cast<size_t>(kX86_64PointerSize) +
(2 + CalleeSaveRegisters().size()) * kFramePointerSize;
// References plus link_ (pointer) and number_of_references_ (uint32_t) for HandleScope header
- size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
+ size_t handle_scope_size = HandleScope::SizeOf(kX86_64PointerSize, ReferenceCount());
// Plus return value spill area size
return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
}
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.h b/compiler/jni/quick/x86_64/calling_convention_x86_64.h
index e2d3d48..b98f505 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.h
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.h
@@ -17,17 +17,19 @@
#ifndef ART_COMPILER_JNI_QUICK_X86_64_CALLING_CONVENTION_X86_64_H_
#define ART_COMPILER_JNI_QUICK_X86_64_CALLING_CONVENTION_X86_64_H_
+#include "base/enums.h"
#include "jni/quick/calling_convention.h"
namespace art {
namespace x86_64 {
-constexpr size_t kFramePointerSize = 8;
-
class X86_64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
public:
X86_64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
- : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {}
+ : ManagedRuntimeCallingConvention(is_static,
+ is_synchronized,
+ shorty,
+ PointerSize::k64) {}
~X86_64ManagedRuntimeCallingConvention() OVERRIDE {}
// Calling convention
ManagedRegister ReturnRegister() OVERRIDE;