Object model changes to support 64bit.
Modify mirror objects so that references between them use an ObjectReference
value type rather than an Object* so that functionality to compress larger
references can be captured in the ObjectReference implementation.
ObjectReferences are 32bit and all other aspects of object layout remain as
they are currently.
Expand fields in objects holding pointers so they can hold 64bit pointers. It is
expected that the size of these will come down by improving where we hold
compiler meta-data.
Stub out x86_64 architecture specific runtime implementation.
Modify OutputStream so that reads and writes are of unsigned quantities.
Make the use of portable or quick code more explicit.
Templatize AtomicInteger to support more than just int32_t as a type.
Add missing annotalysis information on the mutator lock, and fix issues
relating to existing annotations.
Refactor array copy implementations so they are shared between System and other
uses elsewhere in the runtime.
Fix numerous 64bit build issues.
Change-Id: I1a5694c251a42c9eff71084dfdd4b51fff716822
diff --git a/runtime/arch/arm/context_arm.h b/runtime/arch/arm/context_arm.h
index 00651ff..020cae0 100644
--- a/runtime/arch/arm/context_arm.h
+++ b/runtime/arch/arm/context_arm.h
@@ -35,7 +35,7 @@
virtual void Reset();
- virtual void FillCalleeSaves(const StackVisitor& fr);
+ virtual void FillCalleeSaves(const StackVisitor& fr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
virtual void SetSP(uintptr_t new_sp) {
SetGPR(SP, new_sp);
diff --git a/runtime/arch/arm/portable_entrypoints_arm.S b/runtime/arch/arm/portable_entrypoints_arm.S
index ac519d5..98d17dc 100644
--- a/runtime/arch/arm/portable_entrypoints_arm.S
+++ b/runtime/arch/arm/portable_entrypoints_arm.S
@@ -53,7 +53,7 @@
mov ip, #0 @ set ip to 0
str ip, [sp] @ store NULL for method* at bottom of frame
add sp, #16 @ first 4 args are not passed on stack for portable
- ldr ip, [r0, #METHOD_CODE_OFFSET] @ get pointer to the code
+ ldr ip, [r0, #METHOD_PORTABLE_CODE_OFFSET] @ get pointer to the code
blx ip @ call the method
mov sp, r11 @ restore the stack pointer
ldr ip, [sp, #24] @ load the result pointer
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 34de93f..0e5c60a 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -302,7 +302,7 @@
ldr r3, [sp, #12] @ copy arg value for r3
mov ip, #0 @ set ip to 0
str ip, [sp] @ store NULL for method* at bottom of frame
- ldr ip, [r0, #METHOD_CODE_OFFSET] @ get pointer to the code
+ ldr ip, [r0, #METHOD_QUICK_CODE_OFFSET] @ get pointer to the code
blx ip @ call the method
mov sp, r11 @ restore the stack pointer
ldr ip, [sp, #24] @ load the result pointer
diff --git a/runtime/arch/context.cc b/runtime/arch/context.cc
index 7075e42..5eaf809 100644
--- a/runtime/arch/context.cc
+++ b/runtime/arch/context.cc
@@ -22,6 +22,10 @@
#include "mips/context_mips.h"
#elif defined(__i386__)
#include "x86/context_x86.h"
+#elif defined(__x86_64__)
+#include "x86_64/context_x86_64.h"
+#else
+#include "base/logging.h"
#endif
namespace art {
@@ -33,8 +37,11 @@
return new mips::MipsContext();
#elif defined(__i386__)
return new x86::X86Context();
+#elif defined(__x86_64__)
+ return new x86_64::X86_64Context();
#else
UNIMPLEMENTED(FATAL);
+ return nullptr;
#endif
}
diff --git a/runtime/arch/context.h b/runtime/arch/context.h
index 91e0cd6..3d11178 100644
--- a/runtime/arch/context.h
+++ b/runtime/arch/context.h
@@ -20,6 +20,8 @@
#include <stddef.h>
#include <stdint.h>
+#include "locks.h"
+
namespace art {
class StackVisitor;
@@ -38,7 +40,8 @@
// Read values from callee saves in the given frame. The frame also holds
// the method that holds the layout.
- virtual void FillCalleeSaves(const StackVisitor& fr) = 0;
+ virtual void FillCalleeSaves(const StackVisitor& fr)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
// Set the stack pointer value
virtual void SetSP(uintptr_t new_sp) = 0;
diff --git a/runtime/arch/mips/context_mips.h b/runtime/arch/mips/context_mips.h
index 5595f86..4145cd3 100644
--- a/runtime/arch/mips/context_mips.h
+++ b/runtime/arch/mips/context_mips.h
@@ -33,7 +33,7 @@
virtual void Reset();
- virtual void FillCalleeSaves(const StackVisitor& fr);
+ virtual void FillCalleeSaves(const StackVisitor& fr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
virtual void SetSP(uintptr_t new_sp) {
SetGPR(SP, new_sp);
diff --git a/runtime/arch/mips/portable_entrypoints_mips.S b/runtime/arch/mips/portable_entrypoints_mips.S
index 9208a8a..7545ce0 100644
--- a/runtime/arch/mips/portable_entrypoints_mips.S
+++ b/runtime/arch/mips/portable_entrypoints_mips.S
@@ -61,5 +61,73 @@
.cfi_adjust_cfa_offset -64
END art_portable_proxy_invoke_handler
+ /*
+ * Invocation stub for portable code.
+ * On entry:
+ * a0 = method pointer
+ * a1 = argument array or NULL for no argument methods
+ * a2 = size of argument array in bytes
+ * a3 = (managed) thread pointer
+ * [sp + 16] = JValue* result
+ * [sp + 20] = result type char
+ */
+ENTRY art_portable_invoke_stub
+ GENERATE_GLOBAL_POINTER
+ sw $a0, 0($sp) # save out a0
+ addiu $sp, $sp, -16 # spill s0, s1, fp, ra
+ .cfi_adjust_cfa_offset 16
+ sw $ra, 12($sp)
+ .cfi_rel_offset 31, 12
+ sw $fp, 8($sp)
+ .cfi_rel_offset 30, 8
+ sw $s1, 4($sp)
+ .cfi_rel_offset 17, 4
+ sw $s0, 0($sp)
+ .cfi_rel_offset 16, 0
+ move $fp, $sp # save sp in fp
+ .cfi_def_cfa_register 30
+ move $s1, $a3 # move managed thread pointer into s1
+ addiu $s0, $zero, SUSPEND_CHECK_INTERVAL # reset s0 to suspend check interval
+ addiu $t0, $a2, 16 # create space for method pointer in frame
+ srl $t0, $t0, 3 # shift the frame size right 3
+ sll $t0, $t0, 3 # shift the frame size left 3 to align to 16 bytes
+ subu $sp, $sp, $t0 # reserve stack space for argument array
+ addiu $a0, $sp, 4 # pass stack pointer + method ptr as dest for memcpy
+ jal memcpy # (dest, src, bytes)
+ addiu $sp, $sp, -16 # make space for argument slots for memcpy
+ addiu $sp, $sp, 16 # restore stack after memcpy
+ lw $a0, 16($fp) # restore method*
+ lw $a1, 4($sp) # copy arg value for a1
+ lw $a2, 8($sp) # copy arg value for a2
+ lw $a3, 12($sp) # copy arg value for a3
+ lw $t9, METHOD_PORTABLE_CODE_OFFSET($a0) # get pointer to the code
+ jalr $t9 # call the method
+ sw $zero, 0($sp) # store NULL for method* at bottom of frame
+ move $sp, $fp # restore the stack
+ lw $s0, 0($sp)
+ .cfi_restore 16
+ lw $s1, 4($sp)
+ .cfi_restore 17
+ lw $fp, 8($sp)
+ .cfi_restore 30
+ lw $ra, 12($sp)
+ .cfi_restore 31
+ addiu $sp, $sp, 16
+ .cfi_adjust_cfa_offset -16
+ lw $t0, 16($sp) # get result pointer
+ lw $t1, 20($sp) # get result type char
+ li $t2, 68 # put char 'D' into t2
+ beq $t1, $t2, 1f # branch if result type char == 'D'
+ li $t3, 70 # put char 'F' into t3
+ beq $t1, $t3, 1f # branch if result type char == 'F'
+ sw $v0, 0($t0) # store the result
+ jr $ra
+ sw $v1, 4($t0) # store the other half of the result
+1:
+ s.s $f0, 0($t0) # store floating point result
+ jr $ra
+ s.s $f1, 4($t0) # store other half of floating point result
+END art_portable_invoke_stub
+
UNIMPLEMENTED art_portable_resolution_trampoline
UNIMPLEMENTED art_portable_to_interpreter_bridge
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 2d1e87a..c60bca0 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -449,7 +449,7 @@
INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
/*
- * Common invocation stub for portable and quick.
+ * Invocation stub for quick code.
* On entry:
* a0 = method pointer
* a1 = argument array or NULL for no argument methods
@@ -458,9 +458,6 @@
* [sp + 16] = JValue* result
* [sp + 20] = result type char
*/
- .type art_portable_invoke_stub, %function
- .global art_portable_invoke_stub
-art_portable_invoke_stub:
ENTRY art_quick_invoke_stub
GENERATE_GLOBAL_POINTER
sw $a0, 0($sp) # save out a0
@@ -490,7 +487,7 @@
lw $a1, 4($sp) # copy arg value for a1
lw $a2, 8($sp) # copy arg value for a2
lw $a3, 12($sp) # copy arg value for a3
- lw $t9, METHOD_CODE_OFFSET($a0) # get pointer to the code
+ lw $t9, METHOD_QUICK_CODE_OFFSET($a0) # get pointer to the code
jalr $t9 # call the method
sw $zero, 0($sp) # store NULL for method* at bottom of frame
move $sp, $fp # restore the stack
@@ -518,7 +515,6 @@
jr $ra
s.s $f1, 4($t0) # store other half of floating point result
END art_quick_invoke_stub
- .size art_portable_invoke_stub, .-art_portable_invoke_stub
/*
* Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on
diff --git a/runtime/arch/x86/context_x86.cc b/runtime/arch/x86/context_x86.cc
index 66a51f7..d7dca64 100644
--- a/runtime/arch/x86/context_x86.cc
+++ b/runtime/arch/x86/context_x86.cc
@@ -23,7 +23,7 @@
namespace art {
namespace x86 {
-static const uint32_t gZero = 0;
+static const uintptr_t gZero = 0;
void X86Context::Reset() {
for (int i = 0; i < kNumberOfCpuRegisters; i++) {
@@ -55,8 +55,8 @@
void X86Context::SmashCallerSaves() {
// This needs to be 0 because we want a null/zero return value.
- gprs_[EAX] = const_cast<uint32_t*>(&gZero);
- gprs_[EDX] = const_cast<uint32_t*>(&gZero);
+ gprs_[EAX] = const_cast<uintptr_t*>(&gZero);
+ gprs_[EDX] = const_cast<uintptr_t*>(&gZero);
gprs_[ECX] = NULL;
gprs_[EBX] = NULL;
}
@@ -89,7 +89,7 @@
: "g"(&gprs[0]) // input.
:); // clobber.
#else
- UNIMPLEMENTED(FATAL);
+ UNIMPLEMENTED(FATAL);
#endif
}
diff --git a/runtime/arch/x86/context_x86.h b/runtime/arch/x86/context_x86.h
index d7d2210..598314d 100644
--- a/runtime/arch/x86/context_x86.h
+++ b/runtime/arch/x86/context_x86.h
@@ -33,7 +33,7 @@
virtual void Reset();
- virtual void FillCalleeSaves(const StackVisitor& fr);
+ virtual void FillCalleeSaves(const StackVisitor& fr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
virtual void SetSP(uintptr_t new_sp) {
SetGPR(ESP, new_sp);
diff --git a/runtime/arch/x86/jni_entrypoints_x86.S b/runtime/arch/x86/jni_entrypoints_x86.S
index 72047d5..2eb5ada 100644
--- a/runtime/arch/x86/jni_entrypoints_x86.S
+++ b/runtime/arch/x86/jni_entrypoints_x86.S
@@ -17,7 +17,7 @@
#include "asm_support_x86.S"
/*
- * Portable resolution trampoline.
+ * Jni dlsym lookup stub.
*/
DEFINE_FUNCTION art_jni_dlsym_lookup_stub
subl LITERAL(4), %esp // align stack
diff --git a/runtime/arch/x86/portable_entrypoints_x86.S b/runtime/arch/x86/portable_entrypoints_x86.S
index 48de7c1..4bd6173 100644
--- a/runtime/arch/x86/portable_entrypoints_x86.S
+++ b/runtime/arch/x86/portable_entrypoints_x86.S
@@ -46,7 +46,7 @@
addl LITERAL(12), %esp // pop arguments to memcpy
mov 12(%ebp), %eax // move method pointer into eax
mov %eax, (%esp) // push method pointer onto stack
- call *METHOD_CODE_OFFSET(%eax) // call the method
+ call *METHOD_PORTABLE_CODE_OFFSET(%eax) // call the method
mov %ebp, %esp // restore stack pointer
POP ebx // pop ebx
POP ebp // pop ebp
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 74ec761..9c3eb30 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -275,7 +275,7 @@
mov 4(%esp), %ecx // copy arg1 into ecx
mov 8(%esp), %edx // copy arg2 into edx
mov 12(%esp), %ebx // copy arg3 into ebx
- call *METHOD_CODE_OFFSET(%eax) // call the method
+ call *METHOD_QUICK_CODE_OFFSET(%eax) // call the method
mov %ebp, %esp // restore stack pointer
CFI_DEF_CFA_REGISTER(esp)
POP ebx // pop ebx
diff --git a/runtime/arch/x86_64/asm_support_x86_64.S b/runtime/arch/x86_64/asm_support_x86_64.S
new file mode 100644
index 0000000..b59c0cb
--- /dev/null
+++ b/runtime/arch/x86_64/asm_support_x86_64.S
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_S_
+#define ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_S_
+
+#include "asm_support_x86_64.h"
+
+#if defined(__APPLE__)
+ // Mac OS' as(1) doesn't let you name macro parameters.
+ #define MACRO0(macro_name) .macro macro_name
+ #define MACRO1(macro_name, macro_arg1) .macro macro_name
+ #define MACRO2(macro_name, macro_arg1, macro_args2) .macro macro_name
+ #define MACRO3(macro_name, macro_arg1, macro_args2, macro_args3) .macro macro_name
+ #define END_MACRO .endmacro
+
+ // Mac OS' as(1) uses $0, $1, and so on for macro arguments, and function names
+ // are mangled with an extra underscore prefix. The use of $x for arguments
+ // mean that literals need to be represented with $$x in macros.
+ #define SYMBOL(name) _ ## name
+ #define PLT_SYMBOL(name) _ ## name
+ #define VAR(name,index) SYMBOL($index)
+ #define PLT_VAR(name, index) SYMBOL($index)
+ #define REG_VAR(name,index) %$index
+ #define CALL_MACRO(name,index) $index
+ #define LITERAL(value) $value
+ #define MACRO_LITERAL(value) $$value
+
+ // Mac OS' doesn't like cfi_* directives
+ #define CFI_STARTPROC
+ #define CFI_ENDPROC
+ #define CFI_ADJUST_CFA_OFFSET(size)
+ #define CFI_DEF_CFA(reg,size)
+ #define CFI_DEF_CFA_REGISTER(reg)
+ #define CFI_RESTORE(reg)
+ #define CFI_REL_OFFSET(reg,size)
+
+ // Mac OS' doesn't support certain directives
+ #define FUNCTION_TYPE(name)
+ #define SIZE(name)
+#else
+ // Regular gas(1) lets you name macro parameters.
+ #define MACRO0(macro_name) .macro macro_name
+ #define MACRO1(macro_name, macro_arg1) .macro macro_name macro_arg1
+ #define MACRO2(macro_name, macro_arg1, macro_arg2) .macro macro_name macro_arg1, macro_arg2
+ #define MACRO3(macro_name, macro_arg1, macro_arg2, macro_arg3) .macro macro_name macro_arg1, macro_arg2, macro_arg3
+ #define END_MACRO .endm
+
+ // Regular gas(1) uses \argument_name for macro arguments.
+ // We need to turn on alternate macro syntax so we can use & instead or the preprocessor
+ // will screw us by inserting a space between the \ and the name. Even in this mode there's
+ // no special meaning to $, so literals are still just $x. The use of altmacro means % is a
+ // special character meaning care needs to be taken when passing registers as macro arguments.
+ .altmacro
+ #define SYMBOL(name) name
+ #define PLT_SYMBOL(name) name@PLT
+ #define VAR(name,index) name&
+ #define PLT_VAR(name, index) name&@PLT
+ #define REG_VAR(name,index) %name
+ #define CALL_MACRO(name,index) name&
+ #define LITERAL(value) $value
+ #define MACRO_LITERAL(value) $value
+
+ // CFI support
+ #define CFI_STARTPROC .cfi_startproc
+ #define CFI_ENDPROC .cfi_endproc
+ #define CFI_ADJUST_CFA_OFFSET(size) .cfi_adjust_cfa_offset size
+ #define CFI_DEF_CFA(reg,size) .cfi_def_cfa reg,size
+ #define CFI_DEF_CFA_REGISTER(reg) .cfi_def_cfa_register reg
+ #define CFI_RESTORE(reg) .cfi_restore reg
+ #define CFI_REL_OFFSET(reg,size) .cfi_rel_offset reg,size
+
+ #define FUNCTION_TYPE(name) .type name&, @function
+ #define SIZE(name) .size name, .-name
+#endif
+
+ /* Cache alignment for function entry */
+MACRO0(ALIGN_FUNCTION_ENTRY)
+ .balign 16
+END_MACRO
+
+MACRO1(DEFINE_FUNCTION, c_name)
+ FUNCTION_TYPE(\c_name)
+ .globl VAR(c_name, 0)
+ ALIGN_FUNCTION_ENTRY
+VAR(c_name, 0):
+ CFI_STARTPROC
+END_MACRO
+
+MACRO1(END_FUNCTION, c_name)
+ CFI_ENDPROC
+ SIZE(\c_name)
+END_MACRO
+
+MACRO1(PUSH, reg)
+ pushq REG_VAR(reg, 0)
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(REG_VAR(reg, 0), 0)
+END_MACRO
+
+MACRO1(POP, reg)
+ popq REG_VAR(reg,0)
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(REG_VAR(reg,0))
+END_MACRO
+
+MACRO1(UNIMPLEMENTED,name)
+ FUNCTION_TYPE(\name)
+ .globl VAR(name, 0)
+ ALIGN_FUNCTION_ENTRY
+VAR(name, 0):
+ CFI_STARTPROC
+ int3
+ int3
+ CFI_ENDPROC
+ SIZE(\name)
+END_MACRO
+
+MACRO0(SETUP_GOT_NOSAVE)
+ call __x86.get_pc_thunk.bx
+ addl $_GLOBAL_OFFSET_TABLE_, %ebx
+END_MACRO
+
+MACRO0(SETUP_GOT)
+ PUSH ebx
+ SETUP_GOT_NOSAVE
+END_MACRO
+
+MACRO0(UNDO_SETUP_GOT)
+ POP ebx
+END_MACRO
+
+#endif // ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_S_
diff --git a/runtime/arch/x86_64/asm_support_x86_64.h b/runtime/arch/x86_64/asm_support_x86_64.h
new file mode 100644
index 0000000..d425ed8
--- /dev/null
+++ b/runtime/arch/x86_64/asm_support_x86_64.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_H_
+#define ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_H_
+
+#include "asm_support.h"
+
+// Offset of field Thread::self_ verified in InitCpu
+#define THREAD_SELF_OFFSET 72
+// Offset of field Thread::card_table_ verified in InitCpu
+#define THREAD_CARD_TABLE_OFFSET 8
+// Offset of field Thread::exception_ verified in InitCpu
+#define THREAD_EXCEPTION_OFFSET 16
+// Offset of field Thread::thin_lock_thread_id_ verified in InitCpu
+#define THREAD_ID_OFFSET 112
+
+#endif // ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_H_
diff --git a/runtime/arch/x86_64/context_x86_64.cc b/runtime/arch/x86_64/context_x86_64.cc
new file mode 100644
index 0000000..4d1131c
--- /dev/null
+++ b/runtime/arch/x86_64/context_x86_64.cc
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "context_x86_64.h"
+
+#include "mirror/art_method.h"
+#include "mirror/object-inl.h"
+#include "stack.h"
+
+namespace art {
+namespace x86_64 {
+
+static const uintptr_t gZero = 0;
+
+void X86_64Context::Reset() {
+ for (int i = 0; i < kNumberOfCpuRegisters; i++) {
+ gprs_[i] = NULL;
+ }
+ gprs_[RSP] = &rsp_;
+ // Initialize registers with easy to spot debug values.
+ rsp_ = X86_64Context::kBadGprBase + RSP;
+ rip_ = X86_64Context::kBadGprBase + kNumberOfCpuRegisters;
+}
+
+void X86_64Context::FillCalleeSaves(const StackVisitor& fr) {
+ mirror::ArtMethod* method = fr.GetMethod();
+ uint32_t core_spills = method->GetCoreSpillMask();
+ size_t spill_count = __builtin_popcount(core_spills);
+ DCHECK_EQ(method->GetFpSpillMask(), 0u);
+ size_t frame_size = method->GetFrameSizeInBytes();
+ if (spill_count > 0) {
+ // Lowest number spill is farthest away, walk registers and fill into context.
+ int j = 2; // Offset j to skip return address spill.
+ for (int i = 0; i < kNumberOfCpuRegisters; i++) {
+ if (((core_spills >> i) & 1) != 0) {
+ gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_size);
+ j++;
+ }
+ }
+ }
+}
+
+void X86_64Context::SmashCallerSaves() {
+ // This needs to be 0 because we want a null/zero return value.
+ gprs_[RAX] = const_cast<uintptr_t*>(&gZero);
+ gprs_[RDX] = const_cast<uintptr_t*>(&gZero);
+ gprs_[RCX] = nullptr;
+ gprs_[RBX] = nullptr;
+}
+
+void X86_64Context::SetGPR(uint32_t reg, uintptr_t value) {
+ CHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
+ CHECK_NE(gprs_[reg], &gZero);
+ CHECK(gprs_[reg] != NULL);
+ *gprs_[reg] = value;
+}
+
+void X86_64Context::DoLongJump() {
+ UNIMPLEMENTED(FATAL);
+}
+
+} // namespace x86_64
+} // namespace art
diff --git a/runtime/arch/x86_64/context_x86_64.h b/runtime/arch/x86_64/context_x86_64.h
new file mode 100644
index 0000000..3e49165
--- /dev/null
+++ b/runtime/arch/x86_64/context_x86_64.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_X86_64_CONTEXT_X86_64_H_
+#define ART_RUNTIME_ARCH_X86_64_CONTEXT_X86_64_H_
+
+#include "arch/context.h"
+#include "base/logging.h"
+#include "registers_x86_64.h"
+
+namespace art {
+namespace x86_64 {
+
+class X86_64Context : public Context {
+ public:
+ X86_64Context() {
+ Reset();
+ }
+ virtual ~X86_64Context() {}
+
+ virtual void Reset();
+
+ virtual void FillCalleeSaves(const StackVisitor& fr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ virtual void SetSP(uintptr_t new_sp) {
+ SetGPR(RSP, new_sp);
+ }
+
+ virtual void SetPC(uintptr_t new_pc) {
+ rip_ = new_pc;
+ }
+
+ virtual uintptr_t GetGPR(uint32_t reg) {
+ const uint32_t kNumberOfCpuRegisters = 8;
+ DCHECK_LT(reg, kNumberOfCpuRegisters);
+ return *gprs_[reg];
+ }
+
+ virtual void SetGPR(uint32_t reg, uintptr_t value);
+
+ virtual void SmashCallerSaves();
+ virtual void DoLongJump();
+
+ private:
+ // Pointers to register locations, floating point registers are all caller save. Values are
+ // initialized to NULL or the special registers below.
+ uintptr_t* gprs_[kNumberOfCpuRegisters];
+ // Hold values for rsp and rip if they are not located within a stack frame. RIP is somewhat
+ // special in that it cannot be encoded normally as a register operand to an instruction (except
+ // in 64bit addressing modes).
+ uintptr_t rsp_, rip_;
+};
+} // namespace x86_64
+} // namespace art
+
+#endif // ART_RUNTIME_ARCH_X86_64_CONTEXT_X86_64_H_
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
new file mode 100644
index 0000000..589c7d9
--- /dev/null
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -0,0 +1,224 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "entrypoints/portable/portable_entrypoints.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "entrypoints/entrypoint_utils.h"
+
+namespace art {
+
+// Interpreter entrypoints.
+extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result);
+extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result);
+
+// Portable entrypoints.
+extern "C" void art_portable_resolution_trampoline(mirror::ArtMethod*);
+extern "C" void art_portable_to_interpreter_bridge(mirror::ArtMethod*);
+
+// Cast entrypoints.
+extern "C" uint32_t art_quick_is_assignable(const mirror::Class* klass,
+ const mirror::Class* ref_class);
+extern "C" void art_quick_check_cast(void*, void*);
+
+// DexCache entrypoints.
+extern "C" void* art_quick_initialize_static_storage(uint32_t, void*);
+extern "C" void* art_quick_initialize_type(uint32_t, void*);
+extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, void*);
+extern "C" void* art_quick_resolve_string(void*, uint32_t);
+
+// Field entrypoints.
+extern "C" int art_quick_set32_instance(uint32_t, void*, int32_t);
+extern "C" int art_quick_set32_static(uint32_t, int32_t);
+extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t);
+extern "C" int art_quick_set64_static(uint32_t, int64_t);
+extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*);
+extern "C" int art_quick_set_obj_static(uint32_t, void*);
+extern "C" int32_t art_quick_get32_instance(uint32_t, void*);
+extern "C" int32_t art_quick_get32_static(uint32_t);
+extern "C" int64_t art_quick_get64_instance(uint32_t, void*);
+extern "C" int64_t art_quick_get64_static(uint32_t);
+extern "C" void* art_quick_get_obj_instance(uint32_t, void*);
+extern "C" void* art_quick_get_obj_static(uint32_t);
+
+// Array entrypoints.
+extern "C" void art_quick_aput_obj_with_null_and_bound_check(void*, uint32_t, void*);
+extern "C" void art_quick_aput_obj_with_bound_check(void*, uint32_t, void*);
+extern "C" void art_quick_aput_obj(void*, uint32_t, void*);
+extern "C" void art_quick_handle_fill_data(void*, void*);
+
+// Lock entrypoints.
+extern "C" void art_quick_lock_object(void*);
+extern "C" void art_quick_unlock_object(void*);
+
+// Math entrypoints.
+extern "C" double art_quick_fmod(double, double);
+extern "C" float art_quick_fmodf(float, float);
+extern "C" double art_quick_l2d(int64_t);
+extern "C" float art_quick_l2f(int64_t);
+extern "C" int64_t art_quick_d2l(double);
+extern "C" int64_t art_quick_f2l(float);
+extern "C" int32_t art_quick_idivmod(int32_t, int32_t);
+extern "C" int64_t art_quick_ldiv(int64_t, int64_t);
+extern "C" int64_t art_quick_lmod(int64_t, int64_t);
+extern "C" int64_t art_quick_lmul(int64_t, int64_t);
+extern "C" uint64_t art_quick_lshl(uint64_t, uint32_t);
+extern "C" uint64_t art_quick_lshr(uint64_t, uint32_t);
+extern "C" uint64_t art_quick_lushr(uint64_t, uint32_t);
+
+// Intrinsic entrypoints.
+extern "C" int32_t art_quick_memcmp16(void*, void*, int32_t);
+extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
+extern "C" int32_t art_quick_string_compareto(void*, void*);
+extern "C" void* art_quick_memcpy(void*, const void*, size_t);
+
+// Invoke entrypoints.
+extern "C" void art_quick_imt_conflict_trampoline(mirror::ArtMethod*);
+extern "C" void art_quick_resolution_trampoline(mirror::ArtMethod*);
+extern "C" void art_quick_to_interpreter_bridge(mirror::ArtMethod*);
+extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*);
+
+// Thread entrypoints.
+extern void CheckSuspendFromCode(Thread* thread);
+extern "C" void art_quick_test_suspend();
+
+// Throw entrypoints.
+extern "C" void art_quick_deliver_exception(void*);
+extern "C" void art_quick_throw_array_bounds(int32_t index, int32_t limit);
+extern "C" void art_quick_throw_div_zero();
+extern "C" void art_quick_throw_no_such_method(int32_t method_idx);
+extern "C" void art_quick_throw_null_pointer_exception();
+extern "C" void art_quick_throw_stack_overflow(void*);
+
+extern void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints);
+
+void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
+ PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
+ // Interpreter
+ ipoints->pInterpreterToInterpreterBridge = artInterpreterToInterpreterBridge;
+ ipoints->pInterpreterToCompiledCodeBridge = artInterpreterToCompiledCodeBridge;
+
+ // JNI
+ jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
+
+ // Portable
+ ppoints->pPortableResolutionTrampoline = art_portable_resolution_trampoline;
+ ppoints->pPortableToInterpreterBridge = art_portable_to_interpreter_bridge;
+
+ // Alloc
+ ResetQuickAllocEntryPoints(qpoints);
+
+ // Cast
+ qpoints->pInstanceofNonTrivial = art_quick_is_assignable;
+ qpoints->pCheckCast = art_quick_check_cast;
+
+ // DexCache
+ qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage;
+ qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access;
+ qpoints->pInitializeType = art_quick_initialize_type;
+ qpoints->pResolveString = art_quick_resolve_string;
+
+ // Field
+ qpoints->pSet32Instance = art_quick_set32_instance;
+ qpoints->pSet32Static = art_quick_set32_static;
+ qpoints->pSet64Instance = art_quick_set64_instance;
+ qpoints->pSet64Static = art_quick_set64_static;
+ qpoints->pSetObjInstance = art_quick_set_obj_instance;
+ qpoints->pSetObjStatic = art_quick_set_obj_static;
+ qpoints->pGet32Instance = art_quick_get32_instance;
+ qpoints->pGet64Instance = art_quick_get64_instance;
+ qpoints->pGetObjInstance = art_quick_get_obj_instance;
+ qpoints->pGet32Static = art_quick_get32_static;
+ qpoints->pGet64Static = art_quick_get64_static;
+ qpoints->pGetObjStatic = art_quick_get_obj_static;
+
+ // Array
+ qpoints->pAputObjectWithNullAndBoundCheck = art_quick_aput_obj_with_null_and_bound_check;
+ qpoints->pAputObjectWithBoundCheck = art_quick_aput_obj_with_bound_check;
+ qpoints->pAputObject = art_quick_aput_obj;
+ qpoints->pHandleFillArrayData = art_quick_handle_fill_data;
+
+ // JNI
+ qpoints->pJniMethodStart = JniMethodStart;
+ qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized;
+ qpoints->pJniMethodEnd = JniMethodEnd;
+ qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized;
+ qpoints->pJniMethodEndWithReference = JniMethodEndWithReference;
+ qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
+
+ // Locks
+ qpoints->pLockObject = art_quick_lock_object;
+ qpoints->pUnlockObject = art_quick_unlock_object;
+
+ // Math
+ // points->pCmpgDouble = NULL; // Not needed on x86.
+ // points->pCmpgFloat = NULL; // Not needed on x86.
+ // points->pCmplDouble = NULL; // Not needed on x86.
+ // points->pCmplFloat = NULL; // Not needed on x86.
+ qpoints->pFmod = art_quick_fmod;
+ // qpoints->pSqrt = NULL; // Not needed on x86.
+ qpoints->pL2d = art_quick_l2d;
+ qpoints->pFmodf = art_quick_fmodf;
+ qpoints->pL2f = art_quick_l2f;
+ // points->pD2iz = NULL; // Not needed on x86.
+ // points->pF2iz = NULL; // Not needed on x86.
+ qpoints->pIdivmod = art_quick_idivmod;
+ qpoints->pD2l = art_quick_d2l;
+ qpoints->pF2l = art_quick_f2l;
+ qpoints->pLdiv = art_quick_ldiv;
+ qpoints->pLmod = art_quick_lmod;
+ qpoints->pLmul = art_quick_lmul;
+ qpoints->pShlLong = art_quick_lshl;
+ qpoints->pShrLong = art_quick_lshr;
+ qpoints->pUshrLong = art_quick_lushr;
+
+ // Intrinsics
+ qpoints->pIndexOf = art_quick_indexof;
+ qpoints->pMemcmp16 = art_quick_memcmp16;
+ qpoints->pStringCompareTo = art_quick_string_compareto;
+ qpoints->pMemcpy = art_quick_memcpy;
+
+ // Invocation
+ qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
+ qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
+ qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
+ qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
+ qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
+ qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
+ qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
+ qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+
+ // Thread
+ qpoints->pCheckSuspend = CheckSuspendFromCode;
+ qpoints->pTestSuspend = art_quick_test_suspend;
+
+ // Throws
+ qpoints->pDeliverException = art_quick_deliver_exception;
+ qpoints->pThrowArrayBounds = art_quick_throw_array_bounds;
+ qpoints->pThrowDivZero = art_quick_throw_div_zero;
+ qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method;
+ qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception;
+ qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
+};
+
+} // namespace art
diff --git a/runtime/arch/x86_64/jni_entrypoints_x86_64.S b/runtime/arch/x86_64/jni_entrypoints_x86_64.S
new file mode 100644
index 0000000..35fcccb
--- /dev/null
+++ b/runtime/arch/x86_64/jni_entrypoints_x86_64.S
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "asm_support_x86_64.S"
+
+ /*
+ * Jni dlsym lookup stub.
+ */
+UNIMPLEMENTED art_jni_dlsym_lookup_stub
diff --git a/runtime/arch/x86_64/portable_entrypoints_x86_64.S b/runtime/arch/x86_64/portable_entrypoints_x86_64.S
new file mode 100644
index 0000000..2e9d19a
--- /dev/null
+++ b/runtime/arch/x86_64/portable_entrypoints_x86_64.S
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "asm_support_x86_64.S"
+
+ /*
+ * Portable invocation stub.
+ */
+UNIMPLEMENTED art_portable_invoke_stub
+
+UNIMPLEMENTED art_portable_proxy_invoke_handler
+
+UNIMPLEMENTED art_portable_resolution_trampoline
+
+UNIMPLEMENTED art_portable_to_interpreter_bridge
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
new file mode 100644
index 0000000..e01a31b
--- /dev/null
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -0,0 +1,401 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "asm_support_x86_64.S"
+
+// For x86_64, the CFA is rsp+8, the address above the pushed return address on the stack.
+
+ /*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kSaveAll)
+ */
+MACRO0(SETUP_SAVE_ALL_CALLEE_SAVE_FRAME)
+ int3
+ int3
+END_MACRO
+
+ /*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kRefsOnly)
+ */
+MACRO0(SETUP_REF_ONLY_CALLEE_SAVE_FRAME)
+ int3
+ int3
+END_MACRO
+
+MACRO0(RESTORE_REF_ONLY_CALLEE_SAVE_FRAME)
+ int3
+ int3
+END_MACRO
+
+ /*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kRefsAndArgs)
+ */
+MACRO0(SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME)
+ int3
+ int3
+END_MACRO
+
+MACRO0(RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME)
+ int3
+END_MACRO
+
+ /*
+ * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
+ * exception is Thread::Current()->exception_.
+ */
+MACRO0(DELIVER_PENDING_EXCEPTION)
+ int3
+ int3
+END_MACRO
+
+MACRO2(NO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
+ DEFINE_FUNCTION VAR(c_name, 0)
+ int3
+ int3
+ END_FUNCTION VAR(c_name, 0)
+END_MACRO
+
+MACRO2(ONE_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
+ DEFINE_FUNCTION VAR(c_name, 0)
+ int3
+ int3
+ END_FUNCTION VAR(c_name, 0)
+END_MACRO
+
+MACRO2(TWO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
+ DEFINE_FUNCTION VAR(c_name, 0)
+ int3
+ int3
+ END_FUNCTION VAR(c_name, 0)
+END_MACRO
+
+ /*
+ * Called by managed code to create and deliver a NullPointerException.
+ */
+NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode
+
+ /*
+ * Called by managed code to create and deliver an ArithmeticException.
+ */
+NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode
+
+ /*
+ * Called by managed code to create and deliver a StackOverflowError.
+ */
+NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode
+
+ /*
+ * Called by managed code, saves callee saves and then calls artThrowException
+ * that will place a mock Method* at the bottom of the stack. Arg1 holds the exception.
+ */
+ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception, artDeliverExceptionFromCode
+
+ /*
+ * Called by managed code to create and deliver a NoSuchMethodError.
+ */
+ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFromCode
+
+ /*
+ * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds
+ * index, arg2 holds limit.
+ */
+TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromCode
+
+ /*
+ * All generated callsites for interface invokes and invocation slow paths will load arguments
+ * as usual - except instead of loading arg0 with the target Method*, arg0 will contain
+ * the method_idx. This wrapper will save arg1-arg3, load the caller's Method*, align the
+ * stack and call the appropriate C helper.
+ * NOTE: "this" is the first visible argument of the target, and so can be found in arg1.
+ *
+ * The helper will attempt to locate the target and return a two-word result consisting
+ * of the target Method* and the method's code pointer.
+ *
+ * If unsuccessful, the helper will return NULL/NULL. There will be a pending exception in the
+ * thread and we branch to another stub to deliver it.
+ *
+ * On success this wrapper will restore arguments and *jump* to the target, leaving the
+ * return address pointing back to the original caller.
+ */
+MACRO2(INVOKE_TRAMPOLINE, c_name, cxx_name)
+ DEFINE_FUNCTION VAR(c_name, 0)
+ int3
+ int3
+ END_FUNCTION VAR(c_name, 0)
+END_MACRO
+
+INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline, artInvokeInterfaceTrampoline
+INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck
+
+INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
+
+ /*
+ * Quick invocation stub.
+ */
+DEFINE_FUNCTION art_quick_invoke_stub
+ int3
+ int3
+END_FUNCTION art_quick_invoke_stub
+
+MACRO3(NO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
+ DEFINE_FUNCTION VAR(c_name, 0)
+ int3
+ int3
+ END_FUNCTION VAR(c_name, 0)
+END_MACRO
+
+MACRO3(ONE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
+ DEFINE_FUNCTION VAR(c_name, 0)
+ int3
+ int3
+ END_FUNCTION VAR(c_name, 0)
+END_MACRO
+
+MACRO3(TWO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
+ DEFINE_FUNCTION VAR(c_name, 0)
+ int3
+ int3
+ END_FUNCTION VAR(c_name, 0)
+END_MACRO
+
+MACRO3(THREE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
+ DEFINE_FUNCTION VAR(c_name, 0)
+ int3
+ int3
+ END_FUNCTION VAR(c_name, 0)
+END_MACRO
+
+MACRO0(RETURN_IF_RESULT_IS_NON_ZERO)
+ int3
+ testl %eax, %eax // eax == 0 ?
+ jz 1f // if eax == 0 goto 1
+ ret // return
+1: // deliver exception on current thread
+ DELIVER_PENDING_EXCEPTION
+END_MACRO
+
+MACRO0(RETURN_IF_EAX_ZERO)
+ int3
+ testl %eax, %eax // eax == 0 ?
+ jnz 1f // if eax != 0 goto 1
+ ret // return
+1: // deliver exception on current thread
+ DELIVER_PENDING_EXCEPTION
+END_MACRO
+
+MACRO0(RETURN_OR_DELIVER_PENDING_EXCEPTION)
+ int3
+ int3
+ DELIVER_PENDING_EXCEPTION
+END_MACRO
+
+// Generate the allocation entrypoints for each allocator.
+// TODO: use arch/quick_alloc_entrypoints.S. Currently we don't as we need to use concatenation
+// macros to work around differences between OS/X's as and binutils as (OS/X lacks named arguments
+// to macros and the VAR macro won't concatenate arguments properly), this also breaks having
+// multi-line macros that use each other (hence using 1 macro per newline below).
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(c_suffix, cxx_suffix) \
+ TWO_ARG_DOWNCALL art_quick_alloc_object ## c_suffix, artAllocObjectFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(c_suffix, cxx_suffix) \
+ TWO_ARG_DOWNCALL art_quick_alloc_object_resolved ## c_suffix, artAllocObjectFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(c_suffix, cxx_suffix) \
+ TWO_ARG_DOWNCALL art_quick_alloc_object_initialized ## c_suffix, artAllocObjectFromCodeInitialized ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(c_suffix, cxx_suffix) \
+ TWO_ARG_DOWNCALL art_quick_alloc_object_with_access_check ## c_suffix, artAllocObjectFromCodeWithAccessCheck ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(c_suffix, cxx_suffix) \
+ THREE_ARG_DOWNCALL art_quick_alloc_array ## c_suffix, artAllocArrayFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(c_suffix, cxx_suffix) \
+ THREE_ARG_DOWNCALL art_quick_alloc_array_resolved ## c_suffix, artAllocArrayFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(c_suffix, cxx_suffix) \
+ THREE_ARG_DOWNCALL art_quick_alloc_array_with_access_check ## c_suffix, artAllocArrayFromCodeWithAccessCheck ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
+#define GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(c_suffix, cxx_suffix) \
+ THREE_ARG_DOWNCALL art_quick_check_and_alloc_array ## c_suffix, artCheckAndAllocArrayFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
+#define GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(c_suffix, cxx_suffix) \
+ THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_with_access_check ## c_suffix, artCheckAndAllocArrayFromCodeWithAccessCheck ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
+
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc, DlMalloc)
+
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc_instrumented, DlMallocInstrumented)
+
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc, RosAlloc)
+
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc_instrumented, RosAllocInstrumented)
+
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer, BumpPointer)
+
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer_instrumented, BumpPointerInstrumented)
+
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab, TLAB)
+
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab_instrumented, TLABInstrumented)
+
+TWO_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO
+TWO_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO
+TWO_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO
+TWO_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO
+
+TWO_ARG_DOWNCALL art_quick_handle_fill_data, artHandleFillArrayDataFromCode, RETURN_IF_EAX_ZERO
+
+DEFINE_FUNCTION art_quick_lock_object
+ int3
+ int3
+END_FUNCTION art_quick_lock_object
+
+DEFINE_FUNCTION art_quick_unlock_object
+ int3
+ int3
+END_FUNCTION art_quick_unlock_object
+
+DEFINE_FUNCTION art_quick_is_assignable
+ int3
+ int3
+END_FUNCTION art_quick_is_assignable
+
+DEFINE_FUNCTION art_quick_check_cast
+ int3
+ int3
+END_FUNCTION art_quick_check_cast
+
+ /*
+ * Entry from managed code for array put operations of objects where the value being stored
+ * needs to be checked for compatibility.
+ * arg0 = array, arg1 = index, arg2 = value
+ */
+UNIMPLEMENTED art_quick_aput_obj_with_null_and_bound_check
+UNIMPLEMENTED art_quick_aput_obj_with_bound_check
+UNIMPLEMENTED art_quick_aput_obj
+UNIMPLEMENTED art_quick_memcpy
+
+NO_ARG_DOWNCALL art_quick_test_suspend, artTestSuspendFromCode, ret
+
+UNIMPLEMENTED art_quick_fmod
+UNIMPLEMENTED art_quick_fmodf
+UNIMPLEMENTED art_quick_l2d
+UNIMPLEMENTED art_quick_l2f
+UNIMPLEMENTED art_quick_d2l
+UNIMPLEMENTED art_quick_f2l
+UNIMPLEMENTED art_quick_idivmod
+UNIMPLEMENTED art_quick_ldiv
+UNIMPLEMENTED art_quick_lmod
+UNIMPLEMENTED art_quick_lmul
+UNIMPLEMENTED art_quick_lshl
+UNIMPLEMENTED art_quick_lshr
+UNIMPLEMENTED art_quick_lushr
+UNIMPLEMENTED art_quick_set32_instance
+UNIMPLEMENTED art_quick_set64_instance
+UNIMPLEMENTED art_quick_set_obj_instance
+UNIMPLEMENTED art_quick_get32_instance
+UNIMPLEMENTED art_quick_get64_instance
+UNIMPLEMENTED art_quick_get_obj_instance
+UNIMPLEMENTED art_quick_set32_static
+UNIMPLEMENTED art_quick_set64_static
+UNIMPLEMENTED art_quick_set_obj_static
+UNIMPLEMENTED art_quick_get32_static
+UNIMPLEMENTED art_quick_get64_static
+UNIMPLEMENTED art_quick_get_obj_static
+UNIMPLEMENTED art_quick_proxy_invoke_handler
+
+ /*
+ * Called to resolve an imt conflict.
+ */
+UNIMPLEMENTED art_quick_imt_conflict_trampoline
+UNIMPLEMENTED art_quick_resolution_trampoline
+UNIMPLEMENTED art_quick_to_interpreter_bridge
+
+ /*
+ * Routine that intercepts method calls and returns.
+ */
+UNIMPLEMENTED art_quick_instrumentation_entry
+UNIMPLEMENTED art_quick_instrumentation_exit
+
+ /*
+ * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
+ * will long jump to the upcall with a special exception of -1.
+ */
+UNIMPLEMENTED art_quick_deoptimize
+
+UNIMPLEMENTED art_quick_indexof
+UNIMPLEMENTED art_quick_string_compareto
+UNIMPLEMENTED art_quick_memcmp16
diff --git a/runtime/arch/x86_64/registers_x86_64.cc b/runtime/arch/x86_64/registers_x86_64.cc
new file mode 100644
index 0000000..38f3494
--- /dev/null
+++ b/runtime/arch/x86_64/registers_x86_64.cc
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "registers_x86_64.h"
+
+#include <ostream>
+
+namespace art {
+namespace x86_64 {
+
+static const char* kRegisterNames[] = {
+ "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+};
+std::ostream& operator<<(std::ostream& os, const Register& rhs) {
+ if (rhs >= RAX && rhs <= R15) {
+ os << kRegisterNames[rhs];
+ } else {
+ os << "Register[" << static_cast<int>(rhs) << "]";
+ }
+ return os;
+}
+
+} // namespace x86_64
+} // namespace art
diff --git a/runtime/arch/x86_64/registers_x86_64.h b/runtime/arch/x86_64/registers_x86_64.h
new file mode 100644
index 0000000..9808d91
--- /dev/null
+++ b/runtime/arch/x86_64/registers_x86_64.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_X86_64_REGISTERS_X86_64_H_
+#define ART_RUNTIME_ARCH_X86_64_REGISTERS_X86_64_H_
+
+#include <iosfwd>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "globals.h"
+
+namespace art {
+namespace x86_64 {
+
+enum Register {
+ RAX = 0,
+ RCX = 1,
+ RDX = 2,
+ RBX = 3,
+ RSP = 4,
+ RBP = 5,
+ RSI = 6,
+ RDI = 7,
+ R8 = 8,
+ R9 = 9,
+ R10 = 10,
+ R11 = 11,
+ R12 = 12,
+ R13 = 13,
+ R14 = 14,
+ R15 = 15,
+ kNumberOfCpuRegisters = 16,
+ kNoRegister = -1 // Signals an illegal register.
+};
+std::ostream& operator<<(std::ostream& os, const Register& rhs);
+
+} // namespace x86_64
+} // namespace art
+
+#endif // ART_RUNTIME_ARCH_X86_64_REGISTERS_X86_64_H_
diff --git a/runtime/arch/x86_64/thread_x86_64.cc b/runtime/arch/x86_64/thread_x86_64.cc
new file mode 100644
index 0000000..9e45a72
--- /dev/null
+++ b/runtime/arch/x86_64/thread_x86_64.cc
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "thread.h"
+
+#include "asm_support_x86_64.h"
+#include "base/macros.h"
+#include "thread-inl.h"
+#include "thread_list.h"
+
+#include <asm/prctl.h>
+#include <sys/prctl.h>
+#include <sys/syscall.h>
+
+namespace art {
+
+static void arch_prctl(int code, void* val) {
+ syscall(__NR_arch_prctl, code, val);
+}
+void Thread::InitCpu() {
+ static Mutex modify_ldt_lock("modify_ldt lock");
+ MutexLock mu(Thread::Current(), modify_ldt_lock);
+ arch_prctl(ARCH_SET_GS, this);
+
+ // Allow easy indirection back to Thread*.
+ self_ = this;
+
+ // Sanity check that reads from %gs point to this Thread*.
+ Thread* self_check;
+ CHECK_EQ(THREAD_SELF_OFFSET, OFFSETOF_MEMBER(Thread, self_));
+ __asm__ __volatile__("movq %%gs:(%1), %0"
+ : "=r"(self_check) // output
+ : "r"(THREAD_SELF_OFFSET) // input
+ :); // clobber
+ CHECK_EQ(self_check, this);
+
+ // Sanity check other offsets.
+ CHECK_EQ(THREAD_EXCEPTION_OFFSET, OFFSETOF_MEMBER(Thread, exception_));
+ CHECK_EQ(THREAD_CARD_TABLE_OFFSET, OFFSETOF_MEMBER(Thread, card_table_));
+ CHECK_EQ(THREAD_ID_OFFSET, OFFSETOF_MEMBER(Thread, thin_lock_thread_id_));
+}
+
+void Thread::CleanupCpu() {
+ // Sanity check that reads from %gs point to this Thread*.
+ Thread* self_check;
+ CHECK_EQ(THREAD_SELF_OFFSET, OFFSETOF_MEMBER(Thread, self_));
+ __asm__ __volatile__("movq %%gs:(%1), %0"
+ : "=r"(self_check) // output
+ : "r"(THREAD_SELF_OFFSET) // input
+ :); // clobber
+ CHECK_EQ(self_check, this);
+
+ // Do nothing.
+}
+
+} // namespace art