Merge "Ongoing improvements in java fuzz testing"
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index 1e2cfa3..c8e3654 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -152,7 +152,7 @@
# The architectures the compiled tools are able to run on. Setting this to 'all' will cause all
# architectures to be included.
-ART_TARGET_CODEGEN_ARCHS ?= all
+ART_TARGET_CODEGEN_ARCHS ?= svelte
ART_HOST_CODEGEN_ARCHS ?= all
ifeq ($(ART_TARGET_CODEGEN_ARCHS),all)
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 410b2d0..16c6a7b 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -92,6 +92,8 @@
linker/arm/relative_patcher_thumb2.cc \
optimizing/code_generator_arm.cc \
optimizing/dex_cache_array_fixups_arm.cc \
+ optimizing/instruction_simplifier_arm.cc \
+ optimizing/instruction_simplifier_shared.cc \
optimizing/intrinsics_arm.cc \
utils/arm/assembler_arm.cc \
utils/arm/assembler_arm32.cc \
@@ -109,7 +111,6 @@
linker/arm64/relative_patcher_arm64.cc \
optimizing/nodes_arm64.cc \
optimizing/code_generator_arm64.cc \
- optimizing/instruction_simplifier_arm.cc \
optimizing/instruction_simplifier_arm64.cc \
optimizing/instruction_simplifier_shared.cc \
optimizing/intrinsics_arm64.cc \
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 55e1ab2..6e5eb66 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -2456,16 +2456,18 @@
__ FloorWS(FTMP, in);
__ Mfc1(out, FTMP);
- __ LoadConst32(TMP, 1);
+ if (!IsR6()) {
+ __ LoadConst32(TMP, -1);
+ }
- // TMP = (out = java.lang.Integer.MAX_VALUE) ? 1 : 0;
+ // TMP = (out == java.lang.Integer.MAX_VALUE) ? -1 : 0;
__ LoadConst32(AT, std::numeric_limits<int32_t>::max());
__ Bne(AT, out, &finite);
__ Mtc1(ZERO, FTMP);
if (IsR6()) {
__ CmpLtS(FTMP, in, FTMP);
- __ Mfc1(AT, FTMP);
+ __ Mfc1(TMP, FTMP);
} else {
__ ColtS(in, FTMP);
}
@@ -2474,28 +2476,26 @@
__ Bind(&finite);
- // TMP = (0.5f <= (in - out)) ? 1 : 0;
+ // TMP = (0.5f <= (in - out)) ? -1 : 0;
__ Cvtsw(FTMP, FTMP); // Convert output of floor.w.s back to "float".
__ LoadConst32(AT, bit_cast<int32_t, float>(0.5f));
__ SubS(FTMP, in, FTMP);
__ Mtc1(AT, half);
if (IsR6()) {
__ CmpLeS(FTMP, half, FTMP);
- __ Mfc1(AT, FTMP);
+ __ Mfc1(TMP, FTMP);
} else {
__ ColeS(half, FTMP);
}
__ Bind(&add);
- if (IsR6()) {
- __ Selnez(TMP, TMP, AT);
- } else {
+ if (!IsR6()) {
__ Movf(TMP, ZERO);
}
- // Return out += TMP.
- __ Addu(out, out, TMP);
+ // Return out -= TMP.
+ __ Subu(out, out, TMP);
__ Bind(&done);
}
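
On R6, CmpLtS/CmpLeS write an all-ones (-1) mask that Mfc1 now moves straight into TMP, so the patched stub subtracts the mask ("out -= TMP") instead of materializing 1 and selecting it with Selnez. For readers without MIPS fluency, here is a minimal C++ sketch of the Math.round(float) semantics the stub computes; the names and saturation handling are illustrative, not ART code.

```cpp
#include <cassert>
#include <cmath>
#include <cstdint>
#include <limits>

// Sketch of java.lang.Math.round(float) as the stub computes it: floor, then
// add one when the fractional remainder reaches 0.5, with floor.w.s-style
// saturation (simplified here) and NaN mapping to 0.
int32_t RoundFloat(float in) {
  if (std::isnan(in)) return 0;
  double floored = std::floor(static_cast<double>(in));
  if (floored >= std::numeric_limits<int32_t>::max()) return std::numeric_limits<int32_t>::max();
  if (floored <= std::numeric_limits<int32_t>::min()) return std::numeric_limits<int32_t>::min();
  int32_t out = static_cast<int32_t>(floored);
  // TMP in the assembly is the -1 mask when 0.5f <= (in - out); "out -= TMP"
  // is how the patched code adds one without Selnez.
  if (0.5f <= (in - static_cast<float>(out))) ++out;
  return out;
}

int main() {
  assert(RoundFloat(2.5f) == 3);
  assert(RoundFloat(-2.5f) == -2);  // Java rounds half toward positive infinity
  assert(RoundFloat(std::nanf("")) == 0);
}
```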
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index a1da20b..cc9cbda 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -428,8 +428,14 @@
|| instruction_set == kX86_64;
}
+// Strip pass name suffix to get optimization name.
+static std::string ConvertPassNameToOptimizationName(const std::string& pass_name) {
+ size_t pos = pass_name.find(kPassNameSeparator);
+ return pos == std::string::npos ? pass_name : pass_name.substr(0, pos);
+}
+
static HOptimization* BuildOptimization(
- const std::string& opt_name,
+ const std::string& pass_name,
ArenaAllocator* arena,
HGraph* graph,
OptimizingCompilerStats* stats,
@@ -439,6 +445,7 @@
StackHandleScopeCollection* handles,
SideEffectsAnalysis* most_recent_side_effects,
HInductionVarAnalysis* most_recent_induction) {
+ std::string opt_name = ConvertPassNameToOptimizationName(pass_name);
if (opt_name == BoundsCheckElimination::kBoundsCheckEliminationPassName) {
CHECK(most_recent_side_effects != nullptr && most_recent_induction != nullptr);
return new (arena) BoundsCheckElimination(graph,
@@ -446,11 +453,11 @@
most_recent_induction);
} else if (opt_name == GVNOptimization::kGlobalValueNumberingPassName) {
CHECK(most_recent_side_effects != nullptr);
- return new (arena) GVNOptimization(graph, *most_recent_side_effects);
+ return new (arena) GVNOptimization(graph, *most_recent_side_effects, pass_name.c_str());
} else if (opt_name == HConstantFolding::kConstantFoldingPassName) {
- return new (arena) HConstantFolding(graph);
+ return new (arena) HConstantFolding(graph, pass_name.c_str());
} else if (opt_name == HDeadCodeElimination::kDeadCodeEliminationPassName) {
- return new (arena) HDeadCodeElimination(graph, stats);
+ return new (arena) HDeadCodeElimination(graph, stats, pass_name.c_str());
} else if (opt_name == HInliner::kInlinerPassName) {
size_t number_of_dex_registers = dex_compilation_unit.GetCodeItem()->registers_size_;
return new (arena) HInliner(graph, // outer_graph
@@ -470,7 +477,7 @@
} else if (opt_name == HInductionVarAnalysis::kInductionPassName) {
return new (arena) HInductionVarAnalysis(graph);
} else if (opt_name == InstructionSimplifier::kInstructionSimplifierPassName) {
- return new (arena) InstructionSimplifier(graph, stats);
+ return new (arena) InstructionSimplifier(graph, stats, pass_name.c_str());
} else if (opt_name == IntrinsicsRecognizer::kIntrinsicsRecognizerPassName) {
return new (arena) IntrinsicsRecognizer(graph, driver, stats);
} else if (opt_name == LICM::kLoopInvariantCodeMotionPassName) {
@@ -522,12 +529,9 @@
SideEffectsAnalysis* most_recent_side_effects = nullptr;
HInductionVarAnalysis* most_recent_induction = nullptr;
ArenaVector<HOptimization*> ret(arena->Adapter());
- for (std::string pass_name : pass_names) {
- size_t pos = pass_name.find(kPassNameSeparator); // Strip suffix to get base pass name.
- std::string opt_name = pos == std::string::npos ? pass_name : pass_name.substr(0, pos);
-
+ for (const std::string& pass_name : pass_names) {
HOptimization* opt = BuildOptimization(
- opt_name,
+ pass_name,
arena,
graph,
stats,
@@ -540,6 +544,7 @@
CHECK(opt != nullptr) << "Couldn't build optimization: \"" << pass_name << "\"";
ret.push_back(opt);
+ std::string opt_name = ConvertPassNameToOptimizationName(pass_name);
if (opt_name == SideEffectsAnalysis::kSideEffectsAnalysisPassName) {
most_recent_side_effects = down_cast<SideEffectsAnalysis*>(opt);
} else if (opt_name == HInductionVarAnalysis::kInductionPassName) {
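
BuildOptimization now receives the full pass name, so repeated passes (e.g. a second GVN run) report under distinct timing names while still matching on their base optimization name. A small sketch of the stripping behavior, assuming kPassNameSeparator is '$' (an assumption here, not confirmed by this diff):

```cpp
#include <cassert>
#include <string>

// Mirrors ConvertPassNameToOptimizationName: everything before the first
// separator selects the optimization; the suffix only distinguishes the run.
static std::string OptNameOf(const std::string& pass_name, char separator = '$') {
  size_t pos = pass_name.find(separator);
  return pos == std::string::npos ? pass_name : pass_name.substr(0, pos);
}

int main() {
  assert(OptNameOf("GVN$after_arch") == "GVN");  // second run, separate timing entry
  assert(OptNameOf("GVN") == "GVN");             // unsuffixed names pass through
}
```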
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 11357b5..881bebe 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -1030,11 +1030,37 @@
END art_quick_set64_instance
/*
- * Entry from managed code to resolve a string, this stub will allocate a String and deliver an
- * exception on error. On success the String is returned. R0 holds the string index. The fast
- * path check for hit in strings cache has already been performed.
+ * Entry from managed code to resolve a string. This stub first checks the
+ * dex cache for a matching string (the fast path); if not found, it
+ * allocates the String and delivers an exception on error.
+ * On success the String is returned. R0 holds the string index.
*/
-ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+
+ENTRY art_quick_resolve_string
+ ldr r1, [sp] @ load referrer
+ ldr r1, [r1, #ART_METHOD_DECLARING_CLASS_OFFSET] @ load declaring class
+ ldr r1, [r1, #DECLARING_CLASS_DEX_CACHE_STRINGS_OFFSET] @ load string dex cache
+ ubfx r2, r0, #0, #STRING_DEX_CACHE_HASH_BITS
+ add r1, r1, r2, LSL #STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT
+ ldrd r2, r3, [r1] @ load index into r3 and pointer into r2
+ cmp r0, r3
+ bne .Lart_quick_resolve_string_slow_path
+#ifdef USE_READ_BARRIER
+ ldr r3, [r2, MIRROR_OBJECT_LOCK_WORD_OFFSET]
+ tst r3, #LOCK_WORD_MARK_BIT_MASK_SHIFTED
+ beq .Lart_quick_resolve_string_slow_path
+#endif
+ mov r0, r2
+ bx lr
+
+.Lart_quick_resolve_string_slow_path:
+ SETUP_SAVE_REFS_ONLY_FRAME r2 @ save callee saves in case of GC
+ mov r1, r9 @ pass Thread::Current
+ mov r3, sp
+ bl artResolveStringFromCode @ (uint32_t string_idx, Thread* self)
+ RESTORE_SAVE_REFS_ONLY_FRAME
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END art_quick_resolve_string
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
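
All four architecture stubs in this change probe the same direct-mapped string cache added to mirror::DexCache: 1024 entries of a 64-bit <compressed string reference, string index> pair, indexed by the low STRING_DEX_CACHE_HASH_BITS of the index. A hedged C++ sketch of the lookup (struct and field names are illustrative stand-ins, not ART's exact declarations):

```cpp
#include <cstdint>

struct String;  // stand-in for art::mirror::String

// Packed like StringDexCachePair: 32-bit compressed reference in the low
// half, dex string index in the high half, so one 64-bit load fetches both.
struct StringDexCachePair {
  uint32_t string_ref;
  uint32_t string_idx;
};

static constexpr uint32_t kCacheSize = 1024;  // kDexCacheStringCacheSize

// Index the direct-mapped cache with the low STRING_DEX_CACHE_HASH_BITS of
// the string index, then verify the stored index before trusting the pointer.
String* LookupStringFastPath(const StringDexCachePair* cache, uint32_t string_idx) {
  const StringDexCachePair& entry = cache[string_idx & (kCacheSize - 1)];
  if (entry.string_idx != string_idx) {
    return nullptr;  // miss: the stub falls through to artResolveStringFromCode
  }
  return reinterpret_cast<String*>(static_cast<uintptr_t>(entry.string_ref));
}

int main() {
  static StringDexCachePair cache[kCacheSize] = {};
  cache[5] = StringDexCachePair{0x1000u, 5u};
  return LookupStringFastPath(cache, 5u) != nullptr ? 0 : 1;
}
```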
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 3e6fbaf..202846a 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1786,11 +1786,48 @@
END art_quick_set64_static
/*
- * Entry from managed code to resolve a string, this stub will allocate a String and deliver an
- * exception on error. On success the String is returned. w0 holds the string index. The fast
- * path check for hit in strings cache has already been performed.
+ * Entry from managed code to resolve a string. This stub first checks the
+ * dex cache for a matching string (the fast path); if not found, it
+ * allocates the String and delivers an exception on error.
+ * On success the String is returned. W0 holds the string index.
*/
-ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+
+ENTRY art_quick_resolve_string
+ ldr x1, [sp] // load referrer
+ ldr w2, [x1, #ART_METHOD_DECLARING_CLASS_OFFSET] // load declaring class
+ ldr x1, [x2, #DECLARING_CLASS_DEX_CACHE_STRINGS_OFFSET] // load string dex cache
+ and x2, x0, #STRING_DEX_CACHE_SIZE_MINUS_ONE // get masked string index into x2
+ ldr x2, [x1, x2, lsl #STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT] // load dex cache pair into x2
+ cmp x0, x2, lsr #32 // compare against upper 32 bits
+ bne .Lart_quick_resolve_string_slow_path
+ ubfx x0, x2, #0, #32 // extract lower 32 bits into x0
+#ifdef USE_READ_BARRIER
+ // Most common case: GC is not marking.
+ ldr w3, [xSELF, #THREAD_IS_GC_MARKING_OFFSET]
+ cbnz x3, .Lart_quick_resolve_string_marking
+#endif
+ ret
+
+// Slow path case: the index did not match.
+.Lart_quick_resolve_string_slow_path:
+ SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC
+ mov x1, xSELF // pass Thread::Current
+ bl artResolveStringFromCode // (int32_t string_idx, Thread* self)
+ RESTORE_SAVE_REFS_ONLY_FRAME
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+
+// GC-is-marking case: check the mark bit.
+.Lart_quick_resolve_string_marking:
+ ldr x3, [x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
+ tbnz x3, #LOCK_WORD_MARK_BIT_SHIFT, .Lart_quick_resolve_string_no_rb
+ // Save LR so that we can return, also x1 for alignment purposes.
+ stp x1, xLR, [sp, #-16]! // Save x1, LR.
+ bl artReadBarrierMark // Get the marked string back.
+ ldp x1, xLR, [sp], #16 // Restore registers.
+.Lart_quick_resolve_string_no_rb:
+ ret
+
+END art_quick_resolve_string
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_REGION_TLAB_ALLOCATORS
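
The arm64 version splits the read-barrier work off the hot path: the lock word's mark bit is inspected only when the thread-local is-GC-marking flag is set, and artReadBarrierMark is called only when the bit is clear. A sketch of that decision tree (the mark-bit position and the Mark helper are stand-ins, not ART's definitions):

```cpp
#include <cstdint>

struct Obj { uint32_t lock_word; };

// Bit position is illustrative; the real value comes from LOCK_WORD_MARK_BIT_SHIFT.
static constexpr uint32_t kMarkBitMask = 1u << 29;

Obj* Mark(Obj* ref) { return ref; }  // stand-in for artReadBarrierMark

// Same ordering as the stub: the common "GC not marking" case returns the
// cached reference after a single thread-local flag test.
inline Obj* ReadBarrierOnCacheHit(bool thread_is_gc_marking, Obj* ref) {
  if (!thread_is_gc_marking) return ref;                 // most common case
  if ((ref->lock_word & kMarkBitMask) != 0) return ref;  // already marked
  return Mark(ref);                                      // fetch the to-space copy
}

int main() {
  Obj o{kMarkBitMask};
  return ReadBarrierOnCacheHit(true, &o) == &o ? 0 : 1;
}
```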
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 2e9682e..d685ace 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1108,7 +1108,44 @@
ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeRegionTLAB
END_FUNCTION art_quick_alloc_object_region_tlab
-ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+DEFINE_FUNCTION art_quick_resolve_string
+ SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx
+ movl FRAME_SIZE_SAVE_REFS_ONLY(%esp), %ecx // get referrer
+ movl ART_METHOD_DECLARING_CLASS_OFFSET(%ecx), %ecx // get declaring class
+ movl DECLARING_CLASS_DEX_CACHE_STRINGS_OFFSET(%ecx), %ecx // get string dex cache
+ movl LITERAL(STRING_DEX_CACHE_SIZE_MINUS_ONE), %edx
+ andl %eax, %edx
+ shl LITERAL(STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT), %edx
+ addl %ecx, %edx
+ movlps (%edx), %xmm0 // load string idx and pointer to xmm0
+ movd %xmm0, %ecx // extract pointer
+ pshufd LITERAL(0x55), %xmm0, %xmm0 // shuffle index into lowest bits
+ movd %xmm0, %edx // extract index
+ cmp %edx, %eax
+ jne .Lart_quick_resolve_string_slow_path
+#ifdef USE_READ_BARRIER
+ testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(%ecx)
+ jz .Lart_quick_resolve_string_slow_path
+#endif
+ movl %ecx, %eax
+ RESTORE_SAVE_REFS_ONLY_FRAME
+ ret
+
+.Lart_quick_resolve_string_slow_path:
+ // Outgoing argument set up
+ subl LITERAL(8), %esp // push padding
+ CFI_ADJUST_CFA_OFFSET(8)
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ CFI_ADJUST_CFA_OFFSET(4)
+ PUSH eax // pass arg1
+ call SYMBOL(artResolveStringFromCode)
+ addl LITERAL(16), %esp // pop arguments
+ CFI_ADJUST_CFA_OFFSET(-16)
+ RESTORE_SAVE_REFS_ONLY_FRAME
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+
+END_FUNCTION art_quick_resolve_string
+
ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 32768b0..647fe1d 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1330,7 +1330,36 @@
ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeInitializedRegionTLAB
END_FUNCTION art_quick_alloc_object_initialized_region_tlab
-ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+DEFINE_FUNCTION art_quick_resolve_string
+ movq 8(%rsp), %rcx // get referrer
+ movl ART_METHOD_DECLARING_CLASS_OFFSET(%rcx), %ecx // get declaring class
+ movq DECLARING_CLASS_DEX_CACHE_STRINGS_OFFSET(%ecx), %rcx // get string dex cache
+ movq LITERAL(STRING_DEX_CACHE_SIZE_MINUS_ONE), %rdx
+ andq %rdi, %rdx
+ shlq LITERAL(STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT), %rdx
+ addq %rcx, %rdx
+ movq %rax, %rcx         // save the incoming %rax; the slow path restores it
+ movq (%rdx), %rdx       // load the <string ref, string idx> pair
+ movq %rdx, %rax         // copy the pair
+ movl %eax, %eax         // zero-extend: low 32 bits are the string reference
+ shrq LITERAL(32), %rdx  // high 32 bits are the cached string index
+ cmp %rdx, %rdi          // does the cached index match the requested one?
+ jne .Lart_quick_resolve_string_slow_path
+#ifdef USE_READ_BARRIER
+ testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(%rax)
+ jz .Lart_quick_resolve_string_slow_path
+#endif
+ ret
+.Lart_quick_resolve_string_slow_path:
+ SETUP_SAVE_REFS_ONLY_FRAME
+ movq %rcx, %rax         // restore the %rax saved on the fast path
+ // Outgoing argument set up
+ movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
+ call SYMBOL(artResolveStringFromCode) // artResolveStringFromCode(arg0, referrer, Thread*)
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END_FUNCTION art_quick_resolve_string
+
ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 848f8e5..102b993 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -19,12 +19,15 @@
#if defined(__cplusplus)
#include "art_method.h"
+#include "base/bit_utils.h"
#include "gc/allocator/rosalloc.h"
#include "gc/heap.h"
#include "jit/jit.h"
#include "lock_word.h"
#include "mirror/class.h"
+#include "mirror/dex_cache.h"
#include "mirror/string.h"
+#include "utils/dex_cache_arrays_layout.h"
#include "runtime.h"
#include "thread.h"
#endif
diff --git a/runtime/base/arena_allocator_test.cc b/runtime/base/arena_allocator_test.cc
index 9de3cc4..9932586 100644
--- a/runtime/base/arena_allocator_test.cc
+++ b/runtime/base/arena_allocator_test.cc
@@ -124,4 +124,140 @@
}
}
+TEST_F(ArenaAllocatorTest, AllocAlignment) {
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+ for (size_t iterations = 0; iterations <= 10; ++iterations) {
+ for (size_t size = 1; size <= ArenaAllocator::kAlignment + 1; ++size) {
+ void* allocation = arena.Alloc(size);
+ EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(allocation))
+ << reinterpret_cast<uintptr_t>(allocation);
+ }
+ }
+}
+
+TEST_F(ArenaAllocatorTest, ReallocAlignment) {
+ {
+ // Case 1: small aligned allocation, aligned extend inside arena.
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+
+ const size_t original_size = ArenaAllocator::kAlignment * 2;
+ void* original_allocation = arena.Alloc(original_size);
+ ASSERT_TRUE(IsAligned<ArenaAllocator::kAlignment>(original_allocation));
+
+ const size_t new_size = ArenaAllocator::kAlignment * 3;
+ void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+ EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(realloc_allocation));
+ // Secondary: expect the same buffer.
+ EXPECT_EQ(original_allocation, realloc_allocation);
+
+ void* after_alloc = arena.Alloc(1);
+ EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(after_alloc));
+ }
+
+ {
+ // Case 2: small aligned allocation, non-aligned extend inside arena.
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+
+ const size_t original_size = ArenaAllocator::kAlignment * 2;
+ void* original_allocation = arena.Alloc(original_size);
+ ASSERT_TRUE(IsAligned<ArenaAllocator::kAlignment>(original_allocation));
+
+ const size_t new_size = ArenaAllocator::kAlignment * 2 + (ArenaAllocator::kAlignment / 2);
+ void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+ EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(realloc_allocation));
+ // Secondary: expect the same buffer.
+ EXPECT_EQ(original_allocation, realloc_allocation);
+
+ void* after_alloc = arena.Alloc(1);
+ EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(after_alloc));
+ }
+
+ {
+ // Case 3: small non-aligned allocation, aligned extend inside arena.
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+
+ const size_t original_size = ArenaAllocator::kAlignment * 2 + (ArenaAllocator::kAlignment / 2);
+ void* original_allocation = arena.Alloc(original_size);
+ ASSERT_TRUE(IsAligned<ArenaAllocator::kAlignment>(original_allocation));
+
+ const size_t new_size = ArenaAllocator::kAlignment * 4;
+ void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+ EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(realloc_allocation));
+ // Secondary: expect the same buffer.
+ EXPECT_EQ(original_allocation, realloc_allocation);
+
+ void* after_alloc = arena.Alloc(1);
+ EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(after_alloc));
+ }
+
+ {
+ // Case 4: small non-aligned allocation; the aligned new size already fits the rounded-up
+ // original allocation, so no extend is needed inside the arena.
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+
+ const size_t original_size = ArenaAllocator::kAlignment * 2 + (ArenaAllocator::kAlignment / 2);
+ void* original_allocation = arena.Alloc(original_size);
+ ASSERT_TRUE(IsAligned<ArenaAllocator::kAlignment>(original_allocation));
+
+ const size_t new_size = ArenaAllocator::kAlignment * 3;
+ void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+ EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(realloc_allocation));
+ // Secondary: expect the same buffer.
+ EXPECT_EQ(original_allocation, realloc_allocation);
+
+ void* after_alloc = arena.Alloc(1);
+ EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(after_alloc));
+ }
+
+ // The next part is brittle, as the default arena size is variable and sanitizer
+ // instrumentation may pad allocations in ways the test cannot observe.
+
+ {
+ // Case 5: large allocation, aligned extend into next arena.
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+
+ const size_t original_size = Arena::kDefaultSize - ArenaAllocator::kAlignment * 5;
+ void* original_allocation = arena.Alloc(original_size);
+ ASSERT_TRUE(IsAligned<ArenaAllocator::kAlignment>(original_allocation));
+
+ const size_t new_size = Arena::kDefaultSize + ArenaAllocator::kAlignment * 2;
+ void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+ EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(realloc_allocation));
+ // Secondary: expect new buffer.
+ EXPECT_NE(original_allocation, realloc_allocation);
+
+ void* after_alloc = arena.Alloc(1);
+ EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(after_alloc));
+ }
+
+ {
+ // Case 6: large allocation, non-aligned extend into next arena.
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+
+ const size_t original_size = Arena::kDefaultSize -
+ ArenaAllocator::kAlignment * 4 -
+ ArenaAllocator::kAlignment / 2;
+ void* original_allocation = arena.Alloc(original_size);
+ ASSERT_TRUE(IsAligned<ArenaAllocator::kAlignment>(original_allocation));
+
+ const size_t new_size = Arena::kDefaultSize +
+ ArenaAllocator::kAlignment * 2 +
+ ArenaAllocator::kAlignment / 2;
+ void* realloc_allocation = arena.Realloc(original_allocation, original_size, new_size);
+ EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(realloc_allocation));
+ // Secondary: expect new buffer.
+ EXPECT_NE(original_allocation, realloc_allocation);
+
+ void* after_alloc = arena.Alloc(1);
+ EXPECT_TRUE(IsAligned<ArenaAllocator::kAlignment>(after_alloc));
+ }
+}
+
} // namespace art
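
These tests pin down two contracts: every Alloc/Realloc result is kAlignment-aligned, and a Realloc stays in place whenever the rounded-up original allocation already covers the new size (which is exactly why Case 4 expects no extension). A tiny model of that arithmetic, with RoundUp/IsAlignedTo mirroring ART's base/bit_utils.h helpers:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr size_t kAlignment = 8;  // stand-in for ArenaAllocator::kAlignment

// Power-of-two helpers mirroring art's base/bit_utils.h.
constexpr size_t RoundUp(size_t x, size_t n) { return (x + n - 1) & ~(n - 1); }
constexpr bool IsAlignedTo(uintptr_t p, size_t n) { return (p & (n - 1)) == 0; }

int main() {
  // Case 4 in miniature: a 2.5-unit request already reserves 3 aligned units,
  // so a Realloc to exactly 3 units can succeed without extending.
  const size_t original = kAlignment * 2 + kAlignment / 2;
  const size_t reserved = RoundUp(original, kAlignment);
  assert(reserved == kAlignment * 3);
  assert(IsAlignedTo(reserved, kAlignment));
}
```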
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index b4acc27..89bebb4 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -4059,7 +4059,7 @@
// Prepare JDWP ids for the reply.
JDWP::JdwpTag result_tag = BasicTagFromDescriptor(m->GetShorty());
const bool is_object_result = (result_tag == JDWP::JT_OBJECT);
- StackHandleScope<2> hs(soa.Self());
+ StackHandleScope<3> hs(soa.Self());
Handle<mirror::Object> object_result = hs.NewHandle(is_object_result ? result.GetL() : nullptr);
Handle<mirror::Throwable> exception = hs.NewHandle(soa.Self()->GetException());
soa.Self()->ClearException();
@@ -4100,9 +4100,9 @@
if (exceptionObjectId == 0) {
if (m->GetDeclaringClass()->IsStringClass()) {
// For string constructors, the new string is remapped to the receiver (stored in ref).
- mirror::Object* decoded_ref = soa.Self()->DecodeJObject(ref.get());
+ Handle<mirror::Object> decoded_ref = hs.NewHandle(soa.Self()->DecodeJObject(ref.get()));
result_value = gRegistry->Add(decoded_ref);
- result_tag = TagFromObject(soa, decoded_ref);
+ result_tag = TagFromObject(soa, decoded_ref.Get());
} else {
// TODO we could keep the receiver ObjectId in the DebugInvokeReq to avoid looking into the
// object registry.
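
The debugger fix exists because gRegistry->Add() can trigger a moving GC, after which the raw decoded pointer may be stale when TagFromObject reads it; a Handle reads through a GC-updated root instead. A toy model of the hazard (the "collector" and root registration here are invented for illustration, not ART's API):

```cpp
#include <cassert>

// Toy moving collector: RelocatingGc() copies the object and patches registered
// roots, mimicking what ART's GC does for Handles but not for raw pointers.
struct Obj { int v; };

static Obj storage[2] = {{42}, {0}};
static Obj* current = &storage[0];
static Obj** root = nullptr;  // the "handle" slot, if registered

void RegisterRoot(Obj** slot) { root = slot; }

void RelocatingGc() {  // e.g. triggered inside gRegistry->Add()
  storage[1] = *current;
  current = &storage[1];
  if (root != nullptr) *root = current;  // handles are updated; raw copies are not
}

int main() {
  Obj* handle_slot = current;
  RegisterRoot(&handle_slot);    // hs.NewHandle(...) analogue
  Obj* raw = current;            // the pre-fix raw mirror::Object*
  RelocatingGc();
  assert(handle_slot->v == 42);  // reading through the handle stays valid
  assert(raw != handle_slot);    // the raw pointer now names the old location
}
```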
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 7afe6f9..42816a0 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -435,10 +435,8 @@
gc_barrier_->Init(self, 0);
ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
FlipCallback flip_callback(this);
- heap_->ThreadFlipBegin(self); // Sync with JNI critical calls.
size_t barrier_count = Runtime::Current()->FlipThreadRoots(
&thread_flip_visitor, &flip_callback, this);
- heap_->ThreadFlipEnd(self);
{
ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
gc_barrier_->Increment(self, barrier_count);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 39f26e7..638c1d8 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -878,9 +878,13 @@
MutexLock mu(self, *thread_flip_lock_);
bool has_waited = false;
uint64_t wait_start = NanoTime();
- while (thread_flip_running_) {
- has_waited = true;
- thread_flip_cond_->Wait(self);
+ if (thread_flip_running_) {
+ TimingLogger::ScopedTiming split("IncrementDisableThreadFlip",
+ GetCurrentGcIteration()->GetTimings());
+ while (thread_flip_running_) {
+ has_waited = true;
+ thread_flip_cond_->Wait(self);
+ }
}
++disable_thread_flip_count_;
if (has_waited) {
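
The restructured wait means a TimingLogger split is only recorded when the caller actually blocks on a running thread flip; the common no-contention case stays free of logging overhead. The shape of the pattern, sketched with std::condition_variable in place of ART's ConditionVariable and the ScopedTiming reduced to a comment:

```cpp
#include <condition_variable>
#include <mutex>

std::mutex thread_flip_lock;
std::condition_variable thread_flip_cond;
bool thread_flip_running = false;
int disable_thread_flip_count = 0;

void IncrementDisableThreadFlip() {
  std::unique_lock<std::mutex> mu(thread_flip_lock);
  if (thread_flip_running) {
    // Open the timing scope only when we will actually block, as heap.cc now does:
    // TimingLogger::ScopedTiming split("IncrementDisableThreadFlip", timings);
    thread_flip_cond.wait(mu, [] { return !thread_flip_running; });
  }
  ++disable_thread_flip_count;
}

int main() { IncrementDisableThreadFlip(); }
```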
diff --git a/runtime/generated/asm_support_gen.h b/runtime/generated/asm_support_gen.h
index 716c23d..40b71c4 100644
--- a/runtime/generated/asm_support_gen.h
+++ b/runtime/generated/asm_support_gen.h
@@ -70,6 +70,16 @@
DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_QUICK_CODE_OFFSET_32), (static_cast<int32_t>(art::ArtMethod:: EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k32).Int32Value())))
#define ART_METHOD_QUICK_CODE_OFFSET_64 48
DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_QUICK_CODE_OFFSET_64), (static_cast<int32_t>(art::ArtMethod:: EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k64).Int32Value())))
+#define ART_METHOD_DECLARING_CLASS_OFFSET 0
+DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_DECLARING_CLASS_OFFSET), (static_cast<int32_t>(art::ArtMethod:: DeclaringClassOffset().Int32Value())))
+#define DECLARING_CLASS_DEX_CACHE_STRINGS_OFFSET 40
+DEFINE_CHECK_EQ(static_cast<int32_t>(DECLARING_CLASS_DEX_CACHE_STRINGS_OFFSET), (static_cast<int32_t>(art::mirror::Class:: DexCacheStringsOffset().Int32Value())))
+#define STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT 3
+DEFINE_CHECK_EQ(static_cast<int32_t>(STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT), (static_cast<int32_t>(art::WhichPowerOf2(sizeof(art::mirror::StringDexCachePair)))))
+#define STRING_DEX_CACHE_SIZE_MINUS_ONE 1023
+DEFINE_CHECK_EQ(static_cast<int32_t>(STRING_DEX_CACHE_SIZE_MINUS_ONE), (static_cast<int32_t>(art::mirror::DexCache::kDexCacheStringCacheSize - 1)))
+#define STRING_DEX_CACHE_HASH_BITS 10
+DEFINE_CHECK_EQ(static_cast<int32_t>(STRING_DEX_CACHE_HASH_BITS), (static_cast<int32_t>(art::LeastSignificantBit(art::mirror::DexCache::kDexCacheStringCacheSize))))
#define MIN_LARGE_OBJECT_THRESHOLD 0x3000
DEFINE_CHECK_EQ(static_cast<size_t>(MIN_LARGE_OBJECT_THRESHOLD), (static_cast<size_t>(art::gc::Heap::kMinLargeObjectThreshold)))
#define LOCK_WORD_STATE_SHIFT 30
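
Each generated #define is paired with a DEFINE_CHECK_EQ tying the hardcoded assembly constant back to the authoritative C++ expression, so the new dex-cache constants cannot silently drift. The mechanism is equivalent in spirit to a static_assert; a reduced sketch:

```cpp
#include <cstdint>

// The authoritative C++ value...
constexpr int32_t kDexCacheStringCacheSize = 1024;

// ...and the literal the generator hardcodes for assembly, tied together so
// any drift fails the build instead of corrupting the fast path silently.
#define STRING_DEX_CACHE_SIZE_MINUS_ONE 1023
static_assert(STRING_DEX_CACHE_SIZE_MINUS_ONE == kDexCacheStringCacheSize - 1,
              "assembly constant out of sync with its C++ definition");

int main() {}
```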
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index f1f7f42..101c9a1 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -20,6 +20,9 @@
#include "common_throws.h"
#include "interpreter_common.h"
+#include "interpreter_goto_table_impl.h"
+#include "interpreter_mterp_impl.h"
+#include "interpreter_switch_impl.h"
#include "mirror/string-inl.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
@@ -242,28 +245,6 @@
static constexpr InterpreterImplKind kInterpreterImplKind = kMterpImplKind;
-#if defined(__clang__)
-// Clang 3.4 fails to build the goto interpreter implementation.
-template<bool do_access_check, bool transaction_active>
-JValue ExecuteGotoImpl(Thread*, const DexFile::CodeItem*, ShadowFrame&, JValue) {
- LOG(FATAL) << "UNREACHABLE";
- UNREACHABLE();
-}
-// Explicit definitions of ExecuteGotoImpl.
-template<> SHARED_REQUIRES(Locks::mutator_lock_)
-JValue ExecuteGotoImpl<true, false>(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register);
-template<> SHARED_REQUIRES(Locks::mutator_lock_)
-JValue ExecuteGotoImpl<false, false>(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register);
-template<> SHARED_REQUIRES(Locks::mutator_lock_)
-JValue ExecuteGotoImpl<true, true>(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register);
-template<> SHARED_REQUIRES(Locks::mutator_lock_)
-JValue ExecuteGotoImpl<false, true>(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register);
-#endif
-
static inline JValue Execute(
Thread* self,
const DexFile::CodeItem* code_item,
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 90c8227..ce3b1eb 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -65,21 +65,6 @@
namespace art {
namespace interpreter {
-// External references to all interpreter implementations.
-
-template<bool do_access_check, bool transaction_active>
-extern JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register,
- bool interpret_one_instruction);
-
-template<bool do_access_check, bool transaction_active>
-extern JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register);
-
-// Mterp does not support transactions or access check, thus no templated versions.
-extern "C" bool ExecuteMterpImpl(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame* shadow_frame, JValue* result_register);
-
void ThrowNullPointerExceptionFromInterpreter()
SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index 43b2778..25cb4b3 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -14,18 +14,29 @@
* limitations under the License.
*/
+#include "interpreter_goto_table_impl.h"
+
+// Common includes
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "stack.h"
+#include "thread.h"
+
+// Clang compiles the GOTO interpreter very slowly, so we skip it entirely. These includes are
+// only necessary when the implementation is actually compiled.
#if !defined(__clang__)
-// Clang 3.4 fails to build the goto interpreter implementation.
-
-
#include "experimental_flags.h"
#include "interpreter_common.h"
#include "jit/jit.h"
#include "safe_math.h"
+#endif
namespace art {
namespace interpreter {
+#if !defined(__clang__)
+
// In the following macros, we expect the following local variables exist:
// - "self": the current Thread*.
// - "inst" : the current Instruction*.
@@ -2558,20 +2569,40 @@
} // NOLINT(readability/fn_size)
// Explicit definitions of ExecuteGotoImpl.
-template SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR
+template HOT_ATTR
JValue ExecuteGotoImpl<true, false>(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register);
-template SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR
+template HOT_ATTR
JValue ExecuteGotoImpl<false, false>(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register);
-template SHARED_REQUIRES(Locks::mutator_lock_)
+template
JValue ExecuteGotoImpl<true, true>(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register);
-template SHARED_REQUIRES(Locks::mutator_lock_)
+template
JValue ExecuteGotoImpl<false, true>(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register);
+#else
+
+template<bool do_access_check, bool transaction_active>
+JValue ExecuteGotoImpl(Thread*, const DexFile::CodeItem*, ShadowFrame&, JValue) {
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
+}
+// Explicit definitions of ExecuteGotoImpl.
+template<>
+JValue ExecuteGotoImpl<true, false>(Thread* self, const DexFile::CodeItem* code_item,
+ ShadowFrame& shadow_frame, JValue result_register);
+template<>
+JValue ExecuteGotoImpl<false, false>(Thread* self, const DexFile::CodeItem* code_item,
+ ShadowFrame& shadow_frame, JValue result_register);
+template<>
+JValue ExecuteGotoImpl<true, true>(Thread* self, const DexFile::CodeItem* code_item,
+ ShadowFrame& shadow_frame, JValue result_register);
+template<>
+JValue ExecuteGotoImpl<false, true>(Thread* self, const DexFile::CodeItem* code_item,
+ ShadowFrame& shadow_frame, JValue result_register);
+#endif
+
} // namespace interpreter
} // namespace art
-
-#endif
diff --git a/runtime/interpreter/interpreter_goto_table_impl.h b/runtime/interpreter/interpreter_goto_table_impl.h
new file mode 100644
index 0000000..bb9be88
--- /dev/null
+++ b/runtime/interpreter/interpreter_goto_table_impl.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_INTERPRETER_INTERPRETER_GOTO_TABLE_IMPL_H_
+#define ART_RUNTIME_INTERPRETER_INTERPRETER_GOTO_TABLE_IMPL_H_
+
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "dex_file.h"
+#include "jvalue.h"
+
+namespace art {
+
+class ShadowFrame;
+class Thread;
+
+namespace interpreter {
+
+template<bool do_access_check, bool transaction_active>
+JValue ExecuteGotoImpl(Thread* self,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame& shadow_frame,
+ JValue result_register) SHARED_REQUIRES(Locks::mutator_lock_);
+
+} // namespace interpreter
+} // namespace art
+
+#endif // ART_RUNTIME_INTERPRETER_INTERPRETER_GOTO_TABLE_IMPL_H_
diff --git a/runtime/interpreter/interpreter_mterp_impl.h b/runtime/interpreter/interpreter_mterp_impl.h
new file mode 100644
index 0000000..322df4e
--- /dev/null
+++ b/runtime/interpreter/interpreter_mterp_impl.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_INTERPRETER_INTERPRETER_MTERP_IMPL_H_
+#define ART_RUNTIME_INTERPRETER_INTERPRETER_MTERP_IMPL_H_
+
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "dex_file.h"
+#include "jvalue.h"
+
+namespace art {
+
+class ShadowFrame;
+class Thread;
+
+namespace interpreter {
+
+// Mterp does not support transactions or access checks, so there are no templated versions.
+extern "C" bool ExecuteMterpImpl(Thread* self,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame,
+ JValue* result_register) SHARED_REQUIRES(Locks::mutator_lock_);
+
+} // namespace interpreter
+} // namespace art
+
+#endif // ART_RUNTIME_INTERPRETER_INTERPRETER_MTERP_IMPL_H_
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index a6349fc..dd10052 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "interpreter_switch_impl.h"
+
#include "base/enums.h"
#include "experimental_flags.h"
#include "interpreter_common.h"
@@ -2337,19 +2339,19 @@
} // NOLINT(readability/fn_size)
// Explicit definitions of ExecuteSwitchImpl.
-template SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR
+template HOT_ATTR
JValue ExecuteSwitchImpl<true, false>(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register,
bool interpret_one_instruction);
-template SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR
+template HOT_ATTR
JValue ExecuteSwitchImpl<false, false>(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register,
bool interpret_one_instruction);
-template SHARED_REQUIRES(Locks::mutator_lock_)
+template
JValue ExecuteSwitchImpl<true, true>(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register,
bool interpret_one_instruction);
-template SHARED_REQUIRES(Locks::mutator_lock_)
+template
JValue ExecuteSwitchImpl<false, true>(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register,
bool interpret_one_instruction);
diff --git a/runtime/interpreter/interpreter_switch_impl.h b/runtime/interpreter/interpreter_switch_impl.h
new file mode 100644
index 0000000..90ec908
--- /dev/null
+++ b/runtime/interpreter/interpreter_switch_impl.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_INTERPRETER_INTERPRETER_SWITCH_IMPL_H_
+#define ART_RUNTIME_INTERPRETER_INTERPRETER_SWITCH_IMPL_H_
+
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "dex_file.h"
+#include "jvalue.h"
+
+namespace art {
+
+class ShadowFrame;
+class Thread;
+
+namespace interpreter {
+
+template<bool do_access_check, bool transaction_active>
+JValue ExecuteSwitchImpl(Thread* self,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame& shadow_frame,
+ JValue result_register,
+ bool interpret_one_instruction) SHARED_REQUIRES(Locks::mutator_lock_);
+
+} // namespace interpreter
+} // namespace art
+
+#endif // ART_RUNTIME_INTERPRETER_INTERPRETER_SWITCH_IMPL_H_
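
The three new headers all follow the same pattern: declare the templated interpreter entry (with its lock annotation) once, keep the definition and the explicit instantiations in the .cc file, and drop SHARED_REQUIRES from the instantiations since it now lives on the declaration. A minimal, self-contained sketch of that declare/define/instantiate split:

```cpp
// In the header: declare the template once (in ART, with its lock annotation).
template <bool kDoAccessCheck>
int ExecuteImpl(int input);

// In the single .cc file: define it, then explicitly instantiate every
// variant so no other translation unit ever needs the body.
template <bool kDoAccessCheck>
int ExecuteImpl(int input) {
  return kDoAccessCheck ? input + 1 : input;  // stand-in for the dispatch loop
}

template int ExecuteImpl<true>(int);   // cf. ExecuteSwitchImpl<true, false>
template int ExecuteImpl<false>(int);

int main() { return ExecuteImpl<false>(0); }
```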
diff --git a/runtime/simulator/Android.mk b/runtime/simulator/Android.mk
index a34a841..e39af2d 100644
--- a/runtime/simulator/Android.mk
+++ b/runtime/simulator/Android.mk
@@ -22,6 +22,9 @@
code_simulator.cc \
code_simulator_arm64.cc
+LIBART_SIMULATOR_CFLAGS := \
+ -DVIXL_INCLUDE_SIMULATOR_AARCH64
+
# $(1): target or host
# $(2): ndebug or debug
define build-libart-simulator
@@ -54,6 +57,7 @@
LOCAL_MODULE_CLASS := SHARED_LIBRARIES
LOCAL_SRC_FILES := $$(LIBART_SIMULATOR_SRC_FILES)
+ LOCAL_CFLAGS := $$(LIBART_SIMULATOR_CFLAGS)
ifeq ($$(art_target_or_host),target)
$(call set-target-local-clang-vars)
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 3aa1fc2..216d8a7 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -224,6 +224,7 @@
thread_to_pass = this;
}
MutexLock mu(thread_to_pass, *Locks::thread_suspend_count_lock_);
+ ScopedTransitioningToRunnable scoped_transitioning_to_runnable(this);
old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
while ((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0) {
diff --git a/runtime/thread.cc b/runtime/thread.cc
index b35a614..79b9f02 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1217,10 +1217,8 @@
ScopedTrace trace(__FUNCTION__);
VLOG(threads) << this << " self-suspending";
// Make thread appear suspended to other threads, release mutator_lock_.
- tls32_.suspended_at_suspend_check = true;
// Transition to suspended and back to runnable, re-acquire share on mutator_lock_.
ScopedThreadSuspension(this, kSuspended);
- tls32_.suspended_at_suspend_check = false;
VLOG(threads) << this << " self-reviving";
}
@@ -1433,6 +1431,12 @@
if (o == nullptr) {
os << "an unknown object";
} else {
+ if (kUseReadBarrier && Thread::Current()->GetIsGcMarking()) {
+ // We may call Thread::Dump() in the middle of the CC thread flip and this thread's stack
+ // may have not been flipped yet and "o" may be a from-space (stale) ref, in which case the
+ // IdentityHashCode call below will crash. So explicitly mark/forward it here.
+ o = ReadBarrier::Mark(o);
+ }
if ((o->GetLockWord(false).GetState() == LockWord::kThinLocked) &&
Locks::mutator_lock_->IsExclusiveHeld(Thread::Current())) {
// Getting the identity hashcode here would result in lock inflation and suspension of the
@@ -1635,7 +1639,7 @@
}
tlsPtr_.flip_function = nullptr;
tlsPtr_.thread_local_mark_stack = nullptr;
- tls32_.suspended_at_suspend_check = false;
+ tls32_.is_transitioning_to_runnable = false;
}
bool Thread::IsStillStarting() const {
@@ -1773,7 +1777,7 @@
CHECK(tlsPtr_.checkpoint_function == nullptr);
CHECK_EQ(checkpoint_overflow_.size(), 0u);
CHECK(tlsPtr_.flip_function == nullptr);
- CHECK_EQ(tls32_.suspended_at_suspend_check, false);
+ CHECK_EQ(tls32_.is_transitioning_to_runnable, false);
// Make sure we processed all deoptimization requests.
CHECK(tlsPtr_.deoptimization_context_stack == nullptr) << "Missed deoptimization";
diff --git a/runtime/thread.h b/runtime/thread.h
index 840b781..1c2d4ab 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -1085,8 +1085,12 @@
return tlsPtr_.nested_signal_state;
}
- bool IsSuspendedAtSuspendCheck() const {
- return tls32_.suspended_at_suspend_check;
+ bool IsTransitioningToRunnable() const {
+ return tls32_.is_transitioning_to_runnable;
+ }
+
+ void SetIsTransitioningToRunnable(bool value) {
+ tls32_.is_transitioning_to_runnable = value;
}
void PushVerifier(verifier::MethodVerifier* verifier);
@@ -1264,7 +1268,7 @@
suspend_count(0), debug_suspend_count(0), thin_lock_thread_id(0), tid(0),
daemon(is_daemon), throwing_OutOfMemoryError(false), no_thread_suspension(0),
thread_exit_check_count(0), handling_signal_(false),
- suspended_at_suspend_check(false), ready_for_debug_invoke(false),
+ is_transitioning_to_runnable(false), ready_for_debug_invoke(false),
debug_method_entry_(false), is_gc_marking(false), weak_ref_access_enabled(true),
disable_thread_flip_count(0) {
}
@@ -1306,10 +1310,10 @@
// True if signal is being handled by this thread.
bool32_t handling_signal_;
- // True if the thread is suspended in FullSuspendCheck(). This is
- // used to distinguish runnable threads that are suspended due to
- // a normal suspend check from other threads.
- bool32_t suspended_at_suspend_check;
+ // True if the thread is in TransitionFromSuspendedToRunnable(). This is used to distinguish the
+ // non-runnable threads (e.g. kNative, kWaiting) that are about to transition to runnable from
+ // the rest of them.
+ bool32_t is_transitioning_to_runnable;
// True if the thread has been suspended by a debugger event. This is
// used to invoke method from the debugger which is only allowed when
@@ -1588,6 +1592,26 @@
Thread* const self_;
};
+class ScopedTransitioningToRunnable : public ValueObject {
+ public:
+ explicit ScopedTransitioningToRunnable(Thread* self)
+ : self_(self) {
+ DCHECK_EQ(self, Thread::Current());
+ if (kUseReadBarrier) {
+ self_->SetIsTransitioningToRunnable(true);
+ }
+ }
+
+ ~ScopedTransitioningToRunnable() {
+ if (kUseReadBarrier) {
+ self_->SetIsTransitioningToRunnable(false);
+ }
+ }
+
+ private:
+ Thread* const self_;
+};
+
std::ostream& operator<<(std::ostream& os, const Thread& thread);
std::ostream& operator<<(std::ostream& os, const StackedShadowFrameType& thread);
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 419ecec..688514c 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -405,6 +405,8 @@
Locks::thread_suspend_count_lock_->AssertNotHeld(self);
CHECK_NE(self->GetState(), kRunnable);
+ collector->GetHeap()->ThreadFlipBegin(self); // Sync with JNI critical calls.
+
SuspendAllInternal(self, self, nullptr);
// Run the flip callback for the collector.
@@ -414,26 +416,31 @@
collector->RegisterPause(NanoTime() - start_time);
// Resume runnable threads.
- std::vector<Thread*> runnable_threads;
+ size_t runnable_thread_count = 0;
std::vector<Thread*> other_threads;
{
+ TimingLogger::ScopedTiming split2("ResumeRunnableThreads", collector->GetTimings());
MutexLock mu(self, *Locks::thread_list_lock_);
MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
--suspend_all_count_;
for (const auto& thread : list_) {
+ // Set the flip function for all threads because Thread::DumpState/DumpJavaStack() (invoked by
+ // a checkpoint) may cause the flip function to be run for a runnable/suspended thread before
+ // a runnable thread runs it for itself or we run it for a suspended thread below.
+ thread->SetFlipFunction(thread_flip_visitor);
if (thread == self) {
continue;
}
- // Set the flip function for both runnable and suspended threads
- // because Thread::DumpState/DumpJavaStack() (invoked by a
- // checkpoint) may cause the flip function to be run for a
- // runnable/suspended thread before a runnable threads runs it
- // for itself or we run it for a suspended thread below.
- thread->SetFlipFunction(thread_flip_visitor);
- if (thread->IsSuspendedAtSuspendCheck()) {
+ // Resume early the threads that were runnable but are suspended just for this thread flip or
+ // about to transition from non-runnable (e.g. kNative at the SOA entry in a JNI function) to
+ // runnable (both cases waiting inside Thread::TransitionFromSuspendedToRunnable), or that are
+ // waiting for the thread flip to end at the JNI critical section entry (kWaitingForGcThreadFlip).
+ ThreadState state = thread->GetState();
+ if (state == kWaitingForGcThreadFlip ||
+ thread->IsTransitioningToRunnable()) {
// The thread will resume right after the broadcast.
thread->ModifySuspendCount(self, -1, nullptr, false);
- runnable_threads.push_back(thread);
+ ++runnable_thread_count;
} else {
other_threads.push_back(thread);
}
@@ -441,8 +448,11 @@
Thread::resume_cond_->Broadcast(self);
}
+ collector->GetHeap()->ThreadFlipEnd(self);
+
// Run the closure on the other threads and let them resume.
{
+ TimingLogger::ScopedTiming split3("FlipOtherThreads", collector->GetTimings());
ReaderMutexLock mu(self, *Locks::mutator_lock_);
for (const auto& thread : other_threads) {
Closure* flip_func = thread->GetFlipFunction();
@@ -451,11 +461,15 @@
}
}
// Run it for self.
- thread_flip_visitor->Run(self);
+ Closure* flip_func = self->GetFlipFunction();
+ if (flip_func != nullptr) {
+ flip_func->Run(self);
+ }
}
// Resume other threads.
{
+ TimingLogger::ScopedTiming split4("ResumeOtherThreads", collector->GetTimings());
MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
for (const auto& thread : other_threads) {
thread->ModifySuspendCount(self, -1, nullptr, false);
@@ -463,7 +477,7 @@
Thread::resume_cond_->Broadcast(self);
}
- return runnable_threads.size() + other_threads.size() + 1; // +1 for self.
+ return runnable_thread_count + other_threads.size() + 1; // +1 for self.
}
void ThreadList::SuspendAll(const char* cause, bool long_suspend) {
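
The rework replaces the runnable_threads vector with a counter and broadens the early-resume test: a thread is resumed before the flip either when it is parked at the JNI-critical gate (kWaitingForGcThreadFlip) or when it is inside TransitionFromSuspendedToRunnable (the new is_transitioning_to_runnable flag). A compact model of that partition and the returned count (types are simplified stand-ins):

```cpp
#include <cstddef>
#include <vector>

enum ThreadState { kRunnable, kNative, kWaitingForGcThreadFlip };

struct Thread {
  ThreadState state;
  bool is_transitioning_to_runnable;
};

// The new early-resume predicate: parked at the JNI-critical flip gate, or
// inside Thread::TransitionFromSuspendedToRunnable.
bool ResumeEarly(const Thread& t) {
  return t.state == kWaitingForGcThreadFlip || t.is_transitioning_to_runnable;
}

// The vector of runnable threads is gone; only a count survives, since those
// threads run the flip function for themselves after the broadcast.
size_t CountFlipParticipants(const std::vector<Thread>& others) {
  size_t runnable_thread_count = 0;
  std::vector<const Thread*> other_threads;
  for (const Thread& t : others) {
    if (ResumeEarly(t)) ++runnable_thread_count;
    else other_threads.push_back(&t);
  }
  return runnable_thread_count + other_threads.size() + 1;  // +1 for self
}

int main() {
  std::vector<Thread> ts = {{kNative, true}, {kWaitingForGcThreadFlip, false}, {kNative, false}};
  return CountFlipParticipants(ts) == 4 ? 0 : 1;
}
```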
diff --git a/tools/cpp-define-generator/constant_dexcache.def b/tools/cpp-define-generator/constant_dexcache.def
new file mode 100644
index 0000000..fd197f2
--- /dev/null
+++ b/tools/cpp-define-generator/constant_dexcache.def
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined(DEFINE_INCLUDE_DEPENDENCIES)
+#include "mirror/dex_cache.h" // art::mirror::DexCache, StringDexCachePair
+#endif
+
+DEFINE_EXPR(STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT, int32_t, art::WhichPowerOf2(sizeof(art::mirror::StringDexCachePair)))
+DEFINE_EXPR(STRING_DEX_CACHE_SIZE_MINUS_ONE, int32_t, art::mirror::DexCache::kDexCacheStringCacheSize - 1)
+DEFINE_EXPR(STRING_DEX_CACHE_HASH_BITS, int32_t,
+ art::LeastSignificantBit(art::mirror::DexCache::kDexCacheStringCacheSize))
\ No newline at end of file
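
All three constants derive from kDexCacheStringCacheSize (1024) and the 8-byte pair; for a power of two, LeastSignificantBit coincides with log2, which is why the hash width is 10. A quick constexpr check of the arithmetic (Log2 here models WhichPowerOf2/LeastSignificantBit for power-of-two inputs):

```cpp
#include <cstdint>

// Models WhichPowerOf2 / LeastSignificantBit for power-of-two inputs.
constexpr int Log2(uint64_t v) { return v <= 1 ? 0 : 1 + Log2(v / 2); }

constexpr uint64_t kPairSize = 8;      // sizeof(StringDexCachePair): 32-bit ref + 32-bit index
constexpr uint64_t kCacheSize = 1024;  // kDexCacheStringCacheSize

static_assert(Log2(kPairSize) == 3, "STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT");
static_assert(kCacheSize - 1 == 1023, "STRING_DEX_CACHE_SIZE_MINUS_ONE");
static_assert(Log2(kCacheSize) == 10, "STRING_DEX_CACHE_HASH_BITS");

int main() {}
```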
diff --git a/tools/cpp-define-generator/offset_dexcache.def b/tools/cpp-define-generator/offset_dexcache.def
index 3b26518..4b9d481 100644
--- a/tools/cpp-define-generator/offset_dexcache.def
+++ b/tools/cpp-define-generator/offset_dexcache.def
@@ -19,16 +19,27 @@
#if defined(DEFINE_INCLUDE_DEPENDENCIES)
#include "art_method.h" // art::ArtMethod
#include "base/enums.h" // PointerSize
+#include "mirror/dex_cache.h" // art::DexCache
#endif
-#define DEFINE_ART_METHOD_OFFSET(field_name, method_name) \
+#define DEFINE_ART_METHOD_OFFSET_SIZED(field_name, method_name) \
DEFINE_EXPR(ART_METHOD_ ## field_name ## _OFFSET_32, int32_t, art::ArtMethod::method_name##Offset(art::PointerSize::k32).Int32Value()) \
DEFINE_EXPR(ART_METHOD_ ## field_name ## _OFFSET_64, int32_t, art::ArtMethod::method_name##Offset(art::PointerSize::k64).Int32Value())
+#define DEFINE_ART_METHOD_OFFSET(field_name, method_name) \
+ DEFINE_EXPR(ART_METHOD_ ## field_name ## _OFFSET, int32_t, art::ArtMethod::method_name##Offset().Int32Value())
+
+#define DEFINE_DECLARING_CLASS_OFFSET(field_name, method_name) \
+ DEFINE_EXPR(DECLARING_CLASS_ ## field_name ## _OFFSET, int32_t, art::mirror::Class::method_name##Offset().Int32Value())
+
// New macro suffix Method Name (of the Offset method)
-DEFINE_ART_METHOD_OFFSET(DEX_CACHE_METHODS, DexCacheResolvedMethods)
-DEFINE_ART_METHOD_OFFSET(DEX_CACHE_TYPES, DexCacheResolvedTypes)
-DEFINE_ART_METHOD_OFFSET(JNI, EntryPointFromJni)
-DEFINE_ART_METHOD_OFFSET(QUICK_CODE, EntryPointFromQuickCompiledCode)
+DEFINE_ART_METHOD_OFFSET_SIZED(DEX_CACHE_METHODS, DexCacheResolvedMethods)
+DEFINE_ART_METHOD_OFFSET_SIZED(DEX_CACHE_TYPES, DexCacheResolvedTypes)
+DEFINE_ART_METHOD_OFFSET_SIZED(JNI, EntryPointFromJni)
+DEFINE_ART_METHOD_OFFSET_SIZED(QUICK_CODE, EntryPointFromQuickCompiledCode)
+DEFINE_ART_METHOD_OFFSET(DECLARING_CLASS, DeclaringClass)
+DEFINE_DECLARING_CLASS_OFFSET(DEX_CACHE_STRINGS, DexCacheStrings)
#undef DEFINE_ART_METHOD_OFFSET
+#undef DEFINE_ART_METHOD_OFFSET_SIZED
+#undef DEFINE_DECLARING_CLASS_OFFSET
diff --git a/tools/cpp-define-generator/offsets_all.def b/tools/cpp-define-generator/offsets_all.def
index d2d8777..13371a1 100644
--- a/tools/cpp-define-generator/offsets_all.def
+++ b/tools/cpp-define-generator/offsets_all.def
@@ -48,6 +48,7 @@
// TODO: MIRROR_*_ARRAY offsets (depends on header size)
// TODO: MIRROR_STRING offsets (depends on header size)
#include "offset_dexcache.def"
+#include "constant_dexcache.def"
#include "constant_heap.def"
#include "constant_lockword.def"
#include "constant_globals.def"