Merge "Reserve space in the char backing vector to prevent reallocations"
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 4f63099..850702a 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -33,6 +33,7 @@
Interfaces \
Lookup \
Main \
+ MethodTypes \
MultiDex \
MultiDexModifiedSecondary \
MyClass \
@@ -80,9 +81,9 @@
# Dex file dependencies for each gtest.
ART_GTEST_dex2oat_environment_tests_DEX_DEPS := Main MainStripped MultiDex MultiDexModifiedSecondary Nested
-ART_GTEST_class_linker_test_DEX_DEPS := Interfaces MultiDex MyClass Nested Statics StaticsFromCode
+ART_GTEST_class_linker_test_DEX_DEPS := Interfaces MethodTypes MultiDex MyClass Nested Statics StaticsFromCode
ART_GTEST_compiler_driver_test_DEX_DEPS := AbstractMethod StaticLeafMethods ProfileTestMultiDex
-ART_GTEST_dex_cache_test_DEX_DEPS := Main Packages
+ART_GTEST_dex_cache_test_DEX_DEPS := Main Packages MethodTypes
ART_GTEST_dex_file_test_DEX_DEPS := GetMethodSignature Main Nested
ART_GTEST_dex2oat_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) Statics
ART_GTEST_exception_test_DEX_DEPS := ExceptionHandle
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index b74e588..b229be4 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -775,6 +775,8 @@
existing = existing | ExperimentalFlags::kAgents;
} else if (option == "runtime-plugins") {
existing = existing | ExperimentalFlags::kRuntimePlugins;
+ } else if (option == "method-handles") {
+ existing = existing | ExperimentalFlags::kMethodHandles;
} else {
return Result::Failure(std::string("Unknown option '") + option + "'");
}
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 55e1221..681988d 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -4589,7 +4589,9 @@
}
// We need a temporary register for the read barrier marking slow
// path in CodeGeneratorARM::GenerateArrayLoadWithBakerReadBarrier.
- if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
+ // Also needed for the String compression feature.
+ if ((object_array_get_with_read_barrier && kUseBakerReadBarrier)
+ || (mirror::kUseStringCompression && instruction->IsStringCharAt())) {
locations->AddTemp(Location::RequiresRegister());
}
}
@@ -4602,6 +4604,8 @@
Location out_loc = locations->Out();
uint32_t data_offset = CodeGenerator::GetArrayDataOffset(instruction);
Primitive::Type type = instruction->GetType();
+ const bool maybe_compressed_char_at = mirror::kUseStringCompression &&
+ instruction->IsStringCharAt();
HInstruction* array_instr = instruction->GetArray();
bool has_intermediate_address = array_instr->IsIntermediateAddress();
// The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
@@ -4615,10 +4619,31 @@
case Primitive::kPrimInt: {
if (index.IsConstant()) {
int32_t const_index = index.GetConstant()->AsIntConstant()->GetValue();
- uint32_t full_offset = data_offset + (const_index << Primitive::ComponentSizeShift(type));
+ if (maybe_compressed_char_at) {
+ Register length = IP;
+ Label uncompressed_load, done;
+ uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
+ __ LoadFromOffset(kLoadWord, length, obj, count_offset);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ __ cmp(length, ShifterOperand(0));
+ __ b(&uncompressed_load, GE);
+ __ LoadFromOffset(kLoadUnsignedByte,
+ out_loc.AsRegister<Register>(),
+ obj,
+ data_offset + const_index);
+ __ b(&done);
+ __ Bind(&uncompressed_load);
+ __ LoadFromOffset(GetLoadOperandType(Primitive::kPrimChar),
+ out_loc.AsRegister<Register>(),
+ obj,
+ data_offset + (const_index << 1));
+ __ Bind(&done);
+ } else {
+ uint32_t full_offset = data_offset + (const_index << Primitive::ComponentSizeShift(type));
- LoadOperandType load_type = GetLoadOperandType(type);
- __ LoadFromOffset(load_type, out_loc.AsRegister<Register>(), obj, full_offset);
+ LoadOperandType load_type = GetLoadOperandType(type);
+ __ LoadFromOffset(load_type, out_loc.AsRegister<Register>(), obj, full_offset);
+ }
} else {
Register temp = IP;
@@ -4634,7 +4659,24 @@
} else {
__ add(temp, obj, ShifterOperand(data_offset));
}
- codegen_->LoadFromShiftedRegOffset(type, out_loc, temp, index.AsRegister<Register>());
+ if (maybe_compressed_char_at) {
+ Label uncompressed_load, done;
+ uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
+ Register length = locations->GetTemp(0).AsRegister<Register>();
+ __ LoadFromOffset(kLoadWord, length, obj, count_offset);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ __ cmp(length, ShifterOperand(0));
+ __ b(&uncompressed_load, GE);
+ __ ldrb(out_loc.AsRegister<Register>(),
+ Address(temp, index.AsRegister<Register>(), Shift::LSL, 0));
+ __ b(&done);
+ __ Bind(&uncompressed_load);
+ __ ldrh(out_loc.AsRegister<Register>(),
+ Address(temp, index.AsRegister<Register>(), Shift::LSL, 1));
+ __ Bind(&done);
+ } else {
+ codegen_->LoadFromShiftedRegOffset(type, out_loc, temp, index.AsRegister<Register>());
+ }
}
break;
}
@@ -4734,7 +4776,7 @@
if (type == Primitive::kPrimNot) {
// Potential implicit null checks, in the case of reference
// arrays, are handled in the previous switch statement.
- } else {
+ } else if (!maybe_compressed_char_at) {
codegen_->MaybeRecordImplicitNullCheck(instruction);
}
}
@@ -5024,6 +5066,10 @@
Register out = locations->Out().AsRegister<Register>();
__ LoadFromOffset(kLoadWord, out, obj, offset);
codegen_->MaybeRecordImplicitNullCheck(instruction);
+ // Mask out compression flag from String's array length.
+ if (mirror::kUseStringCompression && instruction->IsStringLength()) {
+ __ bic(out, out, ShifterOperand(1u << 31));
+ }
}
void LocationsBuilderARM::VisitIntermediateAddress(HIntermediateAddress* instruction) {
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index a2a2e42..4f7f36b 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -2052,7 +2052,8 @@
Location index = locations->InAt(1);
Location out = locations->Out();
uint32_t offset = CodeGenerator::GetArrayDataOffset(instruction);
-
+ const bool maybe_compressed_char_at = mirror::kUseStringCompression &&
+ instruction->IsStringCharAt();
MacroAssembler* masm = GetVIXLAssembler();
UseScratchRegisterScope temps(masm);
// Block pools between `Load` and `MaybeRecordImplicitNullCheck`.
@@ -2070,9 +2071,28 @@
} else {
// General case.
MemOperand source = HeapOperand(obj);
+ Register length;
+ if (maybe_compressed_char_at) {
+ uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
+ length = temps.AcquireW();
+ __ Ldr(length, HeapOperand(obj, count_offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
if (index.IsConstant()) {
- offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(type);
- source = HeapOperand(obj, offset);
+ if (maybe_compressed_char_at) {
+ vixl::aarch64::Label uncompressed_load, done;
+ __ Tbz(length.W(), kWRegSize - 1, &uncompressed_load);
+ __ Ldrb(Register(OutputCPURegister(instruction)),
+ HeapOperand(obj, offset + Int64ConstantFrom(index)));
+ __ B(&done);
+ __ Bind(&uncompressed_load);
+ __ Ldrh(Register(OutputCPURegister(instruction)),
+ HeapOperand(obj, offset + (Int64ConstantFrom(index) << 1)));
+ __ Bind(&done);
+ } else {
+ offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(type);
+ source = HeapOperand(obj, offset);
+ }
} else {
Register temp = temps.AcquireSameSizeAs(obj);
if (instruction->GetArray()->IsIntermediateAddress()) {
@@ -2090,11 +2110,24 @@
} else {
__ Add(temp, obj, offset);
}
- source = HeapOperand(temp, XRegisterFrom(index), LSL, Primitive::ComponentSizeShift(type));
+ if (maybe_compressed_char_at) {
+ vixl::aarch64::Label uncompressed_load, done;
+ __ Tbz(length.W(), kWRegSize - 1, &uncompressed_load);
+ __ Ldrb(Register(OutputCPURegister(instruction)),
+ HeapOperand(temp, XRegisterFrom(index), LSL, 0));
+ __ B(&done);
+ __ Bind(&uncompressed_load);
+ __ Ldrh(Register(OutputCPURegister(instruction)),
+ HeapOperand(temp, XRegisterFrom(index), LSL, 1));
+ __ Bind(&done);
+ } else {
+ source = HeapOperand(temp, XRegisterFrom(index), LSL, Primitive::ComponentSizeShift(type));
+ }
}
-
- codegen_->Load(type, OutputCPURegister(instruction), source);
- codegen_->MaybeRecordImplicitNullCheck(instruction);
+ if (!maybe_compressed_char_at) {
+ codegen_->Load(type, OutputCPURegister(instruction), source);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
if (type == Primitive::kPrimNot) {
static_assert(
@@ -2118,9 +2151,14 @@
void InstructionCodeGeneratorARM64::VisitArrayLength(HArrayLength* instruction) {
uint32_t offset = CodeGenerator::GetArrayLengthOffset(instruction);
+ vixl::aarch64::Register out = OutputRegister(instruction);
BlockPoolsScope block_pools(GetVIXLAssembler());
- __ Ldr(OutputRegister(instruction), HeapOperand(InputRegisterAt(instruction, 0), offset));
+ __ Ldr(out, HeapOperand(InputRegisterAt(instruction, 0), offset));
codegen_->MaybeRecordImplicitNullCheck(instruction);
+ // Mask out compression flag from String's array length.
+ if (mirror::kUseStringCompression && instruction->IsStringLength()) {
+ __ And(out.W(), out.W(), Operand(static_cast<int32_t>(INT32_MAX)));
+ }
}
void LocationsBuilderARM64::VisitArraySet(HArraySet* instruction) {
@@ -2312,7 +2350,6 @@
BoundsCheckSlowPathARM64* slow_path =
new (GetGraph()->GetArena()) BoundsCheckSlowPathARM64(instruction);
codegen_->AddSlowPath(slow_path);
-
__ Cmp(InputRegisterAt(instruction, 0), InputOperandAt(instruction, 1));
__ B(slow_path->GetEntryLabel(), hs);
}
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index c300080..a7051ae 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -150,6 +150,9 @@
length_loc = Location::RegisterLocation(calling_convention.GetRegisterAt(2));
}
__ movl(length_loc.AsRegister<Register>(), array_len);
+ if (mirror::kUseStringCompression) {
+ __ andl(length_loc.AsRegister<Register>(), Immediate(INT32_MAX));
+ }
}
x86_codegen->EmitParallelMoves(
locations->InAt(0),
@@ -5021,7 +5024,23 @@
case Primitive::kPrimChar: {
Register out = out_loc.AsRegister<Register>();
- __ movzxw(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_2, data_offset));
+ if (mirror::kUseStringCompression && instruction->IsStringCharAt()) {
+ // Branch cases into compressed and uncompressed for each index's type.
+ uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
+ NearLabel done, not_compressed;
+ __ cmpl(Address(obj, count_offset), Immediate(0));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ __ j(kGreaterEqual, ¬_compressed);
+ __ movzxb(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_1, data_offset));
+ __ jmp(&done);
+ __ Bind(¬_compressed);
+ __ movzxw(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_2, data_offset));
+ __ Bind(&done);
+ } else {
+ // Common case for charAt of array of char or when string compression's
+ // feature is turned off.
+ __ movzxw(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_2, data_offset));
+ }
break;
}
@@ -5359,6 +5378,10 @@
Register out = locations->Out().AsRegister<Register>();
__ movl(out, Address(obj, offset));
codegen_->MaybeRecordImplicitNullCheck(instruction);
+ // Mask out most significant bit in case the array is String's array of char.
+ if (mirror::kUseStringCompression && instruction->IsStringLength()) {
+ __ andl(out, Immediate(INT32_MAX));
+ }
}
void LocationsBuilderX86::VisitBoundsCheck(HBoundsCheck* instruction) {
@@ -5372,9 +5395,15 @@
if (!length->IsEmittedAtUseSite()) {
locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
}
+ // Need a register to load the array's length.
+ if (mirror::kUseStringCompression && instruction->IsStringCharAt()) {
+ locations->AddTemp(Location::RequiresRegister());
+ }
}
void InstructionCodeGeneratorX86::VisitBoundsCheck(HBoundsCheck* instruction) {
+ const bool is_string_compressed_char_at =
+ mirror::kUseStringCompression && instruction->IsStringCharAt();
LocationSummary* locations = instruction->GetLocations();
Location index_loc = locations->InAt(0);
Location length_loc = locations->InAt(1);
@@ -5409,13 +5438,23 @@
uint32_t len_offset = CodeGenerator::GetArrayLengthOffset(array_length->AsArrayLength());
Location array_loc = array_length->GetLocations()->InAt(0);
Address array_len(array_loc.AsRegister<Register>(), len_offset);
- if (index_loc.IsConstant()) {
- int32_t value = CodeGenerator::GetInt32ValueOf(index_loc.GetConstant());
- __ cmpl(array_len, Immediate(value));
+ if (is_string_compressed_char_at) {
+ Register length_reg = locations->GetTemp(0).AsRegister<Register>();
+ __ movl(length_reg, array_len);
+ codegen_->MaybeRecordImplicitNullCheck(array_length);
+ __ andl(length_reg, Immediate(INT32_MAX));
+ codegen_->GenerateIntCompare(length_reg, index_loc);
} else {
- __ cmpl(array_len, index_loc.AsRegister<Register>());
+ // Checking bounds for general case:
+ // Array of char or string's array with the compression feature off.
+ if (index_loc.IsConstant()) {
+ int32_t value = CodeGenerator::GetInt32ValueOf(index_loc.GetConstant());
+ __ cmpl(array_len, Immediate(value));
+ } else {
+ __ cmpl(array_len, index_loc.AsRegister<Register>());
+ }
+ codegen_->MaybeRecordImplicitNullCheck(array_length);
}
- codegen_->MaybeRecordImplicitNullCheck(array_length);
} else {
codegen_->GenerateIntCompare(length_loc, index_loc);
}
@@ -7278,13 +7317,17 @@
void CodeGeneratorX86::GenerateIntCompare(Location lhs, Location rhs) {
Register lhs_reg = lhs.AsRegister<Register>();
+ GenerateIntCompare(lhs_reg, rhs);
+}
+
+void CodeGeneratorX86::GenerateIntCompare(Register lhs, Location rhs) {
if (rhs.IsConstant()) {
int32_t value = CodeGenerator::GetInt32ValueOf(rhs.GetConstant());
- Compare32BitValue(lhs_reg, value);
+ Compare32BitValue(lhs, value);
} else if (rhs.IsStackSlot()) {
- __ cmpl(lhs_reg, Address(ESP, rhs.GetStackIndex()));
+ __ cmpl(lhs, Address(ESP, rhs.GetStackIndex()));
} else {
- __ cmpl(lhs_reg, rhs.AsRegister<Register>());
+ __ cmpl(lhs, rhs.AsRegister<Register>());
}
}
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 1ae9af3..1bd28da 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -474,6 +474,7 @@
// Compare int values. Supports only register locations for `lhs`.
void GenerateIntCompare(Location lhs, Location rhs);
+ void GenerateIntCompare(Register lhs, Location rhs);
// Construct address for array access.
static Address ArrayAddress(Register obj,
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index f9a3e42..b243ee0 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -198,6 +198,9 @@
length_loc = Location::RegisterLocation(calling_convention.GetRegisterAt(2));
}
__ movl(length_loc.AsRegister<CpuRegister>(), array_len);
+ if (mirror::kUseStringCompression) {
+ __ andl(length_loc.AsRegister<CpuRegister>(), Immediate(INT32_MAX));
+ }
}
// We're moving two locations to locations that could overlap, so we need a parallel
@@ -4485,7 +4488,21 @@
case Primitive::kPrimChar: {
CpuRegister out = out_loc.AsRegister<CpuRegister>();
- __ movzxw(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_2, data_offset));
+ if (mirror::kUseStringCompression && instruction->IsStringCharAt()) {
+ // Branch cases into compressed and uncompressed for each index's type.
+ uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
+ NearLabel done, not_compressed;
+ __ cmpl(Address(obj, count_offset), Immediate(0));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ __ j(kGreaterEqual, ¬_compressed);
+ __ movzxb(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_1, data_offset));
+ __ jmp(&done);
+ __ Bind(¬_compressed);
+ __ movzxw(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_2, data_offset));
+ __ Bind(&done);
+ } else {
+ __ movzxw(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_2, data_offset));
+ }
break;
}
@@ -4807,6 +4824,10 @@
CpuRegister out = locations->Out().AsRegister<CpuRegister>();
__ movl(out, Address(obj, offset));
codegen_->MaybeRecordImplicitNullCheck(instruction);
+ // Mask out most significant bit in case the array is String's array of char.
+ if (mirror::kUseStringCompression && instruction->IsStringLength()) {
+ __ andl(out, Immediate(INT32_MAX));
+ }
}
void LocationsBuilderX86_64::VisitBoundsCheck(HBoundsCheck* instruction) {
@@ -4856,13 +4877,23 @@
uint32_t len_offset = CodeGenerator::GetArrayLengthOffset(array_length->AsArrayLength());
Location array_loc = array_length->GetLocations()->InAt(0);
Address array_len(array_loc.AsRegister<CpuRegister>(), len_offset);
- if (index_loc.IsConstant()) {
- int32_t value = CodeGenerator::GetInt32ValueOf(index_loc.GetConstant());
- __ cmpl(array_len, Immediate(value));
+ if (mirror::kUseStringCompression && instruction->IsStringCharAt()) {
+ CpuRegister length_reg = CpuRegister(TMP);
+ __ movl(length_reg, array_len);
+ codegen_->MaybeRecordImplicitNullCheck(array_length);
+ __ andl(length_reg, Immediate(INT32_MAX));
+ codegen_->GenerateIntCompare(length_reg, index_loc);
} else {
- __ cmpl(array_len, index_loc.AsRegister<CpuRegister>());
+ // Checking the bound for general case:
+ // Array of char or String's array when the compression feature is off.
+ if (index_loc.IsConstant()) {
+ int32_t value = CodeGenerator::GetInt32ValueOf(index_loc.GetConstant());
+ __ cmpl(array_len, Immediate(value));
+ } else {
+ __ cmpl(array_len, index_loc.AsRegister<CpuRegister>());
+ }
+ codegen_->MaybeRecordImplicitNullCheck(array_length);
}
- codegen_->MaybeRecordImplicitNullCheck(array_length);
} else {
codegen_->GenerateIntCompare(length_loc, index_loc);
}
@@ -6525,13 +6556,17 @@
void CodeGeneratorX86_64::GenerateIntCompare(Location lhs, Location rhs) {
CpuRegister lhs_reg = lhs.AsRegister<CpuRegister>();
+ GenerateIntCompare(lhs_reg, rhs);
+}
+
+void CodeGeneratorX86_64::GenerateIntCompare(CpuRegister lhs, Location rhs) {
if (rhs.IsConstant()) {
int32_t value = CodeGenerator::GetInt32ValueOf(rhs.GetConstant());
- Compare32BitValue(lhs_reg, value);
+ Compare32BitValue(lhs, value);
} else if (rhs.IsStackSlot()) {
- __ cmpl(lhs_reg, Address(CpuRegister(RSP), rhs.GetStackIndex()));
+ __ cmpl(lhs, Address(CpuRegister(RSP), rhs.GetStackIndex()));
} else {
- __ cmpl(lhs_reg, rhs.AsRegister<CpuRegister>());
+ __ cmpl(lhs, rhs.AsRegister<CpuRegister>());
}
}
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 594f051..8dec44e 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -510,8 +510,9 @@
void Compare32BitValue(CpuRegister dest, int32_t value);
void Compare64BitValue(CpuRegister dest, int64_t value);
- // Compare int values. Supports only register locations for `lhs`.
+ // Compare int values. Supports register locations for `lhs`.
void GenerateIntCompare(Location lhs, Location rhs);
+ void GenerateIntCompare(CpuRegister lhs, Location rhs);
// Compare long values. Supports only register locations for `lhs`.
void GenerateLongCompare(Location lhs, Location rhs);
diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc
index 495f3fd..56e4c7a 100644
--- a/compiler/optimizing/instruction_simplifier_arm.cc
+++ b/compiler/optimizing/instruction_simplifier_arm.cc
@@ -44,6 +44,14 @@
size_t data_offset = CodeGenerator::GetArrayDataOffset(instruction);
Primitive::Type type = instruction->GetType();
+ // TODO: Implement reading (length + compression) for String compression feature from
+ // negative offset (count_offset - data_offset). Thumb2Assembler does not support T4
+ // encoding of "LDR (immediate)" at the moment.
+ // Don't move array pointer if it is charAt because we need to take the count first.
+ if (mirror::kUseStringCompression && instruction->IsStringCharAt()) {
+ return;
+ }
+
if (type == Primitive::kPrimLong
|| type == Primitive::kPrimFloat
|| type == Primitive::kPrimDouble) {
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index 6d107d5..d0dd650 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -140,6 +140,13 @@
void InstructionSimplifierArm64Visitor::VisitArrayGet(HArrayGet* instruction) {
size_t data_offset = CodeGenerator::GetArrayDataOffset(instruction);
+ // Don't move the array pointer if it is charAt because we need to take the count first.
+ // TODO: Implement reading (length + compression) for String compression feature from
+ // negative offset (count_offset - data_offset) using LDP and clobbering an extra temporary.
+ // Note that "LDR (Immediate)" does not have a "signed offset" encoding.
+ if (mirror::kUseStringCompression && instruction->IsStringCharAt()) {
+ return;
+ }
if (TryExtractArrayAccessAddress(instruction,
instruction->GetArray(),
instruction->GetIndex(),
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index fd2da10..96a6ecb 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -1039,6 +1039,11 @@
locations->AddTemp(Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
+ // Need temporary registers for the String compression feature.
+ if (mirror::kUseStringCompression) {
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ }
locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
}
@@ -1053,10 +1058,16 @@
Register temp0 = locations->GetTemp(0).AsRegister<Register>();
Register temp1 = locations->GetTemp(1).AsRegister<Register>();
Register temp2 = locations->GetTemp(2).AsRegister<Register>();
+ Register temp3, temp4;
+ if (mirror::kUseStringCompression) {
+ temp3 = locations->GetTemp(3).AsRegister<Register>();
+ temp4 = locations->GetTemp(4).AsRegister<Register>();
+ }
Label loop;
Label find_char_diff;
Label end;
+ Label different_compression;
// Get offsets of count and value fields within a string object.
const int32_t count_offset = mirror::String::CountOffset().Int32Value();
@@ -1077,20 +1088,40 @@
// Reference equality check, return 0 if same reference.
__ subs(out, str, ShifterOperand(arg));
__ b(&end, EQ);
- // Load lengths of this and argument strings.
- __ ldr(temp2, Address(str, count_offset));
- __ ldr(temp1, Address(arg, count_offset));
+ if (mirror::kUseStringCompression) {
+ // Load lengths of this and argument strings.
+ __ ldr(temp3, Address(str, count_offset));
+ __ ldr(temp4, Address(arg, count_offset));
+ // Clean out compression flag from lengths.
+ __ bic(temp0, temp3, ShifterOperand(0x80000000));
+ __ bic(IP, temp4, ShifterOperand(0x80000000));
+ } else {
+ // Load lengths of this and argument strings.
+ __ ldr(temp0, Address(str, count_offset));
+ __ ldr(IP, Address(arg, count_offset));
+ }
// out = length diff.
- __ subs(out, temp2, ShifterOperand(temp1));
+ __ subs(out, temp0, ShifterOperand(IP));
// temp0 = min(len(str), len(arg)).
- __ it(Condition::LT, kItElse);
- __ mov(temp0, ShifterOperand(temp2), Condition::LT);
- __ mov(temp0, ShifterOperand(temp1), Condition::GE);
+ __ it(GT);
+ __ mov(temp0, ShifterOperand(IP), GT);
// Shorter string is empty?
__ CompareAndBranchIfZero(temp0, &end);
+ if (mirror::kUseStringCompression) {
+ // Check if both strings using same compression style to use this comparison loop.
+ __ eors(temp3, temp3, ShifterOperand(temp4));
+ __ b(&different_compression, MI);
+ }
// Store offset of string value in preparation for comparison loop.
__ mov(temp1, ShifterOperand(value_offset));
+ if (mirror::kUseStringCompression) {
+ // For string compression, calculate the number of bytes to compare (not chars).
+ // This could in theory exceed INT32_MAX, so treat temp0 as unsigned.
+ __ cmp(temp4, ShifterOperand(0));
+ __ it(GE);
+ __ add(temp0, temp0, ShifterOperand(temp0), GE);
+ }
// Assertions that must hold in order to compare multiple characters at a time.
CHECK_ALIGNED(value_offset, 8);
@@ -1100,6 +1131,7 @@
const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar);
DCHECK_EQ(char_size, 2u);
+ Label find_char_diff_2nd_cmp;
// Unrolled loop comparing 4x16-bit chars per iteration (ok because of string data alignment).
__ Bind(&loop);
__ ldr(IP, Address(str, temp1));
@@ -1107,43 +1139,113 @@
__ cmp(IP, ShifterOperand(temp2));
__ b(&find_char_diff, NE);
__ add(temp1, temp1, ShifterOperand(char_size * 2));
- __ sub(temp0, temp0, ShifterOperand(2));
__ ldr(IP, Address(str, temp1));
__ ldr(temp2, Address(arg, temp1));
__ cmp(IP, ShifterOperand(temp2));
- __ b(&find_char_diff, NE);
+ __ b(&find_char_diff_2nd_cmp, NE);
__ add(temp1, temp1, ShifterOperand(char_size * 2));
- __ subs(temp0, temp0, ShifterOperand(2));
-
- __ b(&loop, GT);
+ // With string compression, we have compared 8 bytes, otherwise 4 chars.
+ __ subs(temp0, temp0, ShifterOperand(mirror::kUseStringCompression ? 8 : 4));
+ __ b(&loop, HI);
__ b(&end);
- // Find the single 16-bit character difference.
+ __ Bind(&find_char_diff_2nd_cmp);
+ if (mirror::kUseStringCompression) {
+ __ subs(temp0, temp0, ShifterOperand(4)); // 4 bytes previously compared.
+ __ b(&end, LS); // Was the second comparison fully beyond the end?
+ } else {
+ // Without string compression, we can start treating temp0 as signed
+ // and rely on the signed comparison below.
+ __ sub(temp0, temp0, ShifterOperand(2));
+ }
+
+ // Find the single character difference.
__ Bind(&find_char_diff);
// Get the bit position of the first character that differs.
__ eor(temp1, temp2, ShifterOperand(IP));
__ rbit(temp1, temp1);
__ clz(temp1, temp1);
- // temp0 = number of 16-bit characters remaining to compare.
- // (it could be < 1 if a difference is found after the first SUB in the comparison loop, and
- // after the end of the shorter string data).
+ // temp0 = number of characters remaining to compare.
+ // (Without string compression, it could be < 1 if a difference is found by the second CMP
+ // in the comparison loop, and after the end of the shorter string data).
- // (temp1 >> 4) = character where difference occurs between the last two words compared, on the
- // interval [0,1] (0 for low half-word different, 1 for high half-word different).
+ // Without string compression (temp1 >> 4) = character where difference occurs between the last
+ // two words compared, in the interval [0,1].
+ // (0 for low half-word different, 1 for high half-word different).
+ // With string compression, (temp1 << 3) = byte where the difference occurs,
+ // in the interval [0,3].
- // If temp0 <= (temp1 >> 4), the difference occurs outside the remaining string data, so just
- // return length diff (out).
- __ cmp(temp0, ShifterOperand(temp1, LSR, 4));
- __ b(&end, LE);
+ // If temp0 <= (temp1 >> (kUseStringCompression ? 3 : 4)), the difference occurs outside
+ // the remaining string data, so just return length diff (out).
+ // The comparison is unsigned for string compression, otherwise signed.
+ __ cmp(temp0, ShifterOperand(temp1, LSR, mirror::kUseStringCompression ? 3 : 4));
+ __ b(&end, mirror::kUseStringCompression ? LS : LE);
// Extract the characters and calculate the difference.
+ Label uncompressed_string, continue_process;
+ if (mirror::kUseStringCompression) {
+ __ cmp(temp4, ShifterOperand(0));
+ __ b(&uncompressed_string, GE);
+ __ bic(temp1, temp1, ShifterOperand(0x7));
+ __ b(&continue_process);
+ }
+ __ Bind(&uncompressed_string);
__ bic(temp1, temp1, ShifterOperand(0xf));
+ __ Bind(&continue_process);
+
__ Lsr(temp2, temp2, temp1);
__ Lsr(IP, IP, temp1);
+ Label calculate_difference, uncompressed_string_extract_chars;
+ if (mirror::kUseStringCompression) {
+ __ cmp(temp4, ShifterOperand(0));
+ __ b(&uncompressed_string_extract_chars, GE);
+ __ ubfx(temp2, temp2, 0, 8);
+ __ ubfx(IP, IP, 0, 8);
+ __ b(&calculate_difference);
+ }
+ __ Bind(&uncompressed_string_extract_chars);
__ movt(temp2, 0);
__ movt(IP, 0);
+ __ Bind(&calculate_difference);
__ sub(out, IP, ShifterOperand(temp2));
+ __ b(&end);
+
+ if (mirror::kUseStringCompression) {
+ const size_t c_char_size = Primitive::ComponentSize(Primitive::kPrimByte);
+ DCHECK_EQ(c_char_size, 1u);
+ Label loop_arg_compressed, loop_this_compressed, find_diff;
+ // Comparison for different compression style.
+ // This part is when THIS is compressed and ARG is not.
+ __ Bind(&different_compression);
+ __ add(temp2, str, ShifterOperand(value_offset));
+ __ add(temp3, arg, ShifterOperand(value_offset));
+ __ cmp(temp4, ShifterOperand(0));
+ __ b(&loop_arg_compressed, LT);
+
+ __ Bind(&loop_this_compressed);
+ __ ldrb(IP, Address(temp2, c_char_size, Address::PostIndex));
+ __ ldrh(temp4, Address(temp3, char_size, Address::PostIndex));
+ __ cmp(IP, ShifterOperand(temp4));
+ __ b(&find_diff, NE);
+ __ subs(temp0, temp0, ShifterOperand(1));
+ __ b(&loop_this_compressed, GT);
+ __ b(&end);
+
+ // This part is when THIS is not compressed and ARG is.
+ __ Bind(&loop_arg_compressed);
+ __ ldrh(IP, Address(temp2, char_size, Address::PostIndex));
+ __ ldrb(temp4, Address(temp3, c_char_size, Address::PostIndex));
+ __ cmp(IP, ShifterOperand(temp4));
+ __ b(&find_diff, NE);
+ __ subs(temp0, temp0, ShifterOperand(1));
+ __ b(&loop_arg_compressed, GT);
+ __ b(&end);
+
+ // Calculate the difference.
+ __ Bind(&find_diff);
+ __ sub(out, IP, ShifterOperand(temp4));
+ }
__ Bind(&end);
@@ -1180,7 +1282,7 @@
Register temp1 = locations->GetTemp(1).AsRegister<Register>();
Register temp2 = locations->GetTemp(2).AsRegister<Register>();
- Label loop;
+ Label loop, preloop;
Label end;
Label return_true;
Label return_false;
@@ -1214,11 +1316,15 @@
__ ldr(temp, Address(str, count_offset));
__ ldr(temp1, Address(arg, count_offset));
// Check if lengths are equal, return false if they're not.
+ // Also compares the compression style; if it differs, return false.
__ cmp(temp, ShifterOperand(temp1));
__ b(&return_false, NE);
// Return true if both strings are empty.
+ if (mirror::kUseStringCompression) {
+ // Length needs to be masked out first because 0 is treated as compressed.
+ __ bic(temp, temp, ShifterOperand(0x80000000));
+ }
__ cbz(temp, &return_true);
-
// Reference equality check, return true if same reference.
__ cmp(str, ShifterOperand(arg));
__ b(&return_true, EQ);
@@ -1227,10 +1333,19 @@
DCHECK_ALIGNED(value_offset, 4);
static_assert(IsAligned<4>(kObjectAlignment), "String data must be aligned for fast compare.");
- __ LoadImmediate(temp1, value_offset);
-
+ if (mirror::kUseStringCompression) {
+ // If not compressed, go directly to the fast compare. Else preprocess the length.
+ __ cmp(temp1, ShifterOperand(0));
+ __ b(&preloop, GT);
+ // Mask out compression flag and adjust length for compressed string (8-bit)
+ // as if it is a 16-bit data, new_length = (length + 1) / 2.
+ __ add(temp, temp, ShifterOperand(1));
+ __ Lsr(temp, temp, 1);
+ __ Bind(&preloop);
+ }
// Loop to compare strings 2 characters at a time starting at the front of the string.
// Ok to do this because strings with an odd length are zero-padded.
+ __ LoadImmediate(temp1, value_offset);
__ Bind(&loop);
__ ldr(out, Address(str, temp1));
__ ldr(temp2, Address(arg, temp1));
@@ -2330,22 +2445,31 @@
Register src_ptr = locations->GetTemp(1).AsRegister<Register>();
Register dst_ptr = locations->GetTemp(2).AsRegister<Register>();
- // src range to copy.
- __ add(src_ptr, srcObj, ShifterOperand(value_offset));
- __ add(src_ptr, src_ptr, ShifterOperand(srcBegin, LSL, 1));
-
+ Label done, compressed_string_loop;
// dst to be copied.
__ add(dst_ptr, dstObj, ShifterOperand(data_offset));
__ add(dst_ptr, dst_ptr, ShifterOperand(dstBegin, LSL, 1));
__ subs(num_chr, srcEnd, ShifterOperand(srcBegin));
-
- // Do the copy.
- Label loop, remainder, done;
-
// Early out for valid zero-length retrievals.
__ b(&done, EQ);
+ // src range to copy.
+ __ add(src_ptr, srcObj, ShifterOperand(value_offset));
+ Label compressed_string_preloop;
+ if (mirror::kUseStringCompression) {
+ // Location of count in string.
+ const uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
+ // String's length.
+ __ ldr(IP, Address(srcObj, count_offset));
+ __ cmp(IP, ShifterOperand(0));
+ __ b(&compressed_string_preloop, LT);
+ }
+ __ add(src_ptr, src_ptr, ShifterOperand(srcBegin, LSL, 1));
+
+ // Do the copy.
+ Label loop, remainder;
+
// Save repairing the value of num_chr on the < 4 character path.
__ subs(IP, num_chr, ShifterOperand(4));
__ b(&remainder, LT);
@@ -2374,6 +2498,20 @@
__ subs(num_chr, num_chr, ShifterOperand(1));
__ strh(IP, Address(dst_ptr, char_size, Address::PostIndex));
__ b(&remainder, GT);
+ __ b(&done);
+
+ if (mirror::kUseStringCompression) {
+ const size_t c_char_size = Primitive::ComponentSize(Primitive::kPrimByte);
+ DCHECK_EQ(c_char_size, 1u);
+ // Copy loop for compressed src, copying 1 character (8-bit) to (16-bit) at a time.
+ __ Bind(&compressed_string_preloop);
+ __ add(src_ptr, src_ptr, ShifterOperand(srcBegin));
+ __ Bind(&compressed_string_loop);
+ __ ldrb(IP, Address(src_ptr, c_char_size, Address::PostIndex));
+ __ strh(IP, Address(dst_ptr, char_size, Address::PostIndex));
+ __ subs(num_chr, num_chr, ShifterOperand(1));
+ __ b(&compressed_string_loop, GT);
+ }
__ Bind(&done);
}
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index ce58657..e2c1802 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1223,6 +1223,11 @@
locations->AddTemp(Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
+ // Need temporary registers for String compression's feature.
+ if (mirror::kUseStringCompression) {
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ }
locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
}
@@ -1239,10 +1244,16 @@
Register temp0 = WRegisterFrom(locations->GetTemp(0));
Register temp1 = WRegisterFrom(locations->GetTemp(1));
Register temp2 = WRegisterFrom(locations->GetTemp(2));
+ Register temp3, temp5;
+ if (mirror::kUseStringCompression) {
+ temp3 = WRegisterFrom(locations->GetTemp(3));
+ temp5 = WRegisterFrom(locations->GetTemp(4));
+ }
vixl::aarch64::Label loop;
vixl::aarch64::Label find_char_diff;
vixl::aarch64::Label end;
+ vixl::aarch64::Label different_compression;
// Get offsets of count and value fields within a string object.
const int32_t count_offset = mirror::String::CountOffset().Int32Value();
@@ -1263,9 +1274,18 @@
// Reference equality check, return 0 if same reference.
__ Subs(out, str, arg);
__ B(&end, eq);
- // Load lengths of this and argument strings.
- __ Ldr(temp0, HeapOperand(str, count_offset));
- __ Ldr(temp1, HeapOperand(arg, count_offset));
+ if (mirror::kUseStringCompression) {
+ // Load lengths of this and argument strings.
+ __ Ldr(temp3, HeapOperand(str, count_offset));
+ __ Ldr(temp5, HeapOperand(arg, count_offset));
+ // Clean out compression flag from lengths.
+ __ Bic(temp0, temp3, Operand(static_cast<int32_t>(0x80000000)));
+ __ Bic(temp1, temp5, Operand(static_cast<int32_t>(0x80000000)));
+ } else {
+ // Load lengths of this and argument strings.
+ __ Ldr(temp0, HeapOperand(str, count_offset));
+ __ Ldr(temp1, HeapOperand(arg, count_offset));
+ }
// Return zero if both strings are empty.
__ Orr(out, temp0, temp1);
__ Cbz(out, &end);
@@ -1276,8 +1296,22 @@
// Shorter string is empty?
__ Cbz(temp2, &end);
+ if (mirror::kUseStringCompression) {
+ // Check if both strings using same compression style to use this comparison loop.
+ __ Eor(temp3.W(), temp3, Operand(temp5));
+ __ Tbnz(temp3.W(), kWRegSize - 1, &different_compression);
+ }
// Store offset of string value in preparation for comparison loop.
__ Mov(temp1, value_offset);
+ if (mirror::kUseStringCompression) {
+ // For string compression, calculate the number of bytes to compare (not chars).
+ // This could in theory exceed INT32_MAX, so treat temp2 as unsigned.
+ vixl::aarch64::Label let_it_signed;
+ __ Cmp(temp5, Operand(0));
+ __ B(lt, &let_it_signed);
+ __ Add(temp2, temp2, Operand(temp2));
+ __ Bind(&let_it_signed);
+ }
UseScratchRegisterScope scratch_scope(masm);
Register temp4 = scratch_scope.AcquireX();
@@ -1299,29 +1333,90 @@
__ Cmp(temp4, temp0);
__ B(ne, &find_char_diff);
__ Add(temp1, temp1, char_size * 4);
- __ Subs(temp2, temp2, 4);
- __ B(gt, &loop);
+ // With string compression, we have compared 8 bytes, otherwise 4 chars.
+ __ Subs(temp2, temp2, (mirror::kUseStringCompression) ? 8 : 4);
+ __ B(hi, &loop);
__ B(&end);
// Promote temp1 to an X reg, ready for EOR.
temp1 = temp1.X();
- // Find the single 16-bit character difference.
+ // Find the single character difference.
__ Bind(&find_char_diff);
// Get the bit position of the first character that differs.
__ Eor(temp1, temp0, temp4);
__ Rbit(temp1, temp1);
__ Clz(temp1, temp1);
- // If the number of 16-bit chars remaining <= the index where the difference occurs (0-3), then
+ // If the number of chars remaining <= the index where the difference occurs (0-3), then
// the difference occurs outside the remaining string data, so just return length diff (out).
- __ Cmp(temp2, Operand(temp1.W(), LSR, 4));
- __ B(le, &end);
+ // Unlike ARM, we're doing the comparison in one go here, without the subtraction at the
+ // find_char_diff_2nd_cmp path, so it doesn't matter whether the comparison is signed or
+ // unsigned when string compression is disabled.
+ // When it's enabled, the comparison must be unsigned.
+ __ Cmp(temp2, Operand(temp1.W(), LSR, (mirror::kUseStringCompression) ? 3 : 4));
+ __ B(ls, &end);
// Extract the characters and calculate the difference.
+ vixl::aarch64::Label uncompressed_string, continue_process;
+ if (mirror:: kUseStringCompression) {
+ __ Tbz(temp5, kWRegSize - 1, &uncompressed_string);
+ __ Bic(temp1, temp1, 0x7);
+ __ B(&continue_process);
+ }
+ __ Bind(&uncompressed_string);
__ Bic(temp1, temp1, 0xf);
+ __ Bind(&continue_process);
+
__ Lsr(temp0, temp0, temp1);
__ Lsr(temp4, temp4, temp1);
+ vixl::aarch64::Label uncompressed_string_extract_chars;
+ if (mirror::kUseStringCompression) {
+ __ Tbz(temp5, kWRegSize - 1, &uncompressed_string_extract_chars);
+ __ And(temp4, temp4, 0xff);
+ __ Sub(out, temp4.W(), Operand(temp0.W(), UXTB));
+ __ B(&end);
+ }
+ __ Bind(&uncompressed_string_extract_chars);
__ And(temp4, temp4, 0xffff);
__ Sub(out, temp4.W(), Operand(temp0.W(), UXTH));
+ __ B(&end);
+
+ if (mirror::kUseStringCompression) {
+ vixl::aarch64::Label loop_this_compressed, loop_arg_compressed, find_diff;
+ const size_t c_char_size = Primitive::ComponentSize(Primitive::kPrimByte);
+ DCHECK_EQ(c_char_size, 1u);
+ temp0 = temp0.W();
+ temp1 = temp1.W();
+ // Comparison for different compression style.
+ // This part is when THIS is compressed and ARG is not.
+ __ Bind(&different_compression);
+ __ Add(temp0, str, Operand(value_offset));
+ __ Add(temp1, arg, Operand(value_offset));
+ __ Cmp(temp5, Operand(0));
+ __ B(lt, &loop_arg_compressed);
+
+ __ Bind(&loop_this_compressed);
+ __ Ldrb(temp3, MemOperand(temp0.X(), c_char_size, PostIndex));
+ __ Ldrh(temp5, MemOperand(temp1.X(), char_size, PostIndex));
+ __ Cmp(temp3, Operand(temp5));
+ __ B(ne, &find_diff);
+ __ Subs(temp2, temp2, 1);
+ __ B(gt, &loop_this_compressed);
+ __ B(&end);
+
+ // This part is when THIS is not compressed and ARG is.
+ __ Bind(&loop_arg_compressed);
+ __ Ldrh(temp3, MemOperand(temp0.X(), char_size, PostIndex));
+ __ Ldrb(temp5, MemOperand(temp1.X(), c_char_size, PostIndex));
+ __ Cmp(temp3, Operand(temp5));
+ __ B(ne, &find_diff);
+ __ Subs(temp2, temp2, 1);
+ __ B(gt, &loop_arg_compressed);
+ __ B(&end);
+
+ // Calculate the difference.
+ __ Bind(&find_diff);
+ __ Sub(out, temp3.W(), Operand(temp5.W(), UXTH));
+ }
__ Bind(&end);
@@ -1356,7 +1451,7 @@
Register temp1 = WRegisterFrom(locations->GetTemp(0));
Register temp2 = WRegisterFrom(locations->GetTemp(1));
- vixl::aarch64::Label loop;
+ vixl::aarch64::Label loop, preloop;
vixl::aarch64::Label end;
vixl::aarch64::Label return_true;
vixl::aarch64::Label return_false;
@@ -1394,22 +1489,37 @@
__ Ldr(temp, MemOperand(str.X(), count_offset));
__ Ldr(temp1, MemOperand(arg.X(), count_offset));
// Check if lengths are equal, return false if they're not.
+ // Also compares the compression style; if it differs, return false.
__ Cmp(temp, temp1);
__ B(&return_false, ne);
- // Store offset of string value in preparation for comparison loop
- __ Mov(temp1, value_offset);
// Return true if both strings are empty.
+ if (mirror::kUseStringCompression) {
+ // Length needs to be masked out first because 0 is treated as compressed.
+ __ Bic(temp, temp, Operand(static_cast<int32_t>(0x80000000)));
+ }
__ Cbz(temp, &return_true);
// Assertions that must hold in order to compare strings 4 characters at a time.
DCHECK_ALIGNED(value_offset, 8);
static_assert(IsAligned<8>(kObjectAlignment), "String of odd length is not zero padded");
+ if (mirror::kUseStringCompression) {
+ // If not compressed, directly to fast compare. Else do preprocess on length.
+ __ Cmp(temp1, Operand(0));
+ __ B(&preloop, gt);
+ // Mask out compression flag and adjust length for compressed string (8-bit)
+ // as if it is a 16-bit data, new_length = (length + 1) / 2
+ __ Add(temp, temp, 1);
+ __ Lsr(temp, temp, 1);
+ }
+
temp1 = temp1.X();
temp2 = temp2.X();
-
// Loop to compare strings 4 characters at a time starting at the beginning of the string.
// Ok to do this because strings are zero-padded to be 8-byte aligned.
+ // Store offset of string value in preparation for comparison loop
+ __ Bind(&preloop);
+ __ Mov(temp1, value_offset);
__ Bind(&loop);
__ Ldr(out, MemOperand(str.X(), temp1));
__ Ldr(temp2, MemOperand(arg.X(), temp1));
@@ -1773,6 +1883,10 @@
locations->AddTemp(Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
+ // Need temporary register for String compression feature.
+ if (mirror::kUseStringCompression) {
+ locations->AddTemp(Location::RequiresRegister());
+ }
}
void IntrinsicCodeGeneratorARM64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
@@ -1800,29 +1914,41 @@
Register src_ptr = XRegisterFrom(locations->GetTemp(0));
Register num_chr = XRegisterFrom(locations->GetTemp(1));
Register tmp1 = XRegisterFrom(locations->GetTemp(2));
+ Register tmp3;
+ if (mirror::kUseStringCompression) {
+ tmp3 = WRegisterFrom(locations->GetTemp(3));
+ }
UseScratchRegisterScope temps(masm);
Register dst_ptr = temps.AcquireX();
Register tmp2 = temps.AcquireX();
- // src address to copy from.
- __ Add(src_ptr, srcObj, Operand(value_offset));
- __ Add(src_ptr, src_ptr, Operand(srcBegin, LSL, 1));
+ vixl::aarch64::Label done;
+ vixl::aarch64::Label compressed_string_loop;
+ __ Sub(num_chr, srcEnd, srcBegin);
+ // Early out for valid zero-length retrievals.
+ __ Cbz(num_chr, &done);
// dst address start to copy to.
__ Add(dst_ptr, dstObj, Operand(data_offset));
__ Add(dst_ptr, dst_ptr, Operand(dstBegin, LSL, 1));
- __ Sub(num_chr, srcEnd, srcBegin);
+ // src address to copy from.
+ __ Add(src_ptr, srcObj, Operand(value_offset));
+ vixl::aarch64::Label compressed_string_preloop;
+ if (mirror::kUseStringCompression) {
+ // Location of count in string.
+ const uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
+ // String's length.
+ __ Ldr(tmp3, MemOperand(srcObj, count_offset));
+ __ Tbnz(tmp3, kWRegSize - 1, &compressed_string_preloop);
+ }
+ __ Add(src_ptr, src_ptr, Operand(srcBegin, LSL, 1));
// Do the copy.
vixl::aarch64::Label loop;
- vixl::aarch64::Label done;
vixl::aarch64::Label remainder;
- // Early out for valid zero-length retrievals.
- __ Cbz(num_chr, &done);
-
// Save repairing the value of num_chr on the < 8 character path.
__ Subs(tmp1, num_chr, 8);
__ B(lt, &remainder);
@@ -1848,6 +1974,20 @@
__ Subs(num_chr, num_chr, 1);
__ Strh(tmp1, MemOperand(dst_ptr, char_size, PostIndex));
__ B(gt, &remainder);
+ __ B(&done);
+
+ if (mirror::kUseStringCompression) {
+ const size_t c_char_size = Primitive::ComponentSize(Primitive::kPrimByte);
+ DCHECK_EQ(c_char_size, 1u);
+ __ Bind(&compressed_string_preloop);
+ __ Add(src_ptr, src_ptr, Operand(srcBegin));
+ // Copy loop for compressed src, copying 1 character (8-bit) to (16-bit) at a time.
+ __ Bind(&compressed_string_loop);
+ __ Ldrb(tmp1, MemOperand(src_ptr, c_char_size, PostIndex));
+ __ Strh(tmp1, MemOperand(dst_ptr, char_size, PostIndex));
+ __ Subs(num_chr, num_chr, Operand(1));
+ __ B(gt, &compressed_string_loop);
+ }
__ Bind(&done);
}
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index e61aba0..f41e4d9 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -1401,23 +1401,39 @@
__ cmpl(str, arg);
__ j(kEqual, &return_true);
- // Load length of receiver string.
+ // Load length and compression flag of receiver string.
__ movl(ecx, Address(str, count_offset));
- // Check if lengths are equal, return false if they're not.
+ // Check if lengths and compression flags are equal, return false if they're not.
+ // Two identical strings will always have same compression style since
+ // compression style is decided on alloc.
__ cmpl(ecx, Address(arg, count_offset));
__ j(kNotEqual, &return_false);
- // Return true if both strings are empty.
- __ jecxz(&return_true);
+ if (mirror::kUseStringCompression) {
+ NearLabel string_uncompressed;
+ // Differ cases into both compressed or both uncompressed. Different compression style
+ // is cut above.
+ __ cmpl(ecx, Immediate(0));
+ __ j(kGreaterEqual, &string_uncompressed);
+ // Divide string length by 2, rounding up, and continue as if uncompressed.
+ // Merge clearing the compression flag (+0x80000000) with +1 for rounding.
+ __ addl(ecx, Immediate(0x80000001));
+ __ shrl(ecx, Immediate(1));
+ __ Bind(&string_uncompressed);
+ }
+ // Return true if strings are empty.
+ __ jecxz(&return_true);
// Load starting addresses of string values into ESI/EDI as required for repe_cmpsl instruction.
__ leal(esi, Address(str, value_offset));
__ leal(edi, Address(arg, value_offset));
- // Divide string length by 2 to compare characters 2 at a time and adjust for odd lengths.
+ // Divide string length by 2 to compare characters 2 at a time and adjust for lengths not
+ // divisible by 2.
__ addl(ecx, Immediate(1));
__ shrl(ecx, Immediate(1));
- // Assertions that must hold in order to compare strings 2 characters at a time.
+ // Assertions that must hold in order to compare strings 2 characters (uncompressed)
+ // or 4 characters (compressed) at a time.
DCHECK_ALIGNED(value_offset, 4);
static_assert(IsAligned<4>(kObjectAlignment), "String of odd length is not zero padded");
@@ -1461,6 +1477,10 @@
locations->AddTemp(Location::RegisterLocation(ECX));
// Need another temporary to be able to compute the result.
locations->AddTemp(Location::RequiresRegister());
+ if (mirror::kUseStringCompression) {
+ // Need another temporary to be able to save unflagged string length.
+ locations->AddTemp(Location::RequiresRegister());
+ }
}
static void GenerateStringIndexOf(HInvoke* invoke,
@@ -1478,6 +1498,8 @@
Register counter = locations->GetTemp(0).AsRegister<Register>();
Register string_length = locations->GetTemp(1).AsRegister<Register>();
Register out = locations->Out().AsRegister<Register>();
+ // Only used when string compression feature is on.
+ Register string_length_flagged;
// Check our assumptions for registers.
DCHECK_EQ(string_obj, EDI);
@@ -1515,6 +1537,12 @@
// Load string length, i.e., the count field of the string.
__ movl(string_length, Address(string_obj, count_offset));
+ if (mirror::kUseStringCompression) {
+ string_length_flagged = locations->GetTemp(2).AsRegister<Register>();
+ __ movl(string_length_flagged, string_length);
+ // Mask out first bit used as compression flag.
+ __ andl(string_length, Immediate(INT32_MAX));
+ }
// Do a zero-length check.
// TODO: Support jecxz.
@@ -1540,20 +1568,50 @@
__ cmpl(start_index, Immediate(0));
__ cmovl(kGreater, counter, start_index);
- // Move to the start of the string: string_obj + value_offset + 2 * start_index.
- __ leal(string_obj, Address(string_obj, counter, ScaleFactor::TIMES_2, value_offset));
+ if (mirror::kUseStringCompression) {
+ NearLabel modify_counter, offset_uncompressed_label;
+ __ cmpl(string_length_flagged, Immediate(0));
+ __ j(kGreaterEqual, &offset_uncompressed_label);
+ // Move to the start of the string: string_obj + value_offset + start_index.
+ __ leal(string_obj, Address(string_obj, counter, ScaleFactor::TIMES_1, value_offset));
+ __ jmp(&modify_counter);
- // Now update ecx (the repne scasw work counter). We have string.length - start_index left to
- // compare.
+ // Move to the start of the string: string_obj + value_offset + 2 * start_index.
+ __ Bind(&offset_uncompressed_label);
+ __ leal(string_obj, Address(string_obj, counter, ScaleFactor::TIMES_2, value_offset));
+
+ // Now update ecx (the repne scasw work counter). We have string.length - start_index left to
+ // compare.
+ __ Bind(&modify_counter);
+ } else {
+ __ leal(string_obj, Address(string_obj, counter, ScaleFactor::TIMES_2, value_offset));
+ }
__ negl(counter);
__ leal(counter, Address(string_length, counter, ScaleFactor::TIMES_1, 0));
}
- // Everything is set up for repne scasw:
- // * Comparison address in EDI.
- // * Counter in ECX.
- __ repne_scasw();
+ if (mirror::kUseStringCompression) {
+ NearLabel uncompressed_string_comparison;
+ NearLabel comparison_done;
+ __ cmpl(string_length_flagged, Immediate(0));
+ __ j(kGreater, &uncompressed_string_comparison);
+ // Check if EAX (search_value) is ASCII.
+ __ cmpl(search_value, Immediate(127));
+ __ j(kGreater, ¬_found_label);
+ // Comparing byte-per-byte.
+ __ repne_scasb();
+ __ jmp(&comparison_done);
+
+ // Everything is set up for repne scasw:
+ // * Comparison address in EDI.
+ // * Counter in ECX.
+ __ Bind(&uncompressed_string_comparison);
+ __ repne_scasw();
+ __ Bind(&comparison_done);
+ } else {
+ __ repne_scasw();
+ }
// Did we find a match?
__ j(kNotEqual, ¬_found_label);
@@ -1706,38 +1764,64 @@
const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar);
DCHECK_EQ(char_size, 2u);
- // Compute the address of the destination buffer.
- __ leal(EDI, Address(dst, dstBegin, ScaleFactor::TIMES_2, data_offset));
-
- // Compute the address of the source string.
- if (srcBegin.IsConstant()) {
- // Compute the address of the source string by adding the number of chars from
- // the source beginning to the value offset of a string.
- __ leal(ESI, Address(obj, srcBegin_value * char_size + value_offset));
- } else {
- __ leal(ESI, Address(obj, srcBegin.AsRegister<Register>(),
- ScaleFactor::TIMES_2, value_offset));
- }
-
// Compute the number of chars (words) to move.
- // Now is the time to save ECX, since we don't know if it will be used later.
+ // Save ECX, since we don't know if it will be used later.
__ pushl(ECX);
int stack_adjust = kX86WordSize;
__ cfi().AdjustCFAOffset(stack_adjust);
DCHECK_EQ(srcEnd, ECX);
if (srcBegin.IsConstant()) {
- if (srcBegin_value != 0) {
- __ subl(ECX, Immediate(srcBegin_value));
- }
+ __ subl(ECX, Immediate(srcBegin_value));
} else {
DCHECK(srcBegin.IsRegister());
__ subl(ECX, srcBegin.AsRegister<Register>());
}
- // Do the move.
+ NearLabel done;
+ if (mirror::kUseStringCompression) {
+ // Location of count in string
+ const uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
+ const size_t c_char_size = Primitive::ComponentSize(Primitive::kPrimByte);
+ DCHECK_EQ(c_char_size, 1u);
+ __ pushl(EAX);
+ __ cfi().AdjustCFAOffset(stack_adjust);
+
+ NearLabel copy_loop, copy_uncompressed;
+ __ cmpl(Address(obj, count_offset), Immediate(0));
+ __ j(kGreaterEqual, ©_uncompressed);
+ // Compute the address of the source string by adding the number of chars from
+ // the source beginning to the value offset of a string.
+ __ leal(ESI, CodeGeneratorX86::ArrayAddress(obj, srcBegin, TIMES_1, value_offset));
+
+ // Start the loop to copy String's value to Array of Char.
+ __ leal(EDI, Address(dst, dstBegin, ScaleFactor::TIMES_2, data_offset));
+ __ Bind(©_loop);
+ __ jecxz(&done);
+ // Use EAX temporary (convert byte from ESI to word).
+ // TODO: Use LODSB/STOSW (not supported by X86Assembler) with AH initialized to 0.
+ __ movzxb(EAX, Address(ESI, 0));
+ __ movw(Address(EDI, 0), EAX);
+ __ leal(EDI, Address(EDI, char_size));
+ __ leal(ESI, Address(ESI, c_char_size));
+ // TODO: Add support for LOOP to X86Assembler.
+ __ subl(ECX, Immediate(1));
+ __ jmp(©_loop);
+ __ Bind(©_uncompressed);
+ }
+
+ // Do the copy for uncompressed string.
+ // Compute the address of the destination buffer.
+ __ leal(EDI, Address(dst, dstBegin, ScaleFactor::TIMES_2, data_offset));
+ __ leal(ESI, CodeGeneratorX86::ArrayAddress(obj, srcBegin, TIMES_2, value_offset));
__ rep_movsw();
- // And restore ECX.
+ __ Bind(&done);
+ if (mirror::kUseStringCompression) {
+ // Restore EAX.
+ __ popl(EAX);
+ __ cfi().AdjustCFAOffset(-stack_adjust);
+ }
+ // Restore ECX.
__ popl(ECX);
__ cfi().AdjustCFAOffset(-stack_adjust);
}
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 0f31fab..4b0afca 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -1568,14 +1568,27 @@
__ cmpl(str, arg);
__ j(kEqual, &return_true);
- // Load length of receiver string.
+ // Load length and compression flag of receiver string.
__ movl(rcx, Address(str, count_offset));
- // Check if lengths are equal, return false if they're not.
+ // Check if lengths and compression flags are equal, return false if they're not.
+ // Two identical strings will always have same compression style since
+ // compression style is decided on alloc.
__ cmpl(rcx, Address(arg, count_offset));
__ j(kNotEqual, &return_false);
+
+ if (mirror::kUseStringCompression) {
+ NearLabel string_uncompressed;
+ // Both strings are compressed.
+ __ cmpl(rcx, Immediate(0));
+ __ j(kGreaterEqual, &string_uncompressed);
+ // Divide string length by 2, rounding up, and continue as if uncompressed.
+ // Merge clearing the compression flag with +1 for rounding.
+ __ addl(rcx, Immediate(static_cast<int32_t>(0x80000001)));
+ __ shrl(rcx, Immediate(1));
+ __ Bind(&string_uncompressed);
+ }
// Return true if both strings are empty.
__ jrcxz(&return_true);
-
// Load starting addresses of string values into RSI/RDI as required for repe_cmpsq instruction.
__ leal(rsi, Address(str, value_offset));
__ leal(rdi, Address(arg, value_offset));
@@ -1584,7 +1597,8 @@
__ addl(rcx, Immediate(3));
__ shrl(rcx, Immediate(2));
- // Assertions that must hold in order to compare strings 4 characters at a time.
+ // Assertions that must hold in order to compare strings 4 characters (uncompressed)
+ // or 8 characters (compressed) at a time.
DCHECK_ALIGNED(value_offset, 8);
static_assert(IsAligned<8>(kObjectAlignment), "String is not zero padded");
@@ -1674,7 +1688,8 @@
__ j(kAbove, slow_path->GetEntryLabel());
}
- // From here down, we know that we are looking for a char that fits in 16 bits.
+ // From here down, we know that we are looking for a char that fits in
+ // 16 bits (uncompressed) or 8 bits (compressed).
// Location of reference to data array within the String object.
int32_t value_offset = mirror::String::ValueOffset().Int32Value();
// Location of count within the String object.
@@ -1682,6 +1697,12 @@
// Load string length, i.e., the count field of the string.
__ movl(string_length, Address(string_obj, count_offset));
+ if (mirror::kUseStringCompression) {
+ // Use TMP to keep string_length_flagged.
+ __ movl(CpuRegister(TMP), string_length);
+ // Mask out first bit used as compression flag.
+ __ andl(string_length, Immediate(INT32_MAX));
+ }
// Do a length check.
// TODO: Support jecxz.
@@ -1692,7 +1713,6 @@
if (start_at_zero) {
// Number of chars to scan is the same as the string length.
__ movl(counter, string_length);
-
// Move to the start of the string.
__ addq(string_obj, Immediate(value_offset));
} else {
@@ -1707,19 +1727,44 @@
__ cmpl(start_index, Immediate(0));
__ cmov(kGreater, counter, start_index, /* is64bit */ false); // 32-bit copy is enough.
- // Move to the start of the string: string_obj + value_offset + 2 * start_index.
- __ leaq(string_obj, Address(string_obj, counter, ScaleFactor::TIMES_2, value_offset));
-
+ if (mirror::kUseStringCompression) {
+ NearLabel modify_counter, offset_uncompressed_label;
+ __ cmpl(CpuRegister(TMP), Immediate(0));
+ __ j(kGreaterEqual, &offset_uncompressed_label);
+ __ leaq(string_obj, Address(string_obj, counter, ScaleFactor::TIMES_1, value_offset));
+ __ jmp(&modify_counter);
+ // Move to the start of the string: string_obj + value_offset + 2 * start_index.
+ __ Bind(&offset_uncompressed_label);
+ __ leaq(string_obj, Address(string_obj, counter, ScaleFactor::TIMES_2, value_offset));
+ __ Bind(&modify_counter);
+ } else {
+ __ leaq(string_obj, Address(string_obj, counter, ScaleFactor::TIMES_2, value_offset));
+ }
// Now update ecx, the work counter: it's gonna be string.length - start_index.
__ negq(counter); // Needs to be 64-bit negation, as the address computation is 64-bit.
__ leaq(counter, Address(string_length, counter, ScaleFactor::TIMES_1, 0));
}
- // Everything is set up for repne scasw:
- // * Comparison address in RDI.
- // * Counter in ECX.
- __ repne_scasw();
-
+ if (mirror::kUseStringCompression) {
+ NearLabel uncompressed_string_comparison;
+ NearLabel comparison_done;
+ __ cmpl(CpuRegister(TMP), Immediate(0));
+ __ j(kGreater, &uncompressed_string_comparison);
+ // Check if RAX (search_value) is ASCII.
+ __ cmpl(search_value, Immediate(127));
+ __ j(kGreater, ¬_found_label);
+ // Comparing byte-per-byte.
+ __ repne_scasb();
+ __ jmp(&comparison_done);
+ // Everything is set up for repne scasw:
+ // * Comparison address in RDI.
+ // * Counter in ECX.
+ __ Bind(&uncompressed_string_comparison);
+ __ repne_scasw();
+ __ Bind(&comparison_done);
+ } else {
+ __ repne_scasw();
+ }
// Did we find a match?
__ j(kNotEqual, ¬_found_label);
@@ -1871,32 +1916,54 @@
const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar);
DCHECK_EQ(char_size, 2u);
- // Compute the address of the destination buffer.
- __ leaq(CpuRegister(RDI), Address(dst, dstBegin, ScaleFactor::TIMES_2, data_offset));
-
- // Compute the address of the source string.
- if (srcBegin.IsConstant()) {
- // Compute the address of the source string by adding the number of chars from
- // the source beginning to the value offset of a string.
- __ leaq(CpuRegister(RSI), Address(obj, srcBegin_value * char_size + value_offset));
- } else {
- __ leaq(CpuRegister(RSI), Address(obj, srcBegin.AsRegister<CpuRegister>(),
- ScaleFactor::TIMES_2, value_offset));
- }
-
+ NearLabel done;
// Compute the number of chars (words) to move.
__ movl(CpuRegister(RCX), srcEnd);
if (srcBegin.IsConstant()) {
- if (srcBegin_value != 0) {
- __ subl(CpuRegister(RCX), Immediate(srcBegin_value));
- }
+ __ subl(CpuRegister(RCX), Immediate(srcBegin_value));
} else {
DCHECK(srcBegin.IsRegister());
__ subl(CpuRegister(RCX), srcBegin.AsRegister<CpuRegister>());
}
+ if (mirror::kUseStringCompression) {
+ NearLabel copy_uncompressed, copy_loop;
+ const size_t c_char_size = Primitive::ComponentSize(Primitive::kPrimByte);
+ DCHECK_EQ(c_char_size, 1u);
+ // Location of count in string.
+ const uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
+ __ cmpl(Address(obj, count_offset), Immediate(0));
+ __ j(kGreaterEqual, ©_uncompressed);
+ // Compute the address of the source string by adding the number of chars from
+ // the source beginning to the value offset of a string.
+ __ leaq(CpuRegister(RSI),
+ CodeGeneratorX86_64::ArrayAddress(obj, srcBegin, TIMES_1, value_offset));
+ // Start the loop to copy String's value to Array of Char.
+ __ leaq(CpuRegister(RDI), Address(dst, dstBegin, ScaleFactor::TIMES_2, data_offset));
+
+ __ Bind(©_loop);
+ __ jrcxz(&done);
+ // Use TMP as temporary (convert byte from RSI to word).
+ // TODO: Selecting RAX as the temporary and using LODSB/STOSW.
+ __ movzxb(CpuRegister(TMP), Address(CpuRegister(RSI), 0));
+ __ movw(Address(CpuRegister(RDI), 0), CpuRegister(TMP));
+ __ leaq(CpuRegister(RDI), Address(CpuRegister(RDI), char_size));
+ __ leaq(CpuRegister(RSI), Address(CpuRegister(RSI), c_char_size));
+ // TODO: Add support for LOOP to X86_64Assembler.
+ __ subl(CpuRegister(RCX), Immediate(1));
+ __ jmp(©_loop);
+
+ __ Bind(©_uncompressed);
+ }
+
+ __ leaq(CpuRegister(RSI),
+ CodeGeneratorX86_64::ArrayAddress(obj, srcBegin, TIMES_2, value_offset));
+ // Compute the address of the destination buffer.
+ __ leaq(CpuRegister(RDI), Address(dst, dstBegin, ScaleFactor::TIMES_2, data_offset));
// Do the move.
__ rep_movsw();
+
+ __ Bind(&done);
}
static void GenPeek(LocationSummary* locations, Primitive::Type size, X86_64Assembler* assembler) {
diff --git a/dex2oat/Android.bp b/dex2oat/Android.bp
index 931bbd3..05a5d0f 100644
--- a/dex2oat/Android.bp
+++ b/dex2oat/Android.bp
@@ -31,6 +31,7 @@
// SANITIZE_TARGET mode.
// Bug: 22233158
address: false,
+ coverage: false,
},
},
},
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index f3f418d..be5224b 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -45,6 +45,7 @@
#include "image-inl.h"
#include "imtable-inl.h"
#include "indenter.h"
+#include "interpreter/unstarted_runtime.h"
#include "linker/buffered_output_stream.h"
#include "linker/file_output_stream.h"
#include "mirror/array-inl.h"
@@ -2445,39 +2446,55 @@
return EXIT_SUCCESS;
}
-static int DumpOatWithRuntime(Runtime* runtime, OatFile* oat_file, OatDumperOptions* options,
- std::ostream* os) {
- CHECK(runtime != nullptr && oat_file != nullptr && options != nullptr);
-
+static jobject InstallOatFile(Runtime* runtime,
+ std::unique_ptr<OatFile> oat_file,
+ std::vector<const DexFile*>* class_path)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Thread* self = Thread::Current();
CHECK(self != nullptr);
// Need well-known-classes.
WellKnownClasses::Init(self->GetJniEnv());
// Need to register dex files to get a working dex cache.
- ScopedObjectAccess soa(self);
+ OatFile* oat_file_ptr = oat_file.get();
ClassLinker* class_linker = runtime->GetClassLinker();
- runtime->GetOatFileManager().RegisterOatFile(std::unique_ptr<const OatFile>(oat_file));
- std::vector<const DexFile*> class_path;
- for (const OatFile::OatDexFile* odf : oat_file->GetOatDexFiles()) {
+ runtime->GetOatFileManager().RegisterOatFile(std::move(oat_file));
+ for (const OatFile::OatDexFile* odf : oat_file_ptr->GetOatDexFiles()) {
std::string error_msg;
const DexFile* const dex_file = OpenDexFile(odf, &error_msg);
CHECK(dex_file != nullptr) << error_msg;
class_linker->RegisterDexFile(*dex_file, nullptr);
- class_path.push_back(dex_file);
+ class_path->push_back(dex_file);
}
- // Need a class loader.
- // Fake that we're a compiler.
- jobject class_loader = class_linker->CreatePathClassLoader(self, class_path);
+ // Need a class loader. Fake that we're a compiler.
+ // Note: this will run initializers through the unstarted runtime, so make sure it's
+ // initialized.
+ interpreter::UnstartedRuntime::Initialize();
+
+ jobject class_loader = class_linker->CreatePathClassLoader(self, *class_path);
+
+ return class_loader;
+}
+
+static int DumpOatWithRuntime(Runtime* runtime,
+ std::unique_ptr<OatFile> oat_file,
+ OatDumperOptions* options,
+ std::ostream* os) {
+ CHECK(runtime != nullptr && oat_file != nullptr && options != nullptr);
+ ScopedObjectAccess soa(Thread::Current());
+
+ OatFile* oat_file_ptr = oat_file.get();
+ std::vector<const DexFile*> class_path;
+ jobject class_loader = InstallOatFile(runtime, std::move(oat_file), &class_path);
// Use the class loader while dumping.
- StackHandleScope<1> scope(self);
+ StackHandleScope<1> scope(soa.Self());
Handle<mirror::ClassLoader> loader_handle = scope.NewHandle(
soa.Decode<mirror::ClassLoader>(class_loader));
options->class_loader_ = &loader_handle;
- OatDumper oat_dumper(*oat_file, *options);
+ OatDumper oat_dumper(*oat_file_ptr, *options);
bool success = oat_dumper.Dump(*os);
return (success) ? EXIT_SUCCESS : EXIT_FAILURE;
}
@@ -2496,23 +2513,23 @@
static int DumpOat(Runtime* runtime, const char* oat_filename, OatDumperOptions* options,
std::ostream* os) {
std::string error_msg;
- OatFile* oat_file = OatFile::Open(oat_filename,
- oat_filename,
- nullptr,
- nullptr,
- false,
- /*low_4gb*/false,
- nullptr,
- &error_msg);
+ std::unique_ptr<OatFile> oat_file(OatFile::Open(oat_filename,
+ oat_filename,
+ nullptr,
+ nullptr,
+ false,
+ /*low_4gb*/false,
+ nullptr,
+ &error_msg));
if (oat_file == nullptr) {
fprintf(stderr, "Failed to open oat file from '%s': %s\n", oat_filename, error_msg.c_str());
return EXIT_FAILURE;
}
if (runtime != nullptr) {
- return DumpOatWithRuntime(runtime, oat_file, options, os);
+ return DumpOatWithRuntime(runtime, std::move(oat_file), options, os);
} else {
- return DumpOatWithoutRuntime(oat_file, options, os);
+ return DumpOatWithoutRuntime(oat_file.get(), options, os);
}
}
@@ -2551,7 +2568,56 @@
class IMTDumper {
public:
- static bool DumpImt(Runtime* runtime, const std::string& imt_file) {
+ static bool Dump(Runtime* runtime,
+ const std::string& imt_file,
+ bool dump_imt_stats,
+ const char* oat_filename) {
+ Thread* self = Thread::Current();
+
+ ScopedObjectAccess soa(self);
+ StackHandleScope<1> scope(self);
+ MutableHandle<mirror::ClassLoader> class_loader = scope.NewHandle<mirror::ClassLoader>(nullptr);
+ std::vector<const DexFile*> class_path;
+
+ if (oat_filename != nullptr) {
+ std::string error_msg;
+ std::unique_ptr<OatFile> oat_file(OatFile::Open(oat_filename,
+ oat_filename,
+ nullptr,
+ nullptr,
+ false,
+ /*low_4gb*/false,
+ nullptr,
+ &error_msg));
+ if (oat_file == nullptr) {
+ fprintf(stderr, "Failed to open oat file from '%s': %s\n", oat_filename, error_msg.c_str());
+ return false;
+ }
+
+ class_loader.Assign(soa.Decode<mirror::ClassLoader>(
+ InstallOatFile(runtime, std::move(oat_file), &class_path)));
+ } else {
+ class_loader.Assign(nullptr); // Boot classloader. Just here for explicit documentation.
+ class_path = runtime->GetClassLinker()->GetBootClassPath();
+ }
+
+ if (!imt_file.empty()) {
+ return DumpImt(runtime, imt_file, class_loader);
+ }
+
+ if (dump_imt_stats) {
+ return DumpImtStats(runtime, class_path, class_loader);
+ }
+
+ LOG(FATAL) << "Should not reach here";
+ UNREACHABLE();
+ }
+
+ private:
+ static bool DumpImt(Runtime* runtime,
+ const std::string& imt_file,
+ Handle<mirror::ClassLoader> h_class_loader)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::vector<std::string> lines = ReadCommentedInputFromFile(imt_file);
std::unordered_set<std::string> prepared;
@@ -2561,11 +2627,12 @@
// determine its IMT slot, and check the class' IMT.
size_t first_space = line.find(' ');
if (first_space == std::string::npos) {
- DumpIMTForClass(runtime, line, &prepared);
+ DumpIMTForClass(runtime, line, h_class_loader, &prepared);
} else {
DumpIMTForMethod(runtime,
line.substr(0, first_space),
line.substr(first_space + 1, std::string::npos),
+ h_class_loader,
&prepared);
}
std::cerr << std::endl;
@@ -2574,9 +2641,12 @@
return true;
}
- static bool DumpImtStats(Runtime* runtime, const std::vector<const DexFile*>& dex_files) {
- size_t wo_imt = 0;
- size_t w_imt = 0;
+ static bool DumpImtStats(Runtime* runtime,
+ const std::vector<const DexFile*>& dex_files,
+ Handle<mirror::ClassLoader> h_class_loader)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ size_t without_imt = 0;
+ size_t with_imt = 0;
std::map<size_t, size_t> histogram;
ClassLinker* class_linker = runtime->GetClassLinker();
@@ -2584,37 +2654,34 @@
std::unordered_set<std::string> prepared;
Thread* self = Thread::Current();
- ScopedObjectAccess soa(self);
StackHandleScope<1> scope(self);
MutableHandle<mirror::Class> h_klass(scope.NewHandle<mirror::Class>(nullptr));
for (const DexFile* dex_file : dex_files) {
for (uint32_t class_def_index = 0;
- class_def_index != dex_file->NumClassDefs();
- ++class_def_index) {
+ class_def_index != dex_file->NumClassDefs();
+ ++class_def_index) {
const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
const char* descriptor = dex_file->GetClassDescriptor(class_def);
- h_klass.Assign(class_linker->FindClass(self,
- descriptor,
- ScopedNullHandle<mirror::ClassLoader>()));
+ h_klass.Assign(class_linker->FindClass(self, descriptor, h_class_loader));
if (h_klass.Get() == nullptr) {
std::cerr << "Warning: could not load " << descriptor << std::endl;
continue;
}
if (HasNoIMT(runtime, h_klass, pointer_size, &prepared)) {
- wo_imt++;
+ without_imt++;
continue;
}
ImTable* im_table = PrepareAndGetImTable(runtime, h_klass, pointer_size, &prepared);
if (im_table == nullptr) {
// Should not happen, but accept.
- wo_imt++;
+ without_imt++;
continue;
}
- w_imt++;
+ with_imt++;
for (size_t imt_index = 0; imt_index != ImTable::kSize; ++imt_index) {
ArtMethod* ptr = im_table->Get(imt_index, pointer_size);
if (ptr->IsRuntimeMethod()) {
@@ -2634,9 +2701,9 @@
std::cerr << "IMT stats:"
<< std::endl << std::endl;
- std::cerr << " " << w_imt << " classes with IMT."
+ std::cerr << " " << with_imt << " classes with IMT."
<< std::endl << std::endl;
- std::cerr << " " << wo_imt << " classes without IMT (or copy from Object)."
+ std::cerr << " " << without_imt << " classes without IMT (or copy from Object)."
<< std::endl << std::endl;
double sum_one = 0;
@@ -2659,8 +2726,7 @@
return true;
}
- private:
- // Check whether the given class has no IMT (or the one shared with java.lang.Object).
+ // Return whether the given class has no IMT (or the one shared with java.lang.Object).
static bool HasNoIMT(Runtime* runtime,
Handle<mirror::Class> klass,
const PointerSize pointer_size,
@@ -2705,6 +2771,7 @@
static ImTable* PrepareAndGetImTable(Runtime* runtime,
Thread* self,
+ Handle<mirror::ClassLoader> h_loader,
const std::string& class_name,
const PointerSize pointer_size,
mirror::Class** klass_out,
@@ -2721,10 +2788,7 @@
descriptor = DotToDescriptor(class_name.c_str());
}
- ScopedNullHandle<mirror::ClassLoader> null_handle;
-
- mirror::Class* klass =
- runtime->GetClassLinker()->FindClass(self, descriptor.c_str(), null_handle);
+ mirror::Class* klass = runtime->GetClassLinker()->FindClass(self, descriptor.c_str(), h_loader);
if (klass == nullptr) {
self->ClearException();
@@ -2752,13 +2816,18 @@
static void DumpIMTForClass(Runtime* runtime,
const std::string& class_name,
- std::unordered_set<std::string>* prepared) {
- Thread* self = Thread::Current();
- ScopedObjectAccess soa(self);
-
+ Handle<mirror::ClassLoader> h_loader,
+ std::unordered_set<std::string>* prepared)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const PointerSize pointer_size = runtime->GetClassLinker()->GetImagePointerSize();
mirror::Class* klass;
- ImTable* imt = PrepareAndGetImTable(runtime, self, class_name, pointer_size, &klass, prepared);
+ ImTable* imt = PrepareAndGetImTable(runtime,
+ Thread::Current(),
+ h_loader,
+ class_name,
+ pointer_size,
+ &klass,
+ prepared);
if (imt == nullptr) {
return;
}
@@ -2801,14 +2870,14 @@
static void DumpIMTForMethod(Runtime* runtime,
const std::string& class_name,
const std::string& method,
- std::unordered_set<std::string>* prepared) {
- Thread* self = Thread::Current();
- ScopedObjectAccess soa(self);
-
+ Handle<mirror::ClassLoader> h_loader,
+ std::unordered_set<std::string>* prepared)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const PointerSize pointer_size = runtime->GetClassLinker()->GetImagePointerSize();
mirror::Class* klass;
ImTable* imt = PrepareAndGetImTable(runtime,
- self,
+ Thread::Current(),
+ h_loader,
class_name,
pointer_size,
&klass,
@@ -3087,9 +3156,9 @@
"\n"
" --dump-imt=<file.txt>: output IMT collisions (if any) for the given receiver\n"
" types and interface methods in the given file. The file\n"
- " is read line-wise, and each line should either be a class\n"
+ " is read line-wise, where each line should either be a class\n"
" name or descriptor, or a class name/descriptor and a prefix\n"
- " of a complete method name.\n"
+ " of a complete method name (separated by a whitespace).\n"
" Example: --dump-imt=imt.txt\n"
"\n"
" --dump-imt-stats: output IMT statistics for the given boot image\n"
@@ -3173,12 +3242,11 @@
virtual bool ExecuteWithRuntime(Runtime* runtime) {
CHECK(args_ != nullptr);
- if (!args_->imt_dump_.empty()) {
- return IMTDumper::DumpImt(runtime, args_->imt_dump_);
- }
-
- if (args_->imt_stat_dump_) {
- return IMTDumper::DumpImtStats(runtime, runtime->GetClassLinker()->GetBootClassPath());
+ if (!args_->imt_dump_.empty() || args_->imt_stat_dump_) {
+ return IMTDumper::Dump(runtime,
+ args_->imt_dump_,
+ args_->imt_stat_dump_,
+ args_->oat_filename_);
}
if (args_->oat_filename_ != nullptr) {
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 5d53062..cdb4c25 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -1710,7 +1710,11 @@
.cfi_rel_offset lr, 12
ldr r3, [r0, #MIRROR_STRING_COUNT_OFFSET]
add r0, #MIRROR_STRING_VALUE_OFFSET
-
+#if (STRING_COMPRESSION_FEATURE)
+ /* r4 count (with flag) and r3 holds actual length */
+ mov r4, r3
+ bic r3, #2147483648
+#endif
/* Clamp start to [0..count] */
cmp r2, #0
it lt
@@ -1723,6 +1727,10 @@
mov r12, r0
/* Build pointer to start of data to compare and pre-bias */
+#if (STRING_COMPRESSION_FEATURE)
+ cmp r4, #0
+ blt .Lstring_indexof_compressed
+#endif
add r0, r0, r2, lsl #1
sub r0, #2
@@ -1734,6 +1742,7 @@
* r0: start of data to test
* r1: char to compare
* r2: iteration count
+ * r4: compression style (used temporarily)
* r12: original start of string data
* r3, r4, r10, r11 available for loading string data
*/
@@ -1791,6 +1800,22 @@
sub r0, r12
asr r0, r0, #1
pop {r4, r10-r11, pc}
+#if (STRING_COMPRESSION_FEATURE)
+.Lstring_indexof_compressed:
+ add r0, r0, r2
+ sub r0, #1
+ sub r2, r3, r2
+.Lstring_indexof_compressed_loop:
+ subs r2, #1
+ blt .Lindexof_nomatch
+ ldrb r3, [r0, #1]!
+ cmp r3, r1
+ beq .Lstring_indexof_compressed_matched
+ b .Lstring_indexof_compressed_loop
+.Lstring_indexof_compressed_matched:
+ sub r0, r12
+ pop {r4, r10-r11, pc}
+#endif
END art_quick_indexof
/* Assembly routines used to handle ABI differences. */
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index eee949d..04a3cc6 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -2378,7 +2378,11 @@
ENTRY art_quick_indexof
ldr w3, [x0, #MIRROR_STRING_COUNT_OFFSET]
add x0, x0, #MIRROR_STRING_VALUE_OFFSET
-
+#if (STRING_COMPRESSION_FEATURE)
+ /* w4 holds count (with flag) and w3 holds actual length */
+ mov w4, w3
+ and w3, w3, #2147483647
+#endif
/* Clamp start to [0..count] */
cmp w2, #0
csel w2, wzr, w2, lt
@@ -2388,10 +2392,12 @@
/* Save a copy to compute result */
mov x5, x0
+#if (STRING_COMPRESSION_FEATURE)
+ tbnz w4, #31, .Lstring_indexof_compressed
+#endif
/* Build pointer to start of data to compare and pre-bias */
add x0, x0, x2, lsl #1
sub x0, x0, #2
-
/* Compute iteration count */
sub w2, w3, w2
@@ -2456,6 +2462,26 @@
sub x0, x0, x5
asr x0, x0, #1
ret
+#if (STRING_COMPRESSION_FEATURE)
+ /*
+ * Comparing compressed string character-per-character with
+ * input character
+ */
+.Lstring_indexof_compressed:
+ add x0, x0, x2
+ sub x0, x0, #1
+ sub w2, w3, w2
+.Lstring_indexof_compressed_loop:
+ subs w2, w2, #1
+ b.lt .Lindexof_nomatch
+ ldrb w6, [x0, #1]!
+ cmp w6, w1
+ b.eq .Lstring_indexof_compressed_matched
+ b .Lstring_indexof_compressed_loop
+.Lstring_indexof_compressed_matched:
+ sub x0, x0, x5
+ ret
+#endif
END art_quick_indexof
/*
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 879d496..7bb59ef 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1986,11 +1986,70 @@
mov MIRROR_STRING_COUNT_OFFSET(%ecx), %ebx
lea MIRROR_STRING_VALUE_OFFSET(%eax), %esi
lea MIRROR_STRING_VALUE_OFFSET(%ecx), %edi
+#if (STRING_COMPRESSION_FEATURE)
+ /* Differ cases */
+ cmpl LITERAL(0), %edx
+ jl .Lstring_compareto_this_is_compressed
+ cmpl LITERAL(0), %ebx
+ jl .Lstring_compareto_that_is_compressed
+ jmp .Lstring_compareto_both_not_compressed
+.Lstring_compareto_this_is_compressed:
+ andl LITERAL(0x7FFFFFFF), %edx
+ cmpl LITERAL(0), %ebx
+ jl .Lstring_compareto_both_compressed
+ /* If (this->IsCompressed() && that->IsCompressed() == false) */
+ mov %edx, %eax
+ subl %ebx, %eax
+ mov %edx, %ecx
+ cmovg %ebx, %ecx
+ /* Going into loop to compare each character */
+ jecxz .Lstring_compareto_keep_length // check loop counter (if 0, don't compare)
+.Lstring_compareto_loop_comparison_this_compressed:
+ movzbl (%esi), %edx // move *(this_cur_char) byte to long
+ movzwl (%edi), %ebx // move *(that_cur_char) word to long
+ addl LITERAL(1), %esi // ++this_cur_char (8-bit)
+ addl LITERAL(2), %edi // ++that_cur_char (16-bit)
+ subl %ebx, %edx
+ loope .Lstring_compareto_loop_comparison_this_compressed
+ cmovne %edx, %eax // return eax = *(this_cur_char) - *(that_cur_char)
+ jmp .Lstring_compareto_return
+.Lstring_compareto_that_is_compressed:
+ andl LITERAL(0x7FFFFFFF), %ebx
+ mov %edx, %eax
+ subl %ebx, %eax
+ mov %edx, %ecx
+ cmovg %ebx, %ecx
+ /* If (this->IsCompressed() == false && that->IsCompressed()) */
+ jecxz .Lstring_compareto_keep_length // check loop counter, if 0, don't compare
+.Lstring_compareto_loop_comparison_that_compressed:
+ movzwl (%esi), %edx // move *(this_cur_char) word to long
+ movzbl (%edi), %ebx // move *(that_cur_char) byte to long
+ addl LITERAL(2), %esi // ++this_cur_char (16-bit)
+ addl LITERAL(1), %edi // ++that_cur_char (8-bit)
+ subl %ebx, %edx
+ loope .Lstring_compareto_loop_comparison_that_compressed
+ cmovne %edx, %eax
+ jmp .Lstring_compareto_return // return eax = *(this_cur_char) - *(that_cur_char)
+.Lstring_compareto_both_compressed:
+ andl LITERAL(0x7FFFFFFF), %ebx
/* Calculate min length and count diff */
- mov %edx, %ecx
- mov %edx, %eax
- subl %ebx, %eax
- cmovg %ebx, %ecx
+ mov %edx, %ecx
+ mov %edx, %eax
+ subl %ebx, %eax
+ cmovg %ebx, %ecx
+ jecxz .Lstring_compareto_keep_length
+ repe cmpsb
+ je .Lstring_compareto_keep_length
+ movzbl -1(%esi), %eax // get last compared char from this string (8-bit)
+ movzbl -1(%edi), %ecx // get last compared char from comp string (8-bit)
+ jmp .Lstring_compareto_count_difference
+#endif // STRING_COMPRESSION_FEATURE
+.Lstring_compareto_both_not_compressed:
+ /* Calculate min length and count diff */
+ mov %edx, %ecx
+ mov %edx, %eax
+ subl %ebx, %eax
+ cmovg %ebx, %ecx
/*
* At this point we have:
* eax: value to return if first part of strings are equal
@@ -1998,18 +2057,15 @@
* esi: pointer to this string data
* edi: pointer to comp string data
*/
- jecxz .Lkeep_length
- repe cmpsw // find nonmatching chars in [%esi] and [%edi], up to length %ecx
- jne .Lnot_equal
-.Lkeep_length:
- POP edi // pop callee save reg
- POP esi // pop callee save reg
- ret
- .balign 16
-.Lnot_equal:
- movzwl -2(%esi), %eax // get last compared char from this string
- movzwl -2(%edi), %ecx // get last compared char from comp string
- subl %ecx, %eax // return the difference
+ jecxz .Lstring_compareto_keep_length
+ repe cmpsw // find nonmatching chars in [%esi] and [%edi], up to length %ecx
+ je .Lstring_compareto_keep_length
+ movzwl -2(%esi), %eax // get last compared char from this string (16-bit)
+ movzwl -2(%edi), %ecx // get last compared char from comp string (16-bit)
+.Lstring_compareto_count_difference:
+ subl %ecx, %eax
+.Lstring_compareto_keep_length:
+.Lstring_compareto_return:
POP edi // pop callee save reg
POP esi // pop callee save reg
ret
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index a11e402..54e52e5 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -2100,11 +2100,70 @@
/* Build pointers to the start of string data */
leal MIRROR_STRING_VALUE_OFFSET(%edi), %edi
leal MIRROR_STRING_VALUE_OFFSET(%esi), %esi
+#if (STRING_COMPRESSION_FEATURE)
+ /* Differ cases */
+ cmpl LITERAL(0), %r8d
+ jl .Lstring_compareto_this_is_compressed
+ cmpl LITERAL(0), %r9d
+ jl .Lstring_compareto_that_is_compressed
+ jmp .Lstring_compareto_both_not_compressed
+.Lstring_compareto_this_is_compressed:
+ andl LITERAL(0x7FFFFFFF), %r8d
+ cmpl LITERAL(0), %r9d
+ jl .Lstring_compareto_both_compressed
+ /* Comparison this (8-bit) and that (16-bit) */
+ mov %r8d, %eax
+ subl %r9d, %eax
+ mov %r8d, %ecx
+ cmovg %r9d, %ecx
+ /* Going into loop to compare each character */
+ jecxz .Lstring_compareto_keep_length // check loop counter (if 0 then stop)
+.Lstring_compareto_loop_comparison_this_compressed:
+ movzbl (%edi), %r8d // move *(this_cur_char) byte to long
+ movzwl (%esi), %r9d // move *(that_cur_char) word to long
+ addl LITERAL(1), %edi // ++this_cur_char (8-bit)
+ addl LITERAL(2), %esi // ++that_cur_char (16-bit)
+ subl %r9d, %r8d
+ loope .Lstring_compareto_loop_comparison_this_compressed
+ cmovne %r8d, %eax // return eax = *(this_cur_char) - *(that_cur_char)
+ ret
+.Lstring_compareto_that_is_compressed:
+ andl LITERAL(0x7FFFFFFF), %r9d
+ movl %r8d, %eax
+ subl %r9d, %eax
+ mov %r8d, %ecx
+ cmovg %r9d, %ecx
+ /* Comparison this (8-bit) and that (16-bit) */
+ jecxz .Lstring_compareto_keep_length // check loop counter (if 0, don't compare)
+.Lstring_compareto_loop_comparison_that_compressed:
+ movzwl (%edi), %r8d // move *(this_cur_char) word to long
+ movzbl (%esi), %r9d // move *(that_cur_chat) byte to long
+ addl LITERAL(2), %edi // ++this_cur_char (16-bit)
+ addl LITERAL(1), %esi // ++that_cur_char (8-bit)
+ subl %r9d, %r8d
+ loope .Lstring_compareto_loop_comparison_that_compressed
+ cmovne %r8d, %eax // return eax = *(this_cur_char) - *(that_cur_char)
+ ret
+.Lstring_compareto_both_compressed:
+ andl LITERAL(0x7FFFFFFF), %r9d
/* Calculate min length and count diff */
- movl %r8d, %ecx
- movl %r8d, %eax
- subl %r9d, %eax
- cmovg %r9d, %ecx
+ movl %r8d, %ecx
+ movl %r8d, %eax
+ subl %r9d, %eax
+ cmovg %r9d, %ecx
+ jecxz .Lstring_compareto_keep_length
+ repe cmpsb
+ je .Lstring_compareto_keep_length
+ movzbl -1(%edi), %eax // get last compared char from this string (8-bit)
+ movzbl -1(%esi), %ecx // get last compared char from comp string (8-bit)
+ jmp .Lstring_compareto_count_difference
+#endif // STRING_COMPRESSION_FEATURE
+.Lstring_compareto_both_not_compressed:
+ /* Calculate min length and count diff */
+ movl %r8d, %ecx
+ movl %r8d, %eax
+ subl %r9d, %eax
+ cmovg %r9d, %ecx
/*
* At this point we have:
* eax: value to return if first part of strings are equal
@@ -2112,16 +2171,14 @@
* esi: pointer to comp string data
* edi: pointer to this string data
*/
- jecxz .Lkeep_length
- repe cmpsw // find nonmatching chars in [%esi] and [%edi], up to length %ecx
- jne .Lnot_equal
-.Lkeep_length:
- ret
- .balign 16
-.Lnot_equal:
- movzwl -2(%edi), %eax // get last compared char from this string
- movzwl -2(%esi), %ecx // get last compared char from comp string
+ jecxz .Lstring_compareto_keep_length
+ repe cmpsw // find nonmatching chars in [%esi] and [%edi], up to length %ecx
+ je .Lstring_compareto_keep_length
+ movzwl -2(%edi), %eax // get last compared char from this string (16-bit)
+ movzwl -2(%esi), %ecx // get last compared char from comp string (16-bit)
+.Lstring_compareto_count_difference:
subl %ecx, %eax // return the difference
+.Lstring_compareto_keep_length:
ret
END_FUNCTION art_quick_string_compareto
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index b8f7272..567791e 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -240,7 +240,9 @@
#define MIRROR_STRING_VALUE_OFFSET (8 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_STRING_VALUE_OFFSET, art::mirror::String::ValueOffset().Int32Value())
-
+// String compression feature.
+#define STRING_COMPRESSION_FEATURE 0
+ADD_TEST_EQ(STRING_COMPRESSION_FEATURE, art::mirror::kUseStringCompression);
#if defined(__cplusplus)
} // End of CheckAsmSupportOffsets.
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 0a1f7a9..7dea614 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1287,7 +1287,7 @@
const DexFile* const dex_file = dex_cache->GetDexFile();
const OatFile::OatDexFile* oat_dex_file = dex_file->GetOatDexFile();
if (oat_dex_file != nullptr && oat_dex_file->GetDexCacheArrays() != nullptr) {
- // If the oat file expects the dex cache arrays to be in the BSS, then allocate there and
+ // If the oat file expects the dex cache arrays to be in the BSS, then allocate there and
// copy over the arrays.
DCHECK(dex_file != nullptr);
size_t num_strings = mirror::DexCache::kDexCacheStringCacheSize;
@@ -1297,10 +1297,19 @@
const size_t num_types = dex_file->NumTypeIds();
const size_t num_methods = dex_file->NumMethodIds();
const size_t num_fields = dex_file->NumFieldIds();
+ size_t num_method_types = 0;
+ if (Runtime::Current()->IsMethodHandlesEnabled()) {
+ num_method_types = mirror::DexCache::kDexCacheMethodTypeCacheSize;
+ if (dex_file->NumProtoIds() < num_method_types) {
+ num_method_types = dex_file->NumProtoIds();
+ }
+ }
+
CHECK_EQ(num_strings, dex_cache->NumStrings());
CHECK_EQ(num_types, dex_cache->NumResolvedTypes());
CHECK_EQ(num_methods, dex_cache->NumResolvedMethods());
CHECK_EQ(num_fields, dex_cache->NumResolvedFields());
+ CHECK_EQ(num_method_types, dex_cache->NumResolvedMethodTypes());
DexCacheArraysLayout layout(image_pointer_size_, dex_file);
uint8_t* const raw_arrays = oat_dex_file->GetDexCacheArrays();
if (num_strings != 0u) {
@@ -1351,6 +1360,25 @@
std::copy_n(dex_cache->GetResolvedFields(), num_fields, fields);
dex_cache->SetResolvedFields(fields);
}
+ if (num_method_types != 0u) {
+ // NOTE: We currently (Sep 2016) do not resolve any method types at
+ // compile time, but plan to in the future. This code exists for the
+ // sake of completeness.
+ mirror::MethodTypeDexCacheType* const image_resolved_method_types =
+ dex_cache->GetResolvedMethodTypes();
+ mirror::MethodTypeDexCacheType* const method_types =
+ reinterpret_cast<mirror::MethodTypeDexCacheType*>(
+ raw_arrays + layout.MethodTypesOffset());
+ for (size_t j = 0; j < num_method_types; ++j) {
+ DCHECK_EQ(method_types[j].load(std::memory_order_relaxed).index, 0u);
+ DCHECK(method_types[j].load(std::memory_order_relaxed).object.IsNull());
+ method_types[j].store(
+ image_resolved_method_types[j].load(std::memory_order_relaxed),
+ std::memory_order_relaxed);
+ }
+
+ dex_cache->SetResolvedMethodTypes(method_types);
+ }
}
{
WriterMutexLock mu2(self, dex_lock_);
@@ -2104,6 +2132,7 @@
// Zero-initialized.
raw_arrays = reinterpret_cast<uint8_t*>(linear_alloc->Alloc(self, layout.Size()));
}
+
mirror::StringDexCacheType* strings = (dex_file.NumStringIds() == 0u) ? nullptr :
reinterpret_cast<mirror::StringDexCacheType*>(raw_arrays + layout.StringsOffset());
GcRoot<mirror::Class>* types = (dex_file.NumTypeIds() == 0u) ? nullptr :
@@ -2112,10 +2141,35 @@
reinterpret_cast<ArtMethod**>(raw_arrays + layout.MethodsOffset());
ArtField** fields = (dex_file.NumFieldIds() == 0u) ? nullptr :
reinterpret_cast<ArtField**>(raw_arrays + layout.FieldsOffset());
+
size_t num_strings = mirror::DexCache::kDexCacheStringCacheSize;
if (dex_file.NumStringIds() < num_strings) {
num_strings = dex_file.NumStringIds();
}
+
+ // Note that we allocate the method type dex caches regardless of this flag,
+ // and we make sure here that they're not used by the runtime. This is in the
+ // interest of simplicity and to avoid extensive compiler and layout class changes.
+ //
+ // If this needs to be mitigated in a production system running this code,
+ // DexCache::kDexCacheMethodTypeCacheSize can be set to zero.
+ const bool is_method_handles_enabled = Runtime::Current()->IsMethodHandlesEnabled();
+ mirror::MethodTypeDexCacheType* method_types = nullptr;
+ size_t num_method_types = 0;
+
+ if (is_method_handles_enabled) {
+ if (dex_file.NumProtoIds() < mirror::DexCache::kDexCacheMethodTypeCacheSize) {
+ num_method_types = dex_file.NumProtoIds();
+ } else {
+ num_method_types = mirror::DexCache::kDexCacheMethodTypeCacheSize;
+ }
+
+ if (num_method_types > 0) {
+ method_types = reinterpret_cast<mirror::MethodTypeDexCacheType*>(
+ raw_arrays + layout.MethodTypesOffset());
+ }
+ }
+
DCHECK_ALIGNED(raw_arrays, alignof(mirror::StringDexCacheType)) <<
"Expected raw_arrays to align to StringDexCacheType.";
DCHECK_ALIGNED(layout.StringsOffset(), alignof(mirror::StringDexCacheType)) <<
@@ -2139,10 +2193,17 @@
for (size_t i = 0; i < dex_file.NumFieldIds(); ++i) {
CHECK(mirror::DexCache::GetElementPtrSize(fields, i, image_pointer_size_) == nullptr);
}
+ for (size_t i = 0; i < num_method_types; ++i) {
+ CHECK_EQ(method_types[i].load(std::memory_order_relaxed).index, 0u);
+ CHECK(method_types[i].load(std::memory_order_relaxed).object.IsNull());
+ }
}
if (strings != nullptr) {
mirror::StringDexCachePair::Initialize(strings);
}
+ if (method_types != nullptr) {
+ mirror::MethodTypeDexCachePair::Initialize(method_types);
+ }
dex_cache->Init(&dex_file,
location.Get(),
strings,
@@ -2153,6 +2214,8 @@
dex_file.NumMethodIds(),
fields,
dex_file.NumFieldIds(),
+ method_types,
+ num_method_types,
image_pointer_size_);
return dex_cache.Get();
}
@@ -7935,6 +7998,68 @@
return resolved;
}
+mirror::MethodType* ClassLinker::ResolveMethodType(const DexFile& dex_file,
+ uint32_t proto_idx,
+ Handle<mirror::DexCache> dex_cache,
+ Handle<mirror::ClassLoader> class_loader) {
+ DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
+ DCHECK(dex_cache.Get() != nullptr);
+
+ mirror::MethodType* resolved = dex_cache->GetResolvedMethodType(proto_idx);
+ if (resolved != nullptr) {
+ return resolved;
+ }
+
+ Thread* const self = Thread::Current();
+ StackHandleScope<4> hs(self);
+
+ // First resolve the return type.
+ const DexFile::ProtoId& proto_id = dex_file.GetProtoId(proto_idx);
+ Handle<mirror::Class> return_type(hs.NewHandle(
+ ResolveType(dex_file, proto_id.return_type_idx_, dex_cache, class_loader)));
+ if (return_type.Get() == nullptr) {
+ DCHECK(self->IsExceptionPending());
+ return nullptr;
+ }
+
+ // Then resolve the argument types.
+ //
+ // TODO: Is there a better way to figure out the number of method arguments
+ // other than by looking at the shorty ?
+ const size_t num_method_args = strlen(dex_file.StringDataByIdx(proto_id.shorty_idx_)) - 1;
+
+ mirror::Class* class_type = mirror::Class::GetJavaLangClass();
+ mirror::Class* array_of_class = FindArrayClass(self, &class_type);
+ Handle<mirror::ObjectArray<mirror::Class>> method_params(hs.NewHandle(
+ mirror::ObjectArray<mirror::Class>::Alloc(self, array_of_class, num_method_args)));
+ if (method_params.Get() == nullptr) {
+ DCHECK(self->IsExceptionPending());
+ return nullptr;
+ }
+
+ DexFileParameterIterator it(dex_file, proto_id);
+ int32_t i = 0;
+ MutableHandle<mirror::Class> param_class = hs.NewHandle<mirror::Class>(nullptr);
+ for (; it.HasNext(); it.Next()) {
+ const uint16_t type_idx = it.GetTypeIdx();
+ param_class.Assign(ResolveType(dex_file, type_idx, dex_cache, class_loader));
+ if (param_class.Get() == nullptr) {
+ DCHECK(self->IsExceptionPending());
+ return nullptr;
+ }
+
+ method_params->Set(i++, param_class.Get());
+ }
+
+ DCHECK(!it.HasNext());
+
+ Handle<mirror::MethodType> type = hs.NewHandle(
+ mirror::MethodType::Create(self, return_type, method_params));
+ dex_cache->SetResolvedMethodType(proto_idx, type.Get());
+
+ return type.Get();
+}
+
const char* ClassLinker::MethodShorty(uint32_t method_idx,
ArtMethod* referrer,
uint32_t* length) {
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 8f7051e..df7fb61 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -49,6 +49,7 @@
class ClassLoader;
class DexCache;
class DexCachePointerArray;
+ class DexCacheMethodHandlesTest_Open_Test;
class DexCacheTest_Open_Test;
class IfTable;
class MethodType;
@@ -362,6 +363,15 @@
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
+ // Resolve a method type with a given ID from the DexFile, storing
+ // the result in the DexCache.
+ mirror::MethodType* ResolveMethodType(const DexFile& dex_file,
+ uint32_t proto_idx,
+ Handle<mirror::DexCache> dex_cache,
+ Handle<mirror::ClassLoader> class_loader)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!dex_lock_, !Roles::uninterruptible_);
+
// Get shorty from method index without resolution. Used to do handlerization.
const char* MethodShorty(uint32_t method_idx, ArtMethod* referrer, uint32_t* length)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1181,6 +1191,7 @@
friend class JniCompilerTest; // for GetRuntimeQuickGenericJniStub
friend class JniInternalTest; // for GetRuntimeQuickGenericJniStub
ART_FRIEND_TEST(ClassLinkerTest, RegisterDexFileName); // for DexLock, and RegisterDexFileLocked
+ ART_FRIEND_TEST(mirror::DexCacheMethodHandlesTest, Open); // for AllocDexCache
ART_FRIEND_TEST(mirror::DexCacheTest, Open); // for AllocDexCache
DISALLOW_COPY_AND_ASSIGN(ClassLinker);
};
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index a5aa0d0..451b752 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -451,6 +451,14 @@
};
};
+class ClassLinkerMethodHandlesTest : public ClassLinkerTest {
+ protected:
+ virtual void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+ CommonRuntimeTest::SetUpRuntimeOptions(options);
+ options->push_back(std::make_pair("-Xexperimental:method-handles", nullptr));
+ }
+};
+
struct CheckOffset {
size_t cpp_offset;
const char* java_name;
@@ -650,10 +658,12 @@
addOffset(OFFSETOF_MEMBER(mirror::DexCache, dex_file_), "dexFile");
addOffset(OFFSETOF_MEMBER(mirror::DexCache, location_), "location");
addOffset(OFFSETOF_MEMBER(mirror::DexCache, num_resolved_fields_), "numResolvedFields");
+ addOffset(OFFSETOF_MEMBER(mirror::DexCache, num_resolved_method_types_), "numResolvedMethodTypes");
addOffset(OFFSETOF_MEMBER(mirror::DexCache, num_resolved_methods_), "numResolvedMethods");
addOffset(OFFSETOF_MEMBER(mirror::DexCache, num_resolved_types_), "numResolvedTypes");
addOffset(OFFSETOF_MEMBER(mirror::DexCache, num_strings_), "numStrings");
addOffset(OFFSETOF_MEMBER(mirror::DexCache, resolved_fields_), "resolvedFields");
+ addOffset(OFFSETOF_MEMBER(mirror::DexCache, resolved_method_types_), "resolvedMethodTypes");
addOffset(OFFSETOF_MEMBER(mirror::DexCache, resolved_methods_), "resolvedMethods");
addOffset(OFFSETOF_MEMBER(mirror::DexCache, resolved_types_), "resolvedTypes");
addOffset(OFFSETOF_MEMBER(mirror::DexCache, strings_), "strings");
@@ -1294,4 +1304,59 @@
}
}
+TEST_F(ClassLinkerMethodHandlesTest, TestResolveMethodTypes) {
+ ScopedObjectAccess soa(Thread::Current());
+
+ StackHandleScope<7> hs(soa.Self());
+
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader>(LoadDex("MethodTypes"))));
+ Handle<mirror::Class> method_types(
+ hs.NewHandle(class_linker_->FindClass(soa.Self(), "LMethodTypes;", class_loader)));
+ class_linker_->EnsureInitialized(soa.Self(), method_types, true, true);
+
+ ArtMethod* method1 = method_types->FindVirtualMethod("method1",
+ "(Ljava/lang/String;)Ljava/lang/String;",
+ kRuntimePointerSize);
+
+ const DexFile& dex_file = *(method1->GetDexFile());
+ Handle<mirror::DexCache> dex_cache = hs.NewHandle(
+ class_linker_->FindDexCache(Thread::Current(), dex_file));
+
+ const DexFile::MethodId& method1_id = dex_file.GetMethodId(method1->GetDexMethodIndex());
+
+ // This is the MethodType corresponding to the prototype of
+ // String MethodTypes#method1(String).
+ // Its RType = Ljava/lang/String;
+ // Its PTypes = { Ljava/lang/String; }
+ Handle<mirror::MethodType> method1_type = hs.NewHandle(
+ class_linker_->ResolveMethodType(dex_file, method1_id.proto_idx_, dex_cache, class_loader));
+
+ // Assert that the method type was resolved successfully.
+ ASSERT_TRUE(method1_type.Get() != nullptr);
+
+ // Assert that the return type and the method arguments are as we expect.
+ Handle<mirror::Class> string_class(
+ hs.NewHandle(class_linker_->FindClass(soa.Self(), "Ljava/lang/String;", class_loader)));
+ ASSERT_EQ(string_class.Get(), method1_type->GetRType());
+ ASSERT_EQ(string_class.Get(), method1_type->GetPTypes()->Get(0));
+
+ // Resolve the method type again and assert that we get back the same value.
+ Handle<mirror::MethodType> method1_type2 = hs.NewHandle(
+ class_linker_->ResolveMethodType(dex_file, method1_id.proto_idx_, dex_cache, class_loader));
+ ASSERT_EQ(method1_type.Get(), method1_type2.Get());
+
+ // Resolve the MethodType associated with a different method signature
+ // and assert it's different.
+ ArtMethod* method2 = method_types->FindVirtualMethod(
+ "method2",
+ "(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;",
+ kRuntimePointerSize);
+ const DexFile::MethodId& method2_id = dex_file.GetMethodId(method2->GetDexMethodIndex());
+ Handle<mirror::MethodType> method2_type = hs.NewHandle(
+ class_linker_->ResolveMethodType(dex_file, method2_id.proto_idx_, dex_cache, class_loader));
+
+ ASSERT_TRUE(method1_type.Get() != method2_type.Get());
+}
+
} // namespace art
diff --git a/runtime/experimental_flags.h b/runtime/experimental_flags.h
index 54d2c35..5ddb9fa 100644
--- a/runtime/experimental_flags.h
+++ b/runtime/experimental_flags.h
@@ -28,6 +28,7 @@
kNone = 0x0000,
kAgents = 0x0001, // 0b00000001
kRuntimePlugins = 0x0002, // 0b00000010
+ kMethodHandles = 0x0004, // 0b00000100
};
constexpr ExperimentalFlags() : value_(0x0000) {}
@@ -74,6 +75,10 @@
stream << (started ? "|" : "") << "kRuntimePlugins";
started = true;
}
+ if (e & ExperimentalFlags::kMethodHandles) {
+ stream << (started ? "|" : "") << "kMethodHandles";
+ started = true;
+ }
if (!started) {
stream << "kNone";
}
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 9bd6f6d..fbd670c 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -679,7 +679,12 @@
ASSERT_TRUE(env_->IsInstanceOf(o, c));
// ...whose fields haven't been initialized because
// we didn't call a constructor.
- ASSERT_EQ(0, env_->GetIntField(o, env_->GetFieldID(c, "count", "I")));
+ if (art::mirror::kUseStringCompression) {
+ // Zero-length string is compressed, so the length internally will be -(1 << 31).
+ ASSERT_EQ(-2147483648, env_->GetIntField(o, env_->GetFieldID(c, "count", "I")));
+ } else {
+ ASSERT_EQ(0, env_->GetIntField(o, env_->GetFieldID(c, "count", "I")));
+ }
}
TEST_F(JniInternalTest, GetVersion) {
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index 359462d..b388f65 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -25,6 +25,7 @@
#include "base/enums.h"
#include "base/logging.h"
#include "mirror/class.h"
+#include "mirror/method_type.h"
#include "runtime.h"
#include <atomic>
@@ -43,10 +44,7 @@
}
inline void DexCache::SetResolvedString(uint32_t string_idx, mirror::String* resolved) {
- DCHECK_LT(string_idx % NumStrings(), NumStrings());
- GetStrings()[string_idx % NumStrings()].store(
- StringDexCachePair(resolved, string_idx),
- std::memory_order_relaxed);
+ StringDexCachePair::Assign(GetStrings(), string_idx, resolved, NumStrings());
Runtime* const runtime = Runtime::Current();
if (UNLIKELY(runtime->IsActiveTransaction())) {
DCHECK(runtime->IsAotCompiler());
@@ -82,6 +80,23 @@
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(this);
}
+inline MethodType* DexCache::GetResolvedMethodType(uint32_t proto_idx) {
+ DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
+ DCHECK_LT(proto_idx, GetDexFile()->NumProtoIds());
+ return MethodTypeDexCachePair::Lookup(
+ GetResolvedMethodTypes(), proto_idx, NumResolvedMethodTypes()).Read();
+}
+
+inline void DexCache::SetResolvedMethodType(uint32_t proto_idx, MethodType* resolved) {
+ DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
+ DCHECK_LT(proto_idx, GetDexFile()->NumProtoIds());
+
+ MethodTypeDexCachePair::Assign(GetResolvedMethodTypes(), proto_idx, resolved,
+ NumResolvedMethodTypes());
+ // TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
+ Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(this);
+}
+
inline ArtField* DexCache::GetResolvedField(uint32_t field_idx, PointerSize ptr_size) {
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size);
DCHECK_LT(field_idx, NumResolvedFields()); // NOTE: Unchecked, i.e. not throwing AIOOB.
diff --git a/runtime/mirror/dex_cache.cc b/runtime/mirror/dex_cache.cc
index cfcec9c..66f858c 100644
--- a/runtime/mirror/dex_cache.cc
+++ b/runtime/mirror/dex_cache.cc
@@ -41,6 +41,8 @@
uint32_t num_resolved_methods,
ArtField** resolved_fields,
uint32_t num_resolved_fields,
+ MethodTypeDexCacheType* resolved_method_types,
+ uint32_t num_resolved_method_types,
PointerSize pointer_size) {
CHECK(dex_file != nullptr);
CHECK(location != nullptr);
@@ -48,6 +50,7 @@
CHECK_EQ(num_resolved_types != 0u, resolved_types != nullptr);
CHECK_EQ(num_resolved_methods != 0u, resolved_methods != nullptr);
CHECK_EQ(num_resolved_fields != 0u, resolved_fields != nullptr);
+ CHECK_EQ(num_resolved_method_types != 0u, resolved_method_types != nullptr);
SetDexFile(dex_file);
SetLocation(location);
@@ -55,10 +58,12 @@
SetResolvedTypes(resolved_types);
SetResolvedMethods(resolved_methods);
SetResolvedFields(resolved_fields);
+ SetResolvedMethodTypes(resolved_method_types);
SetField32<false>(NumStringsOffset(), num_strings);
SetField32<false>(NumResolvedTypesOffset(), num_resolved_types);
SetField32<false>(NumResolvedMethodsOffset(), num_resolved_methods);
SetField32<false>(NumResolvedFieldsOffset(), num_resolved_fields);
+ SetField32<false>(NumResolvedMethodTypesOffset(), num_resolved_method_types);
Runtime* const runtime = Runtime::Current();
if (runtime->HasResolutionMethod()) {
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index d81dedc..2fcabb5 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -33,6 +33,7 @@
namespace mirror {
+class MethodType;
class String;
template <typename T> struct PACKED(8) DexCachePair {
@@ -82,6 +83,15 @@
return element.object;
}
+ static void Assign(std::atomic<DexCachePair<T>>* dex_cache,
+ uint32_t idx,
+ T* object,
+ uint32_t cache_size) {
+ DCHECK_LT(idx % cache_size, cache_size);
+ dex_cache[idx % cache_size].store(
+ DexCachePair<T>(object, idx), std::memory_order_relaxed);
+ }
+
static uint32_t InvalidIndexForSlot(uint32_t slot) {
// Since the cache size is a power of two, 0 will always map to slot 0.
// Use 1 for slot 0 and 0 for all other slots.
@@ -92,6 +102,9 @@
using StringDexCachePair = DexCachePair<mirror::String>;
using StringDexCacheType = std::atomic<StringDexCachePair>;
+using MethodTypeDexCachePair = DexCachePair<mirror::MethodType>;
+using MethodTypeDexCacheType = std::atomic<MethodTypeDexCachePair>;
+
// C++ mirror of java.lang.DexCache.
class MANAGED DexCache FINAL : public Object {
public:
@@ -103,10 +116,20 @@
static_assert(IsPowerOfTwo(kDexCacheStringCacheSize),
"String dex cache size is not a power of 2.");
+ // Size of method type dex cache. Needs to be a power of 2 for entrypoint assumptions
+ // to hold.
+ static constexpr size_t kDexCacheMethodTypeCacheSize = 1024;
+ static_assert(IsPowerOfTwo(kDexCacheMethodTypeCacheSize),
+ "MethodType dex cache size is not a power of 2.");
+
static constexpr size_t StaticStringSize() {
return kDexCacheStringCacheSize;
}
+ static constexpr size_t StaticMethodTypeSize() {
+ return kDexCacheMethodTypeCacheSize;
+ }
+
// Size of an instance of java.lang.DexCache not including referenced values.
static constexpr uint32_t InstanceSize() {
return sizeof(DexCache);
@@ -122,6 +145,8 @@
uint32_t num_resolved_methods,
ArtField** resolved_fields,
uint32_t num_resolved_fields,
+ MethodTypeDexCacheType* resolved_methodtypes,
+ uint32_t num_resolved_methodtypes,
PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_);
void Fixup(ArtMethod* trampoline, PointerSize pointer_size)
@@ -159,6 +184,10 @@
return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_methods_);
}
+ static MemberOffset ResolvedMethodTypesOffset() {
+ return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_method_types_);
+ }
+
static MemberOffset NumStringsOffset() {
return OFFSET_OF_OBJECT_MEMBER(DexCache, num_strings_);
}
@@ -175,6 +204,10 @@
return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_methods_);
}
+ static MemberOffset NumResolvedMethodTypesOffset() {
+ return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_method_types_);
+ }
+
mirror::String* GetResolvedString(uint32_t string_idx) ALWAYS_INLINE
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -205,6 +238,10 @@
ALWAYS_INLINE void SetResolvedField(uint32_t idx, ArtField* field, PointerSize ptr_size)
REQUIRES_SHARED(Locks::mutator_lock_);
+ MethodType* GetResolvedMethodType(uint32_t proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ void SetResolvedMethodType(uint32_t proto_idx, MethodType* resolved) REQUIRES_SHARED(Locks::mutator_lock_);
+
StringDexCacheType* GetStrings() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldPtr64<StringDexCacheType*>(StringsOffset());
}
@@ -243,6 +280,17 @@
SetFieldPtr<false>(ResolvedFieldsOffset(), resolved_fields);
}
+ MethodTypeDexCacheType* GetResolvedMethodTypes()
+ ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
+ return GetFieldPtr<MethodTypeDexCacheType*>(ResolvedMethodTypesOffset());
+ }
+
+ void SetResolvedMethodTypes(MethodTypeDexCacheType* resolved_method_types)
+ ALWAYS_INLINE
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ SetFieldPtr<false>(ResolvedMethodTypesOffset(), resolved_method_types);
+ }
+
size_t NumStrings() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32(NumStringsOffset());
}
@@ -259,6 +307,10 @@
return GetField32(NumResolvedFieldsOffset());
}
+ size_t NumResolvedMethodTypes() REQUIRES_SHARED(Locks::mutator_lock_) {
+ return GetField32(NumResolvedMethodTypesOffset());
+ }
+
const DexFile* GetDexFile() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldPtr<const DexFile*>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_));
}
@@ -290,16 +342,20 @@
HeapReference<Object> dex_;
HeapReference<String> location_;
- uint64_t dex_file_; // const DexFile*
- uint64_t resolved_fields_; // ArtField*, array with num_resolved_fields_ elements.
- uint64_t resolved_methods_; // ArtMethod*, array with num_resolved_methods_ elements.
- uint64_t resolved_types_; // GcRoot<Class>*, array with num_resolved_types_ elements.
- uint64_t strings_; // std::atomic<StringDexCachePair>*,
- // array with num_strings_ elements.
- uint32_t num_resolved_fields_; // Number of elements in the resolved_fields_ array.
- uint32_t num_resolved_methods_; // Number of elements in the resolved_methods_ array.
- uint32_t num_resolved_types_; // Number of elements in the resolved_types_ array.
- uint32_t num_strings_; // Number of elements in the strings_ array.
+ uint64_t dex_file_; // const DexFile*
+ uint64_t resolved_fields_; // ArtField*, array with num_resolved_fields_ elements.
+ uint64_t resolved_method_types_; // std::atomic<MethodTypeDexCachePair>* array with
+ // num_resolved_method_types_ elements.
+ uint64_t resolved_methods_; // ArtMethod*, array with num_resolved_methods_ elements.
+ uint64_t resolved_types_; // GcRoot<Class>*, array with num_resolved_types_ elements.
+ uint64_t strings_; // std::atomic<StringDexCachePair>*, array with num_strings_
+ // elements.
+
+ uint32_t num_resolved_fields_; // Number of elements in the resolved_fields_ array.
+ uint32_t num_resolved_method_types_; // Number of elements in the resolved_method_types_ array.
+ uint32_t num_resolved_methods_; // Number of elements in the resolved_methods_ array.
+ uint32_t num_resolved_types_; // Number of elements in the resolved_types_ array.
+ uint32_t num_strings_; // Number of elements in the strings_ array.
friend struct art::DexCacheOffsets; // for verifying offset information
friend class Object; // For VisitReferences
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index ac04200..12301b8 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -31,6 +31,14 @@
class DexCacheTest : public CommonRuntimeTest {};
+class DexCacheMethodHandlesTest : public DexCacheTest {
+ protected:
+ virtual void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
+ CommonRuntimeTest::SetUpRuntimeOptions(options);
+ options->push_back(std::make_pair("-Xexperimental:method-handles", nullptr));
+ }
+};
+
TEST_F(DexCacheTest, Open) {
ScopedObjectAccess soa(Thread::Current());
StackHandleScope<1> hs(soa.Self());
@@ -46,21 +54,35 @@
EXPECT_EQ(java_lang_dex_file_->NumTypeIds(), dex_cache->NumResolvedTypes());
EXPECT_EQ(java_lang_dex_file_->NumMethodIds(), dex_cache->NumResolvedMethods());
EXPECT_EQ(java_lang_dex_file_->NumFieldIds(), dex_cache->NumResolvedFields());
+ // This should always be zero because the -Xexperimental:method-handles flag
+ // isn't set.
+ EXPECT_EQ(0u, dex_cache->NumResolvedMethodTypes());
+}
+
+TEST_F(DexCacheMethodHandlesTest, Open) {
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<1> hs(soa.Self());
+ ASSERT_TRUE(java_lang_dex_file_ != nullptr);
+ Handle<DexCache> dex_cache(
+ hs.NewHandle(class_linker_->AllocDexCache(soa.Self(),
+ *java_lang_dex_file_,
+ Runtime::Current()->GetLinearAlloc())));
+
+ EXPECT_TRUE(dex_cache->StaticMethodTypeSize() == dex_cache->NumResolvedMethodTypes()
+ || java_lang_dex_file_->NumProtoIds() == dex_cache->NumResolvedMethodTypes());
}
TEST_F(DexCacheTest, LinearAlloc) {
ScopedObjectAccess soa(Thread::Current());
jobject jclass_loader(LoadDex("Main"));
ASSERT_TRUE(jclass_loader != nullptr);
- Runtime* const runtime = Runtime::Current();
- ClassLinker* const class_linker = runtime->GetClassLinker();
StackHandleScope<1> hs(soa.Self());
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
soa.Decode<mirror::ClassLoader>(jclass_loader)));
- mirror::Class* klass = class_linker->FindClass(soa.Self(), "LMain;", class_loader);
+ mirror::Class* klass = class_linker_->FindClass(soa.Self(), "LMain;", class_loader);
ASSERT_TRUE(klass != nullptr);
LinearAlloc* const linear_alloc = klass->GetClassLoader()->GetAllocator();
- EXPECT_NE(linear_alloc, runtime->GetLinearAlloc());
+ EXPECT_NE(linear_alloc, runtime_->GetLinearAlloc());
EXPECT_TRUE(linear_alloc->Contains(klass->GetDexCache()->GetResolvedMethods()));
}
@@ -68,16 +90,14 @@
ScopedObjectAccess soa(Thread::Current());
jobject jclass_loader(LoadDex("Packages"));
ASSERT_TRUE(jclass_loader != nullptr);
- Runtime* const runtime = Runtime::Current();
- ClassLinker* const class_linker = runtime->GetClassLinker();
StackHandleScope<3> hs(soa.Self());
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
soa.Decode<mirror::ClassLoader>(jclass_loader)));
Handle<mirror::Class> klass1 =
- hs.NewHandle(class_linker->FindClass(soa.Self(), "Lpackage1/Package1;", class_loader));
+ hs.NewHandle(class_linker_->FindClass(soa.Self(), "Lpackage1/Package1;", class_loader));
ASSERT_TRUE(klass1.Get() != nullptr);
Handle<mirror::Class> klass2 =
- hs.NewHandle(class_linker->FindClass(soa.Self(), "Lpackage2/Package2;", class_loader));
+ hs.NewHandle(class_linker_->FindClass(soa.Self(), "Lpackage2/Package2;", class_loader));
ASSERT_TRUE(klass2.Get() != nullptr);
EXPECT_EQ(klass1->GetDexCache(), klass2->GetDexCache());
@@ -92,5 +112,60 @@
}
}
+TEST_F(DexCacheMethodHandlesTest, TestResolvedMethodTypes) {
+ ScopedObjectAccess soa(Thread::Current());
+ jobject jclass_loader(LoadDex("MethodTypes"));
+ ASSERT_TRUE(jclass_loader != nullptr);
+
+ StackHandleScope<5> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
+ soa.Decode<mirror::ClassLoader>(jclass_loader)));
+
+ Handle<mirror::Class> method_types(
+ hs.NewHandle(class_linker_->FindClass(soa.Self(), "LMethodTypes;", class_loader)));
+ class_linker_->EnsureInitialized(soa.Self(), method_types, true, true);
+
+ ArtMethod* method1 = method_types->FindVirtualMethod(
+ "method1",
+ "(Ljava/lang/String;)Ljava/lang/String;",
+ kRuntimePointerSize);
+ ArtMethod* method2 = method_types->FindVirtualMethod(
+ "method2",
+ "(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;",
+ kRuntimePointerSize);
+
+ const DexFile& dex_file = *(method1->GetDexFile());
+ Handle<mirror::DexCache> dex_cache = hs.NewHandle(
+ class_linker_->FindDexCache(Thread::Current(), dex_file));
+
+ const DexFile::MethodId& method1_id = dex_file.GetMethodId(method1->GetDexMethodIndex());
+ const DexFile::MethodId& method2_id = dex_file.GetMethodId(method2->GetDexMethodIndex());
+
+ Handle<mirror::MethodType> method1_type = hs.NewHandle(
+ class_linker_->ResolveMethodType(dex_file, method1_id.proto_idx_, dex_cache, class_loader));
+ Handle<mirror::MethodType> method2_type = hs.NewHandle(
+ class_linker_->ResolveMethodType(dex_file, method2_id.proto_idx_, dex_cache, class_loader));
+
+ EXPECT_EQ(method1_type.Get(), dex_cache->GetResolvedMethodType(method1_id.proto_idx_));
+ EXPECT_EQ(method2_type.Get(), dex_cache->GetResolvedMethodType(method2_id.proto_idx_));
+
+ // The MethodTypes dex file contains a single interface with two abstract
+ // methods. It must therefore contain precisely two proto IDs.
+ ASSERT_EQ(2u, dex_file.NumProtoIds());
+ ASSERT_EQ(dex_file.NumProtoIds(), dex_cache->NumResolvedMethodTypes());
+ MethodTypeDexCacheType* method_types_cache = dex_cache->GetResolvedMethodTypes();
+
+ for (size_t i = 0; i < dex_file.NumProtoIds(); ++i) {
+ const MethodTypeDexCachePair pair = method_types_cache[i].load(std::memory_order_relaxed);
+ if (pair.index == method1_id.proto_idx_) {
+ ASSERT_EQ(method1_type.Get(), pair.object.Read());
+ } else if (pair.index == method2_id.proto_idx_) {
+ ASSERT_EQ(method2_type.Get(), pair.object.Read());
+ } else {
+ ASSERT_TRUE(false);
+ }
+ }
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 4f70b04..f937ca7 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -761,6 +761,8 @@
"(Enable new and experimental agent support)\n");
UsageMessage(stream, " -Xexperimental:agents"
"(Enable new and experimental agent support)\n");
+ UsageMessage(stream, " -Xexperimental:method-handles"
+ "(Enable new and experimental method handles support)\n");
UsageMessage(stream, "\n");
UsageMessage(stream, "The following previously supported Dalvik options are ignored:\n");
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 9e63564..30f1b4a 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -314,6 +314,10 @@
return "2.1.0";
}
+ bool IsMethodHandlesEnabled() const {
+ return experimental_flags_ & ExperimentalFlags::kMethodHandles;
+ }
+
void DisallowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
void AllowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
void BroadcastForNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 146afc7..b01a570 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -117,7 +117,7 @@
RUNTIME_OPTIONS_KEY (Unit, NoDexFileFallback)
RUNTIME_OPTIONS_KEY (std::string, CpuAbiList)
RUNTIME_OPTIONS_KEY (std::string, Fingerprint)
-RUNTIME_OPTIONS_KEY (ExperimentalFlags, Experimental, ExperimentalFlags::kNone) // -Xexperimental:{none, agents}
+RUNTIME_OPTIONS_KEY (ExperimentalFlags, Experimental, ExperimentalFlags::kNone) // -Xexperimental:{none, agents, method-handles}
RUNTIME_OPTIONS_KEY (std::vector<ti::Agent>, AgentLib) // -agentlib:<libname>=<options>, Requires -Xexperimental:agents
RUNTIME_OPTIONS_KEY (std::vector<ti::Agent>, AgentPath) // -agentpath:<libname>=<options>, Requires -Xexperimental:agents
RUNTIME_OPTIONS_KEY (std::vector<Plugin>, Plugins) // -Xplugin:<library> Requires -Xexperimental:runtime-plugins
diff --git a/runtime/utils/dex_cache_arrays_layout-inl.h b/runtime/utils/dex_cache_arrays_layout-inl.h
index a85d033..5ccd446 100644
--- a/runtime/utils/dex_cache_arrays_layout-inl.h
+++ b/runtime/utils/dex_cache_arrays_layout-inl.h
@@ -38,8 +38,10 @@
RoundUp(methods_offset_ + MethodsSize(header.method_ids_size_), StringsAlignment())),
fields_offset_(
RoundUp(strings_offset_ + StringsSize(header.string_ids_size_), FieldsAlignment())),
+ method_types_offset_(
+ RoundUp(fields_offset_ + FieldsSize(header.field_ids_size_), Alignment())),
size_(
- RoundUp(fields_offset_ + FieldsSize(header.field_ids_size_), Alignment())) {
+ RoundUp(method_types_offset_ + MethodTypesSize(header.proto_ids_size_), Alignment())) {
}
inline DexCacheArraysLayout::DexCacheArraysLayout(PointerSize pointer_size, const DexFile* dex_file)
@@ -118,6 +120,27 @@
return static_cast<size_t>(pointer_size_);
}
+inline size_t DexCacheArraysLayout::MethodTypeOffset(uint32_t proto_idx) const {
+ return strings_offset_
+ + ElementOffset(PointerSize::k64,
+ proto_idx % mirror::DexCache::kDexCacheMethodTypeCacheSize);
+}
+
+inline size_t DexCacheArraysLayout::MethodTypesSize(size_t num_elements) const {
+ size_t cache_size = mirror::DexCache::kDexCacheMethodTypeCacheSize;
+ if (num_elements < cache_size) {
+ cache_size = num_elements;
+ }
+
+ return ArraySize(PointerSize::k64, cache_size);
+}
+
+inline size_t DexCacheArraysLayout::MethodTypesAlignment() const {
+ static_assert(alignof(mirror::MethodTypeDexCacheType) == 8,
+ "alignof(MethodTypeDexCacheType) != 8");
+ return alignof(mirror::MethodTypeDexCacheType);
+}
+
inline size_t DexCacheArraysLayout::ElementOffset(PointerSize element_size, uint32_t idx) {
return static_cast<size_t>(element_size) * idx;
}
diff --git a/runtime/utils/dex_cache_arrays_layout.h b/runtime/utils/dex_cache_arrays_layout.h
index 20ffa90..e222b46 100644
--- a/runtime/utils/dex_cache_arrays_layout.h
+++ b/runtime/utils/dex_cache_arrays_layout.h
@@ -35,6 +35,7 @@
methods_offset_(0u),
strings_offset_(0u),
fields_offset_(0u),
+ method_types_offset_(0u),
size_(0u) {
}
@@ -94,12 +95,23 @@
size_t FieldsAlignment() const;
+ size_t MethodTypesOffset() const {
+ return method_types_offset_;
+ }
+
+ size_t MethodTypeOffset(uint32_t method_type_idx) const;
+
+ size_t MethodTypesSize(size_t num_elements) const;
+
+ size_t MethodTypesAlignment() const;
+
private:
static constexpr size_t types_offset_ = 0u;
const PointerSize pointer_size_; // Must be first for construction initialization order.
const size_t methods_offset_;
const size_t strings_offset_;
const size_t fields_offset_;
+ const size_t method_types_offset_;
const size_t size_;
static size_t Alignment(PointerSize pointer_size);
diff --git a/test/MethodTypes/MethodTypes.java b/test/MethodTypes/MethodTypes.java
new file mode 100644
index 0000000..f6f8e08
--- /dev/null
+++ b/test/MethodTypes/MethodTypes.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public interface MethodTypes {
+ public String method1(String a);
+ public String method2(String a, String b);
+}
diff --git a/tools/dexfuzz/src/dexfuzz/DexFuzz.java b/tools/dexfuzz/src/dexfuzz/DexFuzz.java
index 04bbbf8..7337ffc 100644
--- a/tools/dexfuzz/src/dexfuzz/DexFuzz.java
+++ b/tools/dexfuzz/src/dexfuzz/DexFuzz.java
@@ -22,6 +22,7 @@
import dexfuzz.fuzzers.FuzzerSingleExecute;
import dexfuzz.fuzzers.FuzzerSingleNoExecute;
import dexfuzz.listeners.BaseListener;
+import dexfuzz.listeners.BisectionSearchListener;
import dexfuzz.listeners.ConsoleLoggerListener;
import dexfuzz.listeners.LogFileListener;
import dexfuzz.listeners.MultiplexerListener;
@@ -66,6 +67,10 @@
}
// Add the file logging listener.
multipleListener.addListener(new LogFileListener(Options.reportLogFile));
+ if (Options.runBisectionSearch) {
+ // Add the bisection search listener.
+ multipleListener.addListener(new BisectionSearchListener());
+ }
// Add the unique program tracker.
multipleListener.addListener(new UniqueProgramTrackerListener(Options.uniqueDatabaseFile));
listener = multipleListener;
diff --git a/tools/dexfuzz/src/dexfuzz/ExecutionResult.java b/tools/dexfuzz/src/dexfuzz/ExecutionResult.java
index 3a8c6cb..f85af1a 100644
--- a/tools/dexfuzz/src/dexfuzz/ExecutionResult.java
+++ b/tools/dexfuzz/src/dexfuzz/ExecutionResult.java
@@ -31,6 +31,7 @@
private String flattenedError;
private String flattenedErrorWithNewlines;
private String flattenedAll;
+ private String flattenedAllWithNewlines;
private static final int TIMEOUT_RETURN_VALUE = 124;
private static final int SIGABORT_RETURN_VALUE = 134;
@@ -101,6 +102,16 @@
return flattenedAll;
}
+ /**
+ * Get both the output and error, concatenated together, including newline characters.
+ */
+ public String getFlattenedAllWithNewlines() {
+ if (flattenedAllWithNewlines == null) {
+ flattenedAllWithNewlines = getFlattenedOutputWithNewlines() + getFlattenedErrorWithNewlines();
+ }
+ return flattenedAllWithNewlines;
+ }
+
public boolean isTimeout() {
return (returnValue == TIMEOUT_RETURN_VALUE);
}
diff --git a/tools/dexfuzz/src/dexfuzz/Options.java b/tools/dexfuzz/src/dexfuzz/Options.java
index 2e929c8..b442b22 100644
--- a/tools/dexfuzz/src/dexfuzz/Options.java
+++ b/tools/dexfuzz/src/dexfuzz/Options.java
@@ -78,6 +78,7 @@
public static boolean skipMutation;
public static boolean dumpMutations;
public static boolean loadMutations;
+ public static boolean runBisectionSearch;
/**
* Print out usage information about dexfuzz, and then exit.
@@ -138,6 +139,7 @@
Log.always(" --report-unique : Print out information about unique programs generated");
Log.always(" --unique-db=<file> : Use <file> store results about unique programs");
Log.always(" (Default: unique_progs.db)");
+ Log.always(" --bisection-search : Run bisection search for divergences");
Log.always("");
System.exit(0);
}
@@ -197,6 +199,8 @@
methodMutations = 1;
minMethods = 1;
maxMethods = 1;
+ } else if (flag.equals("bisection-search")) {
+ runBisectionSearch = true;
} else if (flag.equals("help")) {
usage();
} else {
diff --git a/tools/dexfuzz/src/dexfuzz/executors/Arm64InterpreterExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/Arm64InterpreterExecutor.java
index 227c698..5546207 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/Arm64InterpreterExecutor.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/Arm64InterpreterExecutor.java
@@ -21,11 +21,12 @@
public class Arm64InterpreterExecutor extends Executor {
public Arm64InterpreterExecutor(BaseListener listener, Device device) {
- super("ARM64 Interpreter", 30, listener, Architecture.ARM64, device, false);
+ super("ARM64 Interpreter", 30, listener, Architecture.ARM64, device,
+ /*needsCleanCodeCache*/ false, /*isBisectable*/ false);
}
@Override
- public void execute(String programName) {
+ protected String constructCommand(String programName) {
StringBuilder commandBuilder = new StringBuilder();
commandBuilder.append("dalvikvm64 -Xint ");
if (device.noBootImageAvailable()) {
@@ -33,6 +34,6 @@
}
commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
commandBuilder.append(executeClass);
- executionResult = executeCommandWithTimeout(commandBuilder.toString(), true);
+ return commandBuilder.toString();
}
}
diff --git a/tools/dexfuzz/src/dexfuzz/executors/Arm64OptimizingBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/Arm64OptimizingBackendExecutor.java
index bfa87b7..72e36e8 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/Arm64OptimizingBackendExecutor.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/Arm64OptimizingBackendExecutor.java
@@ -21,11 +21,12 @@
public class Arm64OptimizingBackendExecutor extends Executor {
public Arm64OptimizingBackendExecutor(BaseListener listener, Device device) {
- super("ARM64 Optimizing Backend", 5, listener, Architecture.ARM64, device, true);
+ super("ARM64 Optimizing Backend", 5, listener, Architecture.ARM64, device,
+ /*needsCleanCodeCache*/ true, /*isBisectable*/ true);
}
@Override
- public void execute(String programName) {
+ protected String constructCommand(String programName) {
StringBuilder commandBuilder = new StringBuilder();
commandBuilder.append("dalvikvm64 -Xcompiler-option --compiler-backend=Optimizing ");
if (device.noBootImageAvailable()) {
@@ -33,6 +34,6 @@
}
commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
commandBuilder.append(executeClass);
- executionResult = executeCommandWithTimeout(commandBuilder.toString(), true);
+ return commandBuilder.toString();
}
}
diff --git a/tools/dexfuzz/src/dexfuzz/executors/Arm64QuickBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/Arm64QuickBackendExecutor.java
index 7251ec5..d9228ed 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/Arm64QuickBackendExecutor.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/Arm64QuickBackendExecutor.java
@@ -21,11 +21,12 @@
public class Arm64QuickBackendExecutor extends Executor {
public Arm64QuickBackendExecutor(BaseListener listener, Device device) {
- super("ARM64 Quick Backend", 5, listener, Architecture.ARM64, device, true);
+ super("ARM64 Quick Backend", 5, listener, Architecture.ARM64, device,
+ /*needsCleanCodeCache*/ true, /*isBisectable*/ false);
}
@Override
- public void execute(String programName) {
+ protected String constructCommand(String programName) {
StringBuilder commandBuilder = new StringBuilder();
commandBuilder.append("dalvikvm64 -Xcompiler-option --compiler-backend=Quick ");
if (device.noBootImageAvailable()) {
@@ -33,6 +34,6 @@
}
commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
commandBuilder.append(executeClass);
- executionResult = executeCommandWithTimeout(commandBuilder.toString(), true);
+ return commandBuilder.toString();
}
}
diff --git a/tools/dexfuzz/src/dexfuzz/executors/ArmInterpreterExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/ArmInterpreterExecutor.java
index d17ea87..bdfad3d 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/ArmInterpreterExecutor.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/ArmInterpreterExecutor.java
@@ -21,11 +21,12 @@
public class ArmInterpreterExecutor extends Executor {
public ArmInterpreterExecutor(BaseListener listener, Device device) {
- super("ARM Interpreter", 30, listener, Architecture.ARM, device, false);
+ super("ARM Interpreter", 30, listener, Architecture.ARM, device,
+ /*needsCleanCodeCache*/ false, /*isBisectable*/ false);
}
@Override
- public void execute(String programName) {
+ protected String constructCommand(String programName) {
StringBuilder commandBuilder = new StringBuilder();
commandBuilder.append("dalvikvm32 -Xint ");
if (device.noBootImageAvailable()) {
@@ -33,6 +34,6 @@
}
commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
commandBuilder.append(executeClass);
- executionResult = executeCommandWithTimeout(commandBuilder.toString(), true);
+ return commandBuilder.toString();
}
}
diff --git a/tools/dexfuzz/src/dexfuzz/executors/ArmOptimizingBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/ArmOptimizingBackendExecutor.java
index 947bb2f..ded8cf9 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/ArmOptimizingBackendExecutor.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/ArmOptimizingBackendExecutor.java
@@ -21,11 +21,12 @@
public class ArmOptimizingBackendExecutor extends Executor {
public ArmOptimizingBackendExecutor(BaseListener listener, Device device) {
- super("ARM Optimizing Backend", 5, listener, Architecture.ARM, device, true);
+ super("ARM Optimizing Backend", 5, listener, Architecture.ARM, device,
+ /*needsCleanCodeCache*/ true, /*isBisectable*/ true);
}
@Override
- public void execute(String programName) {
+ protected String constructCommand(String programName) {
StringBuilder commandBuilder = new StringBuilder();
commandBuilder.append("dalvikvm32 -Xcompiler-option --compiler-backend=Optimizing ");
if (device.noBootImageAvailable()) {
@@ -33,6 +34,6 @@
}
commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
commandBuilder.append(executeClass);
- executionResult = executeCommandWithTimeout(commandBuilder.toString(), true);
+ return commandBuilder.toString();
}
}
diff --git a/tools/dexfuzz/src/dexfuzz/executors/ArmQuickBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/ArmQuickBackendExecutor.java
index 7d226e8..0eb35f7 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/ArmQuickBackendExecutor.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/ArmQuickBackendExecutor.java
@@ -21,11 +21,12 @@
public class ArmQuickBackendExecutor extends Executor {
public ArmQuickBackendExecutor(BaseListener listener, Device device) {
- super("ARM Quick Backend", 5, listener, Architecture.ARM, device, true);
+ super("ARM Quick Backend", 5, listener, Architecture.ARM, device,
+ /*needsCleanCodeCache*/ true, /*isBisectable*/ false);
}
@Override
- public void execute(String programName) {
+ protected String constructCommand(String programName) {
StringBuilder commandBuilder = new StringBuilder();
commandBuilder.append("dalvikvm32 -Xcompiler-option --compiler-backend=Quick ");
if (device.noBootImageAvailable()) {
@@ -33,6 +34,6 @@
}
commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
commandBuilder.append(executeClass);
- executionResult = executeCommandWithTimeout(commandBuilder.toString(), true);
+ return commandBuilder.toString();
}
}
diff --git a/tools/dexfuzz/src/dexfuzz/executors/Device.java b/tools/dexfuzz/src/dexfuzz/executors/Device.java
index 45538fe..72f73b8 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/Device.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/Device.java
@@ -18,7 +18,11 @@
import java.io.IOException;
import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
import dexfuzz.ExecutionResult;
import dexfuzz.Log;
@@ -139,6 +143,10 @@
return isHost;
}
+ public boolean isUsingSpecificDevice() {
+ return usingSpecificDevice;
+ }
+
/**
* Certain AOSP builds of Android may not have a full boot.art built. This will be set if
* we use --no-boot-image, and is used by Executors when deciding the arguments for dalvikvm
@@ -186,7 +194,7 @@
Log.info("Executing: " + command);
try {
- ProcessBuilder processBuilder = new ProcessBuilder(command.split(" "));
+ ProcessBuilder processBuilder = new ProcessBuilder(splitCommand(command));
processBuilder.environment().put("ANDROID_ROOT", androidHostOut);
if (Options.executeOnHost) {
processBuilder.environment().put("ANDROID_DATA", androidData);
@@ -229,6 +237,17 @@
return result;
}
+ /**
+ * Splits command respecting single quotes.
+ */
+ private List<String> splitCommand(String command) {
+ List<String> ret = new ArrayList<String>();
+ Matcher m = Pattern.compile("(\'[^\']+\'| *[^ ]+ *)").matcher(command);
+ while (m.find())
+ ret.add(m.group(1).trim().replace("\'", ""));
+ return ret;
+ }
+
private String getExecutionPrefixWithAdb(String command) {
if (usingSpecificDevice) {
return String.format("adb -s %s %s ", deviceName, command);
diff --git a/tools/dexfuzz/src/dexfuzz/executors/Executor.java b/tools/dexfuzz/src/dexfuzz/executors/Executor.java
index 1e5d4be..c62a3ad 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/Executor.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/Executor.java
@@ -39,9 +39,10 @@
protected Architecture architecture;
protected Device device;
private boolean needsCleanCodeCache;
+ private boolean isBisectable;
protected Executor(String name, int timeout, BaseListener listener, Architecture architecture,
- Device device, boolean needsCleanCodeCache) {
+ Device device, boolean needsCleanCodeCache, boolean isBisectable) {
executeClass = Options.executeClass;
if (Options.shortTimeouts) {
@@ -55,6 +56,7 @@
this.architecture = architecture;
this.device = device;
this.needsCleanCodeCache = needsCleanCodeCache;
+ this.isBisectable = isBisectable;
if (Options.executeOnHost) {
this.testLocation = System.getProperty("user.dir");
@@ -169,7 +171,33 @@
* Executor subclasses need to override this, to construct their arguments for dalvikvm
* invocation correctly.
*/
- public abstract void execute(String programName);
+ protected abstract String constructCommand(String programName);
+
+ /**
+ * Executes runtime.
+ */
+ public void execute(String programName) {
+ executionResult = executeCommandWithTimeout(constructCommand(programName), true);
+ }
+
+ /**
+ * Runs bisection bug search.
+ */
+ public ExecutionResult runBisectionSearch(String programName, String expectedOutputFile, String logFile) {
+ assert(isBisectable);
+ String runtimeCommand = constructCommand(programName);
+ StringBuilder commandBuilder = new StringBuilder();
+ commandBuilder.append("bisection_search.py --raw-cmd '").append(runtimeCommand);
+ commandBuilder.append("' --expected-output=").append(expectedOutputFile);
+ commandBuilder.append(" --logfile=").append(logFile);
+ if (!device.isHost()) {
+ commandBuilder.append(" --device");
+ if (device.isUsingSpecificDevice()) {
+ commandBuilder.append(" --specific-device=").append(device.getName());
+ }
+ }
+ return device.executeCommand(commandBuilder.toString(), true, outputConsumer, errorConsumer);
+ }
/**
* Fuzzer.checkForArchitectureSplit() will use this determine the architecture of the Executor.
@@ -207,4 +235,8 @@
public void finishedWithProgramOnDevice() {
device.resetProgramPushed();
}
+
+ public boolean isBisectable() {
+ return isBisectable;
+ }
}
diff --git a/tools/dexfuzz/src/dexfuzz/executors/Mips64InterpreterExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/Mips64InterpreterExecutor.java
index f319201..eee6111 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/Mips64InterpreterExecutor.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/Mips64InterpreterExecutor.java
@@ -21,16 +21,17 @@
public class Mips64InterpreterExecutor extends Executor {
public Mips64InterpreterExecutor(BaseListener listener, Device device) {
- super("MIPS64 Interpreter", 30, listener, Architecture.MIPS64, device, false);
+ super("MIPS64 Interpreter", 30, listener, Architecture.MIPS64, device,
+ /*needsCleanCodeCache*/ false, /*isBisectable*/ false);
}
@Override
- public void execute(String programName) {
+ protected String constructCommand(String programName) {
StringBuilder commandBuilder = new StringBuilder();
commandBuilder.append("dalvikvm64 -Xint ");
commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
commandBuilder.append(executeClass);
- executionResult = executeCommandWithTimeout(commandBuilder.toString(), true);
+ return commandBuilder.toString();
}
-}
\ No newline at end of file
+}
diff --git a/tools/dexfuzz/src/dexfuzz/executors/Mips64OptimizingBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/Mips64OptimizingBackendExecutor.java
index a6784e6..72d43e7 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/Mips64OptimizingBackendExecutor.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/Mips64OptimizingBackendExecutor.java
@@ -21,15 +21,16 @@
public class Mips64OptimizingBackendExecutor extends Executor {
public Mips64OptimizingBackendExecutor(BaseListener listener, Device device) {
- super("MIPS64 Optimizing Backend", 5, listener, Architecture.MIPS64, device, true);
+ super("MIPS64 Optimizing Backend", 5, listener, Architecture.MIPS64, device,
+ /*needsCleanCodeCache*/ true, /*isBisectable*/ true);
}
@Override
- public void execute(String programName) {
+ protected String constructCommand(String programName) {
StringBuilder commandBuilder = new StringBuilder();
commandBuilder.append("dalvikvm64 -Xcompiler-option --compiler-backend=Optimizing ");
commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
commandBuilder.append(executeClass);
- executionResult = executeCommandWithTimeout(commandBuilder.toString(), true);
+ return commandBuilder.toString();
}
}
diff --git a/tools/dexfuzz/src/dexfuzz/executors/Mips64QuickBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/Mips64QuickBackendExecutor.java
index 36e39c2..e7e5ff6 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/Mips64QuickBackendExecutor.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/Mips64QuickBackendExecutor.java
@@ -21,15 +21,16 @@
public class Mips64QuickBackendExecutor extends Executor {
public Mips64QuickBackendExecutor(BaseListener listener, Device device) {
- super("MIPS64 Quick Backend", 5, listener, Architecture.MIPS64, device, true);
+ super("MIPS64 Quick Backend", 5, listener, Architecture.MIPS64, device,
+ /*needsCleanCodeCache*/ true, /*isBisectable*/ false);
}
@Override
- public void execute(String programName) {
+ protected String constructCommand(String programName) {
StringBuilder commandBuilder = new StringBuilder();
commandBuilder.append("dalvikvm64 -Xcompiler-option --compiler-backend=Quick ");
commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
commandBuilder.append(executeClass);
- executionResult = executeCommandWithTimeout(commandBuilder.toString(), true);
+ return commandBuilder.toString();
}
}
diff --git a/tools/dexfuzz/src/dexfuzz/executors/MipsInterpreterExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/MipsInterpreterExecutor.java
index 4268c76..4a403db 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/MipsInterpreterExecutor.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/MipsInterpreterExecutor.java
@@ -21,15 +21,16 @@
public class MipsInterpreterExecutor extends Executor {
public MipsInterpreterExecutor(BaseListener listener, Device device) {
- super("MIPS Interpreter", 30, listener, Architecture.MIPS, device, false);
+ super("MIPS Interpreter", 30, listener, Architecture.MIPS, device,
+ /*needsCleanCodeCache*/ false, /*isBisectable*/ false);
}
@Override
- public void execute(String programName) {
+ protected String constructCommand(String programName) {
StringBuilder commandBuilder = new StringBuilder();
commandBuilder.append("dalvikvm32 -Xint ");
commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
commandBuilder.append(executeClass);
- executionResult = executeCommandWithTimeout(commandBuilder.toString(), true);
+ return commandBuilder.toString();
}
}
diff --git a/tools/dexfuzz/src/dexfuzz/executors/MipsOptimizingBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/MipsOptimizingBackendExecutor.java
index d64b1ce..63f6858 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/MipsOptimizingBackendExecutor.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/MipsOptimizingBackendExecutor.java
@@ -21,15 +21,16 @@
public class MipsOptimizingBackendExecutor extends Executor {
public MipsOptimizingBackendExecutor(BaseListener listener, Device device) {
- super("MIPS Optimizing Backend", 5, listener, Architecture.MIPS, device, true);
+ super("MIPS Optimizing Backend", 5, listener, Architecture.MIPS, device,
+ /*needsCleanCodeCache*/ true, /*isBisectable*/ true);
}
@Override
- public void execute(String programName) {
+ protected String constructCommand(String programName) {
StringBuilder commandBuilder = new StringBuilder();
commandBuilder.append("dalvikvm32 -Xcompiler-option --compiler-backend=Optimizing ");
commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
commandBuilder.append(executeClass);
- executionResult = executeCommandWithTimeout(commandBuilder.toString(), true);
+ return commandBuilder.toString();
}
}
diff --git a/tools/dexfuzz/src/dexfuzz/executors/MipsQuickBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/MipsQuickBackendExecutor.java
index 0ea166b..b262090 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/MipsQuickBackendExecutor.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/MipsQuickBackendExecutor.java
@@ -21,15 +21,16 @@
public class MipsQuickBackendExecutor extends Executor {
public MipsQuickBackendExecutor(BaseListener listener, Device device) {
- super("MIPS Quick Backend", 5, listener, Architecture.MIPS, device, true);
+ super("MIPS Quick Backend", 5, listener, Architecture.MIPS, device,
+ /*needsCleanCodeCache*/ true, /*isBisectable*/ false);
}
@Override
- public void execute(String programName) {
+ protected String constructCommand(String programName) {
StringBuilder commandBuilder = new StringBuilder();
commandBuilder.append("dalvikvm32 -Xcompiler-option --compiler-backend=Quick ");
commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
commandBuilder.append(executeClass);
- executionResult = executeCommandWithTimeout(commandBuilder.toString(), true);
+ return commandBuilder.toString();
}
}
diff --git a/tools/dexfuzz/src/dexfuzz/executors/X86InterpreterExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/X86InterpreterExecutor.java
index 510f0d0..a8e68a7 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/X86InterpreterExecutor.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/X86InterpreterExecutor.java
@@ -22,11 +22,12 @@
public class X86InterpreterExecutor extends Executor {
public X86InterpreterExecutor(BaseListener listener, Device device) {
- super("x86 Interpreter", 30, listener, Architecture.X86, device, false);
+ super("x86 Interpreter", 30, listener, Architecture.X86, device,
+ /*needsCleanCodeCache*/ false, /*isBisectable*/ false);
}
@Override
- public void execute(String programName) {
+ protected String constructCommand(String programName) {
StringBuilder commandBuilder = new StringBuilder();
commandBuilder.append("dalvikvm32 -Xint ");
if (Options.executeOnHost) {
@@ -34,6 +35,6 @@
}
commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
commandBuilder.append(executeClass);
- executionResult = executeCommandWithTimeout(commandBuilder.toString(), true);
+ return commandBuilder.toString();
}
}
diff --git a/tools/dexfuzz/src/dexfuzz/executors/X86OptimizingBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/X86OptimizingBackendExecutor.java
index 81d7285..5908a8b 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/X86OptimizingBackendExecutor.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/X86OptimizingBackendExecutor.java
@@ -22,11 +22,12 @@
public class X86OptimizingBackendExecutor extends Executor {
public X86OptimizingBackendExecutor(BaseListener listener, Device device) {
- super("x86 Optimizing Backend", 5, listener, Architecture.X86, device, true);
+ super("x86 Optimizing Backend", 5, listener, Architecture.X86, device,
+ /*needsCleanCodeCache*/ true, /*isBisectable*/ true);
}
@Override
- public void execute(String programName) {
+ protected String constructCommand(String programName) {
StringBuilder commandBuilder = new StringBuilder();
commandBuilder.append("dalvikvm32 -Xcompiler-option --compiler-backend=Optimizing ");
if (Options.executeOnHost) {
@@ -34,6 +35,6 @@
}
commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
commandBuilder.append(executeClass);
- executionResult = executeCommandWithTimeout(commandBuilder.toString(), true);
+ return commandBuilder.toString();
}
}
diff --git a/tools/dexfuzz/src/dexfuzz/executors/X86QuickBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/X86QuickBackendExecutor.java
index 7e4a2f6..9e8039d 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/X86QuickBackendExecutor.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/X86QuickBackendExecutor.java
@@ -22,11 +22,12 @@
public class X86QuickBackendExecutor extends Executor {
public X86QuickBackendExecutor(BaseListener listener, Device device) {
- super("x86 Quick Backend", 5, listener, Architecture.X86, device, true);
+ super("x86 Quick Backend", 5, listener, Architecture.X86, device,
+ /*needsCleanCodeCache*/ true, /*isBisectable*/ false);
}
@Override
- public void execute(String programName) {
+ protected String constructCommand(String programName) {
StringBuilder commandBuilder = new StringBuilder();
commandBuilder.append("dalvikvm32 -Xcompiler-option --compiler-backend=Quick ");
if (Options.executeOnHost) {
@@ -34,6 +35,6 @@
}
commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
commandBuilder.append(executeClass);
- executionResult = executeCommandWithTimeout(commandBuilder.toString(), true);
+ return commandBuilder.toString();
}
}
diff --git a/tools/dexfuzz/src/dexfuzz/executors/X86_64InterpreterExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/X86_64InterpreterExecutor.java
index dc55a41..af00760 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/X86_64InterpreterExecutor.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/X86_64InterpreterExecutor.java
@@ -21,15 +21,16 @@
public class X86_64InterpreterExecutor extends Executor {
public X86_64InterpreterExecutor(BaseListener listener, Device device) {
- super("x86_64 Interpreter", 30, listener, Architecture.X86_64, device, false);
+ super("x86_64 Interpreter", 30, listener, Architecture.X86_64, device,
+ /*needsCleanCodeCache*/ false, /*isBisectable*/ false);
}
@Override
- public void execute(String programName) {
+ protected String constructCommand(String programName) {
StringBuilder commandBuilder = new StringBuilder();
commandBuilder.append("dalvikvm64 -Xint ");
commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
commandBuilder.append(executeClass);
- executionResult = executeCommandWithTimeout(commandBuilder.toString(), true);
+ return commandBuilder.toString();
}
}
diff --git a/tools/dexfuzz/src/dexfuzz/executors/X86_64OptimizingBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/X86_64OptimizingBackendExecutor.java
index 2a01c6c..28ff1a5 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/X86_64OptimizingBackendExecutor.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/X86_64OptimizingBackendExecutor.java
@@ -21,15 +21,16 @@
public class X86_64OptimizingBackendExecutor extends Executor {
public X86_64OptimizingBackendExecutor(BaseListener listener, Device device) {
- super("x86_64 Optimizing Backend", 5, listener, Architecture.X86_64, device, true);
+ super("x86_64 Optimizing Backend", 5, listener, Architecture.X86_64, device,
+ /*needsCleanCodeCache*/ true, /*isBisectable*/ true);
}
@Override
- public void execute(String programName) {
+ protected String constructCommand(String programName) {
StringBuilder commandBuilder = new StringBuilder();
commandBuilder.append("dalvikvm64 -Xcompiler-option --compiler-backend=Optimizing ");
commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
commandBuilder.append(executeClass);
- executionResult = executeCommandWithTimeout(commandBuilder.toString(), true);
+ return commandBuilder.toString();
}
}
diff --git a/tools/dexfuzz/src/dexfuzz/executors/X86_64QuickBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/X86_64QuickBackendExecutor.java
index 995cba2..22cafe2 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/X86_64QuickBackendExecutor.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/X86_64QuickBackendExecutor.java
@@ -21,15 +21,16 @@
public class X86_64QuickBackendExecutor extends Executor {
public X86_64QuickBackendExecutor(BaseListener listener, Device device) {
- super("x86_64 Quick Backend", 5, listener, Architecture.X86_64, device, true);
+ super("x86_64 Quick Backend", 5, listener, Architecture.X86_64, device,
+ /*needsCleanCodeCache*/ true, /*isBisectable*/ false);
}
@Override
- public void execute(String programName) {
+ protected String constructCommand(String programName) {
StringBuilder commandBuilder = new StringBuilder();
commandBuilder.append("dalvikvm64 -Xcompiler-option --compiler-backend=Quick ");
commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
commandBuilder.append(executeClass);
- executionResult = executeCommandWithTimeout(commandBuilder.toString(), true);
+ return commandBuilder.toString();
}
}
diff --git a/tools/dexfuzz/src/dexfuzz/listeners/BisectionSearchListener.java b/tools/dexfuzz/src/dexfuzz/listeners/BisectionSearchListener.java
new file mode 100644
index 0000000..dfd9637
--- /dev/null
+++ b/tools/dexfuzz/src/dexfuzz/listeners/BisectionSearchListener.java
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package dexfuzz.listeners;
+
+import dexfuzz.ExecutionResult;
+import dexfuzz.executors.Executor;
+import dexfuzz.Log;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Runs bisection search for divergent programs.
+ */
+public class BisectionSearchListener extends BaseListener {
+
+ /**
+ * Used to remember the seed used to fuzz the fuzzed file, so we can save it with this
+ * seed as a name, if we find a divergence.
+ */
+ private long currentSeed;
+
+ /**
+ * Used to remember the name of the file we've fuzzed, so we can save it if we
+ * find a divergence.
+ */
+ private String fuzzedFile;
+
+ @Override
+ public void handleSeed(long seed) {
+ currentSeed = seed;
+ }
+
+ @Override
+ public void handleSuccessfullyFuzzedFile(String programName) {
+ fuzzedFile = programName;
+ }
+
+  private void writeToFile(String file, String toWrite) throws IOException {
+    try (PrintWriter writer = new PrintWriter(file)) {
+      writer.write(toWrite);
+    }
+  }
+
+ private String extractExpectedOutput(ExecutionResult result) {
+ StringBuilder builder = new StringBuilder();
+ // Skip last, artificial output line with return code.
+ for (int i = 0; i < result.output.size() - 1; i++) {
+ builder.append(result.output.get(i)).append("\n");
+ }
+ return builder.toString();
+ }
+
+ @Override
+ public void handleDivergences(Map<String, List<Executor>> outputMap) {
+ if (outputMap.size() != 2) {
+ // It's unclear which output should be considered reference output.
+ return;
+ }
+ try {
+      File expectedOutputFile = File.createTempFile("expected_output", ".txt");
+      String outputFile = String.format("bisection_outputs/%d_out.txt", currentSeed);
+      String logFile = String.format("bisection_outputs/%d_log.txt", currentSeed);
+      List<List<Executor>> executorsGroupedByOutput =
+          new ArrayList<List<Executor>>(outputMap.values());
+      List<String> outputs = new ArrayList<String>();
+      for (List<Executor> executors : executorsGroupedByOutput) {
+        outputs.add(extractExpectedOutput(executors.get(0).getResult()));
+      }
+      for (int i = 0; i < 2; i++) {
+        String output = outputs.get(i);
+        String otherOutput = outputs.get(1 - i);
+        List<Executor> executors = executorsGroupedByOutput.get(i);
+        for (Executor executor : executors) {
+          if (executor.isBisectable()) {
+            writeToFile(expectedOutputFile.getAbsolutePath(), otherOutput);
+            ExecutionResult result = executor.runBisectionSearch(fuzzedFile,
+                expectedOutputFile.getAbsolutePath(), logFile);
+            writeToFile(outputFile, result.getFlattenedAllWithNewlines());
+          }
+        }
+      }
+      expectedOutputFile.delete();
+ } catch (IOException e) {
+ Log.error(
+ "BisectionSearchListener.handleDivergences() caught an IOException " + e.toString());
+ }
+ }
+
+}