Replace String CharArray with internal uint16_t array.
Summary of high level changes:
- Adds compiler inliner support to identify string init methods
- Adds compiler support (quick & optimizing) with new invoke code path
that calls method off the thread pointer
- Adds thread entrypoints for all string init methods
- Adds a map to the verifier to log when the receiver of a string init has
been copied to other registers. Used by the compiler and interpreter.
Change-Id: I797b992a8feb566f9ad73060011ab6f51eb7ce01
diff --git a/compiler/dex/bb_optimizations.h b/compiler/dex/bb_optimizations.h
index b948afd..72919b6 100644
--- a/compiler/dex/bb_optimizations.h
+++ b/compiler/dex/bb_optimizations.h
@@ -26,6 +26,30 @@
namespace art {
/**
+ * @class StringChange
+ * @brief Converts calls to String.<init> into StringFactory calls.
+ */
+class StringChange : public PassME {
+ public:
+ StringChange() : PassME("StringChange", kNoNodes) {
+ }
+
+ void Start(PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ c_unit->mir_graph->StringChange();
+ }
+
+ bool Gate(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ return c_unit->mir_graph->HasInvokes();
+ }
+};
+
+/**
* @class CacheFieldLoweringInfo
* @brief Cache the lowering info for fields used by IGET/IPUT/SGET/SPUT insns.
*/
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 7bfbb34..7385a8b 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -519,6 +519,7 @@
bool is_range;
DexOffset offset; // Offset in code units.
MIR* mir;
+ int32_t string_init_offset;
};
@@ -723,6 +724,8 @@
void BasicBlockOptimization();
void BasicBlockOptimizationEnd();
+ void StringChange();
+
const ArenaVector<BasicBlockId>& GetTopologicalSortOrder() {
DCHECK(!topological_order_.empty());
return topological_order_;
diff --git a/compiler/dex/mir_method_info.cc b/compiler/dex/mir_method_info.cc
index 0c84b82..5654604 100644
--- a/compiler/dex/mir_method_info.cc
+++ b/compiler/dex/mir_method_info.cc
@@ -16,6 +16,7 @@
# include "mir_method_info.h"
+#include "dex/compiler_ir.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex/verified_method.h"
@@ -83,6 +84,13 @@
MethodReference* devirt_target = (it->target_dex_file_ != nullptr) ? &devirt_ref : nullptr;
InvokeType invoke_type = it->GetInvokeType();
mirror::ArtMethod* resolved_method = nullptr;
+
+ bool string_init = false;
+ if (default_inliner->IsStringInitMethodIndex(it->MethodIndex())) {
+ string_init = true;
+ invoke_type = kDirect;
+ }
+
if (!it->IsQuickened()) {
it->target_dex_file_ = dex_file;
it->target_method_idx_ = it->MethodIndex();
@@ -170,6 +178,9 @@
it->target_dex_file_ = target_method.dex_file;
it->target_method_idx_ = target_method.dex_method_index;
it->stats_flags_ = fast_path_flags;
+ if (string_init) {
+ it->direct_code_ = 0;
+ }
}
}
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 3482602..25c159f 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -18,6 +18,7 @@
#include "base/logging.h"
#include "base/scoped_arena_containers.h"
#include "dataflow_iterator-inl.h"
+#include "dex/verified_method.h"
#include "dex_flags.h"
#include "driver/compiler_driver.h"
#include "driver/dex_compilation_unit.h"
@@ -25,10 +26,11 @@
#include "gvn_dead_code_elimination.h"
#include "local_value_numbering.h"
#include "mir_field_info.h"
-#include "type_inference.h"
+#include "mirror/string.h"
#include "quick/dex_file_method_inliner.h"
#include "quick/dex_file_to_method_inliner_map.h"
#include "stack.h"
+#include "type_inference.h"
namespace art {
@@ -1660,6 +1662,77 @@
temp_scoped_alloc_.reset();
}
+void MIRGraph::StringChange() {
+ AllNodesIterator iter(this);
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+ // Look for new instance opcodes, skip otherwise
+ Instruction::Code opcode = mir->dalvikInsn.opcode;
+ if (opcode == Instruction::NEW_INSTANCE) {
+ uint32_t type_idx = mir->dalvikInsn.vB;
+ if (cu_->compiler_driver->IsStringTypeIndex(type_idx, cu_->dex_file)) {
+ // Change NEW_INSTANCE and throwing half of the insn (if it exists) into CONST_4 of 0
+ mir->dalvikInsn.opcode = Instruction::CONST_4;
+ mir->dalvikInsn.vB = 0;
+ MIR* check_mir = GetBasicBlock(bb->predecessors[0])->last_mir_insn;
+ if (check_mir != nullptr &&
+ static_cast<int>(check_mir->dalvikInsn.opcode) == kMirOpCheck) {
+ check_mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
+ check_mir->dalvikInsn.vB = 0;
+ }
+ }
+ } else if ((opcode == Instruction::INVOKE_DIRECT) ||
+ (opcode == Instruction::INVOKE_DIRECT_RANGE)) {
+ uint32_t method_idx = mir->dalvikInsn.vB;
+ DexFileMethodInliner* inliner =
+ cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file);
+ if (inliner->IsStringInitMethodIndex(method_idx)) {
+ bool is_range = (opcode == Instruction::INVOKE_DIRECT_RANGE);
+ uint32_t orig_this_reg = is_range ? mir->dalvikInsn.vC : mir->dalvikInsn.arg[0];
+ // Remove this pointer from string init and change to static call.
+ mir->dalvikInsn.vA--;
+ if (!is_range) {
+ mir->dalvikInsn.opcode = Instruction::INVOKE_STATIC;
+ for (uint32_t i = 0; i < mir->dalvikInsn.vA; i++) {
+ mir->dalvikInsn.arg[i] = mir->dalvikInsn.arg[i + 1];
+ }
+ } else {
+ mir->dalvikInsn.opcode = Instruction::INVOKE_STATIC_RANGE;
+ mir->dalvikInsn.vC++;
+ }
+ // Insert a move-result instruction to the original this pointer reg.
+ MIR* move_result_mir = static_cast<MIR *>(arena_->Alloc(sizeof(MIR), kArenaAllocMIR));
+ move_result_mir->dalvikInsn.opcode = Instruction::MOVE_RESULT_OBJECT;
+ move_result_mir->dalvikInsn.vA = orig_this_reg;
+ move_result_mir->offset = mir->offset;
+ move_result_mir->m_unit_index = mir->m_unit_index;
+ bb->InsertMIRAfter(mir, move_result_mir);
+ // Add additional moves if this pointer was copied to other registers.
+ const VerifiedMethod* verified_method =
+ cu_->compiler_driver->GetVerifiedMethod(cu_->dex_file, cu_->method_idx);
+ DCHECK(verified_method != nullptr);
+ const SafeMap<uint32_t, std::set<uint32_t>>& string_init_map =
+ verified_method->GetStringInitPcRegMap();
+ auto map_it = string_init_map.find(mir->offset);
+ if (map_it != string_init_map.end()) {
+ const std::set<uint32_t>& reg_set = map_it->second;
+ for (auto set_it = reg_set.begin(); set_it != reg_set.end(); ++set_it) {
+ MIR* move_mir = static_cast<MIR *>(arena_->Alloc(sizeof(MIR), kArenaAllocMIR));
+ move_mir->dalvikInsn.opcode = Instruction::MOVE_OBJECT;
+ move_mir->dalvikInsn.vA = *set_it;
+ move_mir->dalvikInsn.vB = orig_this_reg;
+ move_mir->offset = mir->offset;
+ move_mir->m_unit_index = mir->m_unit_index;
+ bb->InsertMIRAfter(move_result_mir, move_mir);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+
bool MIRGraph::EliminateSuspendChecksGate() {
if ((cu_->disable_opt & (1 << kSuspendCheckElimination)) != 0 || // Disabled.
GetMaxNestedLoops() == 0u || // Nothing to do.
diff --git a/compiler/dex/pass_driver_me_opts.cc b/compiler/dex/pass_driver_me_opts.cc
index 3e193b4..375003b 100644
--- a/compiler/dex/pass_driver_me_opts.cc
+++ b/compiler/dex/pass_driver_me_opts.cc
@@ -35,6 +35,7 @@
* Disadvantage is the passes can't change their internal states depending on CompilationUnit:
* - This is not yet an issue: no current pass would require it.
*/
+ pass_manager->AddPass(new StringChange);
pass_manager->AddPass(new CacheFieldLoweringInfo);
pass_manager->AddPass(new CacheMethodLoweringInfo);
pass_manager->AddPass(new CalculatePredecessors);
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 6ba4016..2b2d6af 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -21,6 +21,7 @@
#include "arm_lir.h"
#include "base/logging.h"
#include "dex/mir_graph.h"
+#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
@@ -619,13 +620,31 @@
* Bit of a hack here - in the absence of a real scheduling pass,
* emit the next instruction in static & direct invoke sequences.
*/
-int ArmMir2Lir::ArmNextSDCallInsn(CompilationUnit* cu, CallInfo* info ATTRIBUTE_UNUSED,
+int ArmMir2Lir::ArmNextSDCallInsn(CompilationUnit* cu, CallInfo* info,
int state, const MethodReference& target_method,
uint32_t unused_idx ATTRIBUTE_UNUSED,
uintptr_t direct_code, uintptr_t direct_method,
InvokeType type) {
ArmMir2Lir* cg = static_cast<ArmMir2Lir*>(cu->cg.get());
- if (direct_code != 0 && direct_method != 0) {
+ if (info->string_init_offset != 0) {
+ RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
+ switch (state) {
+ case 0: { // Grab target method* from thread pointer
+ cg->LoadRefDisp(rs_rARM_SELF, info->string_init_offset, arg0_ref, kNotVolatile);
+ break;
+ }
+ case 1: // Grab the code from the method*
+ if (direct_code == 0) {
+ // kInvokeTgt := arg0_ref->entrypoint
+ cg->LoadWordDisp(arg0_ref,
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArmPointerSize).Int32Value(), cg->TargetPtrReg(kInvokeTgt));
+ }
+ break;
+ default:
+ return -1;
+ }
+ } else if (direct_code != 0 && direct_method != 0) {
switch (state) {
case 0: // Get the current Method* [sets kArg0]
if (direct_code != static_cast<uintptr_t>(-1)) {
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index 9a7c2ad..e49e40d 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -21,6 +21,7 @@
#include "arm64_lir.h"
#include "base/logging.h"
#include "dex/mir_graph.h"
+#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
@@ -460,7 +461,25 @@
InvokeType type) {
UNUSED(info, unused_idx);
Arm64Mir2Lir* cg = static_cast<Arm64Mir2Lir*>(cu->cg.get());
- if (direct_code != 0 && direct_method != 0) {
+ if (info->string_init_offset != 0) {
+ RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
+ switch (state) {
+ case 0: { // Grab target method* from thread pointer
+ cg->LoadRefDisp(rs_xSELF, info->string_init_offset, arg0_ref, kNotVolatile);
+ break;
+ }
+ case 1: // Grab the code from the method*
+ if (direct_code == 0) {
+ // kInvokeTgt := arg0_ref->entrypoint
+ cg->LoadWordDisp(arg0_ref,
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArm64PointerSize).Int32Value(), cg->TargetPtrReg(kInvokeTgt));
+ }
+ break;
+ default:
+ return -1;
+ }
+ } else if (direct_code != 0 && direct_method != 0) {
switch (state) {
case 0: // Get the current Method* [sets kArg0]
if (direct_code != static_cast<uintptr_t>(-1)) {
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index f5e6c09..2568ee3 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -55,8 +55,12 @@
false, // kIntrinsicReferenceGetReferent
false, // kIntrinsicCharAt
false, // kIntrinsicCompareTo
+ false, // kIntrinsicGetCharsNoCheck
false, // kIntrinsicIsEmptyOrLength
false, // kIntrinsicIndexOf
+ true, // kIntrinsicNewStringFromBytes
+ true, // kIntrinsicNewStringFromChars
+ true, // kIntrinsicNewStringFromString
true, // kIntrinsicCurrentThread
true, // kIntrinsicPeek
true, // kIntrinsicPoke
@@ -88,8 +92,15 @@
static_assert(!kIntrinsicIsStatic[kIntrinsicReferenceGetReferent], "Get must not be static");
static_assert(!kIntrinsicIsStatic[kIntrinsicCharAt], "CharAt must not be static");
static_assert(!kIntrinsicIsStatic[kIntrinsicCompareTo], "CompareTo must not be static");
+static_assert(!kIntrinsicIsStatic[kIntrinsicGetCharsNoCheck], "GetCharsNoCheck must not be static");
static_assert(!kIntrinsicIsStatic[kIntrinsicIsEmptyOrLength], "IsEmptyOrLength must not be static");
static_assert(!kIntrinsicIsStatic[kIntrinsicIndexOf], "IndexOf must not be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicNewStringFromBytes],
+ "NewStringFromBytes must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicNewStringFromChars],
+ "NewStringFromChars must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicNewStringFromString],
+ "NewStringFromString must be static");
static_assert(kIntrinsicIsStatic[kIntrinsicCurrentThread], "CurrentThread must be static");
static_assert(kIntrinsicIsStatic[kIntrinsicPeek], "Peek must be static");
static_assert(kIntrinsicIsStatic[kIntrinsicPoke], "Poke must be static");
@@ -137,9 +148,15 @@
"F", // kClassCacheFloat
"D", // kClassCacheDouble
"V", // kClassCacheVoid
+ "[B", // kClassCacheJavaLangByteArray
+ "[C", // kClassCacheJavaLangCharArray
+ "[I", // kClassCacheJavaLangIntArray
"Ljava/lang/Object;", // kClassCacheJavaLangObject
- "Ljava/lang/ref/Reference;", // kClassCacheJavaLangRefReference
+ "Ljava/lang/ref/Reference;", // kClassCacheJavaLangRefReference
"Ljava/lang/String;", // kClassCacheJavaLangString
+ "Ljava/lang/StringBuffer;", // kClassCacheJavaLangStringBuffer
+ "Ljava/lang/StringBuilder;", // kClassCacheJavaLangStringBuilder
+ "Ljava/lang/StringFactory;", // kClassCacheJavaLangStringFactory
"Ljava/lang/Double;", // kClassCacheJavaLangDouble
"Ljava/lang/Float;", // kClassCacheJavaLangFloat
"Ljava/lang/Integer;", // kClassCacheJavaLangInteger
@@ -148,10 +165,10 @@
"Ljava/lang/Math;", // kClassCacheJavaLangMath
"Ljava/lang/StrictMath;", // kClassCacheJavaLangStrictMath
"Ljava/lang/Thread;", // kClassCacheJavaLangThread
+ "Ljava/nio/charset/Charset;", // kClassCacheJavaNioCharsetCharset
"Llibcore/io/Memory;", // kClassCacheLibcoreIoMemory
"Lsun/misc/Unsafe;", // kClassCacheSunMiscUnsafe
"Ljava/lang/System;", // kClassCacheJavaLangSystem
- "[C" // kClassCacheJavaLangCharArray
};
const char* const DexFileMethodInliner::kNameCacheNames[] = {
@@ -172,9 +189,14 @@
"getReferent", // kNameCacheReferenceGet
"charAt", // kNameCacheCharAt
"compareTo", // kNameCacheCompareTo
+ "getCharsNoCheck", // kNameCacheGetCharsNoCheck
"isEmpty", // kNameCacheIsEmpty
"indexOf", // kNameCacheIndexOf
"length", // kNameCacheLength
+ "<init>", // kNameCacheInit
+ "newStringFromBytes", // kNameCacheNewStringFromBytes
+ "newStringFromChars", // kNameCacheNewStringFromChars
+ "newStringFromString", // kNameCacheNewStringFromString
"currentThread", // kNameCacheCurrentThread
"peekByte", // kNameCachePeekByte
"peekIntNative", // kNameCachePeekIntNative
@@ -282,7 +304,53 @@
kClassCacheJavaLangObject } },
// kProtoCacheCharArrayICharArrayII_V
{ kClassCacheVoid, 5, {kClassCacheJavaLangCharArray, kClassCacheInt,
- kClassCacheJavaLangCharArray, kClassCacheInt, kClassCacheInt}}
+ kClassCacheJavaLangCharArray, kClassCacheInt, kClassCacheInt} },
+ // kProtoCacheIICharArrayI_V
+ { kClassCacheVoid, 4, { kClassCacheInt, kClassCacheInt, kClassCacheJavaLangCharArray,
+ kClassCacheInt } },
+ // kProtoCacheByteArrayIII_String
+ { kClassCacheJavaLangString, 4, { kClassCacheJavaLangByteArray, kClassCacheInt, kClassCacheInt,
+ kClassCacheInt } },
+ // kProtoCacheIICharArray_String
+ { kClassCacheJavaLangString, 3, { kClassCacheInt, kClassCacheInt,
+ kClassCacheJavaLangCharArray } },
+ // kProtoCacheString_String
+ { kClassCacheJavaLangString, 1, { kClassCacheJavaLangString } },
+ // kProtoCache_V
+ { kClassCacheVoid, 0, { } },
+ // kProtoCacheByteArray_V
+ { kClassCacheVoid, 1, { kClassCacheJavaLangByteArray } },
+ // kProtoCacheByteArrayI_V
+ { kClassCacheVoid, 2, { kClassCacheJavaLangByteArray, kClassCacheInt } },
+ // kProtoCacheByteArrayII_V
+ { kClassCacheVoid, 3, { kClassCacheJavaLangByteArray, kClassCacheInt, kClassCacheInt } },
+ // kProtoCacheByteArrayIII_V
+ { kClassCacheVoid, 4, { kClassCacheJavaLangByteArray, kClassCacheInt, kClassCacheInt,
+ kClassCacheInt } },
+ // kProtoCacheByteArrayIIString_V
+ { kClassCacheVoid, 4, { kClassCacheJavaLangByteArray, kClassCacheInt, kClassCacheInt,
+ kClassCacheJavaLangString } },
+ // kProtoCacheByteArrayString_V
+ { kClassCacheVoid, 2, { kClassCacheJavaLangByteArray, kClassCacheJavaLangString } },
+ // kProtoCacheByteArrayIICharset_V
+ { kClassCacheVoid, 4, { kClassCacheJavaLangByteArray, kClassCacheInt, kClassCacheInt,
+ kClassCacheJavaNioCharsetCharset } },
+ // kProtoCacheByteArrayCharset_V
+ { kClassCacheVoid, 2, { kClassCacheJavaLangByteArray, kClassCacheJavaNioCharsetCharset } },
+ // kProtoCacheCharArray_V
+ { kClassCacheVoid, 1, { kClassCacheJavaLangCharArray } },
+ // kProtoCacheCharArrayII_V
+ { kClassCacheVoid, 3, { kClassCacheJavaLangCharArray, kClassCacheInt, kClassCacheInt } },
+ // kProtoCacheIICharArray_V
+ { kClassCacheVoid, 3, { kClassCacheInt, kClassCacheInt, kClassCacheJavaLangCharArray } },
+ // kProtoCacheIntArrayII_V
+ { kClassCacheVoid, 3, { kClassCacheJavaLangIntArray, kClassCacheInt, kClassCacheInt } },
+ // kProtoCacheString_V
+ { kClassCacheVoid, 1, { kClassCacheJavaLangString } },
+ // kProtoCacheStringBuffer_V
+ { kClassCacheVoid, 1, { kClassCacheJavaLangStringBuffer } },
+ // kProtoCacheStringBuilder_V
+ { kClassCacheVoid, 1, { kClassCacheJavaLangStringBuilder } },
};
const DexFileMethodInliner::IntrinsicDef DexFileMethodInliner::kIntrinsicMethods[] = {
@@ -343,6 +411,7 @@
INTRINSIC(JavaLangString, CharAt, I_C, kIntrinsicCharAt, 0),
INTRINSIC(JavaLangString, CompareTo, String_I, kIntrinsicCompareTo, 0),
+ INTRINSIC(JavaLangString, GetCharsNoCheck, IICharArrayI_V, kIntrinsicGetCharsNoCheck, 0),
INTRINSIC(JavaLangString, IsEmpty, _Z, kIntrinsicIsEmptyOrLength, kIntrinsicFlagIsEmpty),
INTRINSIC(JavaLangString, IndexOf, II_I, kIntrinsicIndexOf, kIntrinsicFlagNone),
INTRINSIC(JavaLangString, IndexOf, I_I, kIntrinsicIndexOf, kIntrinsicFlagBase0),
@@ -386,8 +455,29 @@
INTRINSIC(JavaLangSystem, ArrayCopy, CharArrayICharArrayII_V , kIntrinsicSystemArrayCopyCharArray,
0),
-
#undef INTRINSIC
+
+#define SPECIAL(c, n, p, o, d) \
+ { { kClassCache ## c, kNameCache ## n, kProtoCache ## p }, { o, kInlineSpecial, { d } } }
+
+ SPECIAL(JavaLangString, Init, _V, kInlineStringInit, 0),
+ SPECIAL(JavaLangString, Init, ByteArray_V, kInlineStringInit, 1),
+ SPECIAL(JavaLangString, Init, ByteArrayI_V, kInlineStringInit, 2),
+ SPECIAL(JavaLangString, Init, ByteArrayII_V, kInlineStringInit, 3),
+ SPECIAL(JavaLangString, Init, ByteArrayIII_V, kInlineStringInit, 4),
+ SPECIAL(JavaLangString, Init, ByteArrayIIString_V, kInlineStringInit, 5),
+ SPECIAL(JavaLangString, Init, ByteArrayString_V, kInlineStringInit, 6),
+ SPECIAL(JavaLangString, Init, ByteArrayIICharset_V, kInlineStringInit, 7),
+ SPECIAL(JavaLangString, Init, ByteArrayCharset_V, kInlineStringInit, 8),
+ SPECIAL(JavaLangString, Init, CharArray_V, kInlineStringInit, 9),
+ SPECIAL(JavaLangString, Init, CharArrayII_V, kInlineStringInit, 10),
+ SPECIAL(JavaLangString, Init, IICharArray_V, kInlineStringInit, 11),
+ SPECIAL(JavaLangString, Init, IntArrayII_V, kInlineStringInit, 12),
+ SPECIAL(JavaLangString, Init, String_V, kInlineStringInit, 13),
+ SPECIAL(JavaLangString, Init, StringBuffer_V, kInlineStringInit, 14),
+ SPECIAL(JavaLangString, Init, StringBuilder_V, kInlineStringInit, 15),
+
+#undef SPECIAL
};
DexFileMethodInliner::DexFileMethodInliner()
@@ -491,11 +581,19 @@
return backend->GenInlinedCharAt(info);
case kIntrinsicCompareTo:
return backend->GenInlinedStringCompareTo(info);
+ case kIntrinsicGetCharsNoCheck:
+ return backend->GenInlinedStringGetCharsNoCheck(info);
case kIntrinsicIsEmptyOrLength:
return backend->GenInlinedStringIsEmptyOrLength(
info, intrinsic.d.data & kIntrinsicFlagIsEmpty);
case kIntrinsicIndexOf:
return backend->GenInlinedIndexOf(info, intrinsic.d.data & kIntrinsicFlagBase0);
+ case kIntrinsicNewStringFromBytes:
+ return backend->GenInlinedStringFactoryNewStringFromBytes(info);
+ case kIntrinsicNewStringFromChars:
+ return backend->GenInlinedStringFactoryNewStringFromChars(info);
+ case kIntrinsicNewStringFromString:
+ return backend->GenInlinedStringFactoryNewStringFromString(info);
case kIntrinsicCurrentThread:
return backend->GenInlinedCurrentThread(info);
case kIntrinsicPeek:
@@ -574,6 +672,8 @@
move_result = mir_graph->FindMoveResult(bb, invoke);
result = GenInlineIPut(mir_graph, bb, invoke, move_result, method);
break;
+ case kInlineStringInit:
+ return false;
default:
LOG(FATAL) << "Unexpected inline op: " << method.opcode;
break;
@@ -921,4 +1021,21 @@
return true;
}
+uint32_t DexFileMethodInliner::GetOffsetForStringInit(uint32_t method_index, size_t pointer_size) {
+ ReaderMutexLock mu(Thread::Current(), lock_);
+ auto it = inline_methods_.find(method_index);
+ if (it != inline_methods_.end() && (it->second.opcode == kInlineStringInit)) {
+ uint32_t string_init_base_offset = Thread::QuickEntryPointOffsetWithSize(
+ OFFSETOF_MEMBER(QuickEntryPoints, pNewEmptyString), pointer_size);
+ return string_init_base_offset + it->second.d.data * pointer_size;
+ }
+ return 0;
+}
+
+bool DexFileMethodInliner::IsStringInitMethodIndex(uint32_t method_index) {
+ ReaderMutexLock mu(Thread::Current(), lock_);
+ auto it = inline_methods_.find(method_index);
+ return (it != inline_methods_.end()) && (it->second.opcode == kInlineStringInit);
+}
+
} // namespace art
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index d1e5621..26b41bf 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -96,6 +96,17 @@
LOCKS_EXCLUDED(lock_);
/**
+ * Gets the thread pointer entrypoint offset for a string init method index and pointer size.
+ */
+ uint32_t GetOffsetForStringInit(uint32_t method_index, size_t pointer_size)
+ LOCKS_EXCLUDED(lock_);
+
+ /**
+ * Check whether a particular method index is a string init.
+ */
+ bool IsStringInitMethodIndex(uint32_t method_index) LOCKS_EXCLUDED(lock_);
+
+ /**
* To avoid multiple lookups of a class by its descriptor, we cache its
* type index in the IndexCache. These are the indexes into the IndexCache
* class_indexes array.
@@ -111,9 +122,15 @@
kClassCacheFloat,
kClassCacheDouble,
kClassCacheVoid,
+ kClassCacheJavaLangByteArray,
+ kClassCacheJavaLangCharArray,
+ kClassCacheJavaLangIntArray,
kClassCacheJavaLangObject,
kClassCacheJavaLangRefReference,
kClassCacheJavaLangString,
+ kClassCacheJavaLangStringBuffer,
+ kClassCacheJavaLangStringBuilder,
+ kClassCacheJavaLangStringFactory,
kClassCacheJavaLangDouble,
kClassCacheJavaLangFloat,
kClassCacheJavaLangInteger,
@@ -122,10 +139,10 @@
kClassCacheJavaLangMath,
kClassCacheJavaLangStrictMath,
kClassCacheJavaLangThread,
+ kClassCacheJavaNioCharsetCharset,
kClassCacheLibcoreIoMemory,
kClassCacheSunMiscUnsafe,
kClassCacheJavaLangSystem,
- kClassCacheJavaLangCharArray,
kClassCacheLast
};
@@ -153,9 +170,14 @@
kNameCacheReferenceGetReferent,
kNameCacheCharAt,
kNameCacheCompareTo,
+ kNameCacheGetCharsNoCheck,
kNameCacheIsEmpty,
kNameCacheIndexOf,
kNameCacheLength,
+ kNameCacheInit,
+ kNameCacheNewStringFromBytes,
+ kNameCacheNewStringFromChars,
+ kNameCacheNewStringFromString,
kNameCacheCurrentThread,
kNameCachePeekByte,
kNameCachePeekIntNative,
@@ -230,6 +252,26 @@
kProtoCacheObjectJ_Object,
kProtoCacheObjectJObject_V,
kProtoCacheCharArrayICharArrayII_V,
+ kProtoCacheIICharArrayI_V,
+ kProtoCacheByteArrayIII_String,
+ kProtoCacheIICharArray_String,
+ kProtoCacheString_String,
+ kProtoCache_V,
+ kProtoCacheByteArray_V,
+ kProtoCacheByteArrayI_V,
+ kProtoCacheByteArrayII_V,
+ kProtoCacheByteArrayIII_V,
+ kProtoCacheByteArrayIIString_V,
+ kProtoCacheByteArrayString_V,
+ kProtoCacheByteArrayIICharset_V,
+ kProtoCacheByteArrayCharset_V,
+ kProtoCacheCharArray_V,
+ kProtoCacheCharArrayII_V,
+ kProtoCacheIICharArray_V,
+ kProtoCacheIntArrayII_V,
+ kProtoCacheString_V,
+ kProtoCacheStringBuffer_V,
+ kProtoCacheStringBuilder_V,
kProtoCacheLast
};
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 1eb3a5f..ab011fc 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -375,6 +375,18 @@
CallHelper(r_tgt, trampoline, safepoint_pc);
}
+void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocationRegLocation(
+ QuickEntrypointEnum trampoline, RegLocation arg0, RegLocation arg1, RegLocation arg2,
+ RegLocation arg3, bool safepoint_pc) {
+ RegStorage r_tgt = CallHelperSetup(trampoline);
+ LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0));
+ LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
+ LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
+ LoadValueDirectFixed(arg3, TargetReg(kArg3, arg3));
+ ClobberCallerSave();
+ CallHelper(r_tgt, trampoline, safepoint_pc);
+}
+
/*
* If there are any ins passed in registers that have not been promoted
* to a callee-save register, flush them to the frame. Perform initial
@@ -966,14 +978,10 @@
}
bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
- // Location of reference to data array
+ // Location of char array data
int value_offset = mirror::String::ValueOffset().Int32Value();
// Location of count
int count_offset = mirror::String::CountOffset().Int32Value();
- // Starting offset within data array
- int offset_offset = mirror::String::OffsetOffset().Int32Value();
- // Start of char data with array_
- int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
RegLocation rl_obj = info->args[0];
RegLocation rl_idx = info->args[1];
@@ -983,38 +991,21 @@
GenNullCheck(rl_obj.reg, info->opt_flags);
bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
LIR* range_check_branch = nullptr;
- RegStorage reg_off;
- RegStorage reg_ptr;
- reg_off = AllocTemp();
- reg_ptr = AllocTempRef();
if (range_check) {
reg_max = AllocTemp();
Load32Disp(rl_obj.reg, count_offset, reg_max);
MarkPossibleNullPointerException(info->opt_flags);
- }
- Load32Disp(rl_obj.reg, offset_offset, reg_off);
- MarkPossibleNullPointerException(info->opt_flags);
- LoadRefDisp(rl_obj.reg, value_offset, reg_ptr, kNotVolatile);
- if (range_check) {
- // Set up a slow path to allow retry in case of bounds violation */
+ // Set up a slow path to allow retry in case of bounds violation
OpRegReg(kOpCmp, rl_idx.reg, reg_max);
FreeTemp(reg_max);
range_check_branch = OpCondBranch(kCondUge, nullptr);
}
- OpRegImm(kOpAdd, reg_ptr, data_offset);
- if (rl_idx.is_const) {
- OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg));
- } else {
- OpRegReg(kOpAdd, reg_off, rl_idx.reg);
- }
+ RegStorage reg_ptr = AllocTempRef();
+ OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg, value_offset);
FreeTemp(rl_obj.reg);
- if (rl_idx.location == kLocPhysReg) {
- FreeTemp(rl_idx.reg);
- }
RegLocation rl_dest = InlineTarget(info);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadBaseIndexed(reg_ptr, reg_off, rl_result.reg, 1, kUnsignedHalf);
- FreeTemp(reg_off);
+ LoadBaseIndexed(reg_ptr, rl_idx.reg, rl_result.reg, 1, kUnsignedHalf);
FreeTemp(reg_ptr);
StoreValue(rl_dest, rl_result);
if (range_check) {
@@ -1025,6 +1016,59 @@
return true;
}
+bool Mir2Lir::GenInlinedStringGetCharsNoCheck(CallInfo* info) {
+ if (cu_->instruction_set == kMips) {
+ // TODO - add Mips implementation
+ return false;
+ }
+ size_t char_component_size = Primitive::ComponentSize(Primitive::kPrimChar);
+ // Location of data in char array buffer
+ int data_offset = mirror::Array::DataOffset(char_component_size).Int32Value();
+ // Location of char array data in string
+ int value_offset = mirror::String::ValueOffset().Int32Value();
+
+ RegLocation rl_obj = info->args[0];
+ RegLocation rl_start = info->args[1];
+ RegLocation rl_end = info->args[2];
+ RegLocation rl_buffer = info->args[3];
+ RegLocation rl_index = info->args[4];
+
+ ClobberCallerSave();
+ LockCallTemps(); // Using fixed registers
+ RegStorage reg_dst_ptr = TargetReg(kArg0, kRef);
+ RegStorage reg_src_ptr = TargetReg(kArg1, kRef);
+ RegStorage reg_length = TargetReg(kArg2, kNotWide);
+ RegStorage reg_tmp = TargetReg(kArg3, kNotWide);
+ RegStorage reg_tmp_ptr = RegStorage(RegStorage::k64BitSolo, reg_tmp.GetRawBits() & RegStorage::kRegTypeMask);
+
+ LoadValueDirectFixed(rl_buffer, reg_dst_ptr);
+ OpRegImm(kOpAdd, reg_dst_ptr, data_offset);
+ LoadValueDirectFixed(rl_index, reg_tmp);
+ OpRegRegImm(kOpLsl, reg_tmp, reg_tmp, 1);
+ OpRegReg(kOpAdd, reg_dst_ptr, cu_->instruction_set == kArm64 ? reg_tmp_ptr : reg_tmp);
+
+ LoadValueDirectFixed(rl_start, reg_tmp);
+ LoadValueDirectFixed(rl_end, reg_length);
+ OpRegReg(kOpSub, reg_length, reg_tmp);
+ OpRegRegImm(kOpLsl, reg_length, reg_length, 1);
+ LoadValueDirectFixed(rl_obj, reg_src_ptr);
+
+ OpRegImm(kOpAdd, reg_src_ptr, value_offset);
+ OpRegRegImm(kOpLsl, reg_tmp, reg_tmp, 1);
+ OpRegReg(kOpAdd, reg_src_ptr, cu_->instruction_set == kArm64 ? reg_tmp_ptr : reg_tmp);
+
+ RegStorage r_tgt;
+ if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
+ r_tgt = LoadHelper(kQuickMemcpy);
+ } else {
+ r_tgt = RegStorage::InvalidReg();
+ }
+ // NOTE: not a safepoint
+ CallHelper(r_tgt, kQuickMemcpy, false, true);
+
+ return true;
+}
+
// Generates an inlined String.is_empty or String.length.
bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
@@ -1058,6 +1102,58 @@
return true;
}
+bool Mir2Lir::GenInlinedStringFactoryNewStringFromBytes(CallInfo* info) {
+ if (cu_->instruction_set == kMips) {
+ // TODO - add Mips implementation
+ return false;
+ }
+ RegLocation rl_data = info->args[0];
+ RegLocation rl_high = info->args[1];
+ RegLocation rl_offset = info->args[2];
+ RegLocation rl_count = info->args[3];
+ rl_data = LoadValue(rl_data, kRefReg);
+ LIR* data_null_check_branch = OpCmpImmBranch(kCondEq, rl_data.reg, 0, nullptr);
+ AddIntrinsicSlowPath(info, data_null_check_branch);
+ CallRuntimeHelperRegLocationRegLocationRegLocationRegLocation(
+ kQuickAllocStringFromBytes, rl_data, rl_high, rl_offset, rl_count, true);
+ RegLocation rl_return = GetReturn(kRefReg);
+ RegLocation rl_dest = InlineTarget(info);
+ StoreValue(rl_dest, rl_return);
+ return true;
+}
+
+bool Mir2Lir::GenInlinedStringFactoryNewStringFromChars(CallInfo* info) {
+ if (cu_->instruction_set == kMips) {
+ // TODO - add Mips implementation
+ return false;
+ }
+ RegLocation rl_offset = info->args[0];
+ RegLocation rl_count = info->args[1];
+ RegLocation rl_data = info->args[2];
+ CallRuntimeHelperRegLocationRegLocationRegLocation(
+ kQuickAllocStringFromChars, rl_offset, rl_count, rl_data, true);
+ RegLocation rl_return = GetReturn(kRefReg);
+ RegLocation rl_dest = InlineTarget(info);
+ StoreValue(rl_dest, rl_return);
+ return true;
+}
+
+bool Mir2Lir::GenInlinedStringFactoryNewStringFromString(CallInfo* info) {
+ if (cu_->instruction_set == kMips) {
+ // TODO - add Mips implementation
+ return false;
+ }
+ RegLocation rl_string = info->args[0];
+ rl_string = LoadValue(rl_string, kRefReg);
+ LIR* string_null_check_branch = OpCmpImmBranch(kCondEq, rl_string.reg, 0, nullptr);
+ AddIntrinsicSlowPath(info, string_null_check_branch);
+ CallRuntimeHelperRegLocation(kQuickAllocStringFromString, rl_string, true);
+ RegLocation rl_return = GetReturn(kRefReg);
+ RegLocation rl_dest = InlineTarget(info);
+ StoreValue(rl_dest, rl_return);
+ return true;
+}
+
bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) {
if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
// TODO: add Mips and Mips64 implementations.
@@ -1451,9 +1547,22 @@
LockCallTemps();
const MirMethodLoweringInfo& method_info = mir_graph_->GetMethodLoweringInfo(info->mir);
+ MethodReference target_method = method_info.GetTargetMethod();
cu_->compiler_driver->ProcessedInvoke(method_info.GetInvokeType(), method_info.StatsFlags());
InvokeType original_type = static_cast<InvokeType>(method_info.GetInvokeType());
info->type = method_info.GetSharpType();
+ bool is_string_init = false;
+ if (method_info.IsSpecial()) {
+ DexFileMethodInliner* inliner = cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(
+ target_method.dex_file);
+ if (inliner->IsStringInitMethodIndex(target_method.dex_method_index)) {
+ is_string_init = true;
+ size_t pointer_size = GetInstructionSetPointerSize(cu_->instruction_set);
+ info->string_init_offset = inliner->GetOffsetForStringInit(target_method.dex_method_index,
+ pointer_size);
+ info->type = kStatic;
+ }
+ }
bool fast_path = method_info.FastPath();
bool skip_this;
@@ -1478,7 +1587,6 @@
next_call_insn = fast_path ? NextVCallInsn : NextVCallInsnSP;
skip_this = fast_path;
}
- MethodReference target_method = method_info.GetTargetMethod();
call_state = GenDalvikArgs(info, call_state, p_null_ck,
next_call_insn, target_method, method_info.VTableIndex(),
method_info.DirectCode(), method_info.DirectMethod(),
@@ -1495,7 +1603,7 @@
FreeCallTemps();
if (info->result.location != kLocInvalid) {
// We have a following MOVE_RESULT - do it now.
- RegisterClass reg_class =
+ RegisterClass reg_class = is_string_init ? kRefReg :
ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]);
if (info->result.wide) {
RegLocation ret_loc = GetReturnWide(reg_class);
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 39b9cc7..3d25384 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -20,7 +20,9 @@
#include "base/logging.h"
#include "dex/mir_graph.h"
+#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex/quick/mir_to_lir-inl.h"
+#include "driver/compiler_driver.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "gc/accounting/card_table.h"
#include "mips_lir.h"
@@ -397,11 +399,28 @@
* Bit of a hack here - in the absence of a real scheduling pass,
* emit the next instruction in static & direct invoke sequences.
*/
-static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info ATTRIBUTE_UNUSED, int state,
+static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method, uint32_t, uintptr_t direct_code,
uintptr_t direct_method, InvokeType type) {
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
- if (direct_code != 0 && direct_method != 0) {
+ if (info->string_init_offset != 0) {
+ RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
+ switch (state) {
+ case 0: { // Grab target method* from thread pointer
+ cg->LoadRefDisp(cg->TargetPtrReg(kSelf), info->string_init_offset, arg0_ref, kNotVolatile);
+ break;
+ }
+ case 1: // Grab the code from the method*
+ if (direct_code == 0) {
+ int32_t offset = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ InstructionSetPointerSize(cu->instruction_set)).Int32Value();
+ cg->LoadWordDisp(arg0_ref, offset, cg->TargetPtrReg(kInvokeTgt));
+ }
+ break;
+ default:
+ return -1;
+ }
+ } else if (direct_code != 0 && direct_method != 0) {
switch (state) {
case 0: // Get the current Method* [sets kArg0]
if (direct_code != static_cast<uintptr_t>(-1)) {
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 6f227fc..a07274f 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -897,6 +897,10 @@
RegLocation arg0, RegLocation arg1,
RegLocation arg2,
bool safepoint_pc);
+ void CallRuntimeHelperRegLocationRegLocationRegLocationRegLocation(
+ QuickEntrypointEnum trampoline, RegLocation arg0, RegLocation arg1,
+ RegLocation arg2, RegLocation arg3, bool safepoint_pc);
+
void GenInvoke(CallInfo* info);
void GenInvokeNoInline(CallInfo* info);
virtual NextCallInsn GetNextSDCallInsn() = 0;
@@ -937,7 +941,11 @@
bool GenInlinedReferenceGetReferent(CallInfo* info);
virtual bool GenInlinedCharAt(CallInfo* info);
+ bool GenInlinedStringGetCharsNoCheck(CallInfo* info);
bool GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty);
+ bool GenInlinedStringFactoryNewStringFromBytes(CallInfo* info);
+ bool GenInlinedStringFactoryNewStringFromChars(CallInfo* info);
+ bool GenInlinedStringFactoryNewStringFromString(CallInfo* info);
virtual bool GenInlinedReverseBits(CallInfo* info, OpSize size);
bool GenInlinedReverseBytes(CallInfo* info, OpSize size);
virtual bool GenInlinedAbsInt(CallInfo* info);
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index e2364d8..2495757 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -19,6 +19,7 @@
#include "codegen_x86.h"
#include "base/logging.h"
+#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
@@ -343,11 +344,20 @@
int X86Mir2Lir::X86NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
int state, const MethodReference& target_method,
uint32_t,
- uintptr_t direct_code, uintptr_t direct_method,
+ uintptr_t direct_code ATTRIBUTE_UNUSED, uintptr_t direct_method,
InvokeType type) {
- UNUSED(info, direct_code);
X86Mir2Lir* cg = static_cast<X86Mir2Lir*>(cu->cg.get());
- if (direct_method != 0) {
+ if (info->string_init_offset != 0) {
+ RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
+ switch (state) {
+ case 0: { // Grab target method* from thread pointer
+ cg->NewLIR2(kX86Mov32RT, arg0_ref.GetReg(), info->string_init_offset);
+ break;
+ }
+ default:
+ return -1;
+ }
+ } else if (direct_method != 0) {
switch (state) {
case 0: // Get the current Method* [sets kArg0]
if (direct_method != static_cast<uintptr_t>(-1)) {
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index b460379..2f211da 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -1302,10 +1302,6 @@
int value_offset = mirror::String::ValueOffset().Int32Value();
// Location of count within the String object.
int count_offset = mirror::String::CountOffset().Int32Value();
- // Starting offset within data array.
- int offset_offset = mirror::String::OffsetOffset().Int32Value();
- // Start of char data with array_.
- int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
// Compute the number of words to search in to rCX.
Load32Disp(rs_rDX, count_offset, rs_rCX);
@@ -1388,15 +1384,13 @@
// Load the address of the string into EDI.
// In case of start index we have to add the address to existing value in EDI.
- // The string starts at VALUE(String) + 2 * OFFSET(String) + DATA_OFFSET.
if (zero_based || (!zero_based && rl_start.is_const && start_value == 0)) {
- Load32Disp(rs_rDX, offset_offset, rs_rDI);
+ OpRegRegImm(kOpAdd, rs_rDI, rs_rDX, value_offset);
} else {
- OpRegMem(kOpAdd, rs_rDI, rs_rDX, offset_offset);
+ OpRegImm(kOpLsl, rs_rDI, 1);
+ OpRegReg(kOpAdd, rs_rDI, rs_rDX);
+ OpRegImm(kOpAdd, rs_rDI, value_offset);
}
- OpRegImm(kOpLsl, rs_rDI, 1);
- OpRegMem(kOpAdd, rs_rDI, rs_rDX, value_offset);
- OpRegImm(kOpAdd, rs_rDI, data_offset);
// EDI now contains the start of the string to be searched.
// We are all prepared to do the search for the character.
@@ -2423,24 +2417,15 @@
int value_offset = mirror::String::ValueOffset().Int32Value();
// Location of count
int count_offset = mirror::String::CountOffset().Int32Value();
- // Starting offset within data array
- int offset_offset = mirror::String::OffsetOffset().Int32Value();
- // Start of char data with array_
- int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
RegLocation rl_obj = info->args[0];
RegLocation rl_idx = info->args[1];
rl_obj = LoadValue(rl_obj, kRefReg);
- // X86 wants to avoid putting a constant index into a register.
- if (!rl_idx.is_const) {
- rl_idx = LoadValue(rl_idx, kCoreReg);
- }
+ rl_idx = LoadValue(rl_idx, kCoreReg);
RegStorage reg_max;
GenNullCheck(rl_obj.reg, info->opt_flags);
bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
LIR* range_check_branch = nullptr;
- RegStorage reg_off;
- RegStorage reg_ptr;
if (range_check) {
// On x86, we can compare to memory directly
// Set up a launch pad to allow retry in case of bounds violation */
@@ -2456,24 +2441,11 @@
range_check_branch = OpCondBranch(kCondUge, nullptr);
}
}
- reg_off = AllocTemp();
- reg_ptr = AllocTempRef();
- Load32Disp(rl_obj.reg, offset_offset, reg_off);
- LoadRefDisp(rl_obj.reg, value_offset, reg_ptr, kNotVolatile);
- if (rl_idx.is_const) {
- OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg));
- } else {
- OpRegReg(kOpAdd, reg_off, rl_idx.reg);
- }
- FreeTemp(rl_obj.reg);
- if (rl_idx.location == kLocPhysReg) {
- FreeTemp(rl_idx.reg);
- }
RegLocation rl_dest = InlineTarget(info);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.reg, kUnsignedHalf);
- FreeTemp(reg_off);
- FreeTemp(reg_ptr);
+ LoadBaseIndexedDisp(rl_obj.reg, rl_idx.reg, 1, value_offset, rl_result.reg, kUnsignedHalf);
+ FreeTemp(rl_idx.reg);
+ FreeTemp(rl_obj.reg);
StoreValue(rl_dest, rl_result);
if (range_check) {
DCHECK(range_check_branch != nullptr);
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 7eba515..e788261 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -64,6 +64,9 @@
if (method_verifier->HasCheckCasts()) {
verified_method->GenerateSafeCastSet(method_verifier);
}
+
+ verified_method->SetStringInitPcRegMap(method_verifier->GetStringInitPcRegMap());
+
return verified_method.release();
}
diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h
index ad07639..242e3df 100644
--- a/compiler/dex/verified_method.h
+++ b/compiler/dex/verified_method.h
@@ -75,6 +75,13 @@
return has_verification_failures_;
}
+ void SetStringInitPcRegMap(SafeMap<uint32_t, std::set<uint32_t>>& string_init_pc_reg_map) {
+ string_init_pc_reg_map_ = string_init_pc_reg_map;
+ }
+ const SafeMap<uint32_t, std::set<uint32_t>>& GetStringInitPcRegMap() const {
+ return string_init_pc_reg_map_;
+ }
+
private:
VerifiedMethod() = default;
@@ -114,6 +121,10 @@
SafeCastSet safe_cast_set_;
bool has_verification_failures_;
+
+ // Copy of mapping generated by verifier of dex PCs of string init invocations
+ // to the set of other registers that the receiver has been copied into.
+ SafeMap<uint32_t, std::set<uint32_t>> string_init_pc_reg_map_;
};
} // namespace art
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index c858326..47288b5 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -40,6 +40,7 @@
#include "dex/verification_results.h"
#include "dex/verified_method.h"
#include "dex/quick/dex_file_method_inliner.h"
+#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "driver/compiler_options.h"
#include "elf_writer_quick.h"
#include "jni_internal.h"
@@ -2485,4 +2486,16 @@
return oss.str();
}
+bool CompilerDriver::IsStringTypeIndex(uint16_t type_index, const DexFile* dex_file) {
+ const char* type = dex_file->GetTypeDescriptor(dex_file->GetTypeId(type_index));
+ return strcmp(type, "Ljava/lang/String;") == 0;
+}
+
+bool CompilerDriver::IsStringInit(uint32_t method_index, const DexFile* dex_file, int32_t* offset) {
+ DexFileMethodInliner* inliner = GetMethodInlinerMap()->GetMethodInliner(dex_file);
+ size_t pointer_size = InstructionSetPointerSize(GetInstructionSet());
+ *offset = inliner->GetOffsetForStringInit(method_index, pointer_size);
+ return inliner->IsStringInitMethodIndex(method_index);
+}
+
} // namespace art
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 02de11e..2b0985a 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -466,6 +466,9 @@
// Get memory usage during compilation.
std::string GetMemoryUsageString(bool extended) const;
+ bool IsStringTypeIndex(uint16_t type_index, const DexFile* dex_file);
+ bool IsStringInit(uint32_t method_index, const DexFile* dex_file, int32_t* offset);
+
void SetHadHardVerifierFailure() {
had_hard_verifier_failure_ = true;
}
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index fc70d8f..4dc7509 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -89,7 +89,12 @@
Thread::Current()->TransitionFromSuspendedToRunnable();
PruneNonImageClasses(); // Remove junk
ComputeLazyFieldsForImageClasses(); // Add useful information
- ProcessStrings();
+
+ // Calling this can in theory fill in some resolved strings. However, in practice it seems to
+ // never resolve any.
+ if (kComputeEagerResolvedStrings) {
+ ComputeEagerResolvedStrings();
+ }
Thread::Current()->TransitionFromRunnableToSuspended(kNative);
}
gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -529,14 +534,6 @@
return true;
}
-// Count the number of strings in the heap and put the result in arg as a size_t pointer.
-static void CountStringsCallback(Object* obj, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (obj->GetClass()->IsStringClass()) {
- ++*reinterpret_cast<size_t*>(arg);
- }
-}
-
// Collect all the java.lang.String in the heap and put them in the output strings_ array.
class StringCollector {
public:
@@ -566,99 +563,19 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::String* lhs_s = lhs.AsMirrorPtr();
mirror::String* rhs_s = rhs.AsMirrorPtr();
- uint16_t* lhs_begin = lhs_s->GetCharArray()->GetData() + lhs_s->GetOffset();
- uint16_t* rhs_begin = rhs_s->GetCharArray()->GetData() + rhs_s->GetOffset();
+ uint16_t* lhs_begin = lhs_s->GetValue();
+ uint16_t* rhs_begin = rhs_s->GetValue();
return std::lexicographical_compare(lhs_begin, lhs_begin + lhs_s->GetLength(),
rhs_begin, rhs_begin + rhs_s->GetLength());
}
};
-static bool IsPrefix(mirror::String* pref, mirror::String* full)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (pref->GetLength() > full->GetLength()) {
- return false;
- }
- uint16_t* pref_begin = pref->GetCharArray()->GetData() + pref->GetOffset();
- uint16_t* full_begin = full->GetCharArray()->GetData() + full->GetOffset();
- return std::equal(pref_begin, pref_begin + pref->GetLength(), full_begin);
-}
-
-void ImageWriter::ProcessStrings() {
- size_t total_strings = 0;
- gc::Heap* heap = Runtime::Current()->GetHeap();
- ClassLinker* cl = Runtime::Current()->GetClassLinker();
- // Count the strings.
- heap->VisitObjects(CountStringsCallback, &total_strings);
- Thread* self = Thread::Current();
- StackHandleScope<1> hs(self);
- auto strings = hs.NewHandle(cl->AllocStringArray(self, total_strings));
- StringCollector string_collector(strings, 0U);
- // Read strings into the array.
- heap->VisitObjects(StringCollector::Callback, &string_collector);
- // Some strings could have gotten freed if AllocStringArray caused a GC.
- CHECK_LE(string_collector.GetIndex(), total_strings);
- total_strings = string_collector.GetIndex();
- auto* strings_begin = reinterpret_cast<mirror::HeapReference<mirror::String>*>(
- strings->GetRawData(sizeof(mirror::HeapReference<mirror::String>), 0));
- std::sort(strings_begin, strings_begin + total_strings, LexicographicalStringComparator());
- // Characters of strings which are non equal prefix of another string (not the same string).
- // We don't count the savings from equal strings since these would get interned later anyways.
- size_t prefix_saved_chars = 0;
- // Count characters needed for the strings.
- size_t num_chars = 0u;
- mirror::String* prev_s = nullptr;
- for (size_t idx = 0; idx != total_strings; ++idx) {
- mirror::String* s = strings->GetWithoutChecks(idx);
- size_t length = s->GetLength();
- num_chars += length;
- if (prev_s != nullptr && IsPrefix(prev_s, s)) {
- size_t prev_length = prev_s->GetLength();
- num_chars -= prev_length;
- if (prev_length != length) {
- prefix_saved_chars += prev_length;
- }
- }
- prev_s = s;
- }
- // Create character array, copy characters and point the strings there.
- mirror::CharArray* array = mirror::CharArray::Alloc(self, num_chars);
- string_data_array_ = array;
- uint16_t* array_data = array->GetData();
- size_t pos = 0u;
- prev_s = nullptr;
- for (size_t idx = 0; idx != total_strings; ++idx) {
- mirror::String* s = strings->GetWithoutChecks(idx);
- uint16_t* s_data = s->GetCharArray()->GetData() + s->GetOffset();
- int32_t s_length = s->GetLength();
- int32_t prefix_length = 0u;
- if (idx != 0u && IsPrefix(prev_s, s)) {
- prefix_length = prev_s->GetLength();
- }
- memcpy(array_data + pos, s_data + prefix_length, (s_length - prefix_length) * sizeof(*s_data));
- s->SetOffset(pos - prefix_length);
- s->SetArray(array);
- pos += s_length - prefix_length;
- prev_s = s;
- }
- CHECK_EQ(pos, num_chars);
-
- if (kIsDebugBuild || VLOG_IS_ON(compiler)) {
- LOG(INFO) << "Total # image strings=" << total_strings << " combined length="
- << num_chars << " prefix saved chars=" << prefix_saved_chars;
- }
- // Calling this can in theory fill in some resolved strings. However, in practice it seems to
- // never resolve any.
- if (kComputeEagerResolvedStrings) {
- ComputeEagerResolvedStrings();
- }
-}
-
void ImageWriter::ComputeEagerResolvedStringsCallback(Object* obj, void* arg ATTRIBUTE_UNUSED) {
if (!obj->GetClass()->IsStringClass()) {
return;
}
mirror::String* string = obj->AsString();
- const uint16_t* utf16_string = string->GetCharArray()->GetData() + string->GetOffset();
+ const uint16_t* utf16_string = string->GetValue();
size_t utf16_length = static_cast<size_t>(string->GetLength());
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ReaderMutexLock mu(Thread::Current(), *class_linker->DexLock());
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index a2d99ee..c0cffa5 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -220,9 +220,6 @@
static void ComputeEagerResolvedStringsCallback(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Combine string char arrays.
- void ProcessStrings() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
// Remove unwanted classes from various roots.
void PruneNonImageClasses() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static bool NonImageClassesVisitor(mirror::Class* c, void* arg)
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 925b507..dbdcc96 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -176,7 +176,7 @@
EXPECT_EQ(72U, sizeof(OatHeader));
EXPECT_EQ(4U, sizeof(OatMethodOffsets));
EXPECT_EQ(28U, sizeof(OatQuickMethodHeader));
- EXPECT_EQ(92 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints));
+ EXPECT_EQ(111 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints));
}
TEST_F(OatTest, OatHeaderIsValid) {
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 96e08fd..a883bd0 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -19,6 +19,7 @@
#include "art_field-inl.h"
#include "base/logging.h"
#include "class_linker.h"
+#include "dex/verified_method.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
#include "dex/verified_method.h"
@@ -612,6 +613,16 @@
HInvokeStaticOrDirect::ClinitCheckRequirement::kImplicit;
// Potential class initialization check, in the case of a static method call.
HClinitCheck* clinit_check = nullptr;
+ // Replace calls to String.<init> with StringFactory.
+ int32_t string_init_offset = 0;
+ bool is_string_init = compiler_driver_->IsStringInit(method_idx, dex_file_, &string_init_offset);
+ if (is_string_init) {
+ return_type = Primitive::kPrimNot;
+ is_instance_call = false;
+ number_of_arguments--;
+ invoke_type = kStatic;
+ optimized_invoke_type = kStatic;
+ }
HInvoke* invoke = nullptr;
@@ -698,7 +709,8 @@
invoke = new (arena_) HInvokeStaticOrDirect(
arena_, number_of_arguments, return_type, dex_pc, target_method.dex_method_index,
- is_recursive, invoke_type, optimized_invoke_type, clinit_check_requirement);
+ is_recursive, string_init_offset, invoke_type, optimized_invoke_type,
+ clinit_check_requirement);
}
size_t start_index = 0;
@@ -714,6 +726,9 @@
uint32_t descriptor_index = 1;
uint32_t argument_index = start_index;
+ if (is_string_init) {
+ start_index = 1;
+ }
for (size_t i = start_index; i < number_of_vreg_arguments; i++, argument_index++) {
Primitive::Type type = Primitive::GetType(descriptor[descriptor_index++]);
bool is_wide = (type == Primitive::kPrimLong) || (type == Primitive::kPrimDouble);
@@ -740,6 +755,28 @@
DCHECK_EQ(argument_index, number_of_arguments);
current_block_->AddInstruction(invoke);
latest_result_ = invoke;
+
+ // Add move-result for StringFactory method.
+ if (is_string_init) {
+ uint32_t orig_this_reg = is_range ? register_index : args[0];
+ const VerifiedMethod* verified_method =
+ compiler_driver_->GetVerifiedMethod(dex_file_, dex_compilation_unit_->GetDexMethodIndex());
+ if (verified_method == nullptr) {
+ LOG(WARNING) << "No verified method for method calling String.<init>: "
+ << PrettyMethod(dex_compilation_unit_->GetDexMethodIndex(), *dex_file_);
+ return false;
+ }
+ const SafeMap<uint32_t, std::set<uint32_t>>& string_init_map =
+ verified_method->GetStringInitPcRegMap();
+ auto map_it = string_init_map.find(dex_pc);
+ if (map_it != string_init_map.end()) {
+ std::set<uint32_t> reg_set = map_it->second;
+ for (auto set_it = reg_set.begin(); set_it != reg_set.end(); ++set_it) {
+ UpdateLocal(*set_it, invoke);
+ }
+ }
+ UpdateLocal(orig_this_reg, invoke);
+ }
return true;
}
@@ -1916,12 +1953,19 @@
case Instruction::NEW_INSTANCE: {
uint16_t type_index = instruction.VRegB_21c();
- QuickEntrypointEnum entrypoint = NeedsAccessCheck(type_index)
- ? kQuickAllocObjectWithAccessCheck
- : kQuickAllocObject;
+ if (compiler_driver_->IsStringTypeIndex(type_index, dex_file_)) {
+ // Turn new-instance of string into a const 0.
+ int32_t register_index = instruction.VRegA();
+ HNullConstant* constant = graph_->GetNullConstant();
+ UpdateLocal(register_index, constant);
+ } else {
+ QuickEntrypointEnum entrypoint = NeedsAccessCheck(type_index)
+ ? kQuickAllocObjectWithAccessCheck
+ : kQuickAllocObject;
- current_block_->AddInstruction(new (arena_) HNewInstance(dex_pc, type_index, entrypoint));
- UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
+ current_block_->AddInstruction(new (arena_) HNewInstance(dex_pc, type_index, entrypoint));
+ UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
+ }
break;
}
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 01748a9..bcdfccd 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -4071,15 +4071,9 @@
//
// Currently we implement the app -> app logic, which looks up in the resolve cache.
- // temp = method;
- LoadCurrentMethod(temp);
- if (!invoke->IsRecursive()) {
- // temp = temp->dex_cache_resolved_methods_;
- __ LoadFromOffset(
- kLoadWord, temp, temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
- // temp = temp[index_in_cache]
- __ LoadFromOffset(
- kLoadWord, temp, temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex()));
+ if (invoke->IsStringInit()) {
+ // temp = thread->string_init_entrypoint
+ __ LoadFromOffset(kLoadWord, temp, TR, invoke->GetStringInitOffset());
// LR = temp[offset_of_quick_compiled_code]
__ LoadFromOffset(kLoadWord, LR, temp,
mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
@@ -4087,7 +4081,24 @@
// LR()
__ blx(LR);
} else {
- __ bl(GetFrameEntryLabel());
+ // temp = method;
+ LoadCurrentMethod(temp);
+ if (!invoke->IsRecursive()) {
+ // temp = temp->dex_cache_resolved_methods_;
+ __ LoadFromOffset(
+ kLoadWord, temp, temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
+ // temp = temp[index_in_cache]
+ __ LoadFromOffset(
+ kLoadWord, temp, temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex()));
+ // LR = temp[offset_of_quick_compiled_code]
+ __ LoadFromOffset(kLoadWord, LR, temp,
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArmWordSize).Int32Value());
+ // LR()
+ __ blx(LR);
+ } else {
+ __ bl(GetFrameEntryLabel());
+ }
}
DCHECK(!IsLeafMethod());
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index dada4ce..0d963d7 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -2006,20 +2006,30 @@
//
// Currently we implement the app -> app logic, which looks up in the resolve cache.
- // temp = method;
- LoadCurrentMethod(temp);
- if (!invoke->IsRecursive()) {
- // temp = temp->dex_cache_resolved_methods_;
- __ Ldr(temp, HeapOperand(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset()));
- // temp = temp[index_in_cache];
- __ Ldr(temp, HeapOperand(temp, index_in_cache));
- // lr = temp->entry_point_from_quick_compiled_code_;
+ if (invoke->IsStringInit()) {
+ // temp = thread->string_init_entrypoint
+ __ Ldr(temp, HeapOperand(tr, invoke->GetStringInitOffset()));
+ // LR = temp->entry_point_from_quick_compiled_code_;
__ Ldr(lr, HeapOperand(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kArm64WordSize)));
- // lr();
+ // lr()
__ Blr(lr);
} else {
- __ Bl(&frame_entry_label_);
+ // temp = method;
+ LoadCurrentMethod(temp);
+ if (!invoke->IsRecursive()) {
+ // temp = temp->dex_cache_resolved_methods_;
+ __ Ldr(temp, HeapOperand(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset()));
+ // temp = temp[index_in_cache];
+ __ Ldr(temp, HeapOperand(temp, index_in_cache));
+ // lr = temp->entry_point_from_quick_compiled_code_;
+ __ Ldr(lr, HeapOperand(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArm64WordSize)));
+ // lr();
+ __ Blr(lr);
+ } else {
+ __ Bl(&frame_entry_label_);
+ }
}
DCHECK(!IsLeafMethod());
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 04999be..a037040 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -3114,18 +3114,27 @@
// 3) app -> app
//
// Currently we implement the app -> app logic, which looks up in the resolve cache.
- // temp = method;
- LoadCurrentMethod(temp);
- if (!invoke->IsRecursive()) {
- // temp = temp->dex_cache_resolved_methods_;
- __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
- // temp = temp[index_in_cache]
- __ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex())));
+
+ if (invoke->IsStringInit()) {
+ // temp = thread->string_init_entrypoint
+ __ fs()->movl(temp, Address::Absolute(invoke->GetStringInitOffset()));
// (temp + offset_of_quick_compiled_code)()
__ call(Address(
temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
} else {
- __ call(GetFrameEntryLabel());
+ // temp = method;
+ LoadCurrentMethod(temp);
+ if (!invoke->IsRecursive()) {
+ // temp = temp->dex_cache_resolved_methods_;
+ __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
+ // temp = temp[index_in_cache]
+ __ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex())));
+ // (temp + offset_of_quick_compiled_code)()
+ __ call(Address(temp,
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
+ } else {
+ __ call(GetFrameEntryLabel());
+ }
}
DCHECK(!IsLeafMethod());
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 5ce9329..f175283 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -366,18 +366,26 @@
//
// Currently we implement the app -> app logic, which looks up in the resolve cache.
- // temp = method;
- LoadCurrentMethod(temp);
- if (!invoke->IsRecursive()) {
- // temp = temp->dex_cache_resolved_methods_;
- __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
- // temp = temp[index_in_cache]
- __ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex())));
+ if (invoke->IsStringInit()) {
+ // temp = thread->string_init_entrypoint
+ __ gs()->movl(temp, Address::Absolute(invoke->GetStringInitOffset()));
// (temp + offset_of_quick_compiled_code)()
__ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kX86_64WordSize).SizeValue()));
} else {
- __ call(&frame_entry_label_);
+ // temp = method;
+ LoadCurrentMethod(temp);
+ if (!invoke->IsRecursive()) {
+ // temp = temp->dex_cache_resolved_methods_;
+ __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
+ // temp = temp[index_in_cache]
+ __ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex())));
+ // (temp + offset_of_quick_compiled_code)()
+ __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kX86_64WordSize).SizeValue()));
+ } else {
+ __ call(&frame_entry_label_);
+ }
}
DCHECK(!IsLeafMethod());
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 6cdc822..e3fd5d7 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -37,7 +37,7 @@
static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
static constexpr size_t kParameterFloatRegistersLength = arraysize(kParameterFloatRegisters);
-static constexpr Register kRuntimeParameterCoreRegisters[] = { RDI, RSI, RDX };
+static constexpr Register kRuntimeParameterCoreRegisters[] = { RDI, RSI, RDX, RCX };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
arraysize(kRuntimeParameterCoreRegisters);
static constexpr FloatRegister kRuntimeParameterFpuRegisters[] = { XMM0, XMM1 };
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 20aa45f..5d3db5c 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -186,6 +186,8 @@
return Intrinsics::kStringCharAt;
case kIntrinsicCompareTo:
return Intrinsics::kStringCompareTo;
+ case kIntrinsicGetCharsNoCheck:
+ return Intrinsics::kStringGetCharsNoCheck;
case kIntrinsicIsEmptyOrLength:
// The inliner can handle these two cases - and this is the preferred approach
// since after inlining the call is no longer visible (as opposed to waiting
@@ -194,6 +196,12 @@
case kIntrinsicIndexOf:
return ((method.d.data & kIntrinsicFlagBase0) == 0) ?
Intrinsics::kStringIndexOfAfter : Intrinsics::kStringIndexOf;
+ case kIntrinsicNewStringFromBytes:
+ return Intrinsics::kStringNewStringFromBytes;
+ case kIntrinsicNewStringFromChars:
+ return Intrinsics::kStringNewStringFromChars;
+ case kIntrinsicNewStringFromString:
+ return Intrinsics::kStringNewStringFromString;
case kIntrinsicCas:
switch (GetType(method.d.data, false)) {
@@ -280,6 +288,11 @@
case kInlineOpIPut:
return Intrinsics::kNone;
+ // String init cases, not intrinsics.
+
+ case kInlineStringInit:
+ return Intrinsics::kNone;
+
// No default case to make the compiler warn on missing cases.
}
return Intrinsics::kNone;
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index abdf04e..27d2d43 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -48,7 +48,7 @@
DCHECK_NE(type, Primitive::kPrimVoid);
- if (Primitive::IsIntegralType(type)) {
+ if (Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) {
if (type == Primitive::kPrimLong) {
Register trg_reg_lo = trg.AsRegisterPairLow<Register>();
Register trg_reg_hi = trg.AsRegisterPairHigh<Register>();
@@ -810,10 +810,6 @@
const MemberOffset value_offset = mirror::String::ValueOffset();
// Location of count
const MemberOffset count_offset = mirror::String::CountOffset();
- // Starting offset within data array
- const MemberOffset offset_offset = mirror::String::OffsetOffset();
- // Start of char data with array_
- const MemberOffset data_offset = mirror::Array::DataOffset(sizeof(uint16_t));
Register obj = locations->InAt(0).AsRegister<Register>(); // String object pointer.
Register idx = locations->InAt(1).AsRegister<Register>(); // Index of character.
@@ -835,15 +831,10 @@
__ cmp(idx, ShifterOperand(temp));
__ b(slow_path->GetEntryLabel(), CS);
- // Index computation.
- __ ldr(temp, Address(obj, offset_offset.Int32Value())); // temp := str.offset.
- __ ldr(array_temp, Address(obj, value_offset.Int32Value())); // array_temp := str.offset.
- __ add(temp, temp, ShifterOperand(idx));
- DCHECK_EQ(data_offset.Int32Value() % 2, 0); // We'll compensate by shifting.
- __ add(temp, temp, ShifterOperand(data_offset.Int32Value() / 2));
+ __ add(array_temp, obj, ShifterOperand(value_offset.Int32Value())); // array_temp := str.value.
// Load the value.
- __ ldrh(out, Address(array_temp, temp, LSL, 1)); // out := array_temp[temp].
+ __ ldrh(out, Address(array_temp, idx, LSL, 1)); // out := array_temp[idx].
__ Bind(slow_path->GetExitLabel());
}
@@ -878,6 +869,81 @@
__ Bind(slow_path->GetExitLabel());
}
+void IntrinsicLocationsBuilderARM::VisitStringNewStringFromBytes(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ locations->SetInAt(3, Location::RegisterLocation(calling_convention.GetRegisterAt(3)));
+ locations->SetOut(Location::RegisterLocation(R0));
+}
+
+void IntrinsicCodeGeneratorARM::VisitStringNewStringFromBytes(HInvoke* invoke) {
+ ArmAssembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ Register byte_array = locations->InAt(0).AsRegister<Register>();
+ __ cmp(byte_array, ShifterOperand(0));
+ SlowPathCodeARM* slow_path = new (GetAllocator()) IntrinsicSlowPathARM(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ b(slow_path->GetEntryLabel(), EQ);
+
+ __ LoadFromOffset(
+ kLoadWord, LR, TR, QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pAllocStringFromBytes).Int32Value());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ blx(LR);
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void IntrinsicLocationsBuilderARM::VisitStringNewStringFromChars(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ locations->SetOut(Location::RegisterLocation(R0));
+}
+
+void IntrinsicCodeGeneratorARM::VisitStringNewStringFromChars(HInvoke* invoke) {
+ ArmAssembler* assembler = GetAssembler();
+
+ __ LoadFromOffset(
+ kLoadWord, LR, TR, QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pAllocStringFromChars).Int32Value());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ blx(LR);
+}
+
+void IntrinsicLocationsBuilderARM::VisitStringNewStringFromString(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetOut(Location::RegisterLocation(R0));
+}
+
+void IntrinsicCodeGeneratorARM::VisitStringNewStringFromString(HInvoke* invoke) {
+ ArmAssembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ Register string_to_copy = locations->InAt(0).AsRegister<Register>();
+ __ cmp(string_to_copy, ShifterOperand(0));
+ SlowPathCodeARM* slow_path = new (GetAllocator()) IntrinsicSlowPathARM(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ b(slow_path->GetEntryLabel(), EQ);
+
+ __ LoadFromOffset(kLoadWord,
+ LR, TR, QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pAllocStringFromString).Int32Value());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ blx(LR);
+ __ Bind(slow_path->GetExitLabel());
+}
+
// Unimplemented intrinsics.
#define UNIMPLEMENTED_INTRINSIC(Name) \
@@ -907,6 +973,7 @@
UNIMPLEMENTED_INTRINSIC(StringIndexOf)
UNIMPLEMENTED_INTRINSIC(StringIndexOfAfter)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
+UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
} // namespace arm
} // namespace art
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 7a753b2..4f008e7 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -75,7 +75,7 @@
DCHECK_NE(type, Primitive::kPrimVoid);
- if (Primitive::IsIntegralType(type)) {
+ if (Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) {
Register trg_reg = RegisterFrom(trg, type);
Register res_reg = RegisterFrom(ARM64ReturnLocation(type), type);
__ Mov(trg_reg, res_reg, kDiscardForSameWReg);
@@ -953,10 +953,6 @@
const MemberOffset value_offset = mirror::String::ValueOffset();
// Location of count
const MemberOffset count_offset = mirror::String::CountOffset();
- // Starting offset within data array
- const MemberOffset offset_offset = mirror::String::OffsetOffset();
- // Start of char data with array_
- const MemberOffset data_offset = mirror::Array::DataOffset(sizeof(uint16_t));
Register obj = WRegisterFrom(locations->InAt(0)); // String object pointer.
Register idx = WRegisterFrom(locations->InAt(1)); // Index of character.
@@ -979,21 +975,15 @@
__ Cmp(idx, temp);
__ B(hs, slow_path->GetEntryLabel());
- // Index computation.
- __ Ldr(temp, HeapOperand(obj, offset_offset)); // temp := str.offset.
- __ Ldr(array_temp, HeapOperand(obj, value_offset)); // array_temp := str.offset.
- __ Add(temp, temp, idx);
- DCHECK_EQ(data_offset.Int32Value() % 2, 0); // We'll compensate by shifting.
- __ Add(temp, temp, Operand(data_offset.Int32Value() / 2));
+ __ Add(array_temp, obj, Operand(value_offset.Int32Value())); // array_temp := str.value.
// Load the value.
- __ Ldrh(out, MemOperand(array_temp.X(), temp, UXTW, 1)); // out := array_temp[temp].
+ __ Ldrh(out, MemOperand(array_temp.X(), idx, UXTW, 1)); // out := array_temp[idx].
__ Bind(slow_path->GetExitLabel());
}
void IntrinsicLocationsBuilderARM64::VisitStringCompareTo(HInvoke* invoke) {
- // The inputs plus one temp.
LocationSummary* locations = new (arena_) LocationSummary(invoke,
LocationSummary::kCall,
kIntrinsified);
@@ -1022,6 +1012,84 @@
__ Bind(slow_path->GetExitLabel());
}
+void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromBytes(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
+ locations->SetInAt(3, LocationFrom(calling_convention.GetRegisterAt(3)));
+ locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
+}
+
+void IntrinsicCodeGeneratorARM64::VisitStringNewStringFromBytes(HInvoke* invoke) {
+ vixl::MacroAssembler* masm = GetVIXLAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ Register byte_array = WRegisterFrom(locations->InAt(0));
+ __ Cmp(byte_array, 0);
+ SlowPathCodeARM64* slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ B(eq, slow_path->GetEntryLabel());
+
+ __ Ldr(lr,
+ MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocStringFromBytes).Int32Value()));
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ Blr(lr);
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromChars(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
+ locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
+}
+
+void IntrinsicCodeGeneratorARM64::VisitStringNewStringFromChars(HInvoke* invoke) {
+ vixl::MacroAssembler* masm = GetVIXLAssembler();
+
+ __ Ldr(lr,
+ MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocStringFromChars).Int32Value()));
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ Blr(lr);
+}
+
+void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromString(HInvoke* invoke) {
+ // The inputs plus one temp.
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
+ locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
+}
+
+void IntrinsicCodeGeneratorARM64::VisitStringNewStringFromString(HInvoke* invoke) {
+ vixl::MacroAssembler* masm = GetVIXLAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ Register string_to_copy = WRegisterFrom(locations->InAt(0));
+ __ Cmp(string_to_copy, 0);
+ SlowPathCodeARM64* slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ B(eq, slow_path->GetEntryLabel());
+
+ __ Ldr(lr,
+ MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocStringFromString).Int32Value()));
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ Blr(lr);
+ __ Bind(slow_path->GetExitLabel());
+}
+
// Unimplemented intrinsics.
#define UNIMPLEMENTED_INTRINSIC(Name) \
@@ -1034,6 +1102,7 @@
UNIMPLEMENTED_INTRINSIC(StringIndexOf)
UNIMPLEMENTED_INTRINSIC(StringIndexOfAfter)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
+UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
} // namespace arm64
} // namespace art
diff --git a/compiler/optimizing/intrinsics_list.h b/compiler/optimizing/intrinsics_list.h
index 10f6e1d..2c9248f 100644
--- a/compiler/optimizing/intrinsics_list.h
+++ b/compiler/optimizing/intrinsics_list.h
@@ -60,8 +60,12 @@
V(MemoryPokeShortNative, kStatic) \
V(StringCharAt, kDirect) \
V(StringCompareTo, kDirect) \
+ V(StringGetCharsNoCheck, kDirect) \
V(StringIndexOf, kDirect) \
V(StringIndexOfAfter, kDirect) \
+ V(StringNewStringFromBytes, kStatic) \
+ V(StringNewStringFromChars, kStatic) \
+ V(StringNewStringFromString, kStatic) \
V(UnsafeCASInt, kDirect) \
V(UnsafeCASLong, kDirect) \
V(UnsafeCASObject, kDirect) \
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 7275edb..b3e821c 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -910,23 +910,18 @@
const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
// Location of count
const int32_t count_offset = mirror::String::CountOffset().Int32Value();
- // Starting offset within data array
- const int32_t offset_offset = mirror::String::OffsetOffset().Int32Value();
- // Start of char data with array_
- const int32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
Register obj = locations->InAt(0).AsRegister<Register>();
Register idx = locations->InAt(1).AsRegister<Register>();
Register out = locations->Out().AsRegister<Register>();
- Location temp_loc = locations->GetTemp(0);
- Register temp = temp_loc.AsRegister<Register>();
// TODO: Maybe we can support range check elimination. Overall, though, I think it's not worth
// the cost.
// TODO: For simplicity, the index parameter is requested in a register, so different from Quick
// we will not optimize the code for constants (which would save a register).
- SlowPathCodeX86* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke, temp);
+ SlowPathCodeX86* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(
+ invoke, locations->GetTemp(0).AsRegister<Register>());
codegen_->AddSlowPath(slow_path);
X86Assembler* assembler = GetAssembler();
@@ -935,12 +930,8 @@
codegen_->MaybeRecordImplicitNullCheck(invoke);
__ j(kAboveEqual, slow_path->GetEntryLabel());
- // Get the actual element.
- __ movl(temp, idx); // temp := idx.
- __ addl(temp, Address(obj, offset_offset)); // temp := offset + idx.
- __ movl(out, Address(obj, value_offset)); // obj := obj.array.
- // out = out[2*temp].
- __ movzxw(out, Address(out, temp, ScaleFactor::TIMES_2, data_offset));
+ // out = out[2*idx].
+ __ movzxw(out, Address(out, idx, ScaleFactor::TIMES_2, value_offset));
__ Bind(slow_path->GetExitLabel());
}
@@ -976,6 +967,81 @@
__ Bind(slow_path->GetExitLabel());
}
+void IntrinsicLocationsBuilderX86::VisitStringNewStringFromBytes(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ locations->SetInAt(3, Location::RegisterLocation(calling_convention.GetRegisterAt(3)));
+ locations->SetOut(Location::RegisterLocation(EAX));
+ // Needs to be EAX for the invoke.
+ locations->AddTemp(Location::RegisterLocation(EAX));
+}
+
+void IntrinsicCodeGeneratorX86::VisitStringNewStringFromBytes(HInvoke* invoke) {
+ X86Assembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ Register byte_array = locations->InAt(0).AsRegister<Register>();
+ __ testl(byte_array, byte_array);
+ SlowPathCodeX86* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(
+ invoke, locations->GetTemp(0).AsRegister<Register>());
+ codegen_->AddSlowPath(slow_path);
+ __ j(kEqual, slow_path->GetEntryLabel());
+
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocStringFromBytes)));
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void IntrinsicLocationsBuilderX86::VisitStringNewStringFromChars(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ locations->SetOut(Location::RegisterLocation(EAX));
+}
+
+void IntrinsicCodeGeneratorX86::VisitStringNewStringFromChars(HInvoke* invoke) {
+ X86Assembler* assembler = GetAssembler();
+
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocStringFromChars)));
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void IntrinsicLocationsBuilderX86::VisitStringNewStringFromString(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetOut(Location::RegisterLocation(EAX));
+ // Needs to be EAX for the invoke.
+ locations->AddTemp(Location::RegisterLocation(EAX));
+}
+
+void IntrinsicCodeGeneratorX86::VisitStringNewStringFromString(HInvoke* invoke) {
+ X86Assembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ Register string_to_copy = locations->InAt(0).AsRegister<Register>();
+ __ testl(string_to_copy, string_to_copy);
+ SlowPathCodeX86* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(
+ invoke, locations->GetTemp(0).AsRegister<Register>());
+ codegen_->AddSlowPath(slow_path);
+ __ j(kEqual, slow_path->GetEntryLabel());
+
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocStringFromString)));
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ Bind(slow_path->GetExitLabel());
+}
+
static void GenPeek(LocationSummary* locations, Primitive::Type size, X86Assembler* assembler) {
Register address = locations->InAt(0).AsRegisterPairLow<Register>();
Location out_loc = locations->Out();
@@ -1536,6 +1602,7 @@
}
UNIMPLEMENTED_INTRINSIC(MathRoundDouble)
+UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
UNIMPLEMENTED_INTRINSIC(StringIndexOf)
UNIMPLEMENTED_INTRINSIC(StringIndexOfAfter)
UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 35daaf6..5779b9c 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -824,16 +824,10 @@
const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
// Location of count
const int32_t count_offset = mirror::String::CountOffset().Int32Value();
- // Starting offset within data array
- const int32_t offset_offset = mirror::String::OffsetOffset().Int32Value();
- // Start of char data with array_
- const int32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
CpuRegister idx = locations->InAt(1).AsRegister<CpuRegister>();
CpuRegister out = locations->Out().AsRegister<CpuRegister>();
- Location temp_loc = locations->GetTemp(0);
- CpuRegister temp = temp_loc.AsRegister<CpuRegister>();
// TODO: Maybe we can support range check elimination. Overall, though, I think it's not worth
// the cost.
@@ -849,12 +843,8 @@
codegen_->MaybeRecordImplicitNullCheck(invoke);
__ j(kAboveEqual, slow_path->GetEntryLabel());
- // Get the actual element.
- __ movl(temp, idx); // temp := idx.
- __ addl(temp, Address(obj, offset_offset)); // temp := offset + idx.
- __ movl(out, Address(obj, value_offset)); // obj := obj.array.
- // out = out[2*temp].
- __ movzxw(out, Address(out, temp, ScaleFactor::TIMES_2, data_offset));
+ // out = out[2*idx].
+ __ movzxw(out, Address(out, idx, ScaleFactor::TIMES_2, value_offset));
__ Bind(slow_path->GetExitLabel());
}
@@ -887,6 +877,78 @@
__ Bind(slow_path->GetExitLabel());
}
+void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromBytes(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ locations->SetInAt(3, Location::RegisterLocation(calling_convention.GetRegisterAt(3)));
+ locations->SetOut(Location::RegisterLocation(RAX));
+}
+
+void IntrinsicCodeGeneratorX86_64::VisitStringNewStringFromBytes(HInvoke* invoke) {
+ X86_64Assembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ CpuRegister byte_array = locations->InAt(0).AsRegister<CpuRegister>();
+ __ testl(byte_array, byte_array);
+ SlowPathCodeX86_64* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ j(kEqual, slow_path->GetEntryLabel());
+
+ __ gs()->call(Address::Absolute(
+ QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAllocStringFromBytes), true));
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromChars(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ locations->SetOut(Location::RegisterLocation(RAX));
+}
+
+void IntrinsicCodeGeneratorX86_64::VisitStringNewStringFromChars(HInvoke* invoke) {
+ X86_64Assembler* assembler = GetAssembler();
+
+ __ gs()->call(Address::Absolute(
+ QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAllocStringFromChars), true));
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromString(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetOut(Location::RegisterLocation(RAX));
+}
+
+void IntrinsicCodeGeneratorX86_64::VisitStringNewStringFromString(HInvoke* invoke) {
+ X86_64Assembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ CpuRegister string_to_copy = locations->InAt(0).AsRegister<CpuRegister>();
+ __ testl(string_to_copy, string_to_copy);
+ SlowPathCodeX86_64* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ j(kEqual, slow_path->GetEntryLabel());
+
+ __ gs()->call(Address::Absolute(
+ QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAllocStringFromString), true));
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ Bind(slow_path->GetExitLabel());
+}
+
static void GenPeek(LocationSummary* locations, Primitive::Type size, X86_64Assembler* assembler) {
CpuRegister address = locations->InAt(0).AsRegister<CpuRegister>();
CpuRegister out = locations->Out().AsRegister<CpuRegister>(); // == address, here for clarity.
@@ -1390,6 +1452,7 @@
void IntrinsicCodeGeneratorX86_64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \
}
+UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
UNIMPLEMENTED_INTRINSIC(StringIndexOf)
UNIMPLEMENTED_INTRINSIC(StringIndexOfAfter)
UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 0533bff..9e8df04 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -2292,6 +2292,7 @@
uint32_t dex_pc,
uint32_t dex_method_index,
bool is_recursive,
+ int32_t string_init_offset,
InvokeType original_invoke_type,
InvokeType invoke_type,
ClinitCheckRequirement clinit_check_requirement)
@@ -2299,7 +2300,8 @@
original_invoke_type_(original_invoke_type),
invoke_type_(invoke_type),
is_recursive_(is_recursive),
- clinit_check_requirement_(clinit_check_requirement) {}
+ clinit_check_requirement_(clinit_check_requirement),
+ string_init_offset_(string_init_offset) {}
bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
UNUSED(obj);
@@ -2312,6 +2314,8 @@
InvokeType GetInvokeType() const { return invoke_type_; }
bool IsRecursive() const { return is_recursive_; }
bool NeedsDexCache() const OVERRIDE { return !IsRecursive(); }
+ bool IsStringInit() const { return string_init_offset_ != 0; }
+ int32_t GetStringInitOffset() const { return string_init_offset_; }
// Is this instruction a call to a static method?
bool IsStatic() const {
@@ -2367,6 +2371,9 @@
const InvokeType invoke_type_;
const bool is_recursive_;
ClinitCheckRequirement clinit_check_requirement_;
+ // Thread entrypoint offset for string init method if this is a string init invoke.
+ // Note that there are multiple string init methods, each having its own offset.
+ int32_t string_init_offset_;
DISALLOW_COPY_AND_ASSIGN(HInvokeStaticOrDirect);
};