Compiler: rearranging the deck chairs.
First of several steps to adopt the new source directory layout. No
logic changes - just moved files around.
Change-Id: I087631f8aa23973e18da4dc7706249c490bee061
diff --git a/src/compiler/dex/bb_opt.cc b/src/compiler/dex/bb_opt.cc
new file mode 100644
index 0000000..3ad5821
--- /dev/null
+++ b/src/compiler/dex/bb_opt.cc
@@ -0,0 +1,519 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "bb_opt.h"
+#include "dataflow.h"
+
+namespace art {
+
+
+uint16_t BBOpt::GetValueNumber(MIR* mir)
+{
+ uint16_t res = NO_VALUE;
+ uint16_t opcode = mir->dalvikInsn.opcode;
+ switch (opcode) {
+ case Instruction::NOP:
+ case Instruction::RETURN_VOID:
+ case Instruction::RETURN:
+ case Instruction::RETURN_OBJECT:
+ case Instruction::RETURN_WIDE:
+ case Instruction::MONITOR_ENTER:
+ case Instruction::MONITOR_EXIT:
+ case Instruction::GOTO:
+ case Instruction::GOTO_16:
+ case Instruction::GOTO_32:
+ case Instruction::CHECK_CAST:
+ case Instruction::THROW:
+ case Instruction::FILL_ARRAY_DATA:
+ case Instruction::FILLED_NEW_ARRAY:
+ case Instruction::FILLED_NEW_ARRAY_RANGE:
+ case Instruction::PACKED_SWITCH:
+ case Instruction::SPARSE_SWITCH:
+ case Instruction::IF_EQ:
+ case Instruction::IF_NE:
+ case Instruction::IF_LT:
+ case Instruction::IF_GE:
+ case Instruction::IF_GT:
+ case Instruction::IF_LE:
+ case Instruction::IF_EQZ:
+ case Instruction::IF_NEZ:
+ case Instruction::IF_LTZ:
+ case Instruction::IF_GEZ:
+ case Instruction::IF_GTZ:
+ case Instruction::IF_LEZ:
+ case Instruction::INVOKE_STATIC_RANGE:
+ case Instruction::INVOKE_STATIC:
+ case Instruction::INVOKE_DIRECT:
+ case Instruction::INVOKE_DIRECT_RANGE:
+ case Instruction::INVOKE_VIRTUAL:
+ case Instruction::INVOKE_VIRTUAL_RANGE:
+ case Instruction::INVOKE_SUPER:
+ case Instruction::INVOKE_SUPER_RANGE:
+ case Instruction::INVOKE_INTERFACE:
+ case Instruction::INVOKE_INTERFACE_RANGE:
+ case kMirOpFusedCmplFloat:
+ case kMirOpFusedCmpgFloat:
+ case kMirOpFusedCmplDouble:
+ case kMirOpFusedCmpgDouble:
+ case kMirOpFusedCmpLong:
+ // Nothing defined - take no action.
+ break;
+
+ case Instruction::MOVE_EXCEPTION:
+ case Instruction::MOVE_RESULT:
+ case Instruction::MOVE_RESULT_OBJECT:
+ case Instruction::INSTANCE_OF:
+ case Instruction::NEW_INSTANCE:
+ case Instruction::CONST_STRING:
+ case Instruction::CONST_STRING_JUMBO:
+ case Instruction::CONST_CLASS:
+ case Instruction::NEW_ARRAY: {
+ // 1 result, treat as unique each time, use result s_reg - will be unique.
+ uint16_t res = GetOperandValue(mir->ssa_rep->defs[0]);
+ SetOperandValue(mir->ssa_rep->defs[0], res);
+ }
+ break;
+ case Instruction::MOVE_RESULT_WIDE: {
+ // 1 wide result, treat as unique each time, use result s_reg - will be unique.
+ uint16_t res = GetOperandValueWide(mir->ssa_rep->defs[0]);
+ SetOperandValueWide(mir->ssa_rep->defs[0], res);
+ }
+ break;
+
+ case kMirOpPhi:
+ /*
+ * Because we'll only see phi nodes at the beginning of an extended basic block,
+ * we can ignore them. Revisit if we shift to global value numbering.
+ */
+ break;
+
+ case Instruction::MOVE:
+ case Instruction::MOVE_OBJECT:
+ case Instruction::MOVE_16:
+ case Instruction::MOVE_OBJECT_16:
+ case Instruction::MOVE_FROM16:
+ case Instruction::MOVE_OBJECT_FROM16:
+ case kMirOpCopy: {
+      // Just copy value number of source to value number of result.
+ uint16_t res = GetOperandValue(mir->ssa_rep->uses[0]);
+ SetOperandValue(mir->ssa_rep->defs[0], res);
+ }
+ break;
+
+ case Instruction::MOVE_WIDE:
+ case Instruction::MOVE_WIDE_16:
+ case Instruction::MOVE_WIDE_FROM16: {
+ // Just copy value number of source to value number of result.
+ uint16_t res = GetOperandValueWide(mir->ssa_rep->uses[0]);
+ SetOperandValueWide(mir->ssa_rep->defs[0], res);
+ }
+ break;
+
+ case Instruction::CONST:
+ case Instruction::CONST_4:
+ case Instruction::CONST_16: {
+ uint16_t res = LookupValue(Instruction::CONST, Low16Bits(mir->dalvikInsn.vB),
+ High16Bits(mir->dalvikInsn.vB >> 16), 0);
+ SetOperandValue(mir->ssa_rep->defs[0], res);
+ }
+ break;
+
+ case Instruction::CONST_HIGH16: {
+ uint16_t res = LookupValue(Instruction::CONST, 0, mir->dalvikInsn.vB, 0);
+ SetOperandValue(mir->ssa_rep->defs[0], res);
+ }
+ break;
+
+ case Instruction::CONST_WIDE_16:
+ case Instruction::CONST_WIDE_32: {
+ uint16_t low_res = LookupValue(Instruction::CONST, Low16Bits(mir->dalvikInsn.vB),
+ High16Bits(mir->dalvikInsn.vB >> 16), 1);
+ uint16_t high_res;
+ if (mir->dalvikInsn.vB & 0x80000000) {
+ high_res = LookupValue(Instruction::CONST, 0xffff, 0xffff, 2);
+ } else {
+ high_res = LookupValue(Instruction::CONST, 0, 0, 2);
+ }
+ uint16_t res = LookupValue(Instruction::CONST, low_res, high_res, 3);
+ SetOperandValue(mir->ssa_rep->defs[0], res);
+ }
+ break;
+
+ case Instruction::CONST_WIDE: {
+ uint32_t low_word = Low32Bits(mir->dalvikInsn.vB_wide);
+ uint32_t high_word = High32Bits(mir->dalvikInsn.vB_wide);
+ uint16_t low_res = LookupValue(Instruction::CONST, Low16Bits(low_word),
+ High16Bits(low_word), 1);
+ uint16_t high_res = LookupValue(Instruction::CONST, Low16Bits(high_word),
+ High16Bits(high_word), 2);
+ uint16_t res = LookupValue(Instruction::CONST, low_res, high_res, 3);
+ SetOperandValueWide(mir->ssa_rep->defs[0], res);
+ }
+ break;
+
+ case Instruction::CONST_WIDE_HIGH16: {
+ uint16_t low_res = LookupValue(Instruction::CONST, 0, 0, 1);
+ uint16_t high_res = LookupValue(Instruction::CONST, 0, Low16Bits(mir->dalvikInsn.vB), 2);
+ uint16_t res = LookupValue(Instruction::CONST, low_res, high_res, 3);
+ SetOperandValueWide(mir->ssa_rep->defs[0], res);
+ }
+ break;
+
+ case Instruction::ARRAY_LENGTH:
+ case Instruction::NEG_INT:
+ case Instruction::NOT_INT:
+ case Instruction::NEG_FLOAT:
+ case Instruction::INT_TO_BYTE:
+ case Instruction::INT_TO_SHORT:
+ case Instruction::INT_TO_CHAR:
+ case Instruction::INT_TO_FLOAT:
+ case Instruction::FLOAT_TO_INT: {
+ // res = op + 1 operand
+ uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
+ uint16_t res = LookupValue(opcode, operand1, NO_VALUE, NO_VALUE);
+ SetOperandValue(mir->ssa_rep->defs[0], res);
+ }
+ break;
+
+ case Instruction::LONG_TO_FLOAT:
+ case Instruction::LONG_TO_INT:
+ case Instruction::DOUBLE_TO_FLOAT:
+ case Instruction::DOUBLE_TO_INT: {
+ // res = op + 1 wide operand
+ uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
+ uint16_t res = LookupValue(opcode, operand1, NO_VALUE, NO_VALUE);
+ SetOperandValue(mir->ssa_rep->defs[0], res);
+ }
+ break;
+
+
+ case Instruction::DOUBLE_TO_LONG:
+ case Instruction::LONG_TO_DOUBLE:
+ case Instruction::NEG_LONG:
+ case Instruction::NOT_LONG:
+ case Instruction::NEG_DOUBLE: {
+ // wide res = op + 1 wide operand
+ uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
+ uint16_t res = LookupValue(opcode, operand1, NO_VALUE, NO_VALUE);
+ SetOperandValueWide(mir->ssa_rep->defs[0], res);
+ }
+ break;
+
+ case Instruction::FLOAT_TO_DOUBLE:
+ case Instruction::FLOAT_TO_LONG:
+ case Instruction::INT_TO_DOUBLE:
+ case Instruction::INT_TO_LONG: {
+ // wide res = op + 1 operand
+ uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
+ uint16_t res = LookupValue(opcode, operand1, NO_VALUE, NO_VALUE);
+ SetOperandValueWide(mir->ssa_rep->defs[0], res);
+ }
+ break;
+
+ case Instruction::CMPL_DOUBLE:
+ case Instruction::CMPG_DOUBLE:
+ case Instruction::CMP_LONG: {
+ // res = op + 2 wide operands
+ uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
+ uint16_t operand2 = GetOperandValueWide(mir->ssa_rep->uses[2]);
+ uint16_t res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+ SetOperandValue(mir->ssa_rep->defs[0], res);
+ }
+ break;
+
+ case Instruction::CMPG_FLOAT:
+ case Instruction::CMPL_FLOAT:
+ case Instruction::ADD_INT:
+ case Instruction::ADD_INT_2ADDR:
+ case Instruction::MUL_INT:
+ case Instruction::MUL_INT_2ADDR:
+ case Instruction::AND_INT:
+ case Instruction::AND_INT_2ADDR:
+ case Instruction::OR_INT:
+ case Instruction::OR_INT_2ADDR:
+ case Instruction::XOR_INT:
+ case Instruction::XOR_INT_2ADDR:
+ case Instruction::SUB_INT:
+ case Instruction::SUB_INT_2ADDR:
+ case Instruction::DIV_INT:
+ case Instruction::DIV_INT_2ADDR:
+ case Instruction::REM_INT:
+ case Instruction::REM_INT_2ADDR:
+ case Instruction::SHL_INT:
+ case Instruction::SHL_INT_2ADDR:
+ case Instruction::SHR_INT:
+ case Instruction::SHR_INT_2ADDR:
+ case Instruction::USHR_INT:
+ case Instruction::USHR_INT_2ADDR: {
+ // res = op + 2 operands
+ uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
+ uint16_t operand2 = GetOperandValue(mir->ssa_rep->uses[1]);
+ uint16_t res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+ SetOperandValue(mir->ssa_rep->defs[0], res);
+ }
+ break;
+
+ case Instruction::ADD_LONG:
+ case Instruction::SUB_LONG:
+ case Instruction::MUL_LONG:
+ case Instruction::DIV_LONG:
+ case Instruction::REM_LONG:
+ case Instruction::AND_LONG:
+ case Instruction::OR_LONG:
+ case Instruction::XOR_LONG:
+ case Instruction::ADD_LONG_2ADDR:
+ case Instruction::SUB_LONG_2ADDR:
+ case Instruction::MUL_LONG_2ADDR:
+ case Instruction::DIV_LONG_2ADDR:
+ case Instruction::REM_LONG_2ADDR:
+ case Instruction::AND_LONG_2ADDR:
+ case Instruction::OR_LONG_2ADDR:
+ case Instruction::XOR_LONG_2ADDR:
+ case Instruction::ADD_DOUBLE:
+ case Instruction::SUB_DOUBLE:
+ case Instruction::MUL_DOUBLE:
+ case Instruction::DIV_DOUBLE:
+ case Instruction::REM_DOUBLE:
+ case Instruction::ADD_DOUBLE_2ADDR:
+ case Instruction::SUB_DOUBLE_2ADDR:
+ case Instruction::MUL_DOUBLE_2ADDR:
+ case Instruction::DIV_DOUBLE_2ADDR:
+ case Instruction::REM_DOUBLE_2ADDR: {
+ // wide res = op + 2 wide operands
+ uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
+ uint16_t operand2 = GetOperandValueWide(mir->ssa_rep->uses[2]);
+ uint16_t res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+ SetOperandValueWide(mir->ssa_rep->defs[0], res);
+ }
+ break;
+
+ case Instruction::SHL_LONG:
+ case Instruction::SHR_LONG:
+ case Instruction::USHR_LONG:
+ case Instruction::SHL_LONG_2ADDR:
+ case Instruction::SHR_LONG_2ADDR:
+ case Instruction::USHR_LONG_2ADDR: {
+ // wide res = op + 1 wide operand + 1 operand
+ uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
+ uint16_t operand2 = GetOperandValueWide(mir->ssa_rep->uses[2]);
+ uint16_t res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+ SetOperandValueWide(mir->ssa_rep->defs[0], res);
+ }
+ break;
+
+ case Instruction::ADD_FLOAT:
+ case Instruction::SUB_FLOAT:
+ case Instruction::MUL_FLOAT:
+ case Instruction::DIV_FLOAT:
+ case Instruction::REM_FLOAT:
+ case Instruction::ADD_FLOAT_2ADDR:
+ case Instruction::SUB_FLOAT_2ADDR:
+ case Instruction::MUL_FLOAT_2ADDR:
+ case Instruction::DIV_FLOAT_2ADDR:
+ case Instruction::REM_FLOAT_2ADDR: {
+ // res = op + 2 operands
+ uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
+ uint16_t operand2 = GetOperandValue(mir->ssa_rep->uses[1]);
+ uint16_t res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+ SetOperandValue(mir->ssa_rep->defs[0], res);
+ }
+ break;
+
+ case Instruction::RSUB_INT:
+ case Instruction::ADD_INT_LIT16:
+ case Instruction::MUL_INT_LIT16:
+ case Instruction::DIV_INT_LIT16:
+ case Instruction::REM_INT_LIT16:
+ case Instruction::AND_INT_LIT16:
+ case Instruction::OR_INT_LIT16:
+ case Instruction::XOR_INT_LIT16:
+ case Instruction::ADD_INT_LIT8:
+ case Instruction::RSUB_INT_LIT8:
+ case Instruction::MUL_INT_LIT8:
+ case Instruction::DIV_INT_LIT8:
+ case Instruction::REM_INT_LIT8:
+ case Instruction::AND_INT_LIT8:
+ case Instruction::OR_INT_LIT8:
+ case Instruction::XOR_INT_LIT8:
+ case Instruction::SHL_INT_LIT8:
+ case Instruction::SHR_INT_LIT8:
+ case Instruction::USHR_INT_LIT8: {
+ // Same as res = op + 2 operands, except use vB as operand 2
+ uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
+ uint16_t operand2 = LookupValue(Instruction::CONST, mir->dalvikInsn.vB, 0, 0);
+ uint16_t res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+ SetOperandValue(mir->ssa_rep->defs[0], res);
+ }
+ break;
+
+ case Instruction::AGET_WIDE:
+ case Instruction::AGET:
+ case Instruction::AGET_OBJECT:
+ case Instruction::AGET_BOOLEAN:
+ case Instruction::AGET_BYTE:
+ case Instruction::AGET_CHAR:
+ case Instruction::AGET_SHORT: {
+ uint16_t array = GetOperandValue(mir->ssa_rep->uses[0]);
+ if (null_checked_.find(array) != null_checked_.end()) {
+ if (cu_->verbose) {
+ LOG(INFO) << "Removing null check for 0x" << std::hex << mir->offset;
+ }
+ mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
+ } else {
+ null_checked_.insert(array);
+ }
+ uint16_t index = GetOperandValue(mir->ssa_rep->uses[1]);
+ if (ValueExists(ARRAY_REF, array, index, NO_VALUE)) {
+ if (cu_->verbose) {
+ LOG(INFO) << "Removing range check for 0x" << std::hex << mir->offset;
+ }
+ mir->optimization_flags |= MIR_IGNORE_RANGE_CHECK;
+ }
+ mir->meta.throw_insn->optimization_flags |= mir->optimization_flags;
+ // Use side effect to note range check completed.
+ (void)LookupValue(ARRAY_REF, array, index, NO_VALUE);
+ // Establish value number for loaded register. Note use of memory version.
+ uint16_t memory_version = GetMemoryVersion(array, NO_VALUE);
+ uint16_t res = LookupValue(ARRAY_REF, array, index, memory_version);
+ if (opcode == Instruction::AGET_WIDE) {
+ SetOperandValueWide(mir->ssa_rep->defs[0], res);
+ } else {
+ SetOperandValue(mir->ssa_rep->defs[0], res);
+ }
+ }
+ break;
+
+ case Instruction::APUT_WIDE:
+ case Instruction::APUT:
+ case Instruction::APUT_OBJECT:
+ case Instruction::APUT_SHORT:
+ case Instruction::APUT_CHAR:
+ case Instruction::APUT_BYTE:
+ case Instruction::APUT_BOOLEAN: {
+ int array_idx = (opcode == Instruction::APUT_WIDE) ? 2 : 1;
+ int index_idx = array_idx + 1;
+ uint16_t array = GetOperandValue(mir->ssa_rep->uses[array_idx]);
+ if (null_checked_.find(array) != null_checked_.end()) {
+ if (cu_->verbose) {
+          LOG(INFO) << "Removing null check for 0x" << std::hex << mir->offset;
+ }
+ mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
+ } else {
+ null_checked_.insert(array);
+ }
+ uint16_t index = GetOperandValue(mir->ssa_rep->uses[index_idx]);
+ if (ValueExists(ARRAY_REF, array, index, NO_VALUE)) {
+ if (cu_->verbose) {
+ LOG(INFO) << "Removing range check for 0x" << std::hex << mir->offset;
+ }
+ mir->optimization_flags |= MIR_IGNORE_RANGE_CHECK;
+ }
+ mir->meta.throw_insn->optimization_flags |= mir->optimization_flags;
+ // Use side effect to note range check completed.
+ (void)LookupValue(ARRAY_REF, array, index, NO_VALUE);
+ // Rev the memory version
+ AdvanceMemoryVersion(array, NO_VALUE);
+ }
+ break;
+
+ case Instruction::IGET_OBJECT:
+ case Instruction::IGET_WIDE:
+ case Instruction::IGET:
+ case Instruction::IGET_CHAR:
+ case Instruction::IGET_SHORT:
+ case Instruction::IGET_BOOLEAN:
+ case Instruction::IGET_BYTE: {
+ uint16_t base = GetOperandValue(mir->ssa_rep->uses[0]);
+ if (null_checked_.find(base) != null_checked_.end()) {
+ if (cu_->verbose) {
+ LOG(INFO) << "Removing null check for 0x" << std::hex << mir->offset;
+ }
+ mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
+ } else {
+ null_checked_.insert(base);
+ }
+ mir->meta.throw_insn->optimization_flags |= mir->optimization_flags;
+ uint16_t field_ref = mir->dalvikInsn.vC;
+ uint16_t memory_version = GetMemoryVersion(base, field_ref);
+ if (opcode == Instruction::IGET_WIDE) {
+ uint16_t res = LookupValue(Instruction::IGET_WIDE, base, field_ref, memory_version);
+ SetOperandValueWide(mir->ssa_rep->defs[0], res);
+ } else {
+ uint16_t res = LookupValue(Instruction::IGET, base, field_ref, memory_version);
+ SetOperandValue(mir->ssa_rep->defs[0], res);
+ }
+ }
+ break;
+
+ case Instruction::IPUT_WIDE:
+ case Instruction::IPUT_OBJECT:
+ case Instruction::IPUT:
+ case Instruction::IPUT_BOOLEAN:
+ case Instruction::IPUT_BYTE:
+ case Instruction::IPUT_CHAR:
+ case Instruction::IPUT_SHORT: {
+ int base_reg = (opcode == Instruction::IPUT_WIDE) ? 2 : 1;
+ uint16_t base = GetOperandValue(mir->ssa_rep->uses[base_reg]);
+ if (null_checked_.find(base) != null_checked_.end()) {
+ if (cu_->verbose) {
+ LOG(INFO) << "Removing null check for 0x" << std::hex << mir->offset;
+ }
+ mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
+ } else {
+ null_checked_.insert(base);
+ }
+ mir->meta.throw_insn->optimization_flags |= mir->optimization_flags;
+ uint16_t field_ref = mir->dalvikInsn.vC;
+ AdvanceMemoryVersion(base, field_ref);
+ }
+ break;
+
+ case Instruction::SGET_OBJECT:
+ case Instruction::SGET:
+ case Instruction::SGET_BOOLEAN:
+ case Instruction::SGET_BYTE:
+ case Instruction::SGET_CHAR:
+ case Instruction::SGET_SHORT:
+ case Instruction::SGET_WIDE: {
+ uint16_t field_ref = mir->dalvikInsn.vB;
+ uint16_t memory_version = GetMemoryVersion(NO_VALUE, field_ref);
+ if (opcode == Instruction::SGET_WIDE) {
+ uint16_t res = LookupValue(Instruction::SGET_WIDE, NO_VALUE, field_ref, memory_version);
+ SetOperandValueWide(mir->ssa_rep->defs[0], res);
+ } else {
+ uint16_t res = LookupValue(Instruction::SGET, NO_VALUE, field_ref, memory_version);
+ SetOperandValue(mir->ssa_rep->defs[0], res);
+ }
+ }
+ break;
+
+ case Instruction::SPUT_OBJECT:
+ case Instruction::SPUT:
+ case Instruction::SPUT_BOOLEAN:
+ case Instruction::SPUT_BYTE:
+ case Instruction::SPUT_CHAR:
+ case Instruction::SPUT_SHORT:
+ case Instruction::SPUT_WIDE: {
+ uint16_t field_ref = mir->dalvikInsn.vB;
+ AdvanceMemoryVersion(NO_VALUE, field_ref);
+ }
+ break;
+
+ }
+ return res;
+}
+
+} // namespace art
diff --git a/src/compiler/dex/bb_opt.h b/src/compiler/dex/bb_opt.h
new file mode 100644
index 0000000..aedbc10
--- /dev/null
+++ b/src/compiler/dex/bb_opt.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_BBOPT_H_
+#define ART_SRC_COMPILER_DEX_BBOPT_H_
+
+#include "compiler_internals.h"
+
+#define NO_VALUE 0xffff
+#define ARRAY_REF 0xfffe
+
+namespace art {
+
+// Key is s_reg, value is value name.
+typedef SafeMap<uint16_t, uint16_t> SregValueMap;
+// Key is concatenation of quad, value is value name.
+typedef SafeMap<uint64_t, uint16_t> ValueMap;
+// Key represents a memory address, value is generation.
+typedef SafeMap<uint32_t, uint16_t> MemoryVersionMap;
+
+class BBOpt {
+ public:
+ BBOpt(CompilationUnit* cu) : cu_(cu) {};
+
+ uint64_t BuildKey(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier)
+ {
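+    // Packs the four 16-bit fields into one 64-bit key, most significant field first;
+    // e.g. BuildKey(0x0012, 0x0003, 0x0004, 0x0000) yields 0x0012000300040000.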
+ return (static_cast<uint64_t>(op) << 48 | static_cast<uint64_t>(operand1) << 32 |
+ static_cast<uint64_t>(operand2) << 16 | static_cast<uint64_t>(modifier));
+ };
+
+ uint16_t LookupValue(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier)
+ {
+ uint16_t res;
+ uint64_t key = BuildKey(op, operand1, operand2, modifier);
+ ValueMap::iterator it = value_map_.find(key);
+ if (it != value_map_.end()) {
+ res = it->second;
+ } else {
+ res = value_map_.size() + 1;
+ value_map_.Put(key, res);
+ }
+ return res;
+ };
+
+ bool ValueExists(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier)
+ {
+ uint64_t key = BuildKey(op, operand1, operand2, modifier);
+ ValueMap::iterator it = value_map_.find(key);
+ return (it != value_map_.end());
+ };
+
+ uint16_t GetMemoryVersion(uint16_t base, uint16_t field)
+ {
+ uint32_t key = (base << 16) | field;
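+    // key holds base in the upper and field in the lower 16 bits,
+    // e.g. base 0x0012 with field 0x0034 gives 0x00120034.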
+ uint16_t res;
+ MemoryVersionMap::iterator it = memory_version_map_.find(key);
+ if (it == memory_version_map_.end()) {
+ res = 0;
+ memory_version_map_.Put(key, res);
+ } else {
+ res = it->second;
+ }
+ return res;
+ };
+
+ void AdvanceMemoryVersion(uint16_t base, uint16_t field)
+ {
+ uint32_t key = (base << 16) | field;
+ MemoryVersionMap::iterator it = memory_version_map_.find(key);
+ if (it == memory_version_map_.end()) {
+ memory_version_map_.Put(key, 0);
+ } else {
+ it->second++;
+ }
+ };
+
+ void SetOperandValue(uint16_t s_reg, uint16_t value)
+ {
+ SregValueMap::iterator it = sreg_value_map_.find(s_reg);
+ if (it != sreg_value_map_.end()) {
+ DCHECK_EQ(it->second, value);
+ } else {
+ sreg_value_map_.Put(s_reg, value);
+ }
+ };
+
+ uint16_t GetOperandValue(int s_reg)
+ {
+ uint16_t res = NO_VALUE;
+ SregValueMap::iterator it = sreg_value_map_.find(s_reg);
+ if (it != sreg_value_map_.end()) {
+ res = it->second;
+ } else {
+ // First use
+ res = LookupValue(NO_VALUE, s_reg, NO_VALUE, NO_VALUE);
+ sreg_value_map_.Put(s_reg, res);
+ }
+ return res;
+ };
+
+ void SetOperandValueWide(uint16_t s_reg, uint16_t value)
+ {
+ SregValueMap::iterator it = sreg_wide_value_map_.find(s_reg);
+ if (it != sreg_wide_value_map_.end()) {
+ DCHECK_EQ(it->second, value);
+ } else {
+ sreg_wide_value_map_.Put(s_reg, value);
+ }
+ };
+
+ uint16_t GetOperandValueWide(int s_reg)
+ {
+ uint16_t res = NO_VALUE;
+ SregValueMap::iterator it = sreg_wide_value_map_.find(s_reg);
+ if (it != sreg_wide_value_map_.end()) {
+ res = it->second;
+ } else {
+ // First use
+ res = LookupValue(NO_VALUE, s_reg, NO_VALUE, NO_VALUE);
+ sreg_wide_value_map_.Put(s_reg, res);
+ }
+ return res;
+ };
+
+ uint16_t GetValueNumber(MIR* mir);
+
+ private:
+ CompilationUnit* cu_;
+ SregValueMap sreg_value_map_;
+ SregValueMap sreg_wide_value_map_;
+ ValueMap value_map_;
+ MemoryVersionMap memory_version_map_;
+ std::set<uint16_t> null_checked_;
+
+};
+
+} // namespace art
+
+#endif // ART_SRC_COMPILER_DEX_BBOPT_H_
diff --git a/src/compiler/dex/compiler_enums.h b/src/compiler/dex/compiler_enums.h
new file mode 100644
index 0000000..3eb862f
--- /dev/null
+++ b/src/compiler/dex/compiler_enums.h
@@ -0,0 +1,406 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_COMPILERENUMS_H_
+#define ART_SRC_COMPILER_DEX_COMPILERENUMS_H_
+
+#include "dex_instruction.h"
+
+namespace art {
+
+enum RegisterClass {
+ kCoreReg,
+ kFPReg,
+ kAnyReg,
+};
+
+enum SpecialTargetRegister {
+ kSelf, // Thread pointer.
+ kSuspend, // Used to reduce suspend checks for some targets.
+ kLr,
+ kPc,
+ kSp,
+ kArg0,
+ kArg1,
+ kArg2,
+ kArg3,
+ kFArg0,
+ kFArg1,
+ kFArg2,
+ kFArg3,
+ kRet0,
+ kRet1,
+ kInvokeTgt,
+ kCount
+};
+
+enum RegLocationType {
+ kLocDalvikFrame = 0, // Normal Dalvik register
+ kLocPhysReg,
+ kLocCompilerTemp,
+ kLocInvalid
+};
+
+enum BBType {
+ kEntryBlock,
+ kDalvikByteCode,
+ kExitBlock,
+ kExceptionHandling,
+ kDead,
+};
+
+/*
+ * Def/Use encoding in 64-bit use_mask/def_mask. Low positions used for target-specific
+ * registers (and typically use the register number as the position). High positions
+ * reserved for common and abstract resources.
+ */
+
+enum ResourceEncodingPos {
+ kMustNotAlias = 63,
+ kHeapRef = 62, // Default memory reference type.
+ kLiteral = 61, // Literal pool memory reference.
+ kDalvikReg = 60, // Dalvik v_reg memory reference.
+ kFPStatus = 59,
+ kCCode = 58,
+ kLowestCommonResource = kCCode
+};
+
+// Shared pseudo opcodes - must be < 0.
+enum LIRPseudoOpcode {
+ kPseudoExportedPC = -16,
+ kPseudoSafepointPC = -15,
+ kPseudoIntrinsicRetry = -14,
+ kPseudoSuspendTarget = -13,
+ kPseudoThrowTarget = -12,
+ kPseudoCaseLabel = -11,
+ kPseudoMethodEntry = -10,
+ kPseudoMethodExit = -9,
+ kPseudoBarrier = -8,
+ kPseudoEntryBlock = -7,
+ kPseudoExitBlock = -6,
+ kPseudoTargetLabel = -5,
+ kPseudoDalvikByteCodeBoundary = -4,
+ kPseudoPseudoAlign4 = -3,
+ kPseudoEHBlockLabel = -2,
+ kPseudoNormalBlockLabel = -1,
+};
+
+enum ExtendedMIROpcode {
+ kMirOpFirst = kNumPackedOpcodes,
+ kMirOpPhi = kMirOpFirst,
+ kMirOpCopy,
+ kMirOpFusedCmplFloat,
+ kMirOpFusedCmpgFloat,
+ kMirOpFusedCmplDouble,
+ kMirOpFusedCmpgDouble,
+ kMirOpFusedCmpLong,
+ kMirOpNop,
+ kMirOpNullCheck,
+ kMirOpRangeCheck,
+ kMirOpDivZeroCheck,
+ kMirOpCheck,
+ kMirOpCheckPart2,
+ kMirOpSelect,
+ kMirOpLast,
+};
+
+enum MIROptimizationFlagPositons {
+ kMIRIgnoreNullCheck = 0,
+ kMIRNullCheckOnly,
+ kMIRIgnoreRangeCheck,
+ kMIRRangeCheckOnly,
+ kMIRInlined, // Invoke is inlined (ie dead).
+ kMIRInlinedPred, // Invoke is inlined via prediction.
+ kMIRCallee, // Instruction is inlined from callee.
+ kMIRIgnoreSuspendCheck,
+ kMIRDup,
+ kMIRMark, // Temporary node mark.
+};
+
+// For successor_block_list.
+enum BlockListType {
+ kNotUsed = 0,
+ kCatch,
+ kPackedSwitch,
+ kSparseSwitch,
+};
+
+enum AssemblerStatus {
+ kSuccess,
+ kRetryAll,
+};
+
+enum OpSize {
+ kWord,
+ kLong,
+ kSingle,
+ kDouble,
+ kUnsignedHalf,
+ kSignedHalf,
+ kUnsignedByte,
+ kSignedByte,
+};
+
+std::ostream& operator<<(std::ostream& os, const OpSize& kind);
+
+enum OpKind {
+ kOpMov,
+ kOpMvn,
+ kOpCmp,
+ kOpLsl,
+ kOpLsr,
+ kOpAsr,
+ kOpRor,
+ kOpNot,
+ kOpAnd,
+ kOpOr,
+ kOpXor,
+ kOpNeg,
+ kOpAdd,
+ kOpAdc,
+ kOpSub,
+ kOpSbc,
+ kOpRsub,
+ kOpMul,
+ kOpDiv,
+ kOpRem,
+ kOpBic,
+ kOpCmn,
+ kOpTst,
+ kOpBkpt,
+ kOpBlx,
+ kOpPush,
+ kOpPop,
+ kOp2Char,
+ kOp2Short,
+ kOp2Byte,
+ kOpCondBr,
+ kOpUncondBr,
+ kOpBx,
+ kOpInvalid,
+};
+
+std::ostream& operator<<(std::ostream& os, const OpKind& kind);
+
+enum ConditionCode {
+ kCondEq, // equal
+ kCondNe, // not equal
+ kCondCs, // carry set (unsigned less than)
+ kCondUlt = kCondCs,
+ kCondCc, // carry clear (unsigned greater than or same)
+ kCondUge = kCondCc,
+ kCondMi, // minus
+ kCondPl, // plus, positive or zero
+ kCondVs, // overflow
+ kCondVc, // no overflow
+ kCondHi, // unsigned greater than
+ kCondLs, // unsigned lower or same
+ kCondGe, // signed greater than or equal
+ kCondLt, // signed less than
+ kCondGt, // signed greater than
+ kCondLe, // signed less than or equal
+ kCondAl, // always
+ kCondNv, // never
+};
+
+std::ostream& operator<<(std::ostream& os, const ConditionCode& kind);
+
+// Target specific condition encodings
+enum ArmConditionCode {
+ kArmCondEq = 0x0, // 0000
+ kArmCondNe = 0x1, // 0001
+ kArmCondCs = 0x2, // 0010
+ kArmCondCc = 0x3, // 0011
+ kArmCondMi = 0x4, // 0100
+ kArmCondPl = 0x5, // 0101
+ kArmCondVs = 0x6, // 0110
+ kArmCondVc = 0x7, // 0111
+ kArmCondHi = 0x8, // 1000
+ kArmCondLs = 0x9, // 1001
+ kArmCondGe = 0xa, // 1010
+ kArmCondLt = 0xb, // 1011
+ kArmCondGt = 0xc, // 1100
+ kArmCondLe = 0xd, // 1101
+ kArmCondAl = 0xe, // 1110
+ kArmCondNv = 0xf, // 1111
+};
+
+std::ostream& operator<<(std::ostream& os, const ArmConditionCode& kind);
+
+enum X86ConditionCode {
+ kX86CondO = 0x0, // overflow
+ kX86CondNo = 0x1, // not overflow
+
+ kX86CondB = 0x2, // below
+ kX86CondNae = kX86CondB, // not-above-equal
+ kX86CondC = kX86CondB, // carry
+
+ kX86CondNb = 0x3, // not-below
+ kX86CondAe = kX86CondNb, // above-equal
+ kX86CondNc = kX86CondNb, // not-carry
+
+ kX86CondZ = 0x4, // zero
+ kX86CondEq = kX86CondZ, // equal
+
+ kX86CondNz = 0x5, // not-zero
+ kX86CondNe = kX86CondNz, // not-equal
+
+ kX86CondBe = 0x6, // below-equal
+ kX86CondNa = kX86CondBe, // not-above
+
+ kX86CondNbe = 0x7, // not-below-equal
+  kX86CondA = kX86CondNbe, // above
+
+ kX86CondS = 0x8, // sign
+ kX86CondNs = 0x9, // not-sign
+
+ kX86CondP = 0xa, // 8-bit parity even
+ kX86CondPE = kX86CondP,
+
+ kX86CondNp = 0xb, // 8-bit parity odd
+ kX86CondPo = kX86CondNp,
+
+ kX86CondL = 0xc, // less-than
+ kX86CondNge = kX86CondL, // not-greater-equal
+
+ kX86CondNl = 0xd, // not-less-than
+  kX86CondGe = kX86CondNl, // greater-equal
+
+ kX86CondLe = 0xe, // less-than-equal
+ kX86CondNg = kX86CondLe, // not-greater
+
+  kX86CondNle = 0xf, // not-less-than-equal
+  kX86CondG = kX86CondNle, // greater
+};
+
+std::ostream& operator<<(std::ostream& os, const X86ConditionCode& kind);
+
+enum ThrowKind {
+ kThrowNullPointer,
+ kThrowDivZero,
+ kThrowArrayBounds,
+ kThrowConstantArrayBounds,
+ kThrowNoSuchMethod,
+ kThrowStackOverflow,
+};
+
+enum SpecialCaseHandler {
+ kNoHandler,
+ kNullMethod,
+ kConstFunction,
+ kIGet,
+ kIGetBoolean,
+ kIGetObject,
+ kIGetByte,
+ kIGetChar,
+ kIGetShort,
+ kIGetWide,
+ kIPut,
+ kIPutBoolean,
+ kIPutObject,
+ kIPutByte,
+ kIPutChar,
+ kIPutShort,
+ kIPutWide,
+ kIdentity,
+};
+
+enum DividePattern {
+ DivideNone,
+ Divide3,
+ Divide5,
+ Divide7,
+};
+
+std::ostream& operator<<(std::ostream& os, const DividePattern& pattern);
+
+/* Customized node traversal orders for different needs */
+enum DataFlowAnalysisMode {
+ kAllNodes = 0, // All nodes.
+ kReachableNodes, // All reachable nodes.
+ kPreOrderDFSTraversal, // Depth-First-Search / Pre-Order.
+ kPostOrderDFSTraversal, // Depth-First-Search / Post-Order.
+ kPostOrderDOMTraversal, // Dominator tree / Post-Order.
+ kReversePostOrderTraversal, // Depth-First-Search / reverse Post-Order.
+};
+
+std::ostream& operator<<(std::ostream& os, const DataFlowAnalysisMode& mode);
+
+// Memory barrier types (see "The JSR-133 Cookbook for Compiler Writers").
+enum MemBarrierKind {
+ kLoadStore,
+ kLoadLoad,
+ kStoreStore,
+ kStoreLoad
+};
+
+std::ostream& operator<<(std::ostream& os, const MemBarrierKind& kind);
+
+enum OpFeatureFlags {
+ kIsBranch = 0,
+ kNoOperand,
+ kIsUnaryOp,
+ kIsBinaryOp,
+ kIsTertiaryOp,
+ kIsQuadOp,
+ kIsQuinOp,
+ kIsSextupleOp,
+ kIsIT,
+ kMemLoad,
+ kMemStore,
+ kPCRelFixup, // x86 FIXME: add NEEDS_FIXUP to instruction attributes.
+ kRegDef0,
+ kRegDef1,
+ kRegDefA,
+ kRegDefD,
+ kRegDefFPCSList0,
+ kRegDefFPCSList2,
+ kRegDefList0,
+ kRegDefList1,
+ kRegDefList2,
+ kRegDefLR,
+ kRegDefSP,
+ kRegUse0,
+ kRegUse1,
+ kRegUse2,
+ kRegUse3,
+ kRegUse4,
+ kRegUseA,
+ kRegUseC,
+ kRegUseD,
+ kRegUseFPCSList0,
+ kRegUseFPCSList2,
+ kRegUseList0,
+ kRegUseList1,
+ kRegUseLR,
+ kRegUsePC,
+ kRegUseSP,
+ kSetsCCodes,
+ kUsesCCodes
+};
+
+enum SelectInstructionKind {
+ kSelectNone,
+ kSelectConst,
+ kSelectMove,
+ kSelectGoto
+};
+
+std::ostream& operator<<(std::ostream& os, const OpFeatureFlags& flag);
+
+} // namespace art
+
+#endif // ART_SRC_COMPILER_DEX_COMPILERENUMS_H_
diff --git a/src/compiler/dex/compiler_internals.h b/src/compiler/dex/compiler_internals.h
new file mode 100644
index 0000000..c85700a
--- /dev/null
+++ b/src/compiler/dex/compiler_internals.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_COMPILER_INTERNAL_H_
+#define ART_SRC_COMPILER_DEX_COMPILER_INTERNAL_H_
+
+#include <assert.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include "base/logging.h"
+#include "class_linker.h"
+#include "quick/codegen.h"
+#include "compiler.h"
+#include "compiler_ir.h"
+#include "compiler_utility.h"
+#include "frontend.h"
+#include "gc/card_table.h"
+#include "mirror/dex_cache.h"
+#include "monitor.h"
+#include "ralloc.h"
+#include "thread.h"
+#include "utils.h"
+
+#endif // ART_SRC_COMPILER_DEX_COMPILER_INTERNAL_H_
diff --git a/src/compiler/dex/compiler_ir.h b/src/compiler/dex/compiler_ir.h
new file mode 100644
index 0000000..246d200
--- /dev/null
+++ b/src/compiler/dex/compiler_ir.h
@@ -0,0 +1,627 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_COMPILER_IR_H_
+#define ART_SRC_COMPILER_DEX_COMPILER_IR_H_
+
+#include <vector>
+#include "dex_instruction.h"
+#include "compiler.h"
+#include "compiler_utility.h"
+#include "oat_compilation_unit.h"
+#include "safe_map.h"
+#include "greenland/ir_builder.h"
+#include "llvm/Module.h"
+#include "compiler_enums.h"
+
+namespace art {
+
+#define SLOW_FIELD_PATH (cu->enable_debug & (1 << kDebugSlowFieldPath))
+#define SLOW_INVOKE_PATH (cu->enable_debug & (1 << kDebugSlowInvokePath))
+#define SLOW_STRING_PATH (cu->enable_debug & (1 << kDebugSlowStringPath))
+#define SLOW_TYPE_PATH (cu->enable_debug & (1 << kDebugSlowTypePath))
+#define EXERCISE_SLOWEST_STRING_PATH (cu->enable_debug & \
+ (1 << kDebugSlowestStringPath))
+
+// Minimum field size to contain Dalvik v_reg number.
+#define VREG_NUM_WIDTH 16
+
+struct ArenaBitVector;
+struct LIR;
+class LLVMInfo;
+
+struct PromotionMap {
+ RegLocationType core_location:3;
+ uint8_t core_reg;
+ RegLocationType fp_location:3;
+ uint8_t FpReg;
+ bool first_in_pair;
+};
+
+struct RegLocation {
+ RegLocationType location:3;
+ unsigned wide:1;
+ unsigned defined:1; // Do we know the type?
+ unsigned is_const:1; // Constant, value in cu->constant_values[].
+ unsigned fp:1; // Floating point?
+ unsigned core:1; // Non-floating point?
+ unsigned ref:1; // Something GC cares about.
+ unsigned high_word:1; // High word of pair?
+ unsigned home:1; // Does this represent the home location?
+ uint8_t low_reg; // First physical register.
+ uint8_t high_reg; // 2nd physical register (if wide).
+ int32_t s_reg_low; // SSA name for low Dalvik word.
+ int32_t orig_sreg; // TODO: remove after Bitcode gen complete
+                        // and consolidate usage w/ s_reg_low.
+};
+
+struct CompilerTemp {
+ int s_reg;
+ ArenaBitVector* bv;
+};
+
+struct CallInfo {
+ int num_arg_words; // Note: word count, not arg count.
+ RegLocation* args; // One for each word of arguments.
+ RegLocation result; // Eventual target of MOVE_RESULT.
+ int opt_flags;
+ InvokeType type;
+ uint32_t dex_idx;
+ uint32_t index; // Method idx for invokes, type idx for FilledNewArray.
+ uintptr_t direct_code;
+ uintptr_t direct_method;
+ RegLocation target; // Target of following move_result.
+ bool skip_this;
+ bool is_range;
+ int offset; // Dalvik offset.
+};
+
+ /*
+ * Data structure tracking the mapping between a Dalvik register (pair) and a
+ * native register (pair). The idea is to reuse the previously loaded value
+ * if possible, otherwise to keep the value in a native register as long as
+ * possible.
+ */
+struct RegisterInfo {
+ int reg; // Reg number
+ bool in_use; // Has it been allocated?
+ bool is_temp; // Can allocate as temp?
+ bool pair; // Part of a register pair?
+ int partner; // If pair, other reg of pair.
+ bool live; // Is there an associated SSA name?
+ bool dirty; // If live, is it dirty?
+ int s_reg; // Name of live value.
+ LIR *def_start; // Starting inst in last def sequence.
+ LIR *def_end; // Ending inst in last def sequence.
+};
+
+struct RegisterPool {
+ int num_core_regs;
+ RegisterInfo *core_regs;
+ int next_core_reg;
+ int num_fp_regs;
+ RegisterInfo *FPRegs;
+ int next_fp_reg;
+};
+
+#define INVALID_SREG (-1)
+#define INVALID_VREG (0xFFFFU)
+#define INVALID_REG (0xFF)
+#define INVALID_OFFSET (0xDEADF00FU)
+
+/* SSA encodings for special registers */
+#define SSA_METHOD_BASEREG (-2)
+/* First compiler temp basereg, grows smaller */
+#define SSA_CTEMP_BASEREG (SSA_METHOD_BASEREG - 1)
+
+/*
+ * Some code patterns cause the generation of excessively large
+ * methods - in particular initialization sequences. There isn't much
+ * benefit in optimizing these methods, and the cost can be very high.
+ * We attempt to identify these cases, and avoid performing most dataflow
+ * analysis. Two thresholds are used - one for known initializers and one
+ * for everything else.
+ */
+#define MANY_BLOCKS_INITIALIZER 1000 /* Threshold for switching dataflow off */
+#define MANY_BLOCKS 4000 /* Non-initializer threshold */
+
+// Utility macros to traverse the LIR list.
+#define NEXT_LIR(lir) (lir->next)
+#define PREV_LIR(lir) (lir->prev)
+
+// Defines for alias_info (tracks Dalvik register references).
+#define DECODE_ALIAS_INFO_REG(X) (X & 0xffff)
+#define DECODE_ALIAS_INFO_WIDE_FLAG (0x80000000)
+#define DECODE_ALIAS_INFO_WIDE(X) ((X & DECODE_ALIAS_INFO_WIDE_FLAG) ? 1 : 0)
+#define ENCODE_ALIAS_INFO(REG, ISWIDE) (REG | (ISWIDE ? DECODE_ALIAS_INFO_WIDE_FLAG : 0))
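+// e.g. ENCODE_ALIAS_INFO(5, true) yields 0x80000005, from which DECODE_ALIAS_INFO_REG
+// recovers register 5 and DECODE_ALIAS_INFO_WIDE recovers 1.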
+
+// Common resource macros.
+#define ENCODE_CCODE (1ULL << kCCode)
+#define ENCODE_FP_STATUS (1ULL << kFPStatus)
+
+// Abstract memory locations.
+#define ENCODE_DALVIK_REG (1ULL << kDalvikReg)
+#define ENCODE_LITERAL (1ULL << kLiteral)
+#define ENCODE_HEAP_REF (1ULL << kHeapRef)
+#define ENCODE_MUST_NOT_ALIAS (1ULL << kMustNotAlias)
+
+#define ENCODE_ALL (~0ULL)
+#define ENCODE_MEM (ENCODE_DALVIK_REG | ENCODE_LITERAL | \
+ ENCODE_HEAP_REF | ENCODE_MUST_NOT_ALIAS)
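+// ENCODE_MEM thus covers the four abstract memory bits 60..63, i.e. 0xf000000000000000.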
+
+#define is_pseudo_opcode(opcode) (static_cast<int>(opcode) < 0)
+
+struct LIR {
+ int offset; // Offset of this instruction.
+ int dalvik_offset; // Offset of Dalvik opcode.
+ LIR* next;
+ LIR* prev;
+ LIR* target;
+ int opcode;
+ int operands[5]; // [0..4] = [dest, src1, src2, extra, extra2].
+ struct {
+ bool is_nop:1; // LIR is optimized away.
+ bool pcRelFixup:1; // May need pc-relative fixup.
+ unsigned int size:5; // Note: size is in bytes.
+ unsigned int unused:25;
+ } flags;
+ int alias_info; // For Dalvik register & litpool disambiguation.
+ uint64_t use_mask; // Resource mask for use.
+ uint64_t def_mask; // Resource mask for def.
+};
+
+extern const char* extended_mir_op_names[kMirOpLast - kMirOpFirst];
+
+struct SSARepresentation;
+
+#define MIR_IGNORE_NULL_CHECK (1 << kMIRIgnoreNullCheck)
+#define MIR_NULL_CHECK_ONLY (1 << kMIRNullCheckOnly)
+#define MIR_IGNORE_RANGE_CHECK (1 << kMIRIgnoreRangeCheck)
+#define MIR_RANGE_CHECK_ONLY (1 << kMIRRangeCheckOnly)
+#define MIR_INLINED (1 << kMIRInlined)
+#define MIR_INLINED_PRED (1 << kMIRInlinedPred)
+#define MIR_CALLEE (1 << kMIRCallee)
+#define MIR_IGNORE_SUSPEND_CHECK (1 << kMIRIgnoreSuspendCheck)
+#define MIR_DUP (1 << kMIRDup)
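+// e.g. MIR_IGNORE_NULL_CHECK is bit 0 (0x1) and MIR_IGNORE_RANGE_CHECK is bit 2 (0x4).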
+
+struct Checkstats {
+ int null_checks;
+ int null_checks_eliminated;
+ int range_checks;
+ int range_checks_eliminated;
+};
+
+struct MIR {
+ DecodedInstruction dalvikInsn;
+ unsigned int width;
+ unsigned int offset;
+ MIR* prev;
+ MIR* next;
+ SSARepresentation* ssa_rep;
+ int optimization_flags;
+ union {
+ // Establish link between two halves of throwing instructions.
+ MIR* throw_insn;
+ // Saved opcode for NOP'd MIRs
+ Instruction::Code original_opcode;
+ } meta;
+};
+
+struct BasicBlockDataFlow;
+
+struct BasicBlock {
+ int id;
+ int dfs_id;
+ bool visited;
+ bool hidden;
+ bool catch_entry;
+ bool explicit_throw;
+ bool conditional_branch;
+ bool terminated_by_return; // Block ends with a Dalvik return opcode.
+ bool dominates_return; // Is a member of return extended basic block.
+ uint16_t start_offset;
+ uint16_t nesting_depth;
+ BBType block_type;
+ MIR* first_mir_insn;
+ MIR* last_mir_insn;
+ BasicBlock* fall_through;
+ BasicBlock* taken;
+ BasicBlock* i_dom; // Immediate dominator.
+ BasicBlockDataFlow* data_flow_info;
+ GrowableList* predecessors;
+ ArenaBitVector* dominators;
+ ArenaBitVector* i_dominated; // Set nodes being immediately dominated.
+ ArenaBitVector* dom_frontier; // Dominance frontier.
+  struct {                            // For one-to-many successors like
+    BlockListType block_list_type;    // switch and exception handling.
+ GrowableList blocks;
+ } successor_block_list;
+};
+
+/*
+ * The "blocks" field in "successor_block_list" points to an array of
+ * elements with the type "SuccessorBlockInfo".
+ * For catch blocks, key is type index for the exception.
+ * For switch blocks, key is the case value.
+ */
+struct SuccessorBlockInfo {
+ BasicBlock* block;
+ int key;
+};
+
+struct LoopAnalysis;
+struct RegisterPool;
+struct ArenaMemBlock;
+struct Memstats;
+class Codegen;
+
+#define NOTVISITED (-1)
+
+struct CompilationUnit {
+ CompilationUnit()
+ : num_blocks(0),
+ compiler(NULL),
+ class_linker(NULL),
+ dex_file(NULL),
+ class_loader(NULL),
+ class_def_idx(0),
+ method_idx(0),
+ code_item(NULL),
+ access_flags(0),
+ invoke_type(kDirect),
+ shorty(NULL),
+ first_lir_insn(NULL),
+ last_lir_insn(NULL),
+ literal_list(NULL),
+ method_literal_list(NULL),
+ code_literal_list(NULL),
+ disable_opt(0),
+ enable_debug(0),
+ data_offset(0),
+ total_size(0),
+ assembler_status(kSuccess),
+ assembler_retries(0),
+ verbose(false),
+ has_loop(false),
+ has_invoke(false),
+ qd_mode(false),
+ reg_pool(NULL),
+ instruction_set(kNone),
+ num_ssa_regs(0),
+ ssa_base_vregs(NULL),
+ ssa_subscripts(NULL),
+ ssa_strings(NULL),
+ vreg_to_ssa_map(NULL),
+ ssa_last_defs(NULL),
+ is_constant_v(NULL),
+ must_flush_constant_v(NULL),
+ constant_values(NULL),
+ reg_location(NULL),
+ promotion_map(NULL),
+ method_sreg(0),
+ num_reachable_blocks(0),
+ num_dalvik_registers(0),
+ entry_block(NULL),
+ exit_block(NULL),
+ cur_block(NULL),
+ i_dom_list(NULL),
+ try_block_addr(NULL),
+ def_block_matrix(NULL),
+ temp_block_v(NULL),
+ temp_dalvik_register_v(NULL),
+ temp_ssa_register_v(NULL),
+ temp_ssa_block_id_v(NULL),
+ block_label_list(NULL),
+ num_ins(0),
+ num_outs(0),
+ num_regs(0),
+ num_core_spills(0),
+ num_fp_spills(0),
+ num_compiler_temps(0),
+ frame_size(0),
+ core_spill_mask(0U),
+ fp_spill_mask(0U),
+ attrs(0U),
+ current_dalvik_offset(0),
+ insns(NULL),
+ insns_size(0U),
+ disable_dataflow(false),
+ def_count(0),
+ compiler_flip_match(false),
+ arena_head(NULL),
+ current_arena(NULL),
+ num_arena_blocks(0),
+ mstats(NULL),
+ checkstats(NULL),
+ gen_bitcode(false),
+ llvm_info(NULL),
+ context(NULL),
+ module(NULL),
+ func(NULL),
+ intrinsic_helper(NULL),
+ irb(NULL),
+ placeholder_bb(NULL),
+ entry_bb(NULL),
+ entryTarget_bb(NULL),
+ temp_name(0),
+#ifndef NDEBUG
+ live_sreg(0),
+#endif
+ opcode_count(NULL),
+ cg(NULL) {}
+
+ int num_blocks;
+ GrowableList block_list;
+ Compiler* compiler; // Compiler driving this compiler.
+ ClassLinker* class_linker; // Linker to resolve fields and methods.
+ const DexFile* dex_file; // DexFile containing the method being compiled.
+ jobject class_loader; // compiling method's class loader.
+ uint32_t class_def_idx; // compiling method's defining class definition index.
+ uint32_t method_idx; // compiling method's index into method_ids of DexFile.
+ const DexFile::CodeItem* code_item; // compiling method's DexFile code_item.
+ uint32_t access_flags; // compiling method's access flags.
+ InvokeType invoke_type; // compiling method's invocation type.
+ const char* shorty; // compiling method's shorty.
+ LIR* first_lir_insn;
+ LIR* last_lir_insn;
+ LIR* literal_list; // Constants.
+ LIR* method_literal_list; // Method literals requiring patching.
+ LIR* code_literal_list; // Code literals requiring patching.
+ uint32_t disable_opt; // opt_control_vector flags.
+ uint32_t enable_debug; // debugControlVector flags.
+ int data_offset; // starting offset of literal pool.
+ int total_size; // header + code size.
+ AssemblerStatus assembler_status; // Success or fix and retry.
+ int assembler_retries;
+ std::vector<uint8_t> code_buffer;
+ /*
+ * Holds mapping from native PC to dex PC for safepoints where we may deoptimize.
+ * Native PC is on the return address of the safepointed operation. Dex PC is for
+ * the instruction being executed at the safepoint.
+ */
+ std::vector<uint32_t> pc2dexMappingTable;
+ /*
+ * Holds mapping from Dex PC to native PC for catch entry points. Native PC and Dex PC
+   * immediately precede the instruction.
+ */
+ std::vector<uint32_t> dex2pcMappingTable;
+ std::vector<uint32_t> combined_mapping_table;
+ std::vector<uint32_t> core_vmap_table;
+ std::vector<uint32_t> fp_vmap_table;
+ std::vector<uint8_t> native_gc_map;
+ std::vector<BasicBlock*> extended_basic_blocks;
+ bool verbose;
+ bool has_loop; // Contains a loop.
+ bool has_invoke; // Contains an invoke instruction.
+ bool qd_mode; // Compile for code size/compile time.
+ RegisterPool* reg_pool;
+ InstructionSet instruction_set;
+  // Total number of regs used in the whole cu after SSA transformation.
+ int num_ssa_regs;
+ // Map SSA reg i to the base virtual register/subscript.
+ GrowableList* ssa_base_vregs;
+ GrowableList* ssa_subscripts;
+ GrowableList* ssa_strings;
+
+ // Map original Dalvik virtual reg i to the current SSA name.
+ int* vreg_to_ssa_map; // length == method->registers_size
+ int* ssa_last_defs; // length == method->registers_size
+ ArenaBitVector* is_constant_v; // length == num_ssa_reg
+ ArenaBitVector* must_flush_constant_v; // length == num_ssa_reg
+ int* constant_values; // length == num_ssa_reg
+
+ // Use counts of ssa names.
+ GrowableList use_counts; // Weighted by nesting depth
+ GrowableList raw_use_counts; // Not weighted
+
+ // Optimization support.
+ GrowableList loop_headers;
+
+ // Map SSA names to location.
+ RegLocation* reg_location;
+
+ // Keep track of Dalvik v_reg to physical register mappings.
+ PromotionMap* promotion_map;
+
+ // SSA name for Method*.
+ int method_sreg;
+ RegLocation method_loc; // Describes location of method*.
+
+ int num_reachable_blocks;
+ int num_dalvik_registers; // method->registers_size.
+ BasicBlock* entry_block;
+ BasicBlock* exit_block;
+ BasicBlock* cur_block;
+ GrowableList dfs_order;
+ GrowableList dfs_post_order;
+ GrowableList dom_post_order_traversal;
+ GrowableList throw_launchpads;
+ GrowableList suspend_launchpads;
+ GrowableList intrinsic_launchpads;
+ GrowableList compiler_temps;
+ int* i_dom_list;
+ ArenaBitVector* try_block_addr;
+ ArenaBitVector** def_block_matrix; // num_dalvik_register x num_blocks.
+ ArenaBitVector* temp_block_v;
+ ArenaBitVector* temp_dalvik_register_v;
+ ArenaBitVector* temp_ssa_register_v; // num_ssa_regs.
+ int* temp_ssa_block_id_v; // working storage for Phi labels.
+ LIR* block_label_list;
+ /*
+ * Frame layout details.
+ * NOTE: for debug support it will be necessary to add a structure
+ * to map the Dalvik virtual registers to the promoted registers.
+ * NOTE: "num" fields are in 4-byte words, "Size" and "Offset" in bytes.
+ */
+ int num_ins;
+ int num_outs;
+ int num_regs; // Unlike num_dalvik_registers, does not include ins.
+ int num_core_spills;
+ int num_fp_spills;
+ int num_compiler_temps;
+ int frame_size;
+ unsigned int core_spill_mask;
+ unsigned int fp_spill_mask;
+ unsigned int attrs;
+ /*
+ * TODO: The code generation utilities don't have a built-in
+ * mechanism to propagate the original Dalvik opcode address to the
+ * associated generated instructions. For the trace compiler, this wasn't
+ * necessary because the interpreter handled all throws and debugging
+ * requests. For now we'll handle this by placing the Dalvik offset
+ * in the CompilationUnit struct before codegen for each instruction.
+   * The low-level LIR creation utilities will pull it from here. Rework this.
+ */
+ int current_dalvik_offset;
+ GrowableList switch_tables;
+ GrowableList fill_array_data;
+ const uint16_t* insns;
+ uint32_t insns_size;
+ bool disable_dataflow; // Skip dataflow analysis if possible
+ SafeMap<unsigned int, BasicBlock*> block_map; // FindBlock lookup cache.
+ SafeMap<unsigned int, unsigned int> block_id_map; // Block collapse lookup cache.
+ SafeMap<unsigned int, LIR*> boundary_map; // boundary lookup cache.
+ int def_count; // Used to estimate number of SSA names.
+
+ // If non-empty, apply optimizer/debug flags only to matching methods.
+ std::string compiler_method_match;
+ // Flips sense of compiler_method_match - apply flags if doesn't match.
+ bool compiler_flip_match;
+ ArenaMemBlock* arena_head;
+ ArenaMemBlock* current_arena;
+ int num_arena_blocks;
+ Memstats* mstats;
+ Checkstats* checkstats;
+ bool gen_bitcode;
+ LLVMInfo* llvm_info;
+ llvm::LLVMContext* context;
+ llvm::Module* module;
+ llvm::Function* func;
+ greenland::IntrinsicHelper* intrinsic_helper;
+ greenland::IRBuilder* irb;
+ llvm::BasicBlock* placeholder_bb;
+ llvm::BasicBlock* entry_bb;
+ llvm::BasicBlock* entryTarget_bb;
+ std::string bitcode_filename;
+ GrowableList llvm_values;
+ int32_t temp_name;
+ SafeMap<llvm::BasicBlock*, LIR*> block_to_label_map; // llvm bb -> LIR label.
+ SafeMap<int32_t, llvm::BasicBlock*> id_to_block_map; // block id -> llvm bb.
+ SafeMap<llvm::Value*, RegLocation> loc_map; // llvm Value to loc rec.
+ std::set<llvm::BasicBlock*> llvm_blocks;
+#ifndef NDEBUG
+ /*
+ * Sanity checking for the register temp tracking. The same ssa
+ * name should never be associated with one temp register per
+ * instruction compilation.
+ */
+ int live_sreg;
+#endif
+ std::set<uint32_t> catches;
+ int* opcode_count; // Count Dalvik opcodes for tuning.
+ UniquePtr<Codegen> cg;
+};
+
+struct SwitchTable {
+ int offset;
+ const uint16_t* table; // Original dex table.
+ int vaddr; // Dalvik offset of switch opcode.
+ LIR* anchor; // Reference instruction for relative offsets.
+ LIR** targets; // Array of case targets.
+};
+
+struct FillArrayData {
+ int offset;
+ const uint16_t* table; // Original dex table.
+ int size;
+ int vaddr; // Dalvik offset of FILL_ARRAY_DATA opcode.
+};
+
+#define MAX_PATTERN_LEN 5
+
+struct CodePattern {
+ const Instruction::Code opcodes[MAX_PATTERN_LEN];
+ const SpecialCaseHandler handler_code;
+};
+
+static const CodePattern special_patterns[] = {
+ {{Instruction::RETURN_VOID}, kNullMethod},
+ {{Instruction::CONST, Instruction::RETURN}, kConstFunction},
+ {{Instruction::CONST_4, Instruction::RETURN}, kConstFunction},
+ {{Instruction::CONST_4, Instruction::RETURN_OBJECT}, kConstFunction},
+ {{Instruction::CONST_16, Instruction::RETURN}, kConstFunction},
+  {{Instruction::IGET, Instruction::RETURN}, kIGet},
+ {{Instruction::IGET_BOOLEAN, Instruction::RETURN}, kIGetBoolean},
+ {{Instruction::IGET_OBJECT, Instruction::RETURN_OBJECT}, kIGetObject},
+ {{Instruction::IGET_BYTE, Instruction::RETURN}, kIGetByte},
+ {{Instruction::IGET_CHAR, Instruction::RETURN}, kIGetChar},
+ {{Instruction::IGET_SHORT, Instruction::RETURN}, kIGetShort},
+ {{Instruction::IGET_WIDE, Instruction::RETURN_WIDE}, kIGetWide},
+ {{Instruction::IPUT, Instruction::RETURN_VOID}, kIPut},
+ {{Instruction::IPUT_BOOLEAN, Instruction::RETURN_VOID}, kIPutBoolean},
+ {{Instruction::IPUT_OBJECT, Instruction::RETURN_VOID}, kIPutObject},
+ {{Instruction::IPUT_BYTE, Instruction::RETURN_VOID}, kIPutByte},
+ {{Instruction::IPUT_CHAR, Instruction::RETURN_VOID}, kIPutChar},
+ {{Instruction::IPUT_SHORT, Instruction::RETURN_VOID}, kIPutShort},
+ {{Instruction::IPUT_WIDE, Instruction::RETURN_VOID}, kIPutWide},
+ {{Instruction::RETURN}, kIdentity},
+ {{Instruction::RETURN_OBJECT}, kIdentity},
+ {{Instruction::RETURN_WIDE}, kIdentity},
+};
+
+static inline bool IsConst(const CompilationUnit* cu, int32_t s_reg)
+{
+ return (IsBitSet(cu->is_constant_v, s_reg));
+}
+
+static inline bool IsConst(const CompilationUnit* cu, RegLocation loc)
+{
+ return (IsConst(cu, loc.orig_sreg));
+}
+
+static inline int32_t ConstantValue(const CompilationUnit* cu, RegLocation loc)
+{
+ DCHECK(IsConst(cu, loc));
+ return cu->constant_values[loc.orig_sreg];
+}
+
+static inline int32_t ConstantValue(const CompilationUnit* cu, int32_t s_reg)
+{
+ DCHECK(IsConst(cu, s_reg));
+ return cu->constant_values[s_reg];
+}
+
+static inline int64_t ConstantValueWide(const CompilationUnit* cu, RegLocation loc)
+{
+ DCHECK(IsConst(cu, loc));
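+  // The low word lives at orig_sreg and the high word at orig_sreg + 1,
+  // e.g. stored words 0x89abcdef and 0x01234567 reassemble into 0x0123456789abcdef.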
+ return (static_cast<int64_t>(cu->constant_values[loc.orig_sreg + 1]) << 32) |
+ Low32Bits(static_cast<int64_t>(cu->constant_values[loc.orig_sreg]));
+}
+
+static inline bool IsConstantNullRef(const CompilationUnit* cu, RegLocation loc)
+{
+ return loc.ref && loc.is_const && (ConstantValue(cu, loc) == 0);
+}
+
+static inline bool MustFlushConstant(const CompilationUnit* cu, RegLocation loc)
+{
+ DCHECK(IsConst(cu, loc));
+ return IsBitSet(cu->must_flush_constant_v, loc.orig_sreg);
+}
+
+} // namespace art
+
+#endif // ART_SRC_COMPILER_DEX_COMPILER_IR_H_
diff --git a/src/compiler/dex/compiler_utility.cc b/src/compiler/dex/compiler_utility.cc
new file mode 100644
index 0000000..b5185b0
--- /dev/null
+++ b/src/compiler/dex/compiler_utility.cc
@@ -0,0 +1,799 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compiler_internals.h"
+
+namespace art {
+
+const char* extended_mir_op_names[kMirOpLast - kMirOpFirst] = {
+ "Phi",
+ "Copy",
+ "FusedCmplFloat",
+ "FusedCmpgFloat",
+ "FusedCmplDouble",
+ "FusedCmpgDouble",
+ "FusedCmpLong",
+ "Nop",
+ "OpNullCheck",
+ "OpRangeCheck",
+ "OpDivZeroCheck",
+ "Check1",
+ "Check2",
+ "Select",
+};
+
+#ifdef WITH_MEMSTATS
+struct Memstats {
+ uint32_t alloc_stats[kNumAllocKinds];
+ int list_sizes[kNumListKinds];
+ int list_wasted[kNumListKinds];
+ int list_grows[kNumListKinds];
+ int list_max_elems[kNumListKinds];
+ int bit_map_sizes[kNumBitMapKinds];
+ int bit_map_wasted[kNumBitMapKinds];
+ int bit_map_grows[kNumBitMapKinds];
+};
+
+const char* alloc_names[kNumAllocKinds] = {
+ "Misc ",
+ "BasicBlock ",
+ "LIR ",
+ "MIR ",
+ "DataFlow ",
+ "GrowList ",
+ "GrowBitMap ",
+ "Dalvik2SSA ",
+ "DebugInfo ",
+ "Successor ",
+ "RegAlloc ",
+ "Data ",
+ "Preds ",
+};
+
+const char* list_names[kNumListKinds] = {
+ "Misc ",
+ "block_list ",
+ "SSAtoDalvik ",
+ "dfs_order ",
+ "dfs_post_order ",
+ "dom_post_order_traversal ",
+ "throw_launch_pads ",
+ "suspend_launch_pads ",
+ "switch_tables ",
+ "fill_array_data ",
+ "SuccessorBlocks ",
+ "Predecessors ",
+};
+
+const char* bit_map_names[kNumBitMapKinds] = {
+ "Misc ",
+ "Use ",
+ "Def ",
+ "LiveIn ",
+ "BlockMatrix ",
+ "Dominators ",
+ "IDominated ",
+ "DomFrontier ",
+ "Phi ",
+ "TmpBlocks ",
+ "InputBlocks ",
+ "RegisterV ",
+ "TempSSARegisterV ",
+ "Null Check ",
+ "TmpBlockV ",
+ "Predecessors ",
+};
+#endif
+
+#define kArenaBitVectorGrowth 4 /* increase by 4 uint32_ts when limit hit */
+
+/* Allocate the initial memory block for arena-based allocation */
+bool HeapInit(CompilationUnit* cu)
+{
+ DCHECK(cu->arena_head == NULL);
+ cu->arena_head =
+ static_cast<ArenaMemBlock*>(malloc(sizeof(ArenaMemBlock) + ARENA_DEFAULT_SIZE));
+ if (cu->arena_head == NULL) {
+ LOG(FATAL) << "No memory left to create compiler heap memory";
+ }
+ cu->arena_head->block_size = ARENA_DEFAULT_SIZE;
+ cu->current_arena = cu->arena_head;
+ cu->current_arena->bytes_allocated = 0;
+ cu->current_arena->next = NULL;
+ cu->num_arena_blocks = 1;
+#ifdef WITH_MEMSTATS
+ cu->mstats = (Memstats*) NewMem(cu, sizeof(Memstats), true,
+ kAllocDebugInfo);
+#endif
+ return true;
+}
+
+/* Arena-based malloc for compilation tasks */
+void* NewMem(CompilationUnit* cu, size_t size, bool zero, oat_alloc_kind kind)
+{
+ size = (size + 3) & ~3;
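+  // size is now rounded up to a 4-byte multiple, e.g. a 13-byte request becomes 16.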
+#ifdef WITH_MEMSTATS
+ if (cu->mstats != NULL) {
+ cu->mstats->alloc_stats[kind] += size;
+ }
+#endif
+retry:
+ /* Normal case - space is available in the current page */
+ if (size + cu->current_arena->bytes_allocated <=
+ cu->current_arena->block_size) {
+ void *ptr;
+ ptr = &cu->current_arena->ptr[cu->current_arena->bytes_allocated];
+ cu->current_arena->bytes_allocated += size;
+ if (zero) {
+ memset(ptr, 0, size);
+ }
+ return ptr;
+ } else {
+ /*
+ * See if there are previously allocated arena blocks before the last
+ * reset
+ */
+ if (cu->current_arena->next) {
+ cu->current_arena = cu->current_arena->next;
+ cu->current_arena->bytes_allocated = 0;
+ goto retry;
+ }
+
+ size_t block_size = (size < ARENA_DEFAULT_SIZE) ? ARENA_DEFAULT_SIZE : size;
+ /* Time to allocate a new arena */
+ ArenaMemBlock *new_arena =
+ static_cast<ArenaMemBlock*>(malloc(sizeof(ArenaMemBlock) + block_size));
+ if (new_arena == NULL) {
+ LOG(FATAL) << "Arena allocation failure";
+ }
+ new_arena->block_size = block_size;
+ new_arena->bytes_allocated = 0;
+ new_arena->next = NULL;
+ cu->current_arena->next = new_arena;
+ cu->current_arena = new_arena;
+ cu->num_arena_blocks++;
+ if (cu->num_arena_blocks > 20000) {
+ LOG(INFO) << "Total arena pages: " << cu->num_arena_blocks;
+ }
+ goto retry;
+ }
+}
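+
+// Usage sketch (illustrative): NewMem is a simple bump-pointer allocator.
+// Sizes are rounded up to a 4-byte multiple, each request is carved out of
+// the current block, and a fresh block of at least ARENA_DEFAULT_SIZE bytes
+// is chained on when the current one fills up. Individual allocations are
+// never freed; everything is released at once by ArenaReset(). For example:
+//
+//   MIR* mir = static_cast<MIR*>(NewMem(cu, sizeof(MIR), true, kAllocMIR));
+//   ...
+//   ArenaReset(cu);  // frees mir along with every other arena allocation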
+
+/* Reclaim all the arena blocks allocated so far */
+void ArenaReset(CompilationUnit* cu)
+{
+ ArenaMemBlock* head = cu->arena_head;
+ while (head != NULL) {
+ ArenaMemBlock* p = head;
+ head = head->next;
+ free(p);
+ }
+ cu->arena_head = NULL;
+ cu->current_arena = NULL;
+}
+
+/* Growable List initialization */
+void CompilerInitGrowableList(CompilationUnit* cu, GrowableList* g_list,
+ size_t init_length, oat_list_kind kind)
+{
+ g_list->num_allocated = init_length;
+ g_list->num_used = 0;
+ g_list->elem_list = static_cast<uintptr_t *>(NewMem(cu, sizeof(intptr_t) * init_length,
+ true, kAllocGrowableList));
+#ifdef WITH_MEMSTATS
+ cu->mstats->list_sizes[kind] += sizeof(uintptr_t) * init_length;
+ g_list->kind = kind;
+ if (static_cast<int>(init_length) > cu->mstats->list_max_elems[kind]) {
+ cu->mstats->list_max_elems[kind] = init_length;
+ }
+#endif
+}
+
+/* Expand the capacity of a growable list */
+static void ExpandGrowableList(CompilationUnit* cu, GrowableList* g_list)
+{
+ int new_length = g_list->num_allocated;
+ if (new_length < 128) {
+ new_length <<= 1;
+ } else {
+ new_length += 128;
+ }
+ uintptr_t *new_array =
+ static_cast<uintptr_t*>(NewMem(cu, sizeof(uintptr_t) * new_length, true,
+ kAllocGrowableList));
+ memcpy(new_array, g_list->elem_list, sizeof(uintptr_t) * g_list->num_allocated);
+#ifdef WITH_MEMSTATS
+ cu->mstats->list_sizes[g_list->kind] += sizeof(uintptr_t) * new_length;
+ cu->mstats->list_wasted[g_list->kind] +=
+ sizeof(uintptr_t) * g_list->num_allocated;
+ cu->mstats->list_grows[g_list->kind]++;
+ if (new_length > cu->mstats->list_max_elems[g_list->kind]) {
+ cu->mstats->list_max_elems[g_list->kind] = new_length;
+ }
+#endif
+ g_list->num_allocated = new_length;
+ g_list->elem_list = new_array;
+}
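+
+// Note: capacity doubles while it is below 128 elements and then grows
+// linearly by 128, so a list that starts at 2 grows 4, 8, ..., 128, 256,
+// 384, ... The old element array is not freed (it lives in the arena), which
+// is what the list_wasted counter tracks under WITH_MEMSTATS.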
+
+/* Insert a new element into the growable list */
+void InsertGrowableList(CompilationUnit* cu, GrowableList* g_list,
+ uintptr_t elem)
+{
+ DCHECK_NE(g_list->num_allocated, 0U);
+ if (g_list->num_used == g_list->num_allocated) {
+ ExpandGrowableList(cu, g_list);
+ }
+ g_list->elem_list[g_list->num_used++] = elem;
+}
+
+/* Delete an element from a growable list. Element must be present */
+void DeleteGrowableList(GrowableList* g_list, uintptr_t elem)
+{
+ bool found = false;
+ for (unsigned int i = 0; i < g_list->num_used; i++) {
+ if (!found && g_list->elem_list[i] == elem) {
+ found = true;
+ }
+    // Once found, shift the remaining elements down by one slot.
+    if (found && (i + 1 < g_list->num_used)) {
+      g_list->elem_list[i] = g_list->elem_list[i+1];
+    }
+ }
+ DCHECK_EQ(found, true);
+ g_list->num_used--;
+}
+
+void GrowableListIteratorInit(GrowableList* g_list,
+ GrowableListIterator* iterator)
+{
+ iterator->list = g_list;
+ iterator->idx = 0;
+ iterator->size = g_list->num_used;
+}
+
+uintptr_t GrowableListIteratorNext(GrowableListIterator* iterator)
+{
+ DCHECK_EQ(iterator->size, iterator->list->num_used);
+ if (iterator->idx == iterator->size) return 0;
+ return iterator->list->elem_list[iterator->idx++];
+}
+
+uintptr_t GrowableListGetElement(const GrowableList* g_list, size_t idx)
+{
+ DCHECK_LT(idx, g_list->num_used);
+ return g_list->elem_list[idx];
+}
+
+#ifdef WITH_MEMSTATS
+/* Dump memory usage stats */
+void DumpMemStats(CompilationUnit* cu)
+{
+ uint32_t total = 0;
+ for (int i = 0; i < kNumAllocKinds; i++) {
+ total += cu->mstats->alloc_stats[i];
+ }
+ if (total > (10 * 1024 * 1024)) {
+ LOG(INFO) << "MEMUSAGE: " << total << " : "
+ << PrettyMethod(cu->method_idx, *cu->dex_file);
+ LOG(INFO) << "insns_size: " << cu->insns_size;
+ if (cu->disable_dataflow) {
+ LOG(INFO) << " ** Dataflow disabled ** ";
+ }
+ LOG(INFO) << "===== Overall allocations";
+ for (int i = 0; i < kNumAllocKinds; i++) {
+ LOG(INFO) << alloc_names[i] << std::setw(10) <<
+ cu->mstats->alloc_stats[i];
+ }
+ LOG(INFO) << "===== GrowableList allocations";
+ for (int i = 0; i < kNumListKinds; i++) {
+ LOG(INFO) << list_names[i]
+ << " S:" << cu->mstats->list_sizes[i]
+ << ", W:" << cu->mstats->list_wasted[i]
+ << ", G:" << cu->mstats->list_grows[i]
+ << ", E:" << cu->mstats->list_max_elems[i];
+ }
+ LOG(INFO) << "===== GrowableBitMap allocations";
+ for (int i = 0; i < kNumBitMapKinds; i++) {
+ LOG(INFO) << bit_map_names[i]
+ << " S:" << cu->mstats->bit_map_sizes[i]
+ << ", W:" << cu->mstats->bit_map_wasted[i]
+ << ", G:" << cu->mstats->bit_map_grows[i];
+ }
+ }
+}
+#endif
+
+/* Debug Utility - dump a compilation unit */
+void DumpCompilationUnit(CompilationUnit* cu)
+{
+ BasicBlock* bb;
+ const char* block_type_names[] = {
+ "Entry Block",
+ "Code Block",
+ "Exit Block",
+ "Exception Handling",
+ "Catch Block"
+ };
+
+ LOG(INFO) << "Compiling " << PrettyMethod(cu->method_idx, *cu->dex_file);
+ LOG(INFO) << cu->insns << " insns";
+ LOG(INFO) << cu->num_blocks << " blocks in total";
+ GrowableListIterator iterator;
+
+ GrowableListIteratorInit(&cu->block_list, &iterator);
+
+ while (true) {
+ bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iterator));
+ if (bb == NULL) break;
+ LOG(INFO) << StringPrintf("Block %d (%s) (insn %04x - %04x%s)",
+ bb->id,
+ block_type_names[bb->block_type],
+ bb->start_offset,
+ bb->last_mir_insn ? bb->last_mir_insn->offset : bb->start_offset,
+ bb->last_mir_insn ? "" : " empty");
+ if (bb->taken) {
+ LOG(INFO) << " Taken branch: block " << bb->taken->id
+ << "(0x" << std::hex << bb->taken->start_offset << ")";
+ }
+ if (bb->fall_through) {
+ LOG(INFO) << " Fallthrough : block " << bb->fall_through->id
+ << " (0x" << std::hex << bb->fall_through->start_offset << ")";
+ }
+ }
+}
+
+static uint32_t check_masks[32] = {
+ 0x00000001, 0x00000002, 0x00000004, 0x00000008, 0x00000010,
+ 0x00000020, 0x00000040, 0x00000080, 0x00000100, 0x00000200,
+ 0x00000400, 0x00000800, 0x00001000, 0x00002000, 0x00004000,
+ 0x00008000, 0x00010000, 0x00020000, 0x00040000, 0x00080000,
+ 0x00100000, 0x00200000, 0x00400000, 0x00800000, 0x01000000,
+ 0x02000000, 0x04000000, 0x08000000, 0x10000000, 0x20000000,
+ 0x40000000, 0x80000000 };
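+
+// Note: check_masks[n] is simply (1u << n). Bit number num maps to word
+// storage[num >> 5] and mask check_masks[num & 0x1f]; e.g. bit 37 lives in
+// storage[1] under mask 0x00000020.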
+
+/*
+ * Allocate a bit vector with enough space to hold at least the specified
+ * number of bits.
+ *
+ * NOTE: memory is allocated from the compiler arena.
+ */
+ArenaBitVector* AllocBitVector(CompilationUnit* cu,
+ unsigned int start_bits, bool expandable,
+ oat_bit_map_kind kind)
+{
+ ArenaBitVector* bv;
+ unsigned int count;
+
+ DCHECK_EQ(sizeof(bv->storage[0]), 4U); /* assuming 32-bit units */
+
+ bv = static_cast<ArenaBitVector*>(NewMem(cu, sizeof(ArenaBitVector), false,
+ kAllocGrowableBitMap));
+
+ count = (start_bits + 31) >> 5;
+
+ bv->storage_size = count;
+ bv->expandable = expandable;
+ bv->storage = static_cast<uint32_t*>(NewMem(cu, count * sizeof(uint32_t), true,
+ kAllocGrowableBitMap));
+#ifdef WITH_MEMSTATS
+ bv->kind = kind;
+ cu->mstats->bit_map_sizes[kind] += count * sizeof(uint32_t);
+#endif
+ return bv;
+}
+
+/*
+ * Determine whether or not the specified bit is set.
+ */
+bool IsBitSet(const ArenaBitVector* p_bits, unsigned int num)
+{
+ DCHECK_LT(num, p_bits->storage_size * sizeof(uint32_t) * 8);
+
+ unsigned int val = p_bits->storage[num >> 5] & check_masks[num & 0x1f];
+ return (val != 0);
+}
+
+/*
+ * Mark all bits as "clear".
+ */
+void ClearAllBits(ArenaBitVector* p_bits)
+{
+ unsigned int count = p_bits->storage_size;
+ memset(p_bits->storage, 0, count * sizeof(uint32_t));
+}
+
+/*
+ * Mark the specified bit as "set", expanding the vector if necessary.
+ *
+ * Aborts if the bit is outside the range of the vector and the vector is
+ * not expandable.
+ *
+ * NOTE: expansion memory is allocated from the compiler arena.
+ */
+bool SetBit(CompilationUnit* cu, ArenaBitVector* p_bits, unsigned int num)
+{
+ if (num >= p_bits->storage_size * sizeof(uint32_t) * 8) {
+ if (!p_bits->expandable) {
+ LOG(FATAL) << "Can't expand";
+ }
+
+ /* Round up to word boundaries for "num+1" bits */
+ unsigned int new_size = (num + 1 + 31) >> 5;
+ DCHECK_GT(new_size, p_bits->storage_size);
+ uint32_t *new_storage = static_cast<uint32_t*>(NewMem(cu, new_size * sizeof(uint32_t), false,
+ kAllocGrowableBitMap));
+ memcpy(new_storage, p_bits->storage, p_bits->storage_size * sizeof(uint32_t));
+ memset(&new_storage[p_bits->storage_size], 0,
+ (new_size - p_bits->storage_size) * sizeof(uint32_t));
+#ifdef WITH_MEMSTATS
+ cu->mstats->bit_map_wasted[p_bits->kind] +=
+ p_bits->storage_size * sizeof(uint32_t);
+ cu->mstats->bit_map_sizes[p_bits->kind] += new_size * sizeof(uint32_t);
+ cu->mstats->bit_map_grows[p_bits->kind]++;
+#endif
+ p_bits->storage = new_storage;
+ p_bits->storage_size = new_size;
+ }
+
+ p_bits->storage[num >> 5] |= check_masks[num & 0x1f];
+ return true;
+}
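+
+// Note: expansion sizes the new storage to exactly the number of words needed
+// for num + 1 bits; e.g. setting bit 70 in a two-word (64-bit) expandable
+// vector reallocates it to three words (96 bits). The old storage stays in
+// the arena and is counted as bit_map_wasted under WITH_MEMSTATS.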
+
+/*
+ * Mark the specified bit as "unset".
+ *
+ * Aborts if the bit is outside the range of the vector; clearing never
+ * expands the vector and allocates no memory.
+ */
+bool ClearBit(ArenaBitVector* p_bits, unsigned int num)
+{
+ if (num >= p_bits->storage_size * sizeof(uint32_t) * 8) {
+    LOG(FATAL) << "Attempt to clear a bit not set in the vector yet";
+ }
+
+ p_bits->storage[num >> 5] &= ~check_masks[num & 0x1f];
+ return true;
+}
+
+/* Initialize the iterator structure */
+void BitVectorIteratorInit(ArenaBitVector* p_bits,
+ ArenaBitVectorIterator* iterator)
+{
+ iterator->p_bits = p_bits;
+ iterator->bit_size = p_bits->storage_size * sizeof(uint32_t) * 8;
+ iterator->idx = 0;
+}
+
+/*
+ * If the vector sizes don't match, log an error and abort.
+ */
+static void CheckSizes(const ArenaBitVector* bv1, const ArenaBitVector* bv2)
+{
+ if (bv1->storage_size != bv2->storage_size) {
+ LOG(FATAL) << "Mismatched vector sizes (" << bv1->storage_size
+ << ", " << bv2->storage_size << ")";
+ }
+}
+
+/*
+ * Copy the contents of one vector to another. Both vectors must have the
+ * same size.
+ */
+void CopyBitVector(ArenaBitVector* dest, const ArenaBitVector* src)
+{
+ /* if dest is expandable and < src, we could expand dest to match */
+ CheckSizes(dest, src);
+
+ memcpy(dest->storage, src->storage, sizeof(uint32_t) * dest->storage_size);
+}
+
+/*
+ * Intersect two bit vectors and store the result in the dest vector.
+ */
+
+bool IntersectBitVectors(ArenaBitVector* dest, const ArenaBitVector* src1,
+ const ArenaBitVector* src2)
+{
+ DCHECK(src1 != NULL);
+ DCHECK(src2 != NULL);
+ if (dest->storage_size != src1->storage_size ||
+ dest->storage_size != src2->storage_size ||
+ dest->expandable != src1->expandable ||
+ dest->expandable != src2->expandable)
+ return false;
+
+ unsigned int idx;
+ for (idx = 0; idx < dest->storage_size; idx++) {
+ dest->storage[idx] = src1->storage[idx] & src2->storage[idx];
+ }
+ return true;
+}
+
+/*
+ * Compute the union of two bit vectors and store the result in the dest vector.
+ */
+bool UnifyBitVetors(ArenaBitVector* dest, const ArenaBitVector* src1,
+ const ArenaBitVector* src2)
+{
+ DCHECK(src1 != NULL);
+ DCHECK(src2 != NULL);
+ if (dest->storage_size != src1->storage_size ||
+ dest->storage_size != src2->storage_size ||
+ dest->expandable != src1->expandable ||
+ dest->expandable != src2->expandable)
+ return false;
+
+ unsigned int idx;
+ for (idx = 0; idx < dest->storage_size; idx++) {
+ dest->storage[idx] = src1->storage[idx] | src2->storage[idx];
+ }
+ return true;
+}
+
+/*
+ * Return true if the two vectors have any set bits in common. Vectors must be the same size.
+ */
+bool TestBitVectors(const ArenaBitVector* src1,
+ const ArenaBitVector* src2)
+{
+ DCHECK_EQ(src1->storage_size, src2->storage_size);
+ for (uint32_t idx = 0; idx < src1->storage_size; idx++) {
+ if (src1->storage[idx] & src2->storage[idx]) return true;
+ }
+ return false;
+}
+
+/*
+ * Compare two bit vectors and return true if any difference is found.
+ */
+bool CompareBitVectors(const ArenaBitVector* src1,
+ const ArenaBitVector* src2)
+{
+ if (src1->storage_size != src2->storage_size ||
+ src1->expandable != src2->expandable)
+ return true;
+
+ unsigned int idx;
+ for (idx = 0; idx < src1->storage_size; idx++) {
+ if (src1->storage[idx] != src2->storage[idx]) return true;
+ }
+ return false;
+}
+
+/*
+ * Count the number of bits that are set.
+ */
+int CountSetBits(const ArenaBitVector* p_bits)
+{
+ unsigned int word;
+ unsigned int count = 0;
+
+ for (word = 0; word < p_bits->storage_size; word++) {
+ uint32_t val = p_bits->storage[word];
+
+ if (val != 0) {
+ if (val == 0xffffffff) {
+ count += 32;
+ } else {
+ /* count the number of '1' bits */
+ while (val != 0) {
+ val &= val - 1;
+ count++;
+ }
+ }
+ }
+ }
+
+ return count;
+}
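+
+// Note: the inner loop uses the val &= val - 1 trick, which clears the lowest
+// set bit on each iteration; e.g. 0x2C (0b101100) takes exactly three
+// iterations, one per set bit.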
+
+/* Return the next bit position set to 1, or -1 once the end of the vector is reached */
+int BitVectorIteratorNext(ArenaBitVectorIterator* iterator)
+{
+ ArenaBitVector* p_bits = iterator->p_bits;
+ uint32_t bit_index = iterator->idx;
+ uint32_t bit_size = iterator->bit_size;
+
+ DCHECK_EQ(bit_size, p_bits->storage_size * sizeof(uint32_t) * 8);
+
+ if (bit_index >= bit_size) return -1;
+
+ uint32_t word_index = bit_index >> 5;
+ uint32_t end_word_index = bit_size >> 5;
+ uint32_t* storage = p_bits->storage;
+ uint32_t word = storage[word_index++];
+
+ // Mask out any bits in the first word we've already considered
+ word &= ~((1 << (bit_index & 0x1f))-1);
+
+ for (; word_index <= end_word_index;) {
+ uint32_t bit_pos = bit_index & 0x1f;
+ if (word == 0) {
+ bit_index += (32 - bit_pos);
+ word = storage[word_index++];
+ continue;
+ }
+ for (; bit_pos < 32; bit_pos++) {
+ if (word & (1 << bit_pos)) {
+ iterator->idx = bit_index + 1;
+ return bit_index;
+ }
+ bit_index++;
+ }
+ word = storage[word_index++];
+ }
+ iterator->idx = iterator->bit_size;
+ return -1;
+}
+
+/*
+ * Mark the first num_bits bits as "set". Unlike ClearAllBits, we cannot simply
+ * fill whole words, since setting unused trailing bits to one would confuse
+ * the iterator.
+ */
+void SetInitialBits(ArenaBitVector* p_bits, unsigned int num_bits)
+{
+ unsigned int idx;
+ DCHECK_LE(((num_bits + 31) >> 5), p_bits->storage_size);
+ for (idx = 0; idx < (num_bits >> 5); idx++) {
+ p_bits->storage[idx] = -1;
+ }
+ unsigned int rem_num_bits = num_bits & 0x1f;
+ if (rem_num_bits) {
+ p_bits->storage[idx] = (1 << rem_num_bits) - 1;
+ }
+}
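+
+// Example: SetInitialBits(bv, 35) fills storage[0] with 0xffffffff and sets
+// only the low three bits of storage[1] (0x00000007); any remaining words are
+// left untouched.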
+
+void GetBlockName(BasicBlock* bb, char* name)
+{
+ switch (bb->block_type) {
+ case kEntryBlock:
+ snprintf(name, BLOCK_NAME_LEN, "entry_%d", bb->id);
+ break;
+ case kExitBlock:
+ snprintf(name, BLOCK_NAME_LEN, "exit_%d", bb->id);
+ break;
+ case kDalvikByteCode:
+ snprintf(name, BLOCK_NAME_LEN, "block%04x_%d", bb->start_offset, bb->id);
+ break;
+ case kExceptionHandling:
+ snprintf(name, BLOCK_NAME_LEN, "exception%04x_%d", bb->start_offset,
+ bb->id);
+ break;
+ default:
+ snprintf(name, BLOCK_NAME_LEN, "_%d", bb->id);
+ break;
+ }
+}
+
+const char* GetShortyFromTargetIdx(CompilationUnit *cu, int target_idx)
+{
+ const DexFile::MethodId& method_id = cu->dex_file->GetMethodId(target_idx);
+ return cu->dex_file->GetShorty(method_id.proto_idx_);
+}
+
+/* Allocate a new basic block */
+BasicBlock* NewMemBB(CompilationUnit* cu, BBType block_type, int block_id)
+{
+ BasicBlock* bb = static_cast<BasicBlock*>(NewMem(cu, sizeof(BasicBlock), true, kAllocBB));
+ bb->block_type = block_type;
+ bb->id = block_id;
+ bb->predecessors = static_cast<GrowableList*>
+ (NewMem(cu, sizeof(GrowableList), false, kAllocPredecessors));
+ CompilerInitGrowableList(cu, bb->predecessors,
+ (block_type == kExitBlock) ? 2048 : 2,
+ kListPredecessors);
+ cu->block_id_map.Put(block_id, block_id);
+ return bb;
+}
+
+/* Insert an MIR instruction to the end of a basic block */
+void AppendMIR(BasicBlock* bb, MIR* mir)
+{
+ if (bb->first_mir_insn == NULL) {
+ DCHECK(bb->last_mir_insn == NULL);
+ bb->last_mir_insn = bb->first_mir_insn = mir;
+ mir->prev = mir->next = NULL;
+ } else {
+ bb->last_mir_insn->next = mir;
+ mir->prev = bb->last_mir_insn;
+ mir->next = NULL;
+ bb->last_mir_insn = mir;
+ }
+}
+
+/* Insert an MIR instruction to the head of a basic block */
+void PrependMIR(BasicBlock* bb, MIR* mir)
+{
+ if (bb->first_mir_insn == NULL) {
+ DCHECK(bb->last_mir_insn == NULL);
+ bb->last_mir_insn = bb->first_mir_insn = mir;
+ mir->prev = mir->next = NULL;
+ } else {
+ bb->first_mir_insn->prev = mir;
+ mir->next = bb->first_mir_insn;
+ mir->prev = NULL;
+ bb->first_mir_insn = mir;
+ }
+}
+
+/* Insert a MIR instruction after the specified MIR */
+void InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir)
+{
+ new_mir->prev = current_mir;
+ new_mir->next = current_mir->next;
+ current_mir->next = new_mir;
+
+ if (new_mir->next) {
+ /* Is not the last MIR in the block */
+ new_mir->next->prev = new_mir;
+ } else {
+ /* Is the last MIR in the block */
+ bb->last_mir_insn = new_mir;
+ }
+}
+
+/*
+ * Append an LIR instruction to the LIR list maintained by a compilation
+ * unit
+ */
+void AppendLIR(CompilationUnit *cu, LIR* lir)
+{
+ if (cu->first_lir_insn == NULL) {
+ DCHECK(cu->last_lir_insn == NULL);
+ cu->last_lir_insn = cu->first_lir_insn = lir;
+ lir->prev = lir->next = NULL;
+ } else {
+ cu->last_lir_insn->next = lir;
+ lir->prev = cu->last_lir_insn;
+ lir->next = NULL;
+ cu->last_lir_insn = lir;
+ }
+}
+
+/*
+ * Insert an LIR instruction before the current instruction, which cannot be the
+ * first instruction.
+ *
+ * prev_lir <-> new_lir <-> current_lir
+ */
+void InsertLIRBefore(LIR* current_lir, LIR* new_lir)
+{
+ DCHECK(current_lir->prev != NULL);
+ LIR *prev_lir = current_lir->prev;
+
+ prev_lir->next = new_lir;
+ new_lir->prev = prev_lir;
+ new_lir->next = current_lir;
+ current_lir->prev = new_lir;
+}
+
+/*
+ * Insert an LIR instruction after the current instruction, which cannot be the
+ * last instruction.
+ *
+ * current_lir -> new_lir -> old_next
+ */
+void InsertLIRAfter(LIR* current_lir, LIR* new_lir)
+{
+ new_lir->prev = current_lir;
+ new_lir->next = current_lir->next;
+ current_lir->next = new_lir;
+ new_lir->next->prev = new_lir;
+}
+
+} // namespace art
diff --git a/src/compiler/dex/compiler_utility.h b/src/compiler/dex/compiler_utility.h
new file mode 100644
index 0000000..582d32d
--- /dev/null
+++ b/src/compiler/dex/compiler_utility.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_COMPILER_UTILITY_H_
+#define ART_SRC_COMPILER_DEX_COMPILER_UTILITY_H_
+
+#include <stdint.h>
+#include <stddef.h>
+#include "compiler_enums.h"
+
+namespace art {
+
+struct CompilationUnit;
+
+// Each arena page has some overhead, so take a few bytes off.
+#define ARENA_DEFAULT_SIZE ((2 * 1024 * 1024) - 256)
+
+// Type of allocation for memory tuning.
+enum oat_alloc_kind {
+ kAllocMisc,
+ kAllocBB,
+ kAllocLIR,
+ kAllocMIR,
+ kAllocDFInfo,
+ kAllocGrowableList,
+ kAllocGrowableBitMap,
+ kAllocDalvikToSSAMap,
+ kAllocDebugInfo,
+ kAllocSuccessor,
+ kAllocRegAlloc,
+ kAllocData,
+ kAllocPredecessors,
+ kNumAllocKinds
+};
+
+// Type of growable list for memory tuning.
+enum oat_list_kind {
+ kListMisc = 0,
+ kListBlockList,
+ kListSSAtoDalvikMap,
+ kListDfsOrder,
+ kListDfsPostOrder,
+ kListDomPostOrderTraversal,
+ kListThrowLaunchPads,
+ kListSuspendLaunchPads,
+ kListSwitchTables,
+ kListFillArrayData,
+ kListSuccessorBlocks,
+ kListPredecessors,
+ kNumListKinds
+};
+
+// Type of growable bitmap for memory tuning.
+enum oat_bit_map_kind {
+ kBitMapMisc = 0,
+ kBitMapUse,
+ kBitMapDef,
+ kBitMapLiveIn,
+ kBitMapBMatrix,
+ kBitMapDominators,
+ kBitMapIDominated,
+ kBitMapDomFrontier,
+ kBitMapPhi,
+ kBitMapTmpBlocks,
+ kBitMapInputBlocks,
+ kBitMapRegisterV,
+ kBitMapTempSSARegisterV,
+ kBitMapNullCheck,
+ kBitMapTmpBlockV,
+ kBitMapPredecessors,
+ kNumBitMapKinds
+};
+
+// Allocate the initial memory block for arena-based allocation.
+bool HeapInit(CompilationUnit* cu);
+
+// Uncomment to collect memory usage statistics.
+//#define WITH_MEMSTATS
+
+struct ArenaMemBlock {
+ size_t block_size;
+ size_t bytes_allocated;
+ ArenaMemBlock *next;
+ char ptr[0];
+};
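+
+// Note: ptr[0] is a zero-length trailing array. Each block is allocated as
+// malloc(sizeof(ArenaMemBlock) + block_size), so the payload lives directly
+// after this header and allocations are carved out of ptr[].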
+
+void* NewMem(CompilationUnit* cu, size_t size, bool zero, oat_alloc_kind kind);
+
+void ArenaReset(CompilationUnit *cu);
+
+struct GrowableList {
+ GrowableList() : num_allocated(0), num_used(0), elem_list(NULL) {
+ }
+
+ size_t num_allocated;
+ size_t num_used;
+ uintptr_t* elem_list;
+#ifdef WITH_MEMSTATS
+ oat_list_kind kind;
+#endif
+};
+
+struct GrowableListIterator {
+ GrowableList* list;
+ size_t idx;
+ size_t size;
+};
+
+/*
+ * Expanding bitmap, used for tracking resources. Bits are numbered starting
+ * from zero. All operations on a BitVector are unsynchronized.
+ */
+struct ArenaBitVector {
+ bool expandable; // expand bitmap if we run out?
+ uint32_t storage_size; // current size, in 32-bit words.
+ uint32_t* storage;
+#ifdef WITH_MEMSTATS
+ oat_bit_map_kind kind; // for memory use tuning.
+#endif
+};
+
+// Handy iterator to walk through the bit positions set to 1.
+struct ArenaBitVectorIterator {
+ ArenaBitVector* p_bits;
+ uint32_t idx;
+ uint32_t bit_size;
+};
+
+#define GET_ELEM_N(LIST, TYPE, N) ((reinterpret_cast<TYPE*>(LIST->elem_list)[N]))
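+
+// Example: GET_ELEM_N(cu->ssa_base_vregs, int, i) reads element i of the
+// growable list reinterpreted as an int (as done by SRegToVReg).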
+
+#define BLOCK_NAME_LEN 80
+
+// Forward declarations
+struct BasicBlock;
+struct CompilationUnit;
+struct LIR;
+struct RegLocation;
+struct MIR;
+enum BBType;
+
+void CompilerInitGrowableList(CompilationUnit* cu, GrowableList* g_list,
+ size_t init_length, oat_list_kind kind = kListMisc);
+void InsertGrowableList(CompilationUnit* cu, GrowableList* g_list, uintptr_t elem);
+void DeleteGrowableList(GrowableList* g_list, uintptr_t elem);
+void GrowableListIteratorInit(GrowableList* g_list, GrowableListIterator* iterator);
+uintptr_t GrowableListIteratorNext(GrowableListIterator* iterator);
+uintptr_t GrowableListGetElement(const GrowableList* g_list, size_t idx);
+ArenaBitVector* AllocBitVector(CompilationUnit* cu, unsigned int start_bits, bool expandable,
+ oat_bit_map_kind = kBitMapMisc);
+void BitVectorIteratorInit(ArenaBitVector* p_bits, ArenaBitVectorIterator* iterator);
+int BitVectorIteratorNext(ArenaBitVectorIterator* iterator);
+bool SetBit(CompilationUnit *cu, ArenaBitVector* p_bits, unsigned int num);
+bool ClearBit(ArenaBitVector* p_bits, unsigned int num);
+void MarkAllBits(ArenaBitVector* p_bits, bool set);
+void DebugBitVector(char* msg, const ArenaBitVector* bv, int length);
+bool IsBitSet(const ArenaBitVector* p_bits, unsigned int num);
+void ClearAllBits(ArenaBitVector* p_bits);
+void SetInitialBits(ArenaBitVector* p_bits, unsigned int num_bits);
+void CopyBitVector(ArenaBitVector* dest, const ArenaBitVector* src);
+bool IntersectBitVectors(ArenaBitVector* dest, const ArenaBitVector* src1,
+ const ArenaBitVector* src2);
+bool UnifyBitVetors(ArenaBitVector* dest, const ArenaBitVector* src1, const ArenaBitVector* src2);
+bool CompareBitVectors(const ArenaBitVector* src1, const ArenaBitVector* src2);
+bool TestBitVectors(const ArenaBitVector* src1, const ArenaBitVector* src2);
+int CountSetBits(const ArenaBitVector* p_bits);
+void DumpLIRInsn(CompilationUnit* cu, LIR* lir, unsigned char* base_addr);
+void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
+void DumpBlockBitVector(const GrowableList* blocks, char* msg, const ArenaBitVector* bv,
+ int length);
+void GetBlockName(BasicBlock* bb, char* name);
+const char* GetShortyFromTargetIdx(CompilationUnit*, int);
+void DumpMemStats(CompilationUnit* cu);
+void DumpCompilationUnit(CompilationUnit* cu);
+BasicBlock* NewMemBB(CompilationUnit* cu, BBType block_type, int block_id);
+void AppendMIR(BasicBlock* bb, MIR* mir);
+void PrependMIR(BasicBlock* bb, MIR* mir);
+void InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir);
+void AppendLIR(CompilationUnit *cu, LIR* lir);
+void InsertLIRBefore(LIR* current_lir, LIR* new_lir);
+void InsertLIRAfter(LIR* current_lir, LIR* new_lir);
+
+} // namespace art
+
+#endif // ART_SRC_COMPILER_DEX_COMPILER_UTILITY_H_
diff --git a/src/compiler/dex/dataflow.cc b/src/compiler/dex/dataflow.cc
new file mode 100644
index 0000000..2ce16a4
--- /dev/null
+++ b/src/compiler/dex/dataflow.cc
@@ -0,0 +1,2574 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compiler_internals.h"
+#include "dataflow.h"
+#include "bb_opt.h"
+
+namespace art {
+
+/*
+ * Main table containing data flow attributes for each bytecode. The
+ * first kNumPackedOpcodes entries are for Dalvik bytecode
+ * instructions; the extended MIR-level opcodes are appended afterwards.
+ *
+ * TODO - many optimization flags are incomplete - they will only limit the
+ * scope of optimizations but will not cause mis-optimizations.
+ */
+const int oat_data_flow_attributes[kMirOpLast] = {
+ // 00 NOP
+ DF_NOP,
+
+ // 01 MOVE vA, vB
+ DF_DA | DF_UB | DF_IS_MOVE,
+
+ // 02 MOVE_FROM16 vAA, vBBBB
+ DF_DA | DF_UB | DF_IS_MOVE,
+
+ // 03 MOVE_16 vAAAA, vBBBB
+ DF_DA | DF_UB | DF_IS_MOVE,
+
+ // 04 MOVE_WIDE vA, vB
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
+
+ // 05 MOVE_WIDE_FROM16 vAA, vBBBB
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
+
+ // 06 MOVE_WIDE_16 vAAAA, vBBBB
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
+
+ // 07 MOVE_OBJECT vA, vB
+ DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
+
+ // 08 MOVE_OBJECT_FROM16 vAA, vBBBB
+ DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
+
+ // 09 MOVE_OBJECT_16 vAAAA, vBBBB
+ DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
+
+ // 0A MOVE_RESULT vAA
+ DF_DA,
+
+ // 0B MOVE_RESULT_WIDE vAA
+ DF_DA | DF_A_WIDE,
+
+ // 0C MOVE_RESULT_OBJECT vAA
+ DF_DA | DF_REF_A,
+
+ // 0D MOVE_EXCEPTION vAA
+ DF_DA | DF_REF_A,
+
+ // 0E RETURN_VOID
+ DF_NOP,
+
+ // 0F RETURN vAA
+ DF_UA,
+
+ // 10 RETURN_WIDE vAA
+ DF_UA | DF_A_WIDE,
+
+ // 11 RETURN_OBJECT vAA
+ DF_UA | DF_REF_A,
+
+ // 12 CONST_4 vA, #+B
+ DF_DA | DF_SETS_CONST,
+
+ // 13 CONST_16 vAA, #+BBBB
+ DF_DA | DF_SETS_CONST,
+
+ // 14 CONST vAA, #+BBBBBBBB
+ DF_DA | DF_SETS_CONST,
+
+  // 15 CONST_HIGH16 vAA, #+BBBB0000
+ DF_DA | DF_SETS_CONST,
+
+ // 16 CONST_WIDE_16 vAA, #+BBBB
+ DF_DA | DF_A_WIDE | DF_SETS_CONST,
+
+ // 17 CONST_WIDE_32 vAA, #+BBBBBBBB
+ DF_DA | DF_A_WIDE | DF_SETS_CONST,
+
+ // 18 CONST_WIDE vAA, #+BBBBBBBBBBBBBBBB
+ DF_DA | DF_A_WIDE | DF_SETS_CONST,
+
+ // 19 CONST_WIDE_HIGH16 vAA, #+BBBB000000000000
+ DF_DA | DF_A_WIDE | DF_SETS_CONST,
+
+ // 1A CONST_STRING vAA, string@BBBB
+ DF_DA | DF_REF_A,
+
+ // 1B CONST_STRING_JUMBO vAA, string@BBBBBBBB
+ DF_DA | DF_REF_A,
+
+ // 1C CONST_CLASS vAA, type@BBBB
+ DF_DA | DF_REF_A,
+
+ // 1D MONITOR_ENTER vAA
+ DF_UA | DF_NULL_CHK_0 | DF_REF_A,
+
+ // 1E MONITOR_EXIT vAA
+ DF_UA | DF_NULL_CHK_0 | DF_REF_A,
+
+  // 1F CHECK_CAST vAA, type@BBBB
+ DF_UA | DF_REF_A | DF_UMS,
+
+ // 20 INSTANCE_OF vA, vB, type@CCCC
+ DF_DA | DF_UB | DF_CORE_A | DF_REF_B | DF_UMS,
+
+ // 21 ARRAY_LENGTH vA, vB
+ DF_DA | DF_UB | DF_NULL_CHK_0 | DF_CORE_A | DF_REF_B,
+
+ // 22 NEW_INSTANCE vAA, type@BBBB
+ DF_DA | DF_NON_NULL_DST | DF_REF_A | DF_UMS,
+
+ // 23 NEW_ARRAY vA, vB, type@CCCC
+ DF_DA | DF_UB | DF_NON_NULL_DST | DF_REF_A | DF_CORE_B | DF_UMS,
+
+ // 24 FILLED_NEW_ARRAY {vD, vE, vF, vG, vA}
+ DF_FORMAT_35C | DF_NON_NULL_RET | DF_UMS,
+
+ // 25 FILLED_NEW_ARRAY_RANGE {vCCCC .. vNNNN}, type@BBBB
+ DF_FORMAT_3RC | DF_NON_NULL_RET | DF_UMS,
+
+ // 26 FILL_ARRAY_DATA vAA, +BBBBBBBB
+ DF_UA | DF_REF_A | DF_UMS,
+
+ // 27 THROW vAA
+ DF_UA | DF_REF_A | DF_UMS,
+
+ // 28 GOTO
+ DF_NOP,
+
+ // 29 GOTO_16
+ DF_NOP,
+
+ // 2A GOTO_32
+ DF_NOP,
+
+ // 2B PACKED_SWITCH vAA, +BBBBBBBB
+ DF_UA,
+
+ // 2C SPARSE_SWITCH vAA, +BBBBBBBB
+ DF_UA,
+
+ // 2D CMPL_FLOAT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
+
+ // 2E CMPG_FLOAT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
+
+ // 2F CMPL_DOUBLE vAA, vBB, vCC
+ DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_B | DF_FP_C | DF_CORE_A,
+
+ // 30 CMPG_DOUBLE vAA, vBB, vCC
+ DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_B | DF_FP_C | DF_CORE_A,
+
+ // 31 CMP_LONG vAA, vBB, vCC
+ DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // 32 IF_EQ vA, vB, +CCCC
+ DF_UA | DF_UB,
+
+ // 33 IF_NE vA, vB, +CCCC
+ DF_UA | DF_UB,
+
+ // 34 IF_LT vA, vB, +CCCC
+ DF_UA | DF_UB,
+
+ // 35 IF_GE vA, vB, +CCCC
+ DF_UA | DF_UB,
+
+ // 36 IF_GT vA, vB, +CCCC
+ DF_UA | DF_UB,
+
+ // 37 IF_LE vA, vB, +CCCC
+ DF_UA | DF_UB,
+
+ // 38 IF_EQZ vAA, +BBBB
+ DF_UA,
+
+ // 39 IF_NEZ vAA, +BBBB
+ DF_UA,
+
+ // 3A IF_LTZ vAA, +BBBB
+ DF_UA,
+
+ // 3B IF_GEZ vAA, +BBBB
+ DF_UA,
+
+ // 3C IF_GTZ vAA, +BBBB
+ DF_UA,
+
+ // 3D IF_LEZ vAA, +BBBB
+ DF_UA,
+
+ // 3E UNUSED_3E
+ DF_NOP,
+
+ // 3F UNUSED_3F
+ DF_NOP,
+
+ // 40 UNUSED_40
+ DF_NOP,
+
+ // 41 UNUSED_41
+ DF_NOP,
+
+ // 42 UNUSED_42
+ DF_NOP,
+
+ // 43 UNUSED_43
+ DF_NOP,
+
+ // 44 AGET vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+ // 45 AGET_WIDE vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+ // 46 AGET_OBJECT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_A | DF_REF_B | DF_CORE_C,
+
+ // 47 AGET_BOOLEAN vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+ // 48 AGET_BYTE vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+ // 49 AGET_CHAR vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+ // 4A AGET_SHORT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+ // 4B APUT vAA, vBB, vCC
+ DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
+
+ // 4C APUT_WIDE vAA, vBB, vCC
+ DF_UA | DF_A_WIDE | DF_UB | DF_UC | DF_NULL_CHK_2 | DF_RANGE_CHK_3 | DF_REF_B | DF_CORE_C,
+
+ // 4D APUT_OBJECT vAA, vBB, vCC
+ DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_A | DF_REF_B | DF_CORE_C,
+
+ // 4E APUT_BOOLEAN vAA, vBB, vCC
+ DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
+
+ // 4F APUT_BYTE vAA, vBB, vCC
+ DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
+
+ // 50 APUT_CHAR vAA, vBB, vCC
+ DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
+
+ // 51 APUT_SHORT vAA, vBB, vCC
+ DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
+
+ // 52 IGET vA, vB, field@CCCC
+ DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+ // 53 IGET_WIDE vA, vB, field@CCCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+ // 54 IGET_OBJECT vA, vB, field@CCCC
+ DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_A | DF_REF_B,
+
+ // 55 IGET_BOOLEAN vA, vB, field@CCCC
+ DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+ // 56 IGET_BYTE vA, vB, field@CCCC
+ DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+ // 57 IGET_CHAR vA, vB, field@CCCC
+ DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+ // 58 IGET_SHORT vA, vB, field@CCCC
+ DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+ // 59 IPUT vA, vB, field@CCCC
+ DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+ // 5A IPUT_WIDE vA, vB, field@CCCC
+ DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_2 | DF_REF_B,
+
+ // 5B IPUT_OBJECT vA, vB, field@CCCC
+ DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_A | DF_REF_B,
+
+ // 5C IPUT_BOOLEAN vA, vB, field@CCCC
+ DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+ // 5D IPUT_BYTE vA, vB, field@CCCC
+ DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+ // 5E IPUT_CHAR vA, vB, field@CCCC
+ DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+ // 5F IPUT_SHORT vA, vB, field@CCCC
+ DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+ // 60 SGET vAA, field@BBBB
+ DF_DA | DF_UMS,
+
+ // 61 SGET_WIDE vAA, field@BBBB
+ DF_DA | DF_A_WIDE | DF_UMS,
+
+ // 62 SGET_OBJECT vAA, field@BBBB
+ DF_DA | DF_REF_A | DF_UMS,
+
+ // 63 SGET_BOOLEAN vAA, field@BBBB
+ DF_DA | DF_UMS,
+
+ // 64 SGET_BYTE vAA, field@BBBB
+ DF_DA | DF_UMS,
+
+ // 65 SGET_CHAR vAA, field@BBBB
+ DF_DA | DF_UMS,
+
+ // 66 SGET_SHORT vAA, field@BBBB
+ DF_DA | DF_UMS,
+
+ // 67 SPUT vAA, field@BBBB
+ DF_UA | DF_UMS,
+
+ // 68 SPUT_WIDE vAA, field@BBBB
+ DF_UA | DF_A_WIDE | DF_UMS,
+
+ // 69 SPUT_OBJECT vAA, field@BBBB
+ DF_UA | DF_REF_A | DF_UMS,
+
+ // 6A SPUT_BOOLEAN vAA, field@BBBB
+ DF_UA | DF_UMS,
+
+ // 6B SPUT_BYTE vAA, field@BBBB
+ DF_UA | DF_UMS,
+
+ // 6C SPUT_CHAR vAA, field@BBBB
+ DF_UA | DF_UMS,
+
+ // 6D SPUT_SHORT vAA, field@BBBB
+ DF_UA | DF_UMS,
+
+ // 6E INVOKE_VIRTUAL {vD, vE, vF, vG, vA}
+ DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+ // 6F INVOKE_SUPER {vD, vE, vF, vG, vA}
+ DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+ // 70 INVOKE_DIRECT {vD, vE, vF, vG, vA}
+ DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+ // 71 INVOKE_STATIC {vD, vE, vF, vG, vA}
+ DF_FORMAT_35C | DF_UMS,
+
+ // 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
+ DF_FORMAT_35C | DF_UMS,
+
+ // 73 UNUSED_73
+ DF_NOP,
+
+ // 74 INVOKE_VIRTUAL_RANGE {vCCCC .. vNNNN}
+ DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+ // 75 INVOKE_SUPER_RANGE {vCCCC .. vNNNN}
+ DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+ // 76 INVOKE_DIRECT_RANGE {vCCCC .. vNNNN}
+ DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+ // 77 INVOKE_STATIC_RANGE {vCCCC .. vNNNN}
+ DF_FORMAT_3RC | DF_UMS,
+
+ // 78 INVOKE_INTERFACE_RANGE {vCCCC .. vNNNN}
+ DF_FORMAT_3RC | DF_UMS,
+
+ // 79 UNUSED_79
+ DF_NOP,
+
+ // 7A UNUSED_7A
+ DF_NOP,
+
+ // 7B NEG_INT vA, vB
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // 7C NOT_INT vA, vB
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // 7D NEG_LONG vA, vB
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+ // 7E NOT_LONG vA, vB
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+ // 7F NEG_FLOAT vA, vB
+ DF_DA | DF_UB | DF_FP_A | DF_FP_B,
+
+ // 80 NEG_DOUBLE vA, vB
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+ // 81 INT_TO_LONG vA, vB
+ DF_DA | DF_A_WIDE | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // 82 INT_TO_FLOAT vA, vB
+ DF_DA | DF_UB | DF_FP_A | DF_CORE_B,
+
+ // 83 INT_TO_DOUBLE vA, vB
+ DF_DA | DF_A_WIDE | DF_UB | DF_FP_A | DF_CORE_B,
+
+ // 84 LONG_TO_INT vA, vB
+ DF_DA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+ // 85 LONG_TO_FLOAT vA, vB
+ DF_DA | DF_UB | DF_B_WIDE | DF_FP_A | DF_CORE_B,
+
+ // 86 LONG_TO_DOUBLE vA, vB
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_CORE_B,
+
+ // 87 FLOAT_TO_INT vA, vB
+ DF_DA | DF_UB | DF_FP_B | DF_CORE_A,
+
+ // 88 FLOAT_TO_LONG vA, vB
+ DF_DA | DF_A_WIDE | DF_UB | DF_FP_B | DF_CORE_A,
+
+ // 89 FLOAT_TO_DOUBLE vA, vB
+ DF_DA | DF_A_WIDE | DF_UB | DF_FP_A | DF_FP_B,
+
+ // 8A DOUBLE_TO_INT vA, vB
+ DF_DA | DF_UB | DF_B_WIDE | DF_FP_B | DF_CORE_A,
+
+ // 8B DOUBLE_TO_LONG vA, vB
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_B | DF_CORE_A,
+
+ // 8C DOUBLE_TO_FLOAT vA, vB
+ DF_DA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+ // 8D INT_TO_BYTE vA, vB
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // 8E INT_TO_CHAR vA, vB
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // 8F INT_TO_SHORT vA, vB
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // 90 ADD_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // 91 SUB_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // 92 MUL_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // 93 DIV_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // 94 REM_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // 95 AND_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // 96 OR_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // 97 XOR_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // 98 SHL_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // 99 SHR_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // 9A USHR_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // 9B ADD_LONG vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // 9C SUB_LONG vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // 9D MUL_LONG vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // 9E DIV_LONG vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // 9F REM_LONG vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // A0 AND_LONG vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // A1 OR_LONG vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // A2 XOR_LONG vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // A3 SHL_LONG vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // A4 SHR_LONG vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // A5 USHR_LONG vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // A6 ADD_FLOAT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+
+ // A7 SUB_FLOAT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+
+ // A8 MUL_FLOAT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+
+ // A9 DIV_FLOAT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+
+ // AA REM_FLOAT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+
+ // AB ADD_DOUBLE vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+
+ // AC SUB_DOUBLE vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+
+ // AD MUL_DOUBLE vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+
+ // AE DIV_DOUBLE vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+
+ // AF REM_DOUBLE vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+
+ // B0 ADD_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // B1 SUB_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // B2 MUL_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // B3 DIV_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // B4 REM_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // B5 AND_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // B6 OR_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // B7 XOR_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // B8 SHL_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // B9 SHR_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // BA USHR_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // BB ADD_LONG_2ADDR vA, vB
+ DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+ // BC SUB_LONG_2ADDR vA, vB
+ DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+ // BD MUL_LONG_2ADDR vA, vB
+ DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+ // BE DIV_LONG_2ADDR vA, vB
+ DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+ // BF REM_LONG_2ADDR vA, vB
+ DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+ // C0 AND_LONG_2ADDR vA, vB
+ DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+ // C1 OR_LONG_2ADDR vA, vB
+ DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+ // C2 XOR_LONG_2ADDR vA, vB
+ DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+ // C3 SHL_LONG_2ADDR vA, vB
+ DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // C4 SHR_LONG_2ADDR vA, vB
+ DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // C5 USHR_LONG_2ADDR vA, vB
+ DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // C6 ADD_FLOAT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+ // C7 SUB_FLOAT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+ // C8 MUL_FLOAT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+ // C9 DIV_FLOAT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+ // CA REM_FLOAT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+ // CB ADD_DOUBLE_2ADDR vA, vB
+ DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+ // CC SUB_DOUBLE_2ADDR vA, vB
+ DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+ // CD MUL_DOUBLE_2ADDR vA, vB
+ DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+ // CE DIV_DOUBLE_2ADDR vA, vB
+ DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+ // CF REM_DOUBLE_2ADDR vA, vB
+ DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+ // D0 ADD_INT_LIT16 vA, vB, #+CCCC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // D1 RSUB_INT vA, vB, #+CCCC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // D2 MUL_INT_LIT16 vA, vB, #+CCCC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // D3 DIV_INT_LIT16 vA, vB, #+CCCC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // D4 REM_INT_LIT16 vA, vB, #+CCCC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // D5 AND_INT_LIT16 vA, vB, #+CCCC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // D6 OR_INT_LIT16 vA, vB, #+CCCC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // D7 XOR_INT_LIT16 vA, vB, #+CCCC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // D8 ADD_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // D9 RSUB_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // DA MUL_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // DB DIV_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // DC REM_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // DD AND_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // DE OR_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // DF XOR_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // E0 SHL_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // E1 SHR_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // E2 USHR_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // E3 IGET_VOLATILE
+ DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+ // E4 IPUT_VOLATILE
+ DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+ // E5 SGET_VOLATILE
+ DF_DA | DF_UMS,
+
+ // E6 SPUT_VOLATILE
+ DF_UA | DF_UMS,
+
+ // E7 IGET_OBJECT_VOLATILE
+ DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_A | DF_REF_B,
+
+ // E8 IGET_WIDE_VOLATILE
+ DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+ // E9 IPUT_WIDE_VOLATILE
+ DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_2 | DF_REF_B,
+
+ // EA SGET_WIDE_VOLATILE
+ DF_DA | DF_A_WIDE | DF_UMS,
+
+ // EB SPUT_WIDE_VOLATILE
+ DF_UA | DF_A_WIDE | DF_UMS,
+
+ // EC BREAKPOINT
+ DF_NOP,
+
+ // ED THROW_VERIFICATION_ERROR
+ DF_NOP | DF_UMS,
+
+ // EE EXECUTE_INLINE
+ DF_FORMAT_35C,
+
+ // EF EXECUTE_INLINE_RANGE
+ DF_FORMAT_3RC,
+
+ // F0 INVOKE_OBJECT_INIT_RANGE
+ DF_NOP | DF_NULL_CHK_0,
+
+ // F1 RETURN_VOID_BARRIER
+ DF_NOP,
+
+ // F2 IGET_QUICK
+ DF_DA | DF_UB | DF_NULL_CHK_0,
+
+ // F3 IGET_WIDE_QUICK
+ DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_0,
+
+ // F4 IGET_OBJECT_QUICK
+ DF_DA | DF_UB | DF_NULL_CHK_0,
+
+ // F5 IPUT_QUICK
+ DF_UA | DF_UB | DF_NULL_CHK_1,
+
+ // F6 IPUT_WIDE_QUICK
+ DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_2,
+
+ // F7 IPUT_OBJECT_QUICK
+ DF_UA | DF_UB | DF_NULL_CHK_1,
+
+ // F8 INVOKE_VIRTUAL_QUICK
+ DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+ // F9 INVOKE_VIRTUAL_QUICK_RANGE
+ DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+ // FA INVOKE_SUPER_QUICK
+ DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+ // FB INVOKE_SUPER_QUICK_RANGE
+ DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+ // FC IPUT_OBJECT_VOLATILE
+ DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_A | DF_REF_B,
+
+ // FD SGET_OBJECT_VOLATILE
+ DF_DA | DF_REF_A | DF_UMS,
+
+ // FE SPUT_OBJECT_VOLATILE
+ DF_UA | DF_REF_A | DF_UMS,
+
+ // FF UNUSED_FF
+ DF_NOP,
+
+ // Beginning of extended MIR opcodes
+ // 100 MIR_PHI
+ DF_DA | DF_NULL_TRANSFER_N,
+
+ // 101 MIR_COPY
+ DF_DA | DF_UB | DF_IS_MOVE,
+
+ // 102 MIR_FUSED_CMPL_FLOAT
+ DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+ // 103 MIR_FUSED_CMPG_FLOAT
+ DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+ // 104 MIR_FUSED_CMPL_DOUBLE
+ DF_UA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+ // 105 MIR_FUSED_CMPG_DOUBLE
+ DF_UA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+ // 106 MIR_FUSED_CMP_LONG
+ DF_UA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+ // 107 MIR_NOP
+ DF_NOP,
+
+ // 108 MIR_NULL_CHECK
+ 0,
+
+ // 109 MIR_RANGE_CHECK
+ 0,
+
+  // 10A MIR_DIV_ZERO_CHECK
+  0,
+
+  // 10B MIR_CHECK
+  0,
+
+  // 10C MIR_CHECKPART2
+  0,
+
+  // 10D MIR_SELECT
+ DF_DA | DF_UB,
+};
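+
+// Reading an entry (illustrative): the AGET row above,
+//   DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+// says the instruction defines vA, uses vB and vC, needs a null check on its
+// first use (the array) and a range check on its second (the index), and that
+// vB is a reference while vC is a core register.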
+
+/* Return the base Dalvik virtual register for an SSA name */
+int SRegToVReg(const CompilationUnit* cu, int ssa_reg)
+{
+ DCHECK_LT(ssa_reg, static_cast<int>(cu->ssa_base_vregs->num_used));
+ return GET_ELEM_N(cu->ssa_base_vregs, int, ssa_reg);
+}
+
+int SRegToSubscript(const CompilationUnit* cu, int ssa_reg)
+{
+ DCHECK(ssa_reg < static_cast<int>(cu->ssa_subscripts->num_used));
+ return GET_ELEM_N(cu->ssa_subscripts, int, ssa_reg);
+}
+
+static int GetSSAUseCount(CompilationUnit* cu, int s_reg)
+{
+ DCHECK(s_reg < static_cast<int>(cu->raw_use_counts.num_used));
+ return cu->raw_use_counts.elem_list[s_reg];
+}
+
+static std::string GetSSAName(const CompilationUnit* cu, int ssa_reg)
+{
+ return StringPrintf("v%d_%d", SRegToVReg(cu, ssa_reg), SRegToSubscript(cu, ssa_reg));
+}
+
+// Similar to GetSSAName, but if the SSA name holds a known constant, show its value as well.
+static std::string GetSSANameWithConst(const CompilationUnit* cu, int ssa_reg, bool singles_only)
+{
+ if (cu->reg_location == NULL) {
+ // Pre-SSA - just use the standard name
+ return GetSSAName(cu, ssa_reg);
+ }
+ if (IsConst(cu, cu->reg_location[ssa_reg])) {
+ if (!singles_only && cu->reg_location[ssa_reg].wide) {
+ return StringPrintf("v%d_%d#0x%llx", SRegToVReg(cu, ssa_reg),
+ SRegToSubscript(cu, ssa_reg),
+ ConstantValueWide(cu, cu->reg_location[ssa_reg]));
+ } else {
+ return StringPrintf("v%d_%d#0x%x", SRegToVReg(cu, ssa_reg),
+ SRegToSubscript(cu, ssa_reg),
+ ConstantValue(cu, cu->reg_location[ssa_reg]));
+ }
+ } else {
+ return StringPrintf("v%d_%d", SRegToVReg(cu, ssa_reg), SRegToSubscript(cu, ssa_reg));
+ }
+}
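+
+// Example output: a plain SSA name prints as "v3_2" (Dalvik v3, second
+// subscript); if the location holds a known constant it becomes e.g.
+// "v3_2#0xa", or "v3_2#0x100000000" for a wide constant.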
+
+
+char* GetDalvikDisassembly(CompilationUnit* cu, const MIR* mir)
+{
+ DecodedInstruction insn = mir->dalvikInsn;
+ std::string str;
+ int flags = 0;
+ int opcode = insn.opcode;
+ char* ret;
+ bool nop = false;
+ SSARepresentation* ssa_rep = mir->ssa_rep;
+ Instruction::Format dalvik_format = Instruction::k10x; // Default to no-operand format
+ int defs = (ssa_rep != NULL) ? ssa_rep->num_defs : 0;
+ int uses = (ssa_rep != NULL) ? ssa_rep->num_uses : 0;
+
+ // Handle special cases.
+ if ((opcode == kMirOpCheck) || (opcode == kMirOpCheckPart2)) {
+ str.append(extended_mir_op_names[opcode - kMirOpFirst]);
+ str.append(": ");
+ // Recover the original Dex instruction
+ insn = mir->meta.throw_insn->dalvikInsn;
+ ssa_rep = mir->meta.throw_insn->ssa_rep;
+ defs = ssa_rep->num_defs;
+ uses = ssa_rep->num_uses;
+ opcode = insn.opcode;
+ } else if (opcode == kMirOpNop) {
+ str.append("[");
+ insn.opcode = mir->meta.original_opcode;
+ opcode = mir->meta.original_opcode;
+ nop = true;
+ }
+
+ if (opcode >= kMirOpFirst) {
+ str.append(extended_mir_op_names[opcode - kMirOpFirst]);
+ } else {
+ dalvik_format = Instruction::FormatOf(insn.opcode);
+ flags = Instruction::FlagsOf(insn.opcode);
+ str.append(Instruction::Name(insn.opcode));
+ }
+
+ if (opcode == kMirOpPhi) {
+ int* incoming = reinterpret_cast<int*>(insn.vB);
+ str.append(StringPrintf(" %s = (%s",
+ GetSSANameWithConst(cu, ssa_rep->defs[0], true).c_str(),
+ GetSSANameWithConst(cu, ssa_rep->uses[0], true).c_str()));
+ str.append(StringPrintf(":%d",incoming[0]));
+ int i;
+ for (i = 1; i < uses; i++) {
+ str.append(StringPrintf(", %s:%d",
+ GetSSANameWithConst(cu, ssa_rep->uses[i], true).c_str(),
+ incoming[i]));
+ }
+ str.append(")");
+ } else if (flags & Instruction::kBranch) {
+ // For branches, decode the instructions to print out the branch targets.
+ int offset = 0;
+ switch (dalvik_format) {
+ case Instruction::k21t:
+ str.append(StringPrintf(" %s,", GetSSANameWithConst(cu, ssa_rep->uses[0], false).c_str()));
+ offset = insn.vB;
+ break;
+ case Instruction::k22t:
+ str.append(StringPrintf(" %s, %s,", GetSSANameWithConst(cu, ssa_rep->uses[0], false).c_str(),
+ GetSSANameWithConst(cu, ssa_rep->uses[1], false).c_str()));
+ offset = insn.vC;
+ break;
+ case Instruction::k10t:
+ case Instruction::k20t:
+ case Instruction::k30t:
+ offset = insn.vA;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected branch format " << dalvik_format << " from " << insn.opcode;
+ }
+ str.append(StringPrintf(" 0x%x (%c%x)", mir->offset + offset,
+ offset > 0 ? '+' : '-', offset > 0 ? offset : -offset));
+ } else {
+ // For invokes-style formats, treat wide regs as a pair of singles
+ bool show_singles = ((dalvik_format == Instruction::k35c) ||
+ (dalvik_format == Instruction::k3rc));
+ if (defs != 0) {
+ str.append(StringPrintf(" %s", GetSSANameWithConst(cu, ssa_rep->defs[0], false).c_str()));
+ if (uses != 0) {
+ str.append(", ");
+ }
+ }
+ for (int i = 0; i < uses; i++) {
+ str.append(
+ StringPrintf(" %s", GetSSANameWithConst(cu, ssa_rep->uses[i], show_singles).c_str()));
+ if (!show_singles && (cu->reg_location != NULL) && cu->reg_location[i].wide) {
+ // For the listing, skip the high sreg.
+ i++;
+ }
+ if (i != (uses -1)) {
+ str.append(",");
+ }
+ }
+ switch (dalvik_format) {
+ case Instruction::k11n: // Add one immediate from vB
+ case Instruction::k21s:
+ case Instruction::k31i:
+ case Instruction::k21h:
+ str.append(StringPrintf(", #%d", insn.vB));
+ break;
+ case Instruction::k51l: // Add one wide immediate
+ str.append(StringPrintf(", #%lld", insn.vB_wide));
+ break;
+ case Instruction::k21c: // One register, one string/type/method index
+ case Instruction::k31c:
+ str.append(StringPrintf(", index #%d", insn.vB));
+ break;
+ case Instruction::k22c: // Two registers, one string/type/method index
+ str.append(StringPrintf(", index #%d", insn.vC));
+ break;
+ case Instruction::k22s: // Add one immediate from vC
+ case Instruction::k22b:
+ str.append(StringPrintf(", #%d", insn.vC));
+ break;
+ default:
+ ; // Nothing left to print
+ }
+ }
+ if (nop) {
+ str.append("]--optimized away");
+ }
+ int length = str.length() + 1;
+ ret = static_cast<char*>(NewMem(cu, length, false, kAllocDFInfo));
+ strncpy(ret, str.c_str(), length);
+ return ret;
+}
+
+/* Any register that is used before being defined is considered live-in */
+static void HandleLiveInUse(CompilationUnit* cu, ArenaBitVector* use_v, ArenaBitVector* def_v,
+ ArenaBitVector* live_in_v, int dalvik_reg_id)
+{
+ SetBit(cu, use_v, dalvik_reg_id);
+ if (!IsBitSet(def_v, dalvik_reg_id)) {
+ SetBit(cu, live_in_v, dalvik_reg_id);
+ }
+}
+
+/* Mark a reg as being defined */
+static void HandleDef(CompilationUnit* cu, ArenaBitVector* def_v, int dalvik_reg_id)
+{
+ SetBit(cu, def_v, dalvik_reg_id);
+}
+
+/*
+ * Compute the live-in variables for natural loops. Variables that are live-in
+ * to the main loop body are considered to be defined in the entry block.
+ */
+bool FindLocalLiveIn(CompilationUnit* cu, BasicBlock* bb)
+{
+ MIR* mir;
+ ArenaBitVector *use_v, *def_v, *live_in_v;
+
+ if (bb->data_flow_info == NULL) return false;
+
+ use_v = bb->data_flow_info->use_v =
+ AllocBitVector(cu, cu->num_dalvik_registers, false, kBitMapUse);
+ def_v = bb->data_flow_info->def_v =
+ AllocBitVector(cu, cu->num_dalvik_registers, false, kBitMapDef);
+ live_in_v = bb->data_flow_info->live_in_v =
+ AllocBitVector(cu, cu->num_dalvik_registers, false,
+ kBitMapLiveIn);
+
+ for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ int df_attributes = oat_data_flow_attributes[mir->dalvikInsn.opcode];
+ DecodedInstruction *d_insn = &mir->dalvikInsn;
+
+ if (df_attributes & DF_HAS_USES) {
+ if (df_attributes & DF_UA) {
+ HandleLiveInUse(cu, use_v, def_v, live_in_v, d_insn->vA);
+ if (df_attributes & DF_A_WIDE) {
+ HandleLiveInUse(cu, use_v, def_v, live_in_v, d_insn->vA+1);
+ }
+ }
+ if (df_attributes & DF_UB) {
+ HandleLiveInUse(cu, use_v, def_v, live_in_v, d_insn->vB);
+ if (df_attributes & DF_B_WIDE) {
+ HandleLiveInUse(cu, use_v, def_v, live_in_v, d_insn->vB+1);
+ }
+ }
+ if (df_attributes & DF_UC) {
+ HandleLiveInUse(cu, use_v, def_v, live_in_v, d_insn->vC);
+ if (df_attributes & DF_C_WIDE) {
+ HandleLiveInUse(cu, use_v, def_v, live_in_v, d_insn->vC+1);
+ }
+ }
+ }
+ if (df_attributes & DF_FORMAT_35C) {
+ for (unsigned int i = 0; i < d_insn->vA; i++) {
+ HandleLiveInUse(cu, use_v, def_v, live_in_v, d_insn->arg[i]);
+ }
+ }
+ if (df_attributes & DF_FORMAT_3RC) {
+ for (unsigned int i = 0; i < d_insn->vA; i++) {
+ HandleLiveInUse(cu, use_v, def_v, live_in_v, d_insn->vC+i);
+ }
+ }
+ if (df_attributes & DF_HAS_DEFS) {
+ HandleDef(cu, def_v, d_insn->vA);
+ if (df_attributes & DF_A_WIDE) {
+ HandleDef(cu, def_v, d_insn->vA+1);
+ }
+ }
+ }
+ return true;
+}
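+
+// Example: for a block containing "add-int v0, v1, v2" followed by
+// "move v1, v0", v1 and v2 are recorded as live-in (used before any
+// definition in the block), while v0 is not, since the add defines it before
+// the move uses it.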
+
+static int AddNewSReg(CompilationUnit* cu, int v_reg)
+{
+ // Compiler temps always have a subscript of 0
+ int subscript = (v_reg < 0) ? 0 : ++cu->ssa_last_defs[v_reg];
+ int ssa_reg = cu->num_ssa_regs++;
+ InsertGrowableList(cu, cu->ssa_base_vregs, v_reg);
+ InsertGrowableList(cu, cu->ssa_subscripts, subscript);
+ std::string ssa_name = GetSSAName(cu, ssa_reg);
+ char* name = static_cast<char*>(NewMem(cu, ssa_name.length() + 1, false, kAllocDFInfo));
+ strncpy(name, ssa_name.c_str(), ssa_name.length() + 1);
+ InsertGrowableList(cu, cu->ssa_strings, reinterpret_cast<uintptr_t>(name));
+ DCHECK_EQ(cu->ssa_base_vregs->num_used, cu->ssa_subscripts->num_used);
+ return ssa_reg;
+}
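+
+// Example: each new definition of a Dalvik register bumps that register's
+// subscript, producing SSA names like v5_1, v5_2, ...; compiler temps
+// (negative v_reg) always keep subscript 0.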
+
+/* Find out the latest SSA register for a given Dalvik register */
+static void HandleSSAUse(CompilationUnit* cu, int* uses, int dalvik_reg, int reg_index)
+{
+ DCHECK((dalvik_reg >= 0) && (dalvik_reg < cu->num_dalvik_registers));
+ uses[reg_index] = cu->vreg_to_ssa_map[dalvik_reg];
+}
+
+/* Set up a new SSA register for a given Dalvik register */
+static void HandleSSADef(CompilationUnit* cu, int* defs, int dalvik_reg, int reg_index)
+{
+ DCHECK((dalvik_reg >= 0) && (dalvik_reg < cu->num_dalvik_registers));
+ int ssa_reg = AddNewSReg(cu, dalvik_reg);
+ cu->vreg_to_ssa_map[dalvik_reg] = ssa_reg;
+ defs[reg_index] = ssa_reg;
+}
+
+/* Look up new SSA names for format_35c instructions */
+static void DataFlowSSAFormat35C(CompilationUnit* cu, MIR* mir)
+{
+ DecodedInstruction *d_insn = &mir->dalvikInsn;
+ int num_uses = d_insn->vA;
+ int i;
+
+ mir->ssa_rep->num_uses = num_uses;
+ mir->ssa_rep->uses = static_cast<int*>(NewMem(cu, sizeof(int) * num_uses, true, kAllocDFInfo));
+ // NOTE: will be filled in during type & size inference pass
+ mir->ssa_rep->fp_use = static_cast<bool*>(NewMem(cu, sizeof(bool) * num_uses, true,
+ kAllocDFInfo));
+
+ for (i = 0; i < num_uses; i++) {
+ HandleSSAUse(cu, mir->ssa_rep->uses, d_insn->arg[i], i);
+ }
+}
+
+/* Look up new SSA names for format_3rc instructions */
+static void DataFlowSSAFormat3RC(CompilationUnit* cu, MIR* mir)
+{
+ DecodedInstruction *d_insn = &mir->dalvikInsn;
+ int num_uses = d_insn->vA;
+ int i;
+
+ mir->ssa_rep->num_uses = num_uses;
+ mir->ssa_rep->uses = static_cast<int*>(NewMem(cu, sizeof(int) * num_uses, true, kAllocDFInfo));
+ // NOTE: will be filled in during type & size inference pass
+ mir->ssa_rep->fp_use = static_cast<bool*>(NewMem(cu, sizeof(bool) * num_uses, true,
+ kAllocDFInfo));
+
+ for (i = 0; i < num_uses; i++) {
+ HandleSSAUse(cu, mir->ssa_rep->uses, d_insn->vC+i, i);
+ }
+}
+
+/* Entry function to convert a block into SSA representation */
+bool DoSSAConversion(CompilationUnit* cu, BasicBlock* bb)
+{
+ MIR* mir;
+
+ if (bb->data_flow_info == NULL) return false;
+
+ for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ mir->ssa_rep = static_cast<struct SSARepresentation *>(NewMem(cu, sizeof(SSARepresentation),
+ true, kAllocDFInfo));
+
+ int df_attributes = oat_data_flow_attributes[mir->dalvikInsn.opcode];
+
+ // If not a pseudo-op, note non-leaf or can throw
+ if (static_cast<int>(mir->dalvikInsn.opcode) <
+ static_cast<int>(kNumPackedOpcodes)) {
+ int flags = Instruction::FlagsOf(mir->dalvikInsn.opcode);
+
+ if (flags & Instruction::kThrow) {
+ cu->attrs &= ~METHOD_IS_THROW_FREE;
+ }
+
+ if (flags & Instruction::kInvoke) {
+ cu->attrs &= ~METHOD_IS_LEAF;
+ }
+ }
+
+ int num_uses = 0;
+
+ if (df_attributes & DF_FORMAT_35C) {
+ DataFlowSSAFormat35C(cu, mir);
+ continue;
+ }
+
+ if (df_attributes & DF_FORMAT_3RC) {
+ DataFlowSSAFormat3RC(cu, mir);
+ continue;
+ }
+
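+    // Count the uses and defs so the SSA arrays can be sized before being filled in.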
+    if (df_attributes & DF_HAS_USES) {
+      if (df_attributes & DF_UA) {
+        num_uses++;
+        if (df_attributes & DF_A_WIDE) {
+          num_uses++;
+        }
+      }
+      if (df_attributes & DF_UB) {
+        num_uses++;
+        if (df_attributes & DF_B_WIDE) {
+          num_uses++;
+        }
+      }
+      if (df_attributes & DF_UC) {
+        num_uses++;
+        if (df_attributes & DF_C_WIDE) {
+          num_uses++;
+        }
+      }
+    }
+
+ if (num_uses) {
+ mir->ssa_rep->num_uses = num_uses;
+ mir->ssa_rep->uses = static_cast<int*>(NewMem(cu, sizeof(int) * num_uses, false,
+ kAllocDFInfo));
+ mir->ssa_rep->fp_use = static_cast<bool*>(NewMem(cu, sizeof(bool) * num_uses, false,
+ kAllocDFInfo));
+ }
+
+ int num_defs = 0;
+
+ if (df_attributes & DF_HAS_DEFS) {
+ num_defs++;
+ if (df_attributes & DF_A_WIDE) {
+ num_defs++;
+ }
+ }
+
+ if (num_defs) {
+ mir->ssa_rep->num_defs = num_defs;
+ mir->ssa_rep->defs = static_cast<int*>(NewMem(cu, sizeof(int) * num_defs, false,
+ kAllocDFInfo));
+ mir->ssa_rep->fp_def = static_cast<bool*>(NewMem(cu, sizeof(bool) * num_defs, false,
+ kAllocDFInfo));
+ }
+
+ DecodedInstruction *d_insn = &mir->dalvikInsn;
+
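+    // Record the SSA names: look up the current name for each use and assign a
+    // fresh name for each def.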
+ if (df_attributes & DF_HAS_USES) {
+ num_uses = 0;
+ if (df_attributes & DF_UA) {
+ mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_A;
+ HandleSSAUse(cu, mir->ssa_rep->uses, d_insn->vA, num_uses++);
+ if (df_attributes & DF_A_WIDE) {
+ mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_A;
+ HandleSSAUse(cu, mir->ssa_rep->uses, d_insn->vA+1, num_uses++);
+ }
+ }
+ if (df_attributes & DF_UB) {
+ mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_B;
+ HandleSSAUse(cu, mir->ssa_rep->uses, d_insn->vB, num_uses++);
+ if (df_attributes & DF_B_WIDE) {
+ mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_B;
+ HandleSSAUse(cu, mir->ssa_rep->uses, d_insn->vB+1, num_uses++);
+ }
+ }
+ if (df_attributes & DF_UC) {
+ mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_C;
+ HandleSSAUse(cu, mir->ssa_rep->uses, d_insn->vC, num_uses++);
+ if (df_attributes & DF_C_WIDE) {
+ mir->ssa_rep->fp_use[num_uses] = df_attributes & DF_FP_C;
+ HandleSSAUse(cu, mir->ssa_rep->uses, d_insn->vC+1, num_uses++);
+ }
+ }
+ }
+ if (df_attributes & DF_HAS_DEFS) {
+ mir->ssa_rep->fp_def[0] = df_attributes & DF_FP_A;
+ HandleSSADef(cu, mir->ssa_rep->defs, d_insn->vA, 0);
+ if (df_attributes & DF_A_WIDE) {
+ mir->ssa_rep->fp_def[1] = df_attributes & DF_FP_A;
+ HandleSSADef(cu, mir->ssa_rep->defs, d_insn->vA+1, 1);
+ }
+ }
+ }
+
+ if (!cu->disable_dataflow) {
+ /*
+ * Take a snapshot of Dalvik->SSA mapping at the end of each block. The
+ * input to PHI nodes can be derived from the snapshot of all
+ * predecessor blocks.
+ */
+ bb->data_flow_info->vreg_to_ssa_map =
+ static_cast<int*>(NewMem(cu, sizeof(int) * cu->num_dalvik_registers, false,
+ kAllocDFInfo));
+
+ memcpy(bb->data_flow_info->vreg_to_ssa_map, cu->vreg_to_ssa_map,
+ sizeof(int) * cu->num_dalvik_registers);
+ }
+ return true;
+}
+
+/* Setup a constant value for opcodes that have the DF_SETS_CONST attribute */
+static void SetConstant(CompilationUnit* cu, int32_t ssa_reg, int value)
+{
+ SetBit(cu, cu->is_constant_v, ssa_reg);
+ cu->constant_values[ssa_reg] = value;
+}
+
+static void SetConstantWide(CompilationUnit* cu, int ssa_reg, int64_t value)
+{
+ SetBit(cu, cu->is_constant_v, ssa_reg);
+ cu->constant_values[ssa_reg] = Low32Bits(value);
+ cu->constant_values[ssa_reg + 1] = High32Bits(value);
+}
+
+bool DoConstantPropogation(CompilationUnit* cu, BasicBlock* bb)
+{
+ MIR* mir;
+ ArenaBitVector *is_constant_v = cu->is_constant_v;
+
+ for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ int df_attributes = oat_data_flow_attributes[mir->dalvikInsn.opcode];
+
+ DecodedInstruction *d_insn = &mir->dalvikInsn;
+
+ if (!(df_attributes & DF_HAS_DEFS)) continue;
+
+ /* Handle instructions that set up constants directly */
+ if (df_attributes & DF_SETS_CONST) {
+ if (df_attributes & DF_DA) {
+ int32_t vB = static_cast<int32_t>(d_insn->vB);
+ switch (d_insn->opcode) {
+ case Instruction::CONST_4:
+ case Instruction::CONST_16:
+ case Instruction::CONST:
+ SetConstant(cu, mir->ssa_rep->defs[0], vB);
+ break;
+ case Instruction::CONST_HIGH16:
+ SetConstant(cu, mir->ssa_rep->defs[0], vB << 16);
+ break;
+ case Instruction::CONST_WIDE_16:
+ case Instruction::CONST_WIDE_32:
+ SetConstantWide(cu, mir->ssa_rep->defs[0], static_cast<int64_t>(vB));
+ break;
+ case Instruction::CONST_WIDE:
+          SetConstantWide(cu, mir->ssa_rep->defs[0], d_insn->vB_wide);
+ break;
+ case Instruction::CONST_WIDE_HIGH16:
+ SetConstantWide(cu, mir->ssa_rep->defs[0], static_cast<int64_t>(vB) << 48);
+ break;
+ default:
+ break;
+ }
+ }
+      /* Handle moves that copy constants between registers */
+ } else if (df_attributes & DF_IS_MOVE) {
+ int i;
+
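+      // A move propagates a constant only if every source operand is itself a
+      // known constant.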
+ for (i = 0; i < mir->ssa_rep->num_uses; i++) {
+ if (!IsBitSet(is_constant_v, mir->ssa_rep->uses[i])) break;
+ }
+ /* Move a register holding a constant to another register */
+ if (i == mir->ssa_rep->num_uses) {
+ SetConstant(cu, mir->ssa_rep->defs[0],
+ cu->constant_values[mir->ssa_rep->uses[0]]);
+ if (df_attributes & DF_A_WIDE) {
+ SetConstant(cu, mir->ssa_rep->defs[1],
+ cu->constant_values[mir->ssa_rep->uses[1]]);
+ }
+ }
+ } else if (df_attributes & DF_NULL_TRANSFER_N) {
+ /*
+ * Mark const sregs that appear in merges. Need to flush those to home location.
+ * TUNING: instead of flushing on def, we could insert a flush on the appropriate
+ * edge[s].
+ */
+ DCHECK_EQ(static_cast<int32_t>(d_insn->opcode), kMirOpPhi);
+ for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
+ if (IsConst(cu, mir->ssa_rep->uses[i])) {
+ SetBit(cu, cu->must_flush_constant_v, mir->ssa_rep->uses[i]);
+ }
+ }
+ }
+ }
+ /* TODO: implement code to handle arithmetic operations */
+ return true;
+}
+
+/* Setup the basic data structures for SSA conversion */
+void CompilerInitializeSSAConversion(CompilationUnit* cu)
+{
+ int i;
+ int num_dalvik_reg = cu->num_dalvik_registers;
+
+ cu->ssa_base_vregs =
+ static_cast<GrowableList*>(NewMem(cu, sizeof(GrowableList), false, kAllocDFInfo));
+ cu->ssa_subscripts =
+ static_cast<GrowableList*>(NewMem(cu, sizeof(GrowableList), false, kAllocDFInfo));
+ cu->ssa_strings =
+ static_cast<GrowableList*>(NewMem(cu, sizeof(GrowableList), false, kAllocDFInfo));
+ // Create the ssa mappings, estimating the max size
+ CompilerInitGrowableList(cu, cu->ssa_base_vregs,
+ num_dalvik_reg + cu->def_count + 128,
+ kListSSAtoDalvikMap);
+ CompilerInitGrowableList(cu, cu->ssa_subscripts,
+ num_dalvik_reg + cu->def_count + 128,
+ kListSSAtoDalvikMap);
+ CompilerInitGrowableList(cu, cu->ssa_strings,
+ num_dalvik_reg + cu->def_count + 128,
+ kListSSAtoDalvikMap);
+ /*
+ * Initial number of SSA registers is equal to the number of Dalvik
+ * registers.
+ */
+ cu->num_ssa_regs = num_dalvik_reg;
+
+  /*
+   * Initialize the SSA2Dalvik map list. For the first num_dalvik_reg elements,
+   * the base vreg is the register number itself and the subscript is 0.
+   */
+ for (i = 0; i < num_dalvik_reg; i++) {
+ InsertGrowableList(cu, cu->ssa_base_vregs, i);
+ InsertGrowableList(cu, cu->ssa_subscripts, 0);
+ std::string ssa_name = GetSSAName(cu, i);
+ char* name = static_cast<char*>(NewMem(cu, ssa_name.length() + 1, true, kAllocDFInfo));
+ strncpy(name, ssa_name.c_str(), ssa_name.length() + 1);
+ InsertGrowableList(cu, cu->ssa_strings, reinterpret_cast<uintptr_t>(name));
+ }
+
+  /*
+   * Initialize the Dalvik-to-SSA map. There is one entry for each
+   * Dalvik register, and initially its SSA name is the register number itself.
+   */
+ cu->vreg_to_ssa_map =
+ static_cast<int*>(NewMem(cu, sizeof(int) * num_dalvik_reg, false, kAllocDFInfo));
+  /* Keep track of the highest def for each Dalvik reg */
+ cu->ssa_last_defs =
+ static_cast<int*>(NewMem(cu, sizeof(int) * num_dalvik_reg, false, kAllocDFInfo));
+
+ for (i = 0; i < num_dalvik_reg; i++) {
+ cu->vreg_to_ssa_map[i] = i;
+ cu->ssa_last_defs[i] = 0;
+ }
+
+ /* Add ssa reg for Method* */
+ cu->method_sreg = AddNewSReg(cu, SSA_METHOD_BASEREG);
+
+ /*
+ * Allocate the BasicBlockDataFlow structure for the entry and code blocks
+ */
+ GrowableListIterator iterator;
+
+ GrowableListIteratorInit(&cu->block_list, &iterator);
+
+ while (true) {
+ BasicBlock* bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iterator));
+ if (bb == NULL) break;
+ if (bb->hidden == true) continue;
+ if (bb->block_type == kDalvikByteCode ||
+ bb->block_type == kEntryBlock ||
+ bb->block_type == kExitBlock) {
+ bb->data_flow_info = static_cast<BasicBlockDataFlow*>(NewMem(cu, sizeof(BasicBlockDataFlow),
+ true, kAllocDFInfo));
+ }
+ }
+}
+
+/* Clear the visited flag for each BB */
+bool ClearVisitedFlag(struct CompilationUnit* cu, struct BasicBlock* bb)
+{
+ bb->visited = false;
+ return true;
+}
+
+void DataFlowAnalysisDispatcher(CompilationUnit* cu,
+ bool (*func)(CompilationUnit*, BasicBlock*),
+ DataFlowAnalysisMode dfa_mode,
+ bool is_iterative)
+{
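+  // Apply func to blocks in the requested traversal order; if is_iterative is
+  // set, repeat until no block reports a change.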
+ bool change = true;
+
+ while (change) {
+ change = false;
+
+ switch (dfa_mode) {
+ /* Scan all blocks and perform the operations specified in func */
+ case kAllNodes:
+ {
+ GrowableListIterator iterator;
+ GrowableListIteratorInit(&cu->block_list, &iterator);
+ while (true) {
+ BasicBlock* bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iterator));
+ if (bb == NULL) break;
+ if (bb->hidden == true) continue;
+ change |= (*func)(cu, bb);
+ }
+ }
+ break;
+ /* Scan reachable blocks and perform the ops specified in func. */
+ case kReachableNodes:
+ {
+ int num_reachable_blocks = cu->num_reachable_blocks;
+ int idx;
+ const GrowableList *block_list = &cu->block_list;
+
+ for (idx = 0; idx < num_reachable_blocks; idx++) {
+ int block_idx = cu->dfs_order.elem_list[idx];
+ BasicBlock* bb =
+ reinterpret_cast<BasicBlock*>( GrowableListGetElement(block_list, block_idx));
+ change |= (*func)(cu, bb);
+ }
+ }
+ break;
+
+ /* Scan reachable blocks by pre-order dfs and invoke func on each. */
+ case kPreOrderDFSTraversal:
+ {
+ int num_reachable_blocks = cu->num_reachable_blocks;
+ int idx;
+ const GrowableList *block_list = &cu->block_list;
+
+ for (idx = 0; idx < num_reachable_blocks; idx++) {
+ int dfs_idx = cu->dfs_order.elem_list[idx];
+ BasicBlock* bb =
+ reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, dfs_idx));
+ change |= (*func)(cu, bb);
+ }
+ }
+ break;
+ /* Scan reachable blocks post-order dfs and invoke func on each. */
+ case kPostOrderDFSTraversal:
+ {
+ int num_reachable_blocks = cu->num_reachable_blocks;
+ int idx;
+ const GrowableList *block_list = &cu->block_list;
+
+ for (idx = num_reachable_blocks - 1; idx >= 0; idx--) {
+ int dfs_idx = cu->dfs_order.elem_list[idx];
+ BasicBlock* bb =
+ reinterpret_cast<BasicBlock *>( GrowableListGetElement(block_list, dfs_idx));
+ change |= (*func)(cu, bb);
+ }
+ }
+ break;
+ /* Scan reachable post-order dom tree and invoke func on each. */
+ case kPostOrderDOMTraversal:
+ {
+ int num_reachable_blocks = cu->num_reachable_blocks;
+ int idx;
+ const GrowableList *block_list = &cu->block_list;
+
+ for (idx = 0; idx < num_reachable_blocks; idx++) {
+ int dom_idx = cu->dom_post_order_traversal.elem_list[idx];
+ BasicBlock* bb =
+ reinterpret_cast<BasicBlock*>( GrowableListGetElement(block_list, dom_idx));
+ change |= (*func)(cu, bb);
+ }
+ }
+ break;
+ /* Scan reachable blocks reverse post-order dfs, invoke func on each */
+ case kReversePostOrderTraversal:
+ {
+ int num_reachable_blocks = cu->num_reachable_blocks;
+ int idx;
+ const GrowableList *block_list = &cu->block_list;
+
+ for (idx = num_reachable_blocks - 1; idx >= 0; idx--) {
+ int rev_idx = cu->dfs_post_order.elem_list[idx];
+ BasicBlock* bb =
+ reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, rev_idx));
+ change |= (*func)(cu, bb);
+ }
+ }
+ break;
+ default:
+ LOG(FATAL) << "Unknown traversal mode: " << dfa_mode;
+ }
+ /* If is_iterative is false, exit the loop after the first iteration */
+ change &= is_iterative;
+ }
+}
+
+/* Advance to next strictly dominated MIR node in an extended basic block */
+static MIR* AdvanceMIR(CompilationUnit* cu, BasicBlock** p_bb, MIR* mir)
+{
+ BasicBlock* bb = *p_bb;
+ if (mir != NULL) {
+ mir = mir->next;
+ if (mir == NULL) {
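+      // Ran off the end of the block: continue into the fall-through block only
+      // if it has a single predecessor (i.e. it is still part of this extended BB).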
+ bb = bb->fall_through;
+ if ((bb == NULL) || Predecessors(bb) != 1) {
+ mir = NULL;
+ } else {
+ *p_bb = bb;
+ mir = bb->first_mir_insn;
+ }
+ }
+ }
+ return mir;
+}
+
+/*
+ * To be used at an invoke mir. If the logically next mir node represents
+ * a move-result, return it. Else, return NULL. If a move-result exists,
+ * it is required to immediately follow the invoke with no intervening
+ * opcodes or incoming arcs. However, if the result of the invoke is not
+ * used, a move-result may not be present.
+ */
+MIR* FindMoveResult(CompilationUnit* cu, BasicBlock* bb, MIR* mir)
+{
+ BasicBlock* tbb = bb;
+ mir = AdvanceMIR(cu, &tbb, mir);
+ while (mir != NULL) {
+ int opcode = mir->dalvikInsn.opcode;
+ if ((mir->dalvikInsn.opcode == Instruction::MOVE_RESULT) ||
+ (mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) ||
+ (mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_WIDE)) {
+ break;
+ }
+ // Keep going if pseudo op, otherwise terminate
+ if (opcode < kNumPackedOpcodes) {
+ mir = NULL;
+ } else {
+ mir = AdvanceMIR(cu, &tbb, mir);
+ }
+ }
+ return mir;
+}
+
+static BasicBlock* NextDominatedBlock(CompilationUnit* cu, BasicBlock* bb)
+{
+ if (bb->block_type == kDead) {
+ return NULL;
+ }
+ DCHECK((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
+ || (bb->block_type == kExitBlock));
+ bb = bb->fall_through;
+ if (bb == NULL || (Predecessors(bb) != 1)) {
+ return NULL;
+ }
+ DCHECK((bb->block_type == kDalvikByteCode) || (bb->block_type == kExitBlock));
+ return bb;
+}
+
+static MIR* FindPhi(CompilationUnit* cu, BasicBlock* bb, int ssa_name)
+{
+ for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ if (static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi) {
+ for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
+ if (mir->ssa_rep->uses[i] == ssa_name) {
+ return mir;
+ }
+ }
+ }
+ }
+ return NULL;
+}
+
+static SelectInstructionKind SelectKind(MIR* mir)
+{
+ switch (mir->dalvikInsn.opcode) {
+ case Instruction::MOVE:
+ case Instruction::MOVE_OBJECT:
+ case Instruction::MOVE_16:
+ case Instruction::MOVE_OBJECT_16:
+ case Instruction::MOVE_FROM16:
+ case Instruction::MOVE_OBJECT_FROM16:
+ return kSelectMove;
+ case Instruction::CONST:
+ case Instruction::CONST_4:
+ case Instruction::CONST_16:
+ return kSelectConst;
+ case Instruction::GOTO:
+ case Instruction::GOTO_16:
+ case Instruction::GOTO_32:
+ return kSelectGoto;
+ default:;
+ }
+ return kSelectNone;
+}
+
+/* Do some MIR-level extended basic block optimizations */
+static bool BasicBlockOpt(CompilationUnit* cu, BasicBlock* bb)
+{
+ if (bb->block_type == kDead) {
+ return true;
+ }
+ int num_temps = 0;
+ BBOpt bb_opt(cu);
+ while (bb != NULL) {
+ for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ // TUNING: use the returned value number for CSE.
+ bb_opt.GetValueNumber(mir);
+ // Look for interesting opcodes, skip otherwise
+ Instruction::Code opcode = mir->dalvikInsn.opcode;
+ switch (opcode) {
+ case Instruction::CMPL_FLOAT:
+ case Instruction::CMPL_DOUBLE:
+ case Instruction::CMPG_FLOAT:
+ case Instruction::CMPG_DOUBLE:
+ case Instruction::CMP_LONG:
+ if (cu->gen_bitcode) {
+ // Bitcode doesn't allow this optimization.
+ break;
+ }
+ if (mir->next != NULL) {
+ MIR* mir_next = mir->next;
+ Instruction::Code br_opcode = mir_next->dalvikInsn.opcode;
+ ConditionCode ccode = kCondNv;
+ switch(br_opcode) {
+ case Instruction::IF_EQZ:
+ ccode = kCondEq;
+ break;
+ case Instruction::IF_NEZ:
+ ccode = kCondNe;
+ break;
+ case Instruction::IF_LTZ:
+ ccode = kCondLt;
+ break;
+ case Instruction::IF_GEZ:
+ ccode = kCondGe;
+ break;
+ case Instruction::IF_GTZ:
+ ccode = kCondGt;
+ break;
+ case Instruction::IF_LEZ:
+ ccode = kCondLe;
+ break;
+ default:
+ break;
+ }
+ // Make sure result of cmp is used by next insn and nowhere else
+ if ((ccode != kCondNv) &&
+ (mir->ssa_rep->defs[0] == mir_next->ssa_rep->uses[0]) &&
+ (GetSSAUseCount(cu, mir->ssa_rep->defs[0]) == 1)) {
+ mir_next->dalvikInsn.arg[0] = ccode;
+ switch(opcode) {
+ case Instruction::CMPL_FLOAT:
+ mir_next->dalvikInsn.opcode =
+ static_cast<Instruction::Code>(kMirOpFusedCmplFloat);
+ break;
+ case Instruction::CMPL_DOUBLE:
+ mir_next->dalvikInsn.opcode =
+ static_cast<Instruction::Code>(kMirOpFusedCmplDouble);
+ break;
+ case Instruction::CMPG_FLOAT:
+ mir_next->dalvikInsn.opcode =
+ static_cast<Instruction::Code>(kMirOpFusedCmpgFloat);
+ break;
+ case Instruction::CMPG_DOUBLE:
+ mir_next->dalvikInsn.opcode =
+ static_cast<Instruction::Code>(kMirOpFusedCmpgDouble);
+ break;
+ case Instruction::CMP_LONG:
+ mir_next->dalvikInsn.opcode =
+ static_cast<Instruction::Code>(kMirOpFusedCmpLong);
+ break;
+ default: LOG(ERROR) << "Unexpected opcode: " << opcode;
+ }
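+              // NOP out the compare and transfer its operands to the fused branch.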
+ mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
+ mir_next->ssa_rep->num_uses = mir->ssa_rep->num_uses;
+ mir_next->ssa_rep->uses = mir->ssa_rep->uses;
+ mir_next->ssa_rep->fp_use = mir->ssa_rep->fp_use;
+ mir_next->ssa_rep->num_defs = 0;
+ mir->ssa_rep->num_uses = 0;
+ mir->ssa_rep->num_defs = 0;
+ }
+ }
+ break;
+ case Instruction::GOTO:
+ case Instruction::GOTO_16:
+ case Instruction::GOTO_32:
+ case Instruction::IF_EQ:
+ case Instruction::IF_NE:
+ case Instruction::IF_LT:
+ case Instruction::IF_GE:
+ case Instruction::IF_GT:
+ case Instruction::IF_LE:
+ case Instruction::IF_EQZ:
+ case Instruction::IF_NEZ:
+ case Instruction::IF_LTZ:
+ case Instruction::IF_GEZ:
+ case Instruction::IF_GTZ:
+ case Instruction::IF_LEZ:
+ if (bb->taken->dominates_return) {
+ mir->optimization_flags |= MIR_IGNORE_SUSPEND_CHECK;
+ if (cu->verbose) {
+ LOG(INFO) << "Suppressed suspend check on branch to return at 0x" << std::hex << mir->offset;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ // Is this the select pattern?
+ // TODO: flesh out support for Mips and X86. NOTE: llvm's select op doesn't quite work here.
+ // TUNING: expand to support IF_xx compare & branches
+ if (!cu->gen_bitcode && (cu->instruction_set == kThumb2) &&
+ ((mir->dalvikInsn.opcode == Instruction::IF_EQZ) ||
+ (mir->dalvikInsn.opcode == Instruction::IF_NEZ))) {
+ BasicBlock* ft = bb->fall_through;
+ DCHECK(ft != NULL);
+ BasicBlock* ft_ft = ft->fall_through;
+ BasicBlock* ft_tk = ft->taken;
+
+ BasicBlock* tk = bb->taken;
+ DCHECK(tk != NULL);
+ BasicBlock* tk_ft = tk->fall_through;
+ BasicBlock* tk_tk = tk->taken;
+
+ /*
+ * In the select pattern, the taken edge goes to a block that unconditionally
+           * transfers to the rejoin block and the fall_through edge goes to a block that
+ * unconditionally falls through to the rejoin block.
+ */
+ if ((tk_ft == NULL) && (ft_tk == NULL) && (tk_tk == ft_ft) &&
+ (Predecessors(tk) == 1) && (Predecessors(ft) == 1)) {
+ /*
+ * Okay - we have the basic diamond shape. At the very least, we can eliminate the
+ * suspend check on the taken-taken branch back to the join point.
+ */
+ if (SelectKind(tk->last_mir_insn) == kSelectGoto) {
+ tk->last_mir_insn->optimization_flags |= (MIR_IGNORE_SUSPEND_CHECK);
+ }
+ // Are the block bodies something we can handle?
+ if ((ft->first_mir_insn == ft->last_mir_insn) &&
+ (tk->first_mir_insn != tk->last_mir_insn) &&
+ (tk->first_mir_insn->next == tk->last_mir_insn) &&
+ ((SelectKind(ft->first_mir_insn) == kSelectMove) ||
+ (SelectKind(ft->first_mir_insn) == kSelectConst)) &&
+ (SelectKind(ft->first_mir_insn) == SelectKind(tk->first_mir_insn)) &&
+ (SelectKind(tk->last_mir_insn) == kSelectGoto)) {
+ // Almost there. Are the instructions targeting the same vreg?
+ MIR* if_true = tk->first_mir_insn;
+ MIR* if_false = ft->first_mir_insn;
+ // It's possible that the target of the select isn't used - skip those (rare) cases.
+ MIR* phi = FindPhi(cu, tk_tk, if_true->ssa_rep->defs[0]);
+ if ((phi != NULL) && (if_true->dalvikInsn.vA == if_false->dalvikInsn.vA)) {
+ /*
+ * We'll convert the IF_EQZ/IF_NEZ to a SELECT. We need to find the
+ * Phi node in the merge block and delete it (while using the SSA name
+                 * of the merge as the target of the SELECT). Delete both taken and
+ * fallthrough blocks, and set fallthrough to merge block.
+ * NOTE: not updating other dataflow info (no longer used at this point).
+ * If this changes, need to update i_dom, etc. here (and in CombineBlocks).
+ */
+ if (opcode == Instruction::IF_NEZ) {
+ // Normalize.
+ MIR* tmp_mir = if_true;
+ if_true = if_false;
+ if_false = tmp_mir;
+ }
+ mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpSelect);
+ bool const_form = (SelectKind(if_true) == kSelectConst);
+ if ((SelectKind(if_true) == kSelectMove)) {
+ if (IsConst(cu, if_true->ssa_rep->uses[0]) &&
+ IsConst(cu, if_false->ssa_rep->uses[0])) {
+ const_form = true;
+ if_true->dalvikInsn.vB = ConstantValue(cu, if_true->ssa_rep->uses[0]);
+ if_false->dalvikInsn.vB = ConstantValue(cu, if_false->ssa_rep->uses[0]);
+ }
+ }
+ if (const_form) {
+ // "true" set val in vB
+ mir->dalvikInsn.vB = if_true->dalvikInsn.vB;
+ // "false" set val in vC
+ mir->dalvikInsn.vC = if_false->dalvikInsn.vB;
+ } else {
+ DCHECK_EQ(SelectKind(if_true), kSelectMove);
+ DCHECK_EQ(SelectKind(if_false), kSelectMove);
+ int* src_ssa = static_cast<int*>(NewMem(cu, sizeof(int) * 3, false,
+ kAllocDFInfo));
+ src_ssa[0] = mir->ssa_rep->uses[0];
+ src_ssa[1] = if_true->ssa_rep->uses[0];
+ src_ssa[2] = if_false->ssa_rep->uses[0];
+ mir->ssa_rep->uses = src_ssa;
+ mir->ssa_rep->num_uses = 3;
+ }
+ mir->ssa_rep->num_defs = 1;
+ mir->ssa_rep->defs = static_cast<int*>(NewMem(cu, sizeof(int) * 1, false,
+ kAllocDFInfo));
+ mir->ssa_rep->fp_def = static_cast<bool*>(NewMem(cu, sizeof(bool) * 1, false,
+ kAllocDFInfo));
+ mir->ssa_rep->fp_def[0] = if_true->ssa_rep->fp_def[0];
+ /*
+ * There is usually a Phi node in the join block for our two cases. If the
+ * Phi node only contains our two cases as input, we will use the result
+ * SSA name of the Phi node as our select result and delete the Phi. If
+ * the Phi node has more than two operands, we will arbitrarily use the SSA
+ * name of the "true" path, delete the SSA name of the "false" path from the
+ * Phi node (and fix up the incoming arc list).
+ */
+ if (phi->ssa_rep->num_uses == 2) {
+ mir->ssa_rep->defs[0] = phi->ssa_rep->defs[0];
+ phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
+ } else {
+ int dead_def = if_false->ssa_rep->defs[0];
+ int live_def = if_true->ssa_rep->defs[0];
+ mir->ssa_rep->defs[0] = live_def;
+ int* incoming = reinterpret_cast<int*>(phi->dalvikInsn.vB);
+ for (int i = 0; i < phi->ssa_rep->num_uses; i++) {
+ if (phi->ssa_rep->uses[i] == live_def) {
+ incoming[i] = bb->id;
+ }
+ }
+ for (int i = 0; i < phi->ssa_rep->num_uses; i++) {
+ if (phi->ssa_rep->uses[i] == dead_def) {
+ int last_slot = phi->ssa_rep->num_uses - 1;
+ phi->ssa_rep->uses[i] = phi->ssa_rep->uses[last_slot];
+ incoming[i] = incoming[last_slot];
+ }
+ }
+ }
+ phi->ssa_rep->num_uses--;
+ bb->taken = NULL;
+ tk->block_type = kDead;
+ for (MIR* tmir = ft->first_mir_insn; tmir != NULL; tmir = tmir->next) {
+ tmir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
+ }
+ }
+ }
+ }
+ }
+ }
+ bb = NextDominatedBlock(cu, bb);
+ }
+
+ if (num_temps > cu->num_compiler_temps) {
+ cu->num_compiler_temps = num_temps;
+ }
+ return true;
+}
+
+static bool NullCheckEliminationInit(struct CompilationUnit* cu, struct BasicBlock* bb)
+{
+ if (bb->data_flow_info == NULL) return false;
+ bb->data_flow_info->ending_null_check_v =
+ AllocBitVector(cu, cu->num_ssa_regs, false, kBitMapNullCheck);
+ ClearAllBits(bb->data_flow_info->ending_null_check_v);
+ return true;
+}
+
+/* Collect stats on number of checks removed */
+static bool CountChecks(struct CompilationUnit* cu, struct BasicBlock* bb)
+{
+ if (bb->data_flow_info == NULL) return false;
+ for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ if (mir->ssa_rep == NULL) {
+ continue;
+ }
+ int df_attributes = oat_data_flow_attributes[mir->dalvikInsn.opcode];
+ if (df_attributes & DF_HAS_NULL_CHKS) {
+ cu->checkstats->null_checks++;
+ if (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) {
+ cu->checkstats->null_checks_eliminated++;
+ }
+ }
+ if (df_attributes & DF_HAS_RANGE_CHKS) {
+ cu->checkstats->range_checks++;
+ if (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) {
+ cu->checkstats->range_checks_eliminated++;
+ }
+ }
+ }
+ return false;
+}
+
+/* Try to make common case the fallthrough path */
+static bool LayoutBlocks(struct CompilationUnit* cu, struct BasicBlock* bb)
+{
+ // TODO: For now, just looking for direct throws. Consider generalizing for profile feedback
+ if (!bb->explicit_throw) {
+ return false;
+ }
+ BasicBlock* walker = bb;
+ while (true) {
+ // Check termination conditions
+ if ((walker->block_type == kEntryBlock) || (Predecessors(walker) != 1)) {
+ break;
+ }
+ BasicBlock* prev = GET_ELEM_N(walker->predecessors, BasicBlock*, 0);
+ if (prev->conditional_branch) {
+ if (prev->fall_through == walker) {
+ // Already done - return
+ break;
+ }
+ DCHECK_EQ(walker, prev->taken);
+ // Got one. Flip it and exit
+ Instruction::Code opcode = prev->last_mir_insn->dalvikInsn.opcode;
+ switch (opcode) {
+ case Instruction::IF_EQ: opcode = Instruction::IF_NE; break;
+ case Instruction::IF_NE: opcode = Instruction::IF_EQ; break;
+ case Instruction::IF_LT: opcode = Instruction::IF_GE; break;
+ case Instruction::IF_GE: opcode = Instruction::IF_LT; break;
+ case Instruction::IF_GT: opcode = Instruction::IF_LE; break;
+ case Instruction::IF_LE: opcode = Instruction::IF_GT; break;
+ case Instruction::IF_EQZ: opcode = Instruction::IF_NEZ; break;
+ case Instruction::IF_NEZ: opcode = Instruction::IF_EQZ; break;
+ case Instruction::IF_LTZ: opcode = Instruction::IF_GEZ; break;
+ case Instruction::IF_GEZ: opcode = Instruction::IF_LTZ; break;
+ case Instruction::IF_GTZ: opcode = Instruction::IF_LEZ; break;
+ case Instruction::IF_LEZ: opcode = Instruction::IF_GTZ; break;
+ default: LOG(FATAL) << "Unexpected opcode " << opcode;
+ }
+ prev->last_mir_insn->dalvikInsn.opcode = opcode;
+ BasicBlock* t_bb = prev->taken;
+ prev->taken = prev->fall_through;
+ prev->fall_through = t_bb;
+ break;
+ }
+ walker = prev;
+ }
+ return false;
+}
+
+/* Combine any basic blocks terminated by instructions that we now know can't throw */
+static bool CombineBlocks(struct CompilationUnit* cu, struct BasicBlock* bb)
+{
+ // Loop here to allow combining a sequence of blocks
+ while (true) {
+ // Check termination conditions
+ if ((bb->first_mir_insn == NULL)
+ || (bb->data_flow_info == NULL)
+ || (bb->block_type == kExceptionHandling)
+ || (bb->block_type == kExitBlock)
+ || (bb->block_type == kDead)
+ || ((bb->taken == NULL) || (bb->taken->block_type != kExceptionHandling))
+ || (bb->successor_block_list.block_list_type != kNotUsed)
+ || (static_cast<int>(bb->last_mir_insn->dalvikInsn.opcode) != kMirOpCheck)) {
+ break;
+ }
+
+ // Test the kMirOpCheck instruction
+ MIR* mir = bb->last_mir_insn;
+ // Grab the attributes from the paired opcode
+ MIR* throw_insn = mir->meta.throw_insn;
+ int df_attributes = oat_data_flow_attributes[throw_insn->dalvikInsn.opcode];
+ bool can_combine = true;
+ if (df_attributes & DF_HAS_NULL_CHKS) {
+ can_combine &= ((throw_insn->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0);
+ }
+ if (df_attributes & DF_HAS_RANGE_CHKS) {
+ can_combine &= ((throw_insn->optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0);
+ }
+ if (!can_combine) {
+ break;
+ }
+ // OK - got one. Combine
+ BasicBlock* bb_next = bb->fall_through;
+ DCHECK(!bb_next->catch_entry);
+ DCHECK_EQ(Predecessors(bb_next), 1U);
+ MIR* t_mir = bb->last_mir_insn->prev;
+    // Overwrite the kMirOpCheck insn with the paired opcode
+ DCHECK_EQ(bb_next->first_mir_insn, throw_insn);
+ *bb->last_mir_insn = *throw_insn;
+ bb->last_mir_insn->prev = t_mir;
+ // Use the successor info from the next block
+ bb->successor_block_list = bb_next->successor_block_list;
+ // Use the ending block linkage from the next block
+ bb->fall_through = bb_next->fall_through;
+ bb->taken->block_type = kDead; // Kill the unused exception block
+ bb->taken = bb_next->taken;
+ // Include the rest of the instructions
+ bb->last_mir_insn = bb_next->last_mir_insn;
+ /*
+ * If lower-half of pair of blocks to combine contained a return, move the flag
+ * to the newly combined block.
+ */
+ bb->terminated_by_return = bb_next->terminated_by_return;
+
+ /*
+ * NOTE: we aren't updating all dataflow info here. Should either make sure this pass
+ * happens after uses of i_dominated, dom_frontier or update the dataflow info here.
+ */
+
+ // Kill bb_next and remap now-dead id to parent
+ bb_next->block_type = kDead;
+ cu->block_id_map.Overwrite(bb_next->id, bb->id);
+
+ // Now, loop back and see if we can keep going
+ }
+ return false;
+}
+
+/* Eliminate unnecessary null checks for a basic block. */
+static bool EliminateNullChecks(struct CompilationUnit* cu, struct BasicBlock* bb)
+{
+ if (bb->data_flow_info == NULL) return false;
+
+ /*
+ * Set initial state. Be conservative with catch
+ * blocks and start with no assumptions about null check
+ * status (except for "this").
+ */
+ if ((bb->block_type == kEntryBlock) | bb->catch_entry) {
+ ClearAllBits(cu->temp_ssa_register_v);
+ if ((cu->access_flags & kAccStatic) == 0) {
+ // If non-static method, mark "this" as non-null
+ int this_reg = cu->num_dalvik_registers - cu->num_ins;
+ SetBit(cu, cu->temp_ssa_register_v, this_reg);
+ }
+ } else {
+    // Starting state is the intersection of all incoming arcs
+ GrowableListIterator iter;
+ GrowableListIteratorInit(bb->predecessors, &iter);
+ BasicBlock* pred_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
+ DCHECK(pred_bb != NULL);
+ CopyBitVector(cu->temp_ssa_register_v,
+ pred_bb->data_flow_info->ending_null_check_v);
+ while (true) {
+ pred_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
+ if (!pred_bb) break;
+ if ((pred_bb->data_flow_info == NULL) ||
+ (pred_bb->data_flow_info->ending_null_check_v == NULL)) {
+ continue;
+ }
+ IntersectBitVectors(cu->temp_ssa_register_v,
+ cu->temp_ssa_register_v,
+ pred_bb->data_flow_info->ending_null_check_v);
+ }
+ }
+
+  // Walk through the instructions in the block, updating as necessary
+ for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ if (mir->ssa_rep == NULL) {
+ continue;
+ }
+ int df_attributes = oat_data_flow_attributes[mir->dalvikInsn.opcode];
+
+ // Mark target of NEW* as non-null
+ if (df_attributes & DF_NON_NULL_DST) {
+ SetBit(cu, cu->temp_ssa_register_v, mir->ssa_rep->defs[0]);
+ }
+
+ // Mark non-null returns from invoke-style NEW*
+ if (df_attributes & DF_NON_NULL_RET) {
+ MIR* next_mir = mir->next;
+      // Next should be a MOVE_RESULT_OBJECT
+ if (next_mir &&
+ next_mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
+ // Mark as null checked
+ SetBit(cu, cu->temp_ssa_register_v, next_mir->ssa_rep->defs[0]);
+ } else {
+ if (next_mir) {
+ LOG(WARNING) << "Unexpected opcode following new: " << next_mir->dalvikInsn.opcode;
+ } else if (bb->fall_through) {
+ // Look in next basic block
+ struct BasicBlock* next_bb = bb->fall_through;
+ for (MIR* tmir = next_bb->first_mir_insn; tmir != NULL;
+               tmir = tmir->next) {
+ if (static_cast<int>(tmir->dalvikInsn.opcode) >= static_cast<int>(kMirOpFirst)) {
+ continue;
+ }
+ // First non-pseudo should be MOVE_RESULT_OBJECT
+ if (tmir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
+ // Mark as null checked
+ SetBit(cu, cu->temp_ssa_register_v, tmir->ssa_rep->defs[0]);
+ } else {
+ LOG(WARNING) << "Unexpected op after new: " << tmir->dalvikInsn.opcode;
+ }
+ break;
+ }
+ }
+ }
+ }
+
+ /*
+ * Propagate nullcheck state on register copies (including
+     * Phi pseudo copies). For the latter, nullcheck state is
+ * the "and" of all the Phi's operands.
+ */
+ if (df_attributes & (DF_NULL_TRANSFER_0 | DF_NULL_TRANSFER_N)) {
+ int tgt_sreg = mir->ssa_rep->defs[0];
+ int operands = (df_attributes & DF_NULL_TRANSFER_0) ? 1 :
+ mir->ssa_rep->num_uses;
+ bool null_checked = true;
+ for (int i = 0; i < operands; i++) {
+ null_checked &= IsBitSet(cu->temp_ssa_register_v,
+ mir->ssa_rep->uses[i]);
+ }
+ if (null_checked) {
+ SetBit(cu, cu->temp_ssa_register_v, tgt_sreg);
+ }
+ }
+
+ // Already nullchecked?
+ if ((df_attributes & DF_HAS_NULL_CHKS) && !(mir->optimization_flags & MIR_IGNORE_NULL_CHECK)) {
+ int src_idx;
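+      // Select which use is the object reference being null checked.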
+ if (df_attributes & DF_NULL_CHK_1) {
+ src_idx = 1;
+ } else if (df_attributes & DF_NULL_CHK_2) {
+ src_idx = 2;
+ } else {
+ src_idx = 0;
+ }
+ int src_sreg = mir->ssa_rep->uses[src_idx];
+ if (IsBitSet(cu->temp_ssa_register_v, src_sreg)) {
+ // Eliminate the null check
+ mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
+ } else {
+ // Mark s_reg as null-checked
+ SetBit(cu, cu->temp_ssa_register_v, src_sreg);
+ }
+ }
+ }
+
+ // Did anything change?
+ bool res = CompareBitVectors(bb->data_flow_info->ending_null_check_v,
+ cu->temp_ssa_register_v);
+ if (res) {
+ CopyBitVector(bb->data_flow_info->ending_null_check_v,
+ cu->temp_ssa_register_v);
+ }
+ return res;
+}
+
+void NullCheckElimination(CompilationUnit *cu)
+{
+ if (!(cu->disable_opt & (1 << kNullCheckElimination))) {
+ DCHECK(cu->temp_ssa_register_v != NULL);
+ DataFlowAnalysisDispatcher(cu, NullCheckEliminationInit, kAllNodes,
+ false /* is_iterative */);
+ DataFlowAnalysisDispatcher(cu, EliminateNullChecks,
+ kPreOrderDFSTraversal,
+ true /* is_iterative */);
+ }
+}
+
+void BasicBlockCombine(CompilationUnit* cu)
+{
+ DataFlowAnalysisDispatcher(cu, CombineBlocks, kPreOrderDFSTraversal, false);
+}
+
+void CodeLayout(CompilationUnit* cu)
+{
+ DataFlowAnalysisDispatcher(cu, LayoutBlocks, kAllNodes, false);
+}
+
+void DumpCheckStats(CompilationUnit *cu)
+{
+ Checkstats* stats =
+ static_cast<Checkstats*>(NewMem(cu, sizeof(Checkstats), true, kAllocDFInfo));
+ cu->checkstats = stats;
+ DataFlowAnalysisDispatcher(cu, CountChecks, kAllNodes, false /* is_iterative */);
+ if (stats->null_checks > 0) {
+ float eliminated = static_cast<float>(stats->null_checks_eliminated);
+ float checks = static_cast<float>(stats->null_checks);
+ LOG(INFO) << "Null Checks: " << PrettyMethod(cu->method_idx, *cu->dex_file) << " "
+ << stats->null_checks_eliminated << " of " << stats->null_checks << " -> "
+ << (eliminated/checks) * 100.0 << "%";
+ }
+ if (stats->range_checks > 0) {
+ float eliminated = static_cast<float>(stats->range_checks_eliminated);
+ float checks = static_cast<float>(stats->range_checks);
+ LOG(INFO) << "Range Checks: " << PrettyMethod(cu->method_idx, *cu->dex_file) << " "
+ << stats->range_checks_eliminated << " of " << stats->range_checks << " -> "
+ << (eliminated/checks) * 100.0 << "%";
+ }
+}
+
+bool BuildExtendedBBList(struct CompilationUnit* cu, struct BasicBlock* bb)
+{
+ if (bb->visited) return false;
+ if (!((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
+ || (bb->block_type == kExitBlock))) {
+ // Ignore special blocks
+ bb->visited = true;
+ return false;
+ }
+ // Must be head of extended basic block.
+ BasicBlock* start_bb = bb;
+ cu->extended_basic_blocks.push_back(bb);
+ bool terminated_by_return = false;
+ // Visit blocks strictly dominated by this head.
+ while (bb != NULL) {
+ bb->visited = true;
+ terminated_by_return |= bb->terminated_by_return;
+ bb = NextDominatedBlock(cu, bb);
+ }
+ if (terminated_by_return) {
+ // This extended basic block contains a return, so mark all members.
+ bb = start_bb;
+ while (bb != NULL) {
+ bb->dominates_return = true;
+ bb = NextDominatedBlock(cu, bb);
+ }
+ }
+ return false; // Not iterative - return value will be ignored
+}
+
+void BasicBlockOptimization(CompilationUnit *cu)
+{
+ if (!(cu->disable_opt & (1 << kBBOpt))) {
+ CompilerInitGrowableList(cu, &cu->compiler_temps, 6, kListMisc);
+ DCHECK_EQ(cu->num_compiler_temps, 0);
+ // Mark all blocks as not visited
+ DataFlowAnalysisDispatcher(cu, ClearVisitedFlag,
+ kAllNodes, false /* is_iterative */);
+ DataFlowAnalysisDispatcher(cu, BuildExtendedBBList,
+ kPreOrderDFSTraversal,
+ false /* is_iterative */);
+ // Perform extended basic block optimizations.
+ for (unsigned int i = 0; i < cu->extended_basic_blocks.size(); i++) {
+ BasicBlockOpt(cu, cu->extended_basic_blocks[i]);
+ }
+ }
+}
+
+static void AddLoopHeader(CompilationUnit* cu, BasicBlock* header,
+ BasicBlock* back_edge)
+{
+ GrowableListIterator iter;
+ GrowableListIteratorInit(&cu->loop_headers, &iter);
+ for (LoopInfo* loop = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter));
+ (loop != NULL); loop = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter))) {
+ if (loop->header == header) {
+ InsertGrowableList(cu, &loop->incoming_back_edges,
+ reinterpret_cast<uintptr_t>(back_edge));
+ return;
+ }
+ }
+ LoopInfo* info = static_cast<LoopInfo*>(NewMem(cu, sizeof(LoopInfo), true, kAllocDFInfo));
+ info->header = header;
+ CompilerInitGrowableList(cu, &info->incoming_back_edges, 2, kListMisc);
+ InsertGrowableList(cu, &info->incoming_back_edges, reinterpret_cast<uintptr_t>(back_edge));
+ InsertGrowableList(cu, &cu->loop_headers, reinterpret_cast<uintptr_t>(info));
+}
+
+static bool FindBackEdges(struct CompilationUnit* cu, struct BasicBlock* bb)
+{
+ if ((bb->data_flow_info == NULL) || (bb->last_mir_insn == NULL)) {
+ return false;
+ }
+ Instruction::Code opcode = bb->last_mir_insn->dalvikInsn.opcode;
+ if (Instruction::FlagsOf(opcode) & Instruction::kBranch) {
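+    // A back edge branches to an earlier offset whose target dominates this block.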
+ if (bb->taken && (bb->taken->start_offset <= bb->start_offset)) {
+ DCHECK(bb->dominators != NULL);
+ if (IsBitSet(bb->dominators, bb->taken->id)) {
+ if (cu->verbose) {
+ LOG(INFO) << "Loop backedge from 0x"
+ << std::hex << bb->last_mir_insn->offset
+ << " to 0x" << std::hex << bb->taken->start_offset;
+ }
+ AddLoopHeader(cu, bb->taken, bb);
+ }
+ }
+ }
+ return false;
+}
+
+static void AddBlocksToLoop(CompilationUnit* cu, ArenaBitVector* blocks,
+ BasicBlock* bb, int head_id)
+{
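+  // Recursively walk predecessors from the back edge, stopping at blocks that
+  // are not dominated by the loop header or are already in the loop.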
+ if (!IsBitSet(bb->dominators, head_id) ||
+ IsBitSet(blocks, bb->id)) {
+ return;
+ }
+ SetBit(cu, blocks, bb->id);
+ GrowableListIterator iter;
+ GrowableListIteratorInit(bb->predecessors, &iter);
+ BasicBlock* pred_bb;
+ for (pred_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter)); pred_bb != NULL;
+ pred_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter))) {
+ AddBlocksToLoop(cu, blocks, pred_bb, head_id);
+ }
+}
+
+static void DumpLoops(CompilationUnit *cu)
+{
+ GrowableListIterator iter;
+ GrowableListIteratorInit(&cu->loop_headers, &iter);
+ for (LoopInfo* loop = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter));
+ (loop != NULL); loop = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter))) {
+ LOG(INFO) << "Loop head block id " << loop->header->id
+ << ", offset 0x" << std::hex << loop->header->start_offset
+ << ", Depth: " << loop->header->nesting_depth;
+ GrowableListIterator iter;
+ GrowableListIteratorInit(&loop->incoming_back_edges, &iter);
+ BasicBlock* edge_bb;
+ for (edge_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter)); edge_bb != NULL;
+ edge_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter))) {
+ LOG(INFO) << " Backedge block id " << edge_bb->id
+ << ", offset 0x" << std::hex << edge_bb->start_offset;
+ ArenaBitVectorIterator b_iter;
+ BitVectorIteratorInit(loop->blocks, &b_iter);
+ for (int bb_id = BitVectorIteratorNext(&b_iter); bb_id != -1;
+ bb_id = BitVectorIteratorNext(&b_iter)) {
+ BasicBlock *bb;
+ bb = reinterpret_cast<BasicBlock*>(GrowableListGetElement(&cu->block_list, bb_id));
+ LOG(INFO) << " (" << bb->id << ", 0x" << std::hex
+ << bb->start_offset << ")";
+ }
+ }
+ }
+}
+
+void LoopDetection(CompilationUnit *cu)
+{
+ if (cu->disable_opt & (1 << kPromoteRegs)) {
+ return;
+ }
+ CompilerInitGrowableList(cu, &cu->loop_headers, 6, kListMisc);
+ // Find the loop headers
+ DataFlowAnalysisDispatcher(cu, FindBackEdges, kAllNodes, false /* is_iterative */);
+ GrowableListIterator iter;
+ GrowableListIteratorInit(&cu->loop_headers, &iter);
+ // Add blocks to each header
+ for (LoopInfo* loop = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter));
+ loop != NULL; loop = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter))) {
+ loop->blocks = AllocBitVector(cu, cu->num_blocks, true,
+ kBitMapMisc);
+ SetBit(cu, loop->blocks, loop->header->id);
+ GrowableListIterator iter;
+ GrowableListIteratorInit(&loop->incoming_back_edges, &iter);
+ BasicBlock* edge_bb;
+ for (edge_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter)); edge_bb != NULL;
+ edge_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter))) {
+ AddBlocksToLoop(cu, loop->blocks, edge_bb, loop->header->id);
+ }
+ }
+ // Compute the nesting depth of each header
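+  // (i.e. the number of loop block sets, including its own, that contain the header).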
+ GrowableListIteratorInit(&cu->loop_headers, &iter);
+ for (LoopInfo* loop = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter));
+ loop != NULL; loop = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter))) {
+ GrowableListIterator iter2;
+ GrowableListIteratorInit(&cu->loop_headers, &iter2);
+ LoopInfo* loop2;
+ for (loop2 = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter2));
+ loop2 != NULL; loop2 = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter2))) {
+ if (IsBitSet(loop2->blocks, loop->header->id)) {
+ loop->header->nesting_depth++;
+ }
+ }
+ }
+ // Assign nesting depth to each block in all loops
+ GrowableListIteratorInit(&cu->loop_headers, &iter);
+ for (LoopInfo* loop = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter));
+ (loop != NULL); loop = reinterpret_cast<LoopInfo*>(GrowableListIteratorNext(&iter))) {
+ ArenaBitVectorIterator b_iter;
+ BitVectorIteratorInit(loop->blocks, &b_iter);
+ for (int bb_id = BitVectorIteratorNext(&b_iter); bb_id != -1;
+ bb_id = BitVectorIteratorNext(&b_iter)) {
+ BasicBlock *bb;
+ bb = reinterpret_cast<BasicBlock*>(GrowableListGetElement(&cu->block_list, bb_id));
+ bb->nesting_depth = std::max(bb->nesting_depth,
+ loop->header->nesting_depth);
+ }
+ }
+ if (cu->verbose) {
+ DumpLoops(cu);
+ }
+}
+
+/*
+ * This function will make a best guess at whether the invoke will
+ * end up using Method*. It isn't critical to get it exactly right,
+ * and attempting to do so would involve more complexity than it's
+ * worth.
+ */
+static bool InvokeUsesMethodStar(CompilationUnit* cu, MIR* mir)
+{
+ InvokeType type;
+ Instruction::Code opcode = mir->dalvikInsn.opcode;
+ switch (opcode) {
+ case Instruction::INVOKE_STATIC:
+ case Instruction::INVOKE_STATIC_RANGE:
+ type = kStatic;
+ break;
+ case Instruction::INVOKE_DIRECT:
+ case Instruction::INVOKE_DIRECT_RANGE:
+ type = kDirect;
+ break;
+ case Instruction::INVOKE_VIRTUAL:
+ case Instruction::INVOKE_VIRTUAL_RANGE:
+ type = kVirtual;
+ break;
+ case Instruction::INVOKE_INTERFACE:
+ case Instruction::INVOKE_INTERFACE_RANGE:
+ return false;
+ case Instruction::INVOKE_SUPER_RANGE:
+ case Instruction::INVOKE_SUPER:
+ type = kSuper;
+ break;
+ default:
+ LOG(WARNING) << "Unexpected invoke op: " << opcode;
+ return false;
+ }
+ OatCompilationUnit m_unit(cu->class_loader, cu->class_linker,
+ *cu->dex_file, cu->code_item,
+ cu->class_def_idx, cu->method_idx,
+ cu->access_flags);
+  // TODO: add a flag so we don't count the stats for this twice
+ uint32_t dex_method_idx = mir->dalvikInsn.vB;
+ int vtable_idx;
+ uintptr_t direct_code;
+ uintptr_t direct_method;
+ bool fast_path =
+ cu->compiler->ComputeInvokeInfo(dex_method_idx, &m_unit, type,
+ vtable_idx, direct_code,
+ direct_method) &&
+ !SLOW_INVOKE_PATH;
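+  // Method* is only reported as used for direct/static fast-path invokes where
+  // the direct code or the direct method pointer is still unknown.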
+ return (((type == kDirect) || (type == kStatic)) &&
+ fast_path && ((direct_code == 0) || (direct_method == 0)));
+}
+
+/*
+ * Count uses, weighting by loop nesting depth. This code only
+ * counts explicitly used s_regs. A later phase will add implicit
+ * counts for things such as Method*, null-checked references, etc.
+ */
+static bool CountUses(struct CompilationUnit* cu, struct BasicBlock* bb)
+{
+ if (bb->block_type != kDalvikByteCode) {
+ return false;
+ }
+ for (MIR* mir = bb->first_mir_insn; (mir != NULL); mir = mir->next) {
+ if (mir->ssa_rep == NULL) {
+ continue;
+ }
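+    // Each use adds 2^min(nesting_depth, 16) to the weighted count.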
+ uint32_t weight = std::min(16U, static_cast<uint32_t>(bb->nesting_depth));
+ for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
+ int s_reg = mir->ssa_rep->uses[i];
+ DCHECK_LT(s_reg, static_cast<int>(cu->use_counts.num_used));
+ cu->raw_use_counts.elem_list[s_reg]++;
+ cu->use_counts.elem_list[s_reg] += (1 << weight);
+ }
+ if (!(cu->disable_opt & (1 << kPromoteCompilerTemps))) {
+ int df_attributes = oat_data_flow_attributes[mir->dalvikInsn.opcode];
+      // Implicit use of Method*?
+ if (df_attributes & DF_UMS) {
+ /*
+ * Some invokes will not use Method* - need to perform test similar
+ * to that found in GenInvoke() to decide whether to count refs
+ * for Method* on invoke-class opcodes.
+ * TODO: refactor for common test here, save results for GenInvoke
+ */
+ int uses_method_star = true;
+ if ((df_attributes & (DF_FORMAT_35C | DF_FORMAT_3RC)) &&
+ !(df_attributes & DF_NON_NULL_RET)) {
+ uses_method_star &= InvokeUsesMethodStar(cu, mir);
+ }
+ if (uses_method_star) {
+ cu->raw_use_counts.elem_list[cu->method_sreg]++;
+ cu->use_counts.elem_list[cu->method_sreg] += (1 << weight);
+ }
+ }
+ }
+ }
+ return false;
+}
+
+void MethodUseCount(CompilationUnit *cu)
+{
+ CompilerInitGrowableList(cu, &cu->use_counts, cu->num_ssa_regs + 32, kListMisc);
+ CompilerInitGrowableList(cu, &cu->raw_use_counts, cu->num_ssa_regs + 32, kListMisc);
+ // Initialize list
+ for (int i = 0; i < cu->num_ssa_regs; i++) {
+ InsertGrowableList(cu, &cu->use_counts, 0);
+ InsertGrowableList(cu, &cu->raw_use_counts, 0);
+ }
+ if (cu->disable_opt & (1 << kPromoteRegs)) {
+ return;
+ }
+ DataFlowAnalysisDispatcher(cu, CountUses,
+ kAllNodes, false /* is_iterative */);
+}
+
+} // namespace art
diff --git a/src/compiler/dex/dataflow.h b/src/compiler/dex/dataflow.h
new file mode 100644
index 0000000..5bf97ec
--- /dev/null
+++ b/src/compiler/dex/dataflow.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_DATAFLOW_H_
+#define ART_SRC_COMPILER_DEX_DATAFLOW_H_
+
+#include "compiler_internals.h"
+
+namespace art {
+
+enum DataFlowAttributePos {
+ kUA = 0,
+ kUB,
+ kUC,
+ kAWide,
+ kBWide,
+ kCWide,
+ kDA,
+ kIsMove,
+ kSetsConst,
+ kFormat35c,
+ kFormat3rc,
+ kNullCheckSrc0, // Null check of uses[0].
+ kNullCheckSrc1, // Null check of uses[1].
+ kNullCheckSrc2, // Null check of uses[2].
+  kNullCheckOut0,     // Null check of outgoing arg0.
+ kDstNonNull, // May assume dst is non-null.
+ kRetNonNull, // May assume retval is non-null.
+ kNullTransferSrc0, // Object copy src[0] -> dst.
+ kNullTransferSrcN, // Phi null check state transfer.
+ kRangeCheckSrc1, // Range check of uses[1].
+ kRangeCheckSrc2, // Range check of uses[2].
+ kRangeCheckSrc3, // Range check of uses[3].
+ kFPA,
+ kFPB,
+ kFPC,
+ kCoreA,
+ kCoreB,
+ kCoreC,
+ kRefA,
+ kRefB,
+ kRefC,
+ kUsesMethodStar, // Implicit use of Method*.
+};
+
+#define DF_NOP 0
+#define DF_UA (1 << kUA)
+#define DF_UB (1 << kUB)
+#define DF_UC (1 << kUC)
+#define DF_A_WIDE (1 << kAWide)
+#define DF_B_WIDE (1 << kBWide)
+#define DF_C_WIDE (1 << kCWide)
+#define DF_DA (1 << kDA)
+#define DF_IS_MOVE (1 << kIsMove)
+#define DF_SETS_CONST (1 << kSetsConst)
+#define DF_FORMAT_35C (1 << kFormat35c)
+#define DF_FORMAT_3RC (1 << kFormat3rc)
+#define DF_NULL_CHK_0 (1 << kNullCheckSrc0)
+#define DF_NULL_CHK_1 (1 << kNullCheckSrc1)
+#define DF_NULL_CHK_2 (1 << kNullCheckSrc2)
+#define DF_NULL_CHK_OUT0 (1 << kNullCheckOut0)
+#define DF_NON_NULL_DST (1 << kDstNonNull)
+#define DF_NON_NULL_RET (1 << kRetNonNull)
+#define DF_NULL_TRANSFER_0 (1 << kNullTransferSrc0)
+#define DF_NULL_TRANSFER_N (1 << kNullTransferSrcN)
+#define DF_RANGE_CHK_1 (1 << kRangeCheckSrc1)
+#define DF_RANGE_CHK_2 (1 << kRangeCheckSrc2)
+#define DF_RANGE_CHK_3 (1 << kRangeCheckSrc3)
+#define DF_FP_A (1 << kFPA)
+#define DF_FP_B (1 << kFPB)
+#define DF_FP_C (1 << kFPC)
+#define DF_CORE_A (1 << kCoreA)
+#define DF_CORE_B (1 << kCoreB)
+#define DF_CORE_C (1 << kCoreC)
+#define DF_REF_A (1 << kRefA)
+#define DF_REF_B (1 << kRefB)
+#define DF_REF_C (1 << kRefC)
+#define DF_UMS (1 << kUsesMethodStar)
+
+#define DF_HAS_USES (DF_UA | DF_UB | DF_UC)
+
+#define DF_HAS_DEFS (DF_DA)
+
+#define DF_HAS_NULL_CHKS (DF_NULL_CHK_0 | \
+ DF_NULL_CHK_1 | \
+ DF_NULL_CHK_2 | \
+ DF_NULL_CHK_OUT0)
+
+#define DF_HAS_RANGE_CHKS (DF_RANGE_CHK_1 | \
+ DF_RANGE_CHK_2 | \
+ DF_RANGE_CHK_3)
+
+#define DF_HAS_NR_CHKS (DF_HAS_NULL_CHKS | \
+ DF_HAS_RANGE_CHKS)
+
+#define DF_A_IS_REG (DF_UA | DF_DA)
+#define DF_B_IS_REG (DF_UB)
+#define DF_C_IS_REG (DF_UC)
+#define DF_IS_GETTER_OR_SETTER (DF_IS_GETTER | DF_IS_SETTER)
+#define DF_USES_FP (DF_FP_A | DF_FP_B | DF_FP_C)
+
+extern const int oat_data_flow_attributes[kMirOpLast];
+
+struct BasicBlockDataFlow {
+ ArenaBitVector* use_v;
+ ArenaBitVector* def_v;
+ ArenaBitVector* live_in_v;
+ ArenaBitVector* phi_v;
+ int* vreg_to_ssa_map;
+ ArenaBitVector* ending_null_check_v;
+};
+
+struct SSARepresentation {
+ int num_uses;
+ int* uses;
+ bool* fp_use;
+ int num_defs;
+ int* defs;
+ bool* fp_def;
+};
+
+/*
+ * An induction variable is represented by "m*i + c", where i is a basic
+ * induction variable.
+ */
+struct InductionVariableInfo {
+ int ssa_reg;
+ int basic_ssa_reg;
+ int m; // multiplier
+ int c; // constant
+ int inc; // loop increment
+};
+
+struct ArrayAccessInfo {
+ int array_reg;
+ int iv_reg;
+ int max_c; // For DIV - will affect upper bound checking.
+ int min_c; // For DIV - will affect lower bound checking.
+};
+
+struct LoopInfo {
+ BasicBlock* header;
+ GrowableList incoming_back_edges;
+ ArenaBitVector* blocks;
+};
+
+static inline unsigned int Predecessors(BasicBlock* bb) { return bb->predecessors->num_used; }
+
+int SRegToVReg(const CompilationUnit* cu, int ssa_reg);
+char* GetDalvikDisassembly(CompilationUnit* cu, const MIR* mir);
+bool FindLocalLiveIn(CompilationUnit* cu, BasicBlock* bb);
+bool DoSSAConversion(CompilationUnit* cu, BasicBlock* bb);
+bool DoConstantPropogation(CompilationUnit* cu, BasicBlock* bb);
+void CompilerInitializeSSAConversion(CompilationUnit* cu);
+bool ClearVisitedFlag(struct CompilationUnit* cu, struct BasicBlock* bb);
+void DataFlowAnalysisDispatcher(CompilationUnit* cu, bool (*func)(CompilationUnit*, BasicBlock*),
+ DataFlowAnalysisMode dfa_mode, bool is_iterative);
+MIR* FindMoveResult(CompilationUnit* cu, BasicBlock* bb, MIR* mir);
+void NullCheckElimination(CompilationUnit *cu);
+void BasicBlockCombine(CompilationUnit* cu);
+void CodeLayout(CompilationUnit* cu);
+void DumpCheckStats(CompilationUnit *cu);
+void BasicBlockOptimization(CompilationUnit *cu);
+void LoopDetection(CompilationUnit *cu);
+void MethodUseCount(CompilationUnit *cu);
+
+} // namespace art
+
+#endif // ART_SRC_COMPILER_DEX_DATAFLOW_H_
diff --git a/src/compiler/dex/frontend.cc b/src/compiler/dex/frontend.cc
new file mode 100644
index 0000000..498600f
--- /dev/null
+++ b/src/compiler/dex/frontend.cc
@@ -0,0 +1,1253 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compiler.h"
+#include "compiler_internals.h"
+#include "dataflow.h"
+#include "ssa_transformation.h"
+#include "leb128.h"
+#include "mirror/object.h"
+#include "runtime.h"
+#include "quick/codegen_util.h"
+#include "portable/mir_to_gbc.h"
+#include "quick/mir_to_lir.h"
+
+#include <llvm/Support/Threading.h>
+
+namespace {
+#if !defined(ART_USE_PORTABLE_COMPILER)
+ pthread_once_t llvm_multi_init = PTHREAD_ONCE_INIT;
+#endif
+ void InitializeLLVMForQuick() {
+ llvm::llvm_start_multithreaded();
+ }
+}
+
+namespace art {
+
+LLVMInfo::LLVMInfo() {
+#if !defined(ART_USE_PORTABLE_COMPILER)
+ pthread_once(&llvm_multi_init, InitializeLLVMForQuick);
+#endif
+ // Create context, module, intrinsic helper & ir builder
+ llvm_context_.reset(new llvm::LLVMContext());
+ llvm_module_ = new llvm::Module("art", *llvm_context_);
+ llvm::StructType::create(*llvm_context_, "JavaObject");
+ intrinsic_helper_.reset( new greenland::IntrinsicHelper(*llvm_context_, *llvm_module_));
+ ir_builder_.reset(new greenland::IRBuilder(*llvm_context_, *llvm_module_, *intrinsic_helper_));
+}
+
+LLVMInfo::~LLVMInfo() {
+}
+
+extern "C" void ArtInitQuickCompilerContext(art::Compiler& compiler) {
+ CHECK(compiler.GetCompilerContext() == NULL);
+ LLVMInfo* llvm_info = new LLVMInfo();
+ compiler.SetCompilerContext(llvm_info);
+}
+
+extern "C" void ArtUnInitQuickCompilerContext(art::Compiler& compiler) {
+ delete reinterpret_cast<LLVMInfo*>(compiler.GetCompilerContext());
+ compiler.SetCompilerContext(NULL);
+}
+
+/* Default optimizer/debug setting for the compiler. */
+static uint32_t kCompilerOptimizerDisableFlags = 0 | // Disable specific optimizations
+ (1 << kLoadStoreElimination) |
+ //(1 << kLoadHoisting) |
+ //(1 << kSuppressLoads) |
+ //(1 << kNullCheckElimination) |
+ //(1 << kPromoteRegs) |
+ //(1 << kTrackLiveTemps) |
+ (1 << kSkipLargeMethodOptimization) |
+ //(1 << kSafeOptimizations) |
+ //(1 << kBBOpt) |
+ //(1 << kMatch) |
+ //(1 << kPromoteCompilerTemps) |
+ 0;
+
+static uint32_t kCompilerDebugFlags = 0 | // Enable debug/testing modes
+ //(1 << kDebugDisplayMissingTargets) |
+ //(1 << kDebugVerbose) |
+ //(1 << kDebugDumpCFG) |
+ //(1 << kDebugSlowFieldPath) |
+ //(1 << kDebugSlowInvokePath) |
+ //(1 << kDebugSlowStringPath) |
+ //(1 << kDebugSlowestFieldPath) |
+ //(1 << kDebugSlowestStringPath) |
+ //(1 << kDebugExerciseResolveMethod) |
+ //(1 << kDebugVerifyDataflow) |
+ //(1 << kDebugShowMemoryUsage) |
+ //(1 << kDebugShowNops) |
+ //(1 << kDebugCountOpcodes) |
+ //(1 << kDebugDumpCheckStats) |
+ //(1 << kDebugDumpBitcodeFile) |
+ //(1 << kDebugVerifyBitcode) |
+ 0;
+
+static bool ContentIsInsn(const uint16_t* code_ptr) {
+ uint16_t instr = *code_ptr;
+ Instruction::Code opcode = static_cast<Instruction::Code>(instr & 0xff);
+
+ /*
+   * Since the low 8 bits of embedded data may look like a NOP, we need to
+   * check both the low byte and the whole code unit to determine whether it
+   * is code or data.
+ */
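+  /*
+   * Illustrative example: a real NOP is the code unit 0x0000 (instr == 0),
+   * while a packed-switch payload ident of 0x0100 has a NOP low byte but a
+   * non-zero code unit, so it is classified as data.
+   */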
+ return (opcode != Instruction::NOP || instr == 0);
+}
+
+/*
+ * Parse an instruction, return the length of the instruction
+ */
+static int ParseInsn(CompilationUnit* cu, const uint16_t* code_ptr,
+ DecodedInstruction* decoded_instruction)
+{
+ // Don't parse instruction data
+ if (!ContentIsInsn(code_ptr)) {
+ return 0;
+ }
+
+ const Instruction* instruction = Instruction::At(code_ptr);
+ *decoded_instruction = DecodedInstruction(instruction);
+
+ return instruction->SizeInCodeUnits();
+}
+
+#define UNKNOWN_TARGET 0xffffffff
+
+/* Split an existing block from the specified code offset into two */
+static BasicBlock *SplitBlock(CompilationUnit* cu, unsigned int code_offset,
+ BasicBlock* orig_block, BasicBlock** immed_pred_block_p)
+{
+ MIR* insn = orig_block->first_mir_insn;
+ while (insn) {
+ if (insn->offset == code_offset) break;
+ insn = insn->next;
+ }
+ if (insn == NULL) {
+ LOG(FATAL) << "Break split failed";
+ }
+ BasicBlock *bottom_block = NewMemBB(cu, kDalvikByteCode,
+ cu->num_blocks++);
+ InsertGrowableList(cu, &cu->block_list, reinterpret_cast<uintptr_t>(bottom_block));
+
+ bottom_block->start_offset = code_offset;
+ bottom_block->first_mir_insn = insn;
+ bottom_block->last_mir_insn = orig_block->last_mir_insn;
+
+ /* If this block was terminated by a return, the flag needs to go with the bottom block */
+ bottom_block->terminated_by_return = orig_block->terminated_by_return;
+ orig_block->terminated_by_return = false;
+
+ /* Add it to the quick lookup cache */
+ cu->block_map.Put(bottom_block->start_offset, bottom_block);
+
+ /* Handle the taken path */
+ bottom_block->taken = orig_block->taken;
+ if (bottom_block->taken) {
+ orig_block->taken = NULL;
+ DeleteGrowableList(bottom_block->taken->predecessors, reinterpret_cast<uintptr_t>(orig_block));
+ InsertGrowableList(cu, bottom_block->taken->predecessors,
+ reinterpret_cast<uintptr_t>(bottom_block));
+ }
+
+ /* Handle the fallthrough path */
+ bottom_block->fall_through = orig_block->fall_through;
+ orig_block->fall_through = bottom_block;
+ InsertGrowableList(cu, bottom_block->predecessors,
+ reinterpret_cast<uintptr_t>(orig_block));
+ if (bottom_block->fall_through) {
+ DeleteGrowableList(bottom_block->fall_through->predecessors,
+ reinterpret_cast<uintptr_t>(orig_block));
+ InsertGrowableList(cu, bottom_block->fall_through->predecessors,
+ reinterpret_cast<uintptr_t>(bottom_block));
+ }
+
+ /* Handle the successor list */
+ if (orig_block->successor_block_list.block_list_type != kNotUsed) {
+ bottom_block->successor_block_list = orig_block->successor_block_list;
+ orig_block->successor_block_list.block_list_type = kNotUsed;
+ GrowableListIterator iterator;
+
+ GrowableListIteratorInit(&bottom_block->successor_block_list.blocks,
+ &iterator);
+ while (true) {
+ SuccessorBlockInfo *successor_block_info =
+ reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
+ if (successor_block_info == NULL) break;
+ BasicBlock *bb = successor_block_info->block;
+ DeleteGrowableList(bb->predecessors, reinterpret_cast<uintptr_t>(orig_block));
+ InsertGrowableList(cu, bb->predecessors, reinterpret_cast<uintptr_t>(bottom_block));
+ }
+ }
+
+ orig_block->last_mir_insn = insn->prev;
+
+ insn->prev->next = NULL;
+ insn->prev = NULL;
+ /*
+ * Update the immediate predecessor block pointer so that outgoing edges
+ * can be applied to the proper block.
+ */
+ if (immed_pred_block_p) {
+ DCHECK_EQ(*immed_pred_block_p, orig_block);
+ *immed_pred_block_p = bottom_block;
+ }
+ return bottom_block;
+}
+
+/*
+ * Given a code offset, find the block that starts with it. If the offset
+ * is in the middle of an existing block, split that block into two. If
+ * immed_pred_block_p is non-null and *immed_pred_block_p is the block being
+ * split, update *immed_pred_block_p to point to the bottom block so that
+ * outgoing edges can be set up properly (by the caller).
+ * Uses a map for fast lookup of the typical cases.
+ */
+BasicBlock *FindBlock(CompilationUnit* cu, unsigned int code_offset,
+ bool split, bool create, BasicBlock** immed_pred_block_p)
+{
+ GrowableList* block_list = &cu->block_list;
+ BasicBlock* bb;
+ unsigned int i;
+ SafeMap<unsigned int, BasicBlock*>::iterator it;
+
+ it = cu->block_map.find(code_offset);
+ if (it != cu->block_map.end()) {
+ return it->second;
+ } else if (!create) {
+ return NULL;
+ }
+
+ if (split) {
+ for (i = 0; i < block_list->num_used; i++) {
+ bb = reinterpret_cast<BasicBlock*>(block_list->elem_list[i]);
+ if (bb->block_type != kDalvikByteCode) continue;
+ /* Check if a branch jumps into the middle of an existing block */
+ if ((code_offset > bb->start_offset) && (bb->last_mir_insn != NULL) &&
+ (code_offset <= bb->last_mir_insn->offset)) {
+ BasicBlock *new_bb = SplitBlock(cu, code_offset, bb,
+ bb == *immed_pred_block_p ?
+ immed_pred_block_p : NULL);
+ return new_bb;
+ }
+ }
+ }
+
+ /* Create a new one */
+ bb = NewMemBB(cu, kDalvikByteCode, cu->num_blocks++);
+ InsertGrowableList(cu, &cu->block_list, reinterpret_cast<uintptr_t>(bb));
+ bb->start_offset = code_offset;
+ cu->block_map.Put(bb->start_offset, bb);
+ return bb;
+}
+
+/* Find existing block */
+BasicBlock* FindBlock(CompilationUnit* cu, unsigned int code_offset)
+{
+ return FindBlock(cu, code_offset, false, false, NULL);
+}
+
+/* Turn method name into a legal Linux file name */
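+/* For example (illustrative): "void Foo.<init>()" becomes "void#Foo.=init=@@". */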
+void ReplaceSpecialChars(std::string& str)
+{
+ static const struct { const char before; const char after; } match[] =
+ {{'/','-'}, {';','#'}, {' ','#'}, {'$','+'},
+ {'(','@'}, {')','@'}, {'<','='}, {'>','='}};
+ for (unsigned int i = 0; i < sizeof(match)/sizeof(match[0]); i++) {
+ std::replace(str.begin(), str.end(), match[i].before, match[i].after);
+ }
+}
+
+/* Dump the CFG into a DOT graph */
+void DumpCFG(CompilationUnit* cu, const char* dir_prefix, bool all_blocks)
+{
+ FILE* file;
+ std::string fname(PrettyMethod(cu->method_idx, *cu->dex_file));
+ ReplaceSpecialChars(fname);
+ fname = StringPrintf("%s%s%x.dot", dir_prefix, fname.c_str(),
+ cu->entry_block->fall_through->start_offset);
+ file = fopen(fname.c_str(), "w");
+ if (file == NULL) {
+ return;
+ }
+ fprintf(file, "digraph G {\n");
+
+ fprintf(file, " rankdir=TB\n");
+
+ int num_blocks = all_blocks ? cu->num_blocks : cu->num_reachable_blocks;
+ int idx;
+ const GrowableList *block_list = &cu->block_list;
+
+ for (idx = 0; idx < num_blocks; idx++) {
+ int block_idx = all_blocks ? idx : cu->dfs_order.elem_list[idx];
+ BasicBlock *bb = reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, block_idx));
+ if (bb == NULL) break;
+ if (bb->block_type == kDead) continue;
+ if (bb->block_type == kEntryBlock) {
+ fprintf(file, " entry_%d [shape=Mdiamond];\n", bb->id);
+ } else if (bb->block_type == kExitBlock) {
+ fprintf(file, " exit_%d [shape=Mdiamond];\n", bb->id);
+ } else if (bb->block_type == kDalvikByteCode) {
+ fprintf(file, " block%04x_%d [shape=record,label = \"{ \\\n",
+ bb->start_offset, bb->id);
+ const MIR *mir;
+ fprintf(file, " {block id %d\\l}%s\\\n", bb->id,
+ bb->first_mir_insn ? " | " : " ");
+ for (mir = bb->first_mir_insn; mir; mir = mir->next) {
+ int opcode = mir->dalvikInsn.opcode;
+ fprintf(file, " {%04x %s %s %s\\l}%s\\\n", mir->offset,
+ mir->ssa_rep ? GetDalvikDisassembly(cu, mir) :
+ (opcode < kMirOpFirst) ? Instruction::Name(mir->dalvikInsn.opcode) :
+ extended_mir_op_names[opcode - kMirOpFirst],
+ (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0 ? " no_rangecheck" : " ",
+ (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0 ? " no_nullcheck" : " ",
+ mir->next ? " | " : " ");
+ }
+ fprintf(file, " }\"];\n\n");
+ } else if (bb->block_type == kExceptionHandling) {
+ char block_name[BLOCK_NAME_LEN];
+
+ GetBlockName(bb, block_name);
+ fprintf(file, " %s [shape=invhouse];\n", block_name);
+ }
+
+ char block_name1[BLOCK_NAME_LEN], block_name2[BLOCK_NAME_LEN];
+
+ if (bb->taken) {
+ GetBlockName(bb, block_name1);
+ GetBlockName(bb->taken, block_name2);
+ fprintf(file, " %s:s -> %s:n [style=dotted]\n",
+ block_name1, block_name2);
+ }
+ if (bb->fall_through) {
+ GetBlockName(bb, block_name1);
+ GetBlockName(bb->fall_through, block_name2);
+ fprintf(file, " %s:s -> %s:n\n", block_name1, block_name2);
+ }
+
+ if (bb->successor_block_list.block_list_type != kNotUsed) {
+ fprintf(file, " succ%04x_%d [shape=%s,label = \"{ \\\n",
+ bb->start_offset, bb->id,
+ (bb->successor_block_list.block_list_type == kCatch) ?
+ "Mrecord" : "record");
+ GrowableListIterator iterator;
+ GrowableListIteratorInit(&bb->successor_block_list.blocks,
+ &iterator);
+ SuccessorBlockInfo *successor_block_info =
+ reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
+
+ int succ_id = 0;
+ while (true) {
+ if (successor_block_info == NULL) break;
+
+ BasicBlock *dest_block = successor_block_info->block;
+ SuccessorBlockInfo *next_successor_block_info =
+ reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
+
+ fprintf(file, " {<f%d> %04x: %04x\\l}%s\\\n",
+ succ_id++,
+ successor_block_info->key,
+ dest_block->start_offset,
+ (next_successor_block_info != NULL) ? " | " : " ");
+
+ successor_block_info = next_successor_block_info;
+ }
+ fprintf(file, " }\"];\n\n");
+
+ GetBlockName(bb, block_name1);
+ fprintf(file, " %s:s -> succ%04x_%d:n [style=dashed]\n",
+ block_name1, bb->start_offset, bb->id);
+
+ if (bb->successor_block_list.block_list_type == kPackedSwitch ||
+ bb->successor_block_list.block_list_type == kSparseSwitch) {
+
+ GrowableListIteratorInit(&bb->successor_block_list.blocks,
+ &iterator);
+
+ succ_id = 0;
+ while (true) {
+ SuccessorBlockInfo *successor_block_info =
+ reinterpret_cast<SuccessorBlockInfo*>( GrowableListIteratorNext(&iterator));
+ if (successor_block_info == NULL) break;
+
+ BasicBlock *dest_block = successor_block_info->block;
+
+ GetBlockName(dest_block, block_name2);
+ fprintf(file, " succ%04x_%d:f%d:e -> %s:n\n", bb->start_offset,
+ bb->id, succ_id++, block_name2);
+ }
+ }
+ }
+ fprintf(file, "\n");
+
+ if (cu->verbose) {
+ /* Display the dominator tree */
+ GetBlockName(bb, block_name1);
+ fprintf(file, " cfg%s [label=\"%s\", shape=none];\n",
+ block_name1, block_name1);
+ if (bb->i_dom) {
+ GetBlockName(bb->i_dom, block_name2);
+ fprintf(file, " cfg%s:s -> cfg%s:n\n\n", block_name2, block_name1);
+ }
+ }
+ }
+ fprintf(file, "}\n");
+ fclose(file);
+}
+
+/* Verify that each claimed predecessor of this block actually lists it as a successor */
+static bool VerifyPredInfo(CompilationUnit* cu, BasicBlock* bb)
+{
+ GrowableListIterator iter;
+
+ GrowableListIteratorInit(bb->predecessors, &iter);
+ while (true) {
+ BasicBlock *pred_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
+ if (!pred_bb) break;
+ bool found = false;
+ if (pred_bb->taken == bb) {
+ found = true;
+ } else if (pred_bb->fall_through == bb) {
+ found = true;
+ } else if (pred_bb->successor_block_list.block_list_type != kNotUsed) {
+ GrowableListIterator iterator;
+ GrowableListIteratorInit(&pred_bb->successor_block_list.blocks,
+ &iterator);
+ while (true) {
+ SuccessorBlockInfo *successor_block_info =
+ reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
+ if (successor_block_info == NULL) break;
+ BasicBlock *succ_bb = successor_block_info->block;
+ if (succ_bb == bb) {
+ found = true;
+ break;
+ }
+ }
+ }
+ if (found == false) {
+ char block_name1[BLOCK_NAME_LEN], block_name2[BLOCK_NAME_LEN];
+ GetBlockName(bb, block_name1);
+ GetBlockName(pred_bb, block_name2);
+ DumpCFG(cu, "/sdcard/cfg/", false);
+ LOG(FATAL) << "Successor " << block_name1 << "not found from "
+ << block_name2;
+ }
+ }
+ return true;
+}
+
+/* Identify code ranges in try blocks and set up the empty catch blocks */
+static void ProcessTryCatchBlocks(CompilationUnit* cu)
+{
+ const DexFile::CodeItem* code_item = cu->code_item;
+ int tries_size = code_item->tries_size_;
+ int offset;
+
+ if (tries_size == 0) {
+ return;
+ }
+
+ ArenaBitVector* try_block_addr = cu->try_block_addr;
+
+ for (int i = 0; i < tries_size; i++) {
+ const DexFile::TryItem* pTry =
+ DexFile::GetTryItems(*code_item, i);
+ int start_offset = pTry->start_addr_;
+ int end_offset = start_offset + pTry->insn_count_;
+ for (offset = start_offset; offset < end_offset; offset++) {
+ SetBit(cu, try_block_addr, offset);
+ }
+ }
+
+ // Iterate over each of the handlers to enqueue the empty Catch blocks
+ const byte* handlers_ptr = DexFile::GetCatchHandlerData(*code_item, 0);
+ uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
+ for (uint32_t idx = 0; idx < handlers_size; idx++) {
+ CatchHandlerIterator iterator(handlers_ptr);
+ for (; iterator.HasNext(); iterator.Next()) {
+ uint32_t address = iterator.GetHandlerAddress();
+ FindBlock(cu, address, false /* split */, true /*create*/,
+ /* immed_pred_block_p */ NULL);
+ }
+ handlers_ptr = iterator.EndDataPointer();
+ }
+}
+
+/* Process instructions with the kBranch flag */
+static BasicBlock* ProcessCanBranch(CompilationUnit* cu, BasicBlock* cur_block,
+ MIR* insn, int cur_offset, int width, int flags,
+ const uint16_t* code_ptr, const uint16_t* code_end)
+{
+ int target = cur_offset;
+ switch (insn->dalvikInsn.opcode) {
+ case Instruction::GOTO:
+ case Instruction::GOTO_16:
+ case Instruction::GOTO_32:
+ target += insn->dalvikInsn.vA;
+ break;
+ case Instruction::IF_EQ:
+ case Instruction::IF_NE:
+ case Instruction::IF_LT:
+ case Instruction::IF_GE:
+ case Instruction::IF_GT:
+ case Instruction::IF_LE:
+ cur_block->conditional_branch = true;
+ target += insn->dalvikInsn.vC;
+ break;
+ case Instruction::IF_EQZ:
+ case Instruction::IF_NEZ:
+ case Instruction::IF_LTZ:
+ case Instruction::IF_GEZ:
+ case Instruction::IF_GTZ:
+ case Instruction::IF_LEZ:
+ cur_block->conditional_branch = true;
+ target += insn->dalvikInsn.vB;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected opcode(" << insn->dalvikInsn.opcode << ") with kBranch set";
+ }
+ BasicBlock *taken_block = FindBlock(cu, target,
+ /* split */
+ true,
+ /* create */
+ true,
+ /* immed_pred_block_p */
+ &cur_block);
+ cur_block->taken = taken_block;
+ InsertGrowableList(cu, taken_block->predecessors, reinterpret_cast<uintptr_t>(cur_block));
+
+ /* Always terminate the current block for conditional branches */
+ if (flags & Instruction::kContinue) {
+ BasicBlock *fallthrough_block = FindBlock(cu,
+ cur_offset + width,
+ /*
+ * If the method is processed
+ * in sequential order from the
+ * beginning, we don't need to
+ * specify split for continue
+ * blocks. However, this
+ * routine can be called by
+ * compileLoop, which starts
+ * parsing the method from an
+ * arbitrary address in the
+ * method body.
+ */
+ true,
+ /* create */
+ true,
+ /* immed_pred_block_p */
+ &cur_block);
+ cur_block->fall_through = fallthrough_block;
+ InsertGrowableList(cu, fallthrough_block->predecessors,
+ reinterpret_cast<uintptr_t>(cur_block));
+ } else if (code_ptr < code_end) {
+ /* Create a fallthrough block for real instructions (incl. NOP) */
+ if (ContentIsInsn(code_ptr)) {
+ FindBlock(cu, cur_offset + width,
+ /* split */
+ false,
+ /* create */
+ true,
+ /* immed_pred_block_p */
+ NULL);
+ }
+ }
+ return cur_block;
+}
+
+/* Process instructions with the kSwitch flag */
+static void ProcessCanSwitch(CompilationUnit* cu, BasicBlock* cur_block,
+ MIR* insn, int cur_offset, int width, int flags)
+{
+ const uint16_t* switch_data =
+ reinterpret_cast<const uint16_t*>(cu->insns + cur_offset + insn->dalvikInsn.vB);
+ int size;
+ const int* keyTable;
+ const int* target_table;
+ int i;
+ int first_key;
+
+ /*
+ * Packed switch data format:
+ * ushort ident = 0x0100 magic value
+ * ushort size number of entries in the table
+ * int first_key first (and lowest) switch case value
+ * int targets[size] branch targets, relative to switch opcode
+ *
+ * Total size is (4+size*2) 16-bit code units.
+ */
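+  /*
+   * Example (illustrative): size = 3 and first_key = 10 gives a payload of
+   * 4 + 3*2 = 10 code units mapping keys 10, 11 and 12 to targets[0..2].
+   */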
+ if (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) {
+ DCHECK_EQ(static_cast<int>(switch_data[0]),
+ static_cast<int>(Instruction::kPackedSwitchSignature));
+ size = switch_data[1];
+ first_key = switch_data[2] | (switch_data[3] << 16);
+ target_table = reinterpret_cast<const int*>(&switch_data[4]);
+ keyTable = NULL; // Make the compiler happy
+ /*
+ * Sparse switch data format:
+ * ushort ident = 0x0200 magic value
+ * ushort size number of entries in the table; > 0
+ * int keys[size] keys, sorted low-to-high; 32-bit aligned
+ * int targets[size] branch targets, relative to switch opcode
+ *
+ * Total size is (2+size*4) 16-bit code units.
+ */
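+    /*
+     * Example (illustrative): size = 3 gives a payload of 2 + 3*4 = 14 code
+     * units, with keys[i] mapping to targets[i].
+     */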
+ } else {
+ DCHECK_EQ(static_cast<int>(switch_data[0]),
+ static_cast<int>(Instruction::kSparseSwitchSignature));
+ size = switch_data[1];
+ keyTable = reinterpret_cast<const int*>(&switch_data[2]);
+ target_table = reinterpret_cast<const int*>(&switch_data[2 + size*2]);
+ first_key = 0; // To make the compiler happy
+ }
+
+ if (cur_block->successor_block_list.block_list_type != kNotUsed) {
+ LOG(FATAL) << "Successor block list already in use: "
+ << static_cast<int>(cur_block->successor_block_list.block_list_type);
+ }
+ cur_block->successor_block_list.block_list_type =
+ (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
+ kPackedSwitch : kSparseSwitch;
+ CompilerInitGrowableList(cu, &cur_block->successor_block_list.blocks, size,
+ kListSuccessorBlocks);
+
+ for (i = 0; i < size; i++) {
+ BasicBlock *case_block = FindBlock(cu, cur_offset + target_table[i],
+ /* split */
+ true,
+ /* create */
+ true,
+ /* immed_pred_block_p */
+ &cur_block);
+ SuccessorBlockInfo *successor_block_info =
+ static_cast<SuccessorBlockInfo*>(NewMem(cu, sizeof(SuccessorBlockInfo),
+ false, kAllocSuccessor));
+ successor_block_info->block = case_block;
+ successor_block_info->key =
+ (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
+ first_key + i : keyTable[i];
+ InsertGrowableList(cu, &cur_block->successor_block_list.blocks,
+ reinterpret_cast<uintptr_t>(successor_block_info));
+ InsertGrowableList(cu, case_block->predecessors,
+ reinterpret_cast<uintptr_t>(cur_block));
+ }
+
+ /* Fall-through case */
+ BasicBlock* fallthrough_block = FindBlock(cu,
+ cur_offset + width,
+ /* split */
+ false,
+ /* create */
+ true,
+ /* immed_pred_block_p */
+ NULL);
+ cur_block->fall_through = fallthrough_block;
+ InsertGrowableList(cu, fallthrough_block->predecessors,
+ reinterpret_cast<uintptr_t>(cur_block));
+}
+
+/* Process instructions with the kThrow flag */
+static BasicBlock* ProcessCanThrow(CompilationUnit* cu, BasicBlock* cur_block,
+ MIR* insn, int cur_offset, int width, int flags,
+ ArenaBitVector* try_block_addr, const uint16_t* code_ptr,
+ const uint16_t* code_end)
+{
+ const DexFile::CodeItem* code_item = cu->code_item;
+ bool in_try_block = IsBitSet(try_block_addr, cur_offset);
+
+ /* In try block */
+ if (in_try_block) {
+ CatchHandlerIterator iterator(*code_item, cur_offset);
+
+ if (cur_block->successor_block_list.block_list_type != kNotUsed) {
+ LOG(INFO) << PrettyMethod(cu->method_idx, *cu->dex_file);
+ LOG(FATAL) << "Successor block list already in use: "
+ << static_cast<int>(cur_block->successor_block_list.block_list_type);
+ }
+
+ cur_block->successor_block_list.block_list_type = kCatch;
+ CompilerInitGrowableList(cu, &cur_block->successor_block_list.blocks, 2,
+ kListSuccessorBlocks);
+
+ for (;iterator.HasNext(); iterator.Next()) {
+ BasicBlock *catch_block = FindBlock(cu, iterator.GetHandlerAddress(),
+ false /* split*/,
+                                          false /* create */,
+ NULL /* immed_pred_block_p */);
+ catch_block->catch_entry = true;
+ cu->catches.insert(catch_block->start_offset);
+ SuccessorBlockInfo *successor_block_info = reinterpret_cast<SuccessorBlockInfo*>
+ (NewMem(cu, sizeof(SuccessorBlockInfo), false, kAllocSuccessor));
+ successor_block_info->block = catch_block;
+ successor_block_info->key = iterator.GetHandlerTypeIndex();
+ InsertGrowableList(cu, &cur_block->successor_block_list.blocks,
+ reinterpret_cast<uintptr_t>(successor_block_info));
+ InsertGrowableList(cu, catch_block->predecessors,
+ reinterpret_cast<uintptr_t>(cur_block));
+ }
+ } else {
+ BasicBlock *eh_block = NewMemBB(cu, kExceptionHandling,
+ cu->num_blocks++);
+ cur_block->taken = eh_block;
+ InsertGrowableList(cu, &cu->block_list, reinterpret_cast<uintptr_t>(eh_block));
+ eh_block->start_offset = cur_offset;
+ InsertGrowableList(cu, eh_block->predecessors, reinterpret_cast<uintptr_t>(cur_block));
+ }
+
+  if (insn->dalvikInsn.opcode == Instruction::THROW) {
+ cur_block->explicit_throw = true;
+ if ((code_ptr < code_end) && ContentIsInsn(code_ptr)) {
+ // Force creation of new block following THROW via side-effect
+ FindBlock(cu, cur_offset + width, /* split */ false,
+ /* create */ true, /* immed_pred_block_p */ NULL);
+ }
+ if (!in_try_block) {
+ // Don't split a THROW that can't rethrow - we're done.
+ return cur_block;
+ }
+ }
+
+ /*
+ * Split the potentially-throwing instruction into two parts.
+ * The first half will be a pseudo-op that captures the exception
+ * edges and terminates the basic block. It always falls through.
+ * Then, create a new basic block that begins with the throwing instruction
+ * (minus exceptions). Note: this new basic block must NOT be entered into
+ * the block_map. If the potentially-throwing instruction is the target of a
+ * future branch, we need to find the check pseudo half. The new
+ * basic block containing the work portion of the instruction should
+ * only be entered via fallthrough from the block containing the
+ * pseudo exception edge MIR. Note also that this new block is
+ * not automatically terminated after the work portion, and may
+ * contain following instructions.
+ */
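+  /*
+   * Illustrative shape of the result:
+   *
+   *   cur_block: ... [kMirOpCheck pseudo-op]   (exception edges live here)
+   *        |  fall_through
+   *        v
+   *   new_block: [work portion of the throwing instruction] ...
+   */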
+ BasicBlock *new_block = NewMemBB(cu, kDalvikByteCode, cu->num_blocks++);
+ InsertGrowableList(cu, &cu->block_list, reinterpret_cast<uintptr_t>(new_block));
+ new_block->start_offset = insn->offset;
+ cur_block->fall_through = new_block;
+ InsertGrowableList(cu, new_block->predecessors, reinterpret_cast<uintptr_t>(cur_block));
+ MIR* new_insn = static_cast<MIR*>(NewMem(cu, sizeof(MIR), true, kAllocMIR));
+ *new_insn = *insn;
+ insn->dalvikInsn.opcode =
+ static_cast<Instruction::Code>(kMirOpCheck);
+ // Associate the two halves
+ insn->meta.throw_insn = new_insn;
+ new_insn->meta.throw_insn = insn;
+ AppendMIR(new_block, new_insn);
+ return new_block;
+}
+
+void CompilerInit(CompilationUnit* cu, const Compiler& compiler) {
+ bool success = false;
+ switch (compiler.GetInstructionSet()) {
+ case kThumb2:
+ success = InitArmCodegen(cu);
+ break;
+ case kMips:
+ success = InitMipsCodegen(cu);
+ break;
+ case kX86:
+ success = InitX86Codegen(cu);
+ break;
+ default:;
+ }
+ if (!success) {
+ LOG(FATAL) << "Failed to initialize codegen for " << compiler.GetInstructionSet();
+ }
+ if (!HeapInit(cu)) {
+ LOG(FATAL) << "Failed to initialize oat heap";
+ }
+}
+
+static CompiledMethod* CompileMethod(Compiler& compiler,
+ const CompilerBackend compiler_backend,
+ const DexFile::CodeItem* code_item,
+ uint32_t access_flags, InvokeType invoke_type,
+ uint32_t class_def_idx, uint32_t method_idx,
+ jobject class_loader, const DexFile& dex_file,
+ LLVMInfo* llvm_info)
+{
+ VLOG(compiler) << "Compiling " << PrettyMethod(method_idx, dex_file) << "...";
+
+ const uint16_t* code_ptr = code_item->insns_;
+ const uint16_t* code_end = code_item->insns_ + code_item->insns_size_in_code_units_;
+ int num_blocks = 0;
+ unsigned int cur_offset = 0;
+
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ UniquePtr<CompilationUnit> cu(new CompilationUnit);
+
+ CompilerInit(cu.get(), compiler);
+
+ cu->compiler = &compiler;
+ cu->class_linker = class_linker;
+ cu->dex_file = &dex_file;
+ cu->class_def_idx = class_def_idx;
+ cu->method_idx = method_idx;
+ cu->code_item = code_item;
+ cu->access_flags = access_flags;
+ cu->invoke_type = invoke_type;
+ cu->shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
+ cu->instruction_set = compiler.GetInstructionSet();
+ cu->insns = code_item->insns_;
+ cu->insns_size = code_item->insns_size_in_code_units_;
+ cu->num_ins = code_item->ins_size_;
+ cu->num_regs = code_item->registers_size_ - cu->num_ins;
+ cu->num_outs = code_item->outs_size_;
+ DCHECK((cu->instruction_set == kThumb2) ||
+ (cu->instruction_set == kX86) ||
+ (cu->instruction_set == kMips));
+ if ((compiler_backend == kQuickGBC) || (compiler_backend == kPortable)) {
+ cu->gen_bitcode = true;
+ }
+ cu->llvm_info = llvm_info;
+ /* Adjust this value accordingly once inlining is performed */
+ cu->num_dalvik_registers = code_item->registers_size_;
+ // TODO: set this from command line
+ cu->compiler_flip_match = false;
+ bool use_match = !cu->compiler_method_match.empty();
+ bool match = use_match && (cu->compiler_flip_match ^
+ (PrettyMethod(method_idx, dex_file).find(cu->compiler_method_match) !=
+ std::string::npos));
+ if (!use_match || match) {
+ cu->disable_opt = kCompilerOptimizerDisableFlags;
+ cu->enable_debug = kCompilerDebugFlags;
+ cu->verbose = VLOG_IS_ON(compiler) ||
+ (cu->enable_debug & (1 << kDebugVerbose));
+ }
+#ifndef NDEBUG
+ if (cu->gen_bitcode) {
+ cu->enable_debug |= (1 << kDebugVerifyBitcode);
+ }
+#endif
+
+ if (cu->instruction_set == kMips) {
+ // Disable some optimizations for mips for now
+ cu->disable_opt |= (
+ (1 << kLoadStoreElimination) |
+ (1 << kLoadHoisting) |
+ (1 << kSuppressLoads) |
+ (1 << kNullCheckElimination) |
+ (1 << kPromoteRegs) |
+ (1 << kTrackLiveTemps) |
+ (1 << kSkipLargeMethodOptimization) |
+ (1 << kSafeOptimizations) |
+ (1 << kBBOpt) |
+ (1 << kMatch) |
+ (1 << kPromoteCompilerTemps));
+ }
+
+ /* Gathering opcode stats? */
+ if (kCompilerDebugFlags & (1 << kDebugCountOpcodes)) {
+ cu->opcode_count =
+ static_cast<int*>(NewMem(cu.get(), kNumPackedOpcodes * sizeof(int), true, kAllocMisc));
+ }
+
+ /* Assume non-throwing leaf */
+ cu->attrs = (METHOD_IS_LEAF | METHOD_IS_THROW_FREE);
+
+ /* Initialize the block list, estimate size based on insns_size */
+ CompilerInitGrowableList(cu.get(), &cu->block_list, cu->insns_size,
+ kListBlockList);
+
+ /* Initialize the switch_tables list */
+ CompilerInitGrowableList(cu.get(), &cu->switch_tables, 4,
+ kListSwitchTables);
+
+  /* Initialize the fill_array_data list */
+ CompilerInitGrowableList(cu.get(), &cu->fill_array_data, 4,
+ kListFillArrayData);
+
+  /* Initialize the throw_launchpads list, estimate size based on insns_size */
+ CompilerInitGrowableList(cu.get(), &cu->throw_launchpads, cu->insns_size,
+ kListThrowLaunchPads);
+
+  /* Initialize the intrinsic_launchpads list */
+ CompilerInitGrowableList(cu.get(), &cu->intrinsic_launchpads, 4,
+ kListMisc);
+
+
+  /* Initialize the suspend_launchpads list */
+ CompilerInitGrowableList(cu.get(), &cu->suspend_launchpads, 2048,
+ kListSuspendLaunchPads);
+
+ /* Allocate the bit-vector to track the beginning of basic blocks */
+ ArenaBitVector *try_block_addr = AllocBitVector(cu.get(),
+ cu->insns_size,
+ true /* expandable */);
+ cu->try_block_addr = try_block_addr;
+
+ /* Create the default entry and exit blocks and enter them to the list */
+ BasicBlock *entry_block = NewMemBB(cu.get(), kEntryBlock, num_blocks++);
+ BasicBlock *exit_block = NewMemBB(cu.get(), kExitBlock, num_blocks++);
+
+ cu->entry_block = entry_block;
+ cu->exit_block = exit_block;
+
+ InsertGrowableList(cu.get(), &cu->block_list, reinterpret_cast<uintptr_t>(entry_block));
+ InsertGrowableList(cu.get(), &cu->block_list, reinterpret_cast<uintptr_t>(exit_block));
+
+ /* Current block to record parsed instructions */
+ BasicBlock *cur_block = NewMemBB(cu.get(), kDalvikByteCode, num_blocks++);
+ cur_block->start_offset = 0;
+ InsertGrowableList(cu.get(), &cu->block_list, reinterpret_cast<uintptr_t>(cur_block));
+ /* Add first block to the fast lookup cache */
+ cu->block_map.Put(cur_block->start_offset, cur_block);
+ entry_block->fall_through = cur_block;
+ InsertGrowableList(cu.get(), cur_block->predecessors,
+ reinterpret_cast<uintptr_t>(entry_block));
+
+ /*
+   * Store back the number of blocks now, since the parsing code below may
+   * create new blocks directly through cu.
+ */
+ cu->num_blocks = num_blocks;
+
+  /* Identify code ranges in try blocks and set up the empty catch blocks */
+ ProcessTryCatchBlocks(cu.get());
+
+ /* Set up for simple method detection */
+ int num_patterns = sizeof(special_patterns)/sizeof(special_patterns[0]);
+ bool live_pattern = (num_patterns > 0) && !(cu->disable_opt & (1 << kMatch));
+ bool* dead_pattern =
+ static_cast<bool*>(NewMem(cu.get(), sizeof(bool) * num_patterns, true, kAllocMisc));
+ SpecialCaseHandler special_case = kNoHandler;
+ int pattern_pos = 0;
+
+ /* Parse all instructions and put them into containing basic blocks */
+ while (code_ptr < code_end) {
+ MIR *insn = static_cast<MIR *>(NewMem(cu.get(), sizeof(MIR), true, kAllocMIR));
+ insn->offset = cur_offset;
+ int width = ParseInsn(cu.get(), code_ptr, &insn->dalvikInsn);
+ insn->width = width;
+ Instruction::Code opcode = insn->dalvikInsn.opcode;
+ if (cu->opcode_count != NULL) {
+ cu->opcode_count[static_cast<int>(opcode)]++;
+ }
+
+ /* Terminate when the data section is seen */
+ if (width == 0)
+ break;
+
+ /* Possible simple method? */
+ if (live_pattern) {
+ live_pattern = false;
+ special_case = kNoHandler;
+ for (int i = 0; i < num_patterns; i++) {
+ if (!dead_pattern[i]) {
+ if (special_patterns[i].opcodes[pattern_pos] == opcode) {
+ live_pattern = true;
+ special_case = special_patterns[i].handler_code;
+ } else {
+ dead_pattern[i] = true;
+ }
+ }
+ }
+ pattern_pos++;
+ }
+
+ AppendMIR(cur_block, insn);
+
+ code_ptr += width;
+ int flags = Instruction::FlagsOf(insn->dalvikInsn.opcode);
+
+ int df_flags = oat_data_flow_attributes[insn->dalvikInsn.opcode];
+
+ if (df_flags & DF_HAS_DEFS) {
+ cu->def_count += (df_flags & DF_A_WIDE) ? 2 : 1;
+ }
+
+ if (flags & Instruction::kBranch) {
+ cur_block = ProcessCanBranch(cu.get(), cur_block, insn, cur_offset,
+ width, flags, code_ptr, code_end);
+ } else if (flags & Instruction::kReturn) {
+ cur_block->terminated_by_return = true;
+ cur_block->fall_through = exit_block;
+ InsertGrowableList(cu.get(), exit_block->predecessors,
+ reinterpret_cast<uintptr_t>(cur_block));
+ /*
+ * Terminate the current block if there are instructions
+ * afterwards.
+ */
+ if (code_ptr < code_end) {
+ /*
+ * Create a fallthrough block for real instructions
+ * (incl. NOP).
+ */
+ if (ContentIsInsn(code_ptr)) {
+ FindBlock(cu.get(), cur_offset + width,
+ /* split */
+ false,
+ /* create */
+ true,
+ /* immed_pred_block_p */
+ NULL);
+ }
+ }
+ } else if (flags & Instruction::kThrow) {
+ cur_block = ProcessCanThrow(cu.get(), cur_block, insn, cur_offset,
+ width, flags, try_block_addr, code_ptr, code_end);
+ } else if (flags & Instruction::kSwitch) {
+ ProcessCanSwitch(cu.get(), cur_block, insn, cur_offset, width, flags);
+ }
+ cur_offset += width;
+ BasicBlock *next_block = FindBlock(cu.get(), cur_offset,
+ /* split */
+ false,
+ /* create */
+ false,
+ /* immed_pred_block_p */
+ NULL);
+ if (next_block) {
+ /*
+ * The next instruction could be the target of a previously parsed
+ * forward branch so a block is already created. If the current
+ * instruction is not an unconditional branch, connect them through
+ * the fall-through link.
+ */
+ DCHECK(cur_block->fall_through == NULL ||
+ cur_block->fall_through == next_block ||
+ cur_block->fall_through == exit_block);
+
+ if ((cur_block->fall_through == NULL) && (flags & Instruction::kContinue)) {
+ cur_block->fall_through = next_block;
+ InsertGrowableList(cu.get(), next_block->predecessors,
+ reinterpret_cast<uintptr_t>(cur_block));
+ }
+ cur_block = next_block;
+ }
+ }
+
+ if (cu->enable_debug & (1 << kDebugDumpCFG)) {
+ DumpCFG(cu.get(), "/sdcard/1_post_parse_cfg/", true);
+ }
+
+ if (!(cu->disable_opt & (1 << kSkipLargeMethodOptimization))) {
+ if ((cu->num_blocks > MANY_BLOCKS) ||
+ ((cu->num_blocks > MANY_BLOCKS_INITIALIZER) &&
+ PrettyMethod(method_idx, dex_file, false).find("init>") !=
+ std::string::npos)) {
+ cu->qd_mode = true;
+ }
+ }
+
+ if (cu->qd_mode) {
+ // Bitcode generation requires full dataflow analysis
+ cu->disable_dataflow = !cu->gen_bitcode;
+    // Disable optimizations which require dataflow/SSA
+ cu->disable_opt |= (1 << kBBOpt) | (1 << kPromoteRegs) | (1 << kNullCheckElimination);
+ if (cu->verbose) {
+ LOG(INFO) << "QD mode enabled: "
+ << PrettyMethod(method_idx, dex_file)
+ << " num blocks: " << cu->num_blocks;
+ }
+ }
+
+ if (cu->verbose) {
+ DumpCompilationUnit(cu.get());
+ }
+
+ /* Do a code layout pass */
+ CodeLayout(cu.get());
+
+ if (cu->enable_debug & (1 << kDebugDumpCFG)) {
+ DumpCFG(cu.get(), "/sdcard/2_post_layout_cfg/", true);
+ }
+
+ if (cu->enable_debug & (1 << kDebugVerifyDataflow)) {
+ /* Verify if all blocks are connected as claimed */
+ DataFlowAnalysisDispatcher(cu.get(), VerifyPredInfo, kAllNodes,
+ false /* is_iterative */);
+ }
+
+ /* Perform SSA transformation for the whole method */
+ SSATransformation(cu.get());
+
+ if (cu->enable_debug & (1 << kDebugDumpCFG)) {
+ DumpCFG(cu.get(), "/sdcard/3_post_ssa_cfg/", false);
+ }
+
+ /* Do constant propagation */
+ cu->is_constant_v = AllocBitVector(cu.get(), cu->num_ssa_regs, false /* not expandable */);
+ cu->must_flush_constant_v = AllocBitVector(cu.get(), cu->num_ssa_regs,
+ false /* not expandable */);
+ cu->constant_values =
+ static_cast<int*>(NewMem(cu.get(), sizeof(int) * cu->num_ssa_regs, true, kAllocDFInfo));
+ DataFlowAnalysisDispatcher(cu.get(), DoConstantPropogation,
+ kAllNodes,
+ false /* is_iterative */);
+
+ /* Detect loops */
+ LoopDetection(cu.get());
+
+ /* Count uses */
+ MethodUseCount(cu.get());
+
+ /* Perform null check elimination */
+ NullCheckElimination(cu.get());
+
+ if (cu->enable_debug & (1 << kDebugDumpCFG)) {
+ DumpCFG(cu.get(), "/sdcard/4_post_nce_cfg/", false);
+ }
+
+ /* Combine basic blocks where possible */
+ BasicBlockCombine(cu.get());
+
+ if (cu->enable_debug & (1 << kDebugDumpCFG)) {
+ DumpCFG(cu.get(), "/sdcard/5_post_bbcombine_cfg/", false);
+ }
+
+ /* Do some basic block optimizations */
+ BasicBlockOptimization(cu.get());
+
+ // Debugging only
+ if (cu->enable_debug & (1 << kDebugDumpCFG)) {
+ DumpCFG(cu.get(), "/sdcard/6_post_bbo_cfg/", false);
+ }
+
+ if (cu->enable_debug & (1 << kDebugDumpCheckStats)) {
+ DumpCheckStats(cu.get());
+ }
+
+ cu.get()->cg->CompilerInitializeRegAlloc(cu.get()); // Needs to happen after SSA naming
+
+ /* Allocate Registers using simple local allocation scheme */
+ SimpleRegAlloc(cu.get());
+
+ if (cu->enable_debug & (1 << kDebugDumpCFG)) {
+ DumpCFG(cu.get(), "/sdcard/7_post_ralloc_cfg/", true);
+ }
+
+
+ /* Go the LLVM path? */
+ if (cu->gen_bitcode) {
+ // MIR->Bitcode
+ MethodMIR2Bitcode(cu.get());
+ if (compiler_backend == kPortable) {
+ // all done
+ ArenaReset(cu.get());
+ return NULL;
+ }
+ // Bitcode->LIR
+ MethodBitcode2LIR(cu.get());
+ } else {
+ if (special_case != kNoHandler) {
+ /*
+ * Custom codegen for special cases. If for any reason the
+ * special codegen doesn't succeed, cu->first_lir_insn will
+       * be set to NULL.
+ */
+ SpecialMIR2LIR(cu.get(), special_case);
+ }
+
+ /* Convert MIR to LIR, etc. */
+ if (cu->first_lir_insn == NULL) {
+ MethodMIR2LIR(cu.get());
+ }
+ }
+
+ /* Method is not empty */
+ if (cu->first_lir_insn) {
+
+ // mark the targets of switch statement case labels
+ ProcessSwitchTables(cu.get());
+
+ /* Convert LIR into machine code. */
+ AssembleLIR(cu.get());
+
+ if (cu->verbose) {
+ CodegenDump(cu.get());
+ }
+
+ if (cu->opcode_count != NULL) {
+ LOG(INFO) << "Opcode Count";
+ for (int i = 0; i < kNumPackedOpcodes; i++) {
+ if (cu->opcode_count[i] != 0) {
+ LOG(INFO) << "-C- "
+ << Instruction::Name(static_cast<Instruction::Code>(i))
+ << " " << cu->opcode_count[i];
+ }
+ }
+ }
+ }
+
+ // Combine vmap tables - core regs, then fp regs - into vmap_table
+ std::vector<uint16_t> vmap_table;
+ // Core regs may have been inserted out of order - sort first
+ std::sort(cu->core_vmap_table.begin(), cu->core_vmap_table.end());
+ for (size_t i = 0 ; i < cu->core_vmap_table.size(); i++) {
+ // Copy, stripping out the phys register sort key
+ vmap_table.push_back(~(-1 << VREG_NUM_WIDTH) & cu->core_vmap_table[i]);
+ }
+  // If we have a frame, push a marker to take the place of lr
+ if (cu->frame_size > 0) {
+ vmap_table.push_back(INVALID_VREG);
+ } else {
+ DCHECK_EQ(__builtin_popcount(cu->core_spill_mask), 0);
+ DCHECK_EQ(__builtin_popcount(cu->fp_spill_mask), 0);
+ }
+  // Now append the fp regs; unlike the core regs, these are already sorted.
+ for (uint32_t i = 0; i < cu->fp_vmap_table.size(); i++) {
+ vmap_table.push_back(cu->fp_vmap_table[i]);
+ }
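+  // Resulting layout (sketch): [sorted core vregs][INVALID_VREG if a frame exists][fp vregs].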
+ CompiledMethod* result =
+ new CompiledMethod(cu->instruction_set, cu->code_buffer,
+ cu->frame_size, cu->core_spill_mask, cu->fp_spill_mask,
+ cu->combined_mapping_table, vmap_table, cu->native_gc_map);
+
+ VLOG(compiler) << "Compiled " << PrettyMethod(method_idx, dex_file)
+ << " (" << (cu->code_buffer.size() * sizeof(cu->code_buffer[0]))
+ << " bytes)";
+
+#ifdef WITH_MEMSTATS
+ if (cu->enable_debug & (1 << kDebugShowMemoryUsage)) {
+ DumpMemStats(cu.get());
+ }
+#endif
+
+ ArenaReset(cu.get());
+
+ return result;
+}
+
+CompiledMethod* CompileOneMethod(Compiler& compiler,
+ const CompilerBackend backend,
+ const DexFile::CodeItem* code_item,
+ uint32_t access_flags, InvokeType invoke_type,
+ uint32_t class_def_idx, uint32_t method_idx, jobject class_loader,
+ const DexFile& dex_file,
+ LLVMInfo* llvm_info)
+{
+ return CompileMethod(compiler, backend, code_item, access_flags, invoke_type, class_def_idx,
+ method_idx, class_loader, dex_file, llvm_info);
+}
+
+} // namespace art
+
+extern "C" art::CompiledMethod*
+ ArtQuickCompileMethod(art::Compiler& compiler,
+ const art::DexFile::CodeItem* code_item,
+ uint32_t access_flags, art::InvokeType invoke_type,
+ uint32_t class_def_idx, uint32_t method_idx, jobject class_loader,
+ const art::DexFile& dex_file)
+{
+  // TODO: check method fingerprint here to determine appropriate backend type. Until then, use the build default.
+ art::CompilerBackend backend = compiler.GetCompilerBackend();
+ return art::CompileOneMethod(compiler, backend, code_item, access_flags, invoke_type,
+ class_def_idx, method_idx, class_loader, dex_file,
+ NULL /* use thread llvm_info */);
+}
diff --git a/src/compiler/dex/frontend.h b/src/compiler/dex/frontend.h
new file mode 100644
index 0000000..b261c65
--- /dev/null
+++ b/src/compiler/dex/frontend.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_COMPILER_H_
+#define ART_SRC_COMPILER_DEX_COMPILER_H_
+
+#include "dex_file.h"
+#include "dex_instruction.h"
+
+namespace llvm {
+ class Module;
+ class LLVMContext;
+}
+
+namespace art {
+namespace greenland {
+ class IntrinsicHelper;
+ class IRBuilder;
+}
+
+#define COMPILER_TRACED(X)
+#define COMPILER_TRACEE(X)
+
+/*
+ * Special offsets to denote method entry/exit for debugger update.
+ * NOTE: bit pattern must be loadable using 1 instruction and must
+ * not be a valid Dalvik offset.
+ */
+#define DEBUGGER_METHOD_ENTRY -1
+#define DEBUGGER_METHOD_EXIT -2
+
+/*
+ * Assembly is an iterative process, and usually terminates within
+ * two or three passes. This limit should be high enough to handle bizarre
+ * cases while still catching an infinite-loop bug.
+ */
+#define MAX_ASSEMBLER_RETRIES 50
+
+// Suppress optimization if corresponding bit set.
+enum opt_control_vector {
+ kLoadStoreElimination = 0,
+ kLoadHoisting,
+ kSuppressLoads,
+ kNullCheckElimination,
+ kPromoteRegs,
+ kTrackLiveTemps,
+ kSkipLargeMethodOptimization,
+ kSafeOptimizations,
+ kBBOpt,
+ kMatch,
+ kPromoteCompilerTemps,
+};
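+// Example usage (as in frontend.cc): cu->disable_opt |= (1 << kBBOpt) suppresses
+// basic block optimization for the current compilation unit.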
+
+// Force code generation paths for testing.
+enum debugControlVector {
+ kDebugDisplayMissingTargets,
+ kDebugVerbose,
+ kDebugDumpCFG,
+ kDebugSlowFieldPath,
+ kDebugSlowInvokePath,
+ kDebugSlowStringPath,
+ kDebugSlowTypePath,
+ kDebugSlowestFieldPath,
+ kDebugSlowestStringPath,
+ kDebugExerciseResolveMethod,
+ kDebugVerifyDataflow,
+ kDebugShowMemoryUsage,
+ kDebugShowNops,
+ kDebugCountOpcodes,
+ kDebugDumpCheckStats,
+ kDebugDumpBitcodeFile,
+ kDebugVerifyBitcode,
+};
+
+enum OatMethodAttributes {
+ kIsCallee = 0, // Code is part of a callee (invoked by a hot trace).
+ kIsHot, // Code is part of a hot trace.
+ kIsLeaf, // Method is leaf.
+ kIsEmpty, // Method is empty.
+ kIsThrowFree, // Method doesn't throw.
+ kIsGetter, // Method fits the getter pattern.
+ kIsSetter, // Method fits the setter pattern.
+ kCannotCompile, // Method cannot be compiled.
+};
+
+#define METHOD_IS_CALLEE (1 << kIsCallee)
+#define METHOD_IS_HOT (1 << kIsHot)
+#define METHOD_IS_LEAF (1 << kIsLeaf)
+#define METHOD_IS_EMPTY (1 << kIsEmpty)
+#define METHOD_IS_THROW_FREE (1 << kIsThrowFree)
+#define METHOD_IS_GETTER (1 << kIsGetter)
+#define METHOD_IS_SETTER (1 << kIsSetter)
+#define METHOD_CANNOT_COMPILE (1 << kCannotCompile)
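+// Example usage (as in frontend.cc): a method starts out assumed to be a
+// non-throwing leaf via cu->attrs = (METHOD_IS_LEAF | METHOD_IS_THROW_FREE).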
+
+class LLVMInfo {
+ public:
+ LLVMInfo();
+ ~LLVMInfo();
+
+ llvm::LLVMContext* GetLLVMContext() {
+ return llvm_context_.get();
+ }
+
+ llvm::Module* GetLLVMModule() {
+ return llvm_module_;
+ }
+
+ art::greenland::IntrinsicHelper* GetIntrinsicHelper() {
+ return intrinsic_helper_.get();
+ }
+
+ art::greenland::IRBuilder* GetIRBuilder() {
+ return ir_builder_.get();
+ }
+
+ private:
+ UniquePtr<llvm::LLVMContext> llvm_context_;
+ llvm::Module* llvm_module_; // Managed by context_.
+ UniquePtr<art::greenland::IntrinsicHelper> intrinsic_helper_;
+ UniquePtr<art::greenland::IRBuilder> ir_builder_;
+};
+
+struct CompilationUnit;
+struct BasicBlock;
+
+BasicBlock* FindBlock(CompilationUnit* cu, unsigned int code_offset);
+void ReplaceSpecialChars(std::string& str);
+
+} // namespace art
+
+extern "C" art::CompiledMethod* ArtCompileMethod(art::Compiler& compiler,
+ const art::DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ art::InvokeType invoke_type,
+ uint32_t class_dex_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const art::DexFile& dex_file);
+
+#endif // ART_SRC_COMPILER_DEX_COMPILER_H_
diff --git a/src/compiler/dex/portable/mir_to_gbc.cc b/src/compiler/dex/portable/mir_to_gbc.cc
new file mode 100644
index 0000000..31fa96a
--- /dev/null
+++ b/src/compiler/dex/portable/mir_to_gbc.cc
@@ -0,0 +1,3550 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "object_utils.h"
+
+#include <llvm/Support/ToolOutputFile.h>
+#include <llvm/Bitcode/ReaderWriter.h>
+#include <llvm/Analysis/Verifier.h>
+#include <llvm/Metadata.h>
+#include <llvm/ADT/DepthFirstIterator.h>
+#include <llvm/Instruction.h>
+#include <llvm/Type.h>
+#include <llvm/Instructions.h>
+#include <llvm/Support/Casting.h>
+#include <llvm/Support/InstIterator.h>
+
+#include "compiler/dex/compiler_internals.h"
+
+// TODO: move gbc_to_lir code into quick directory (if necessary).
+#include "compiler/dex/quick/codegen_util.h"
+#include "compiler/dex/quick/local_optimizations.h"
+#include "compiler/dex/quick/ralloc_util.h"
+
+static const char* kLabelFormat = "%c0x%x_%d";
+static const char kInvalidBlock = 0xff;
+static const char kNormalBlock = 'L';
+static const char kCatchBlock = 'C';
+
+namespace art {
+static RegLocation GetLoc(CompilationUnit* cu, llvm::Value* val);
+
+static llvm::BasicBlock* GetLLVMBlock(CompilationUnit* cu, int id)
+{
+ return cu->id_to_block_map.Get(id);
+}
+
+static llvm::Value* GetLLVMValue(CompilationUnit* cu, int s_reg)
+{
+ return reinterpret_cast<llvm::Value*>(GrowableListGetElement(&cu->llvm_values, s_reg));
+}
+
+static void SetVregOnValue(CompilationUnit* cu, llvm::Value* val, int s_reg)
+{
+ // Set vreg for debugging
+ greenland::IntrinsicHelper::IntrinsicId id =
+ greenland::IntrinsicHelper::SetVReg;
+ llvm::Function* func = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ int v_reg = SRegToVReg(cu, s_reg);
+ llvm::Value* table_slot = cu->irb->getInt32(v_reg);
+ llvm::Value* args[] = { table_slot, val };
+ cu->irb->CreateCall(func, args);
+}
+
+// Replace the placeholder value with the real definition
+static void DefineValueOnly(CompilationUnit* cu, llvm::Value* val, int s_reg)
+{
+ llvm::Value* placeholder = GetLLVMValue(cu, s_reg);
+ if (placeholder == NULL) {
+    // This can happen when an instruction is rewritten due to a verification failure
+ LOG(WARNING) << "Null placeholder";
+ return;
+ }
+ placeholder->replaceAllUsesWith(val);
+ val->takeName(placeholder);
+ cu->llvm_values.elem_list[s_reg] = reinterpret_cast<uintptr_t>(val);
+ llvm::Instruction* inst = llvm::dyn_cast<llvm::Instruction>(placeholder);
+ DCHECK(inst != NULL);
+ inst->eraseFromParent();
+
+}
+
+static void DefineValue(CompilationUnit* cu, llvm::Value* val, int s_reg)
+{
+ DefineValueOnly(cu, val, s_reg);
+ SetVregOnValue(cu, val, s_reg);
+}
+
+static llvm::Type* LlvmTypeFromLocRec(CompilationUnit* cu, RegLocation loc)
+{
+ llvm::Type* res = NULL;
+ if (loc.wide) {
+ if (loc.fp)
+ res = cu->irb->getDoubleTy();
+ else
+ res = cu->irb->getInt64Ty();
+ } else {
+ if (loc.fp) {
+ res = cu->irb->getFloatTy();
+ } else {
+ if (loc.ref)
+ res = cu->irb->GetJObjectTy();
+ else
+ res = cu->irb->getInt32Ty();
+ }
+ }
+ return res;
+}
+
+/* Create an in-memory RegLocation from an llvm Value. */
+static void CreateLocFromValue(CompilationUnit* cu, llvm::Value* val)
+{
+  // NOTE: llvm takes shortcuts with c_str() - convert to std::string first
+ std::string s(val->getName().str());
+ const char* val_name = s.c_str();
+ SafeMap<llvm::Value*, RegLocation>::iterator it = cu->loc_map.find(val);
+ DCHECK(it == cu->loc_map.end()) << " - already defined: " << val_name;
+ int base_sreg = INVALID_SREG;
+ int subscript = -1;
+ sscanf(val_name, "v%d_%d", &base_sreg, &subscript);
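+  // e.g. (illustrative) a value named "v3_2" yields base_sreg = 3, subscript = 2;
+  // the special name "method" is handled below.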
+ if ((base_sreg == INVALID_SREG) && (!strcmp(val_name, "method"))) {
+ base_sreg = SSA_METHOD_BASEREG;
+ subscript = 0;
+ }
+ DCHECK_NE(base_sreg, INVALID_SREG);
+ DCHECK_NE(subscript, -1);
+ // TODO: redo during C++'ification
+ RegLocation loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0, INVALID_REG,
+ INVALID_REG, INVALID_SREG, INVALID_SREG};
+ llvm::Type* ty = val->getType();
+ loc.wide = ((ty == cu->irb->getInt64Ty()) ||
+ (ty == cu->irb->getDoubleTy()));
+ loc.defined = true;
+ loc.home = false; // May change during promotion
+ loc.s_reg_low = base_sreg;
+ loc.orig_sreg = cu->loc_map.size();
+ PromotionMap p_map = cu->promotion_map[base_sreg];
+ if (ty == cu->irb->getFloatTy()) {
+ loc.fp = true;
+ if (p_map.fp_location == kLocPhysReg) {
+ loc.low_reg = p_map.FpReg;
+ loc.location = kLocPhysReg;
+ loc.home = true;
+ }
+ } else if (ty == cu->irb->getDoubleTy()) {
+ loc.fp = true;
+ PromotionMap p_map_high = cu->promotion_map[base_sreg + 1];
+ if ((p_map.fp_location == kLocPhysReg) &&
+ (p_map_high.fp_location == kLocPhysReg) &&
+ ((p_map.FpReg & 0x1) == 0) &&
+ (p_map.FpReg + 1 == p_map_high.FpReg)) {
+ loc.low_reg = p_map.FpReg;
+ loc.high_reg = p_map_high.FpReg;
+ loc.location = kLocPhysReg;
+ loc.home = true;
+ }
+ } else if (ty == cu->irb->GetJObjectTy()) {
+ loc.ref = true;
+ if (p_map.core_location == kLocPhysReg) {
+ loc.low_reg = p_map.core_reg;
+ loc.location = kLocPhysReg;
+ loc.home = true;
+ }
+ } else if (ty == cu->irb->getInt64Ty()) {
+ loc.core = true;
+ PromotionMap p_map_high = cu->promotion_map[base_sreg + 1];
+ if ((p_map.core_location == kLocPhysReg) &&
+ (p_map_high.core_location == kLocPhysReg)) {
+ loc.low_reg = p_map.core_reg;
+ loc.high_reg = p_map_high.core_reg;
+ loc.location = kLocPhysReg;
+ loc.home = true;
+ }
+ } else {
+ loc.core = true;
+ if (p_map.core_location == kLocPhysReg) {
+ loc.low_reg = p_map.core_reg;
+ loc.location = kLocPhysReg;
+ loc.home = true;
+ }
+ }
+
+ if (cu->verbose && loc.home) {
+ if (loc.wide) {
+ LOG(INFO) << "Promoted wide " << s << " to regs " << loc.low_reg << "/" << loc.high_reg;
+ } else {
+ LOG(INFO) << "Promoted " << s << " to reg " << loc.low_reg;
+ }
+ }
+ cu->loc_map.Put(val, loc);
+}
+
+static void InitIR(CompilationUnit* cu)
+{
+ LLVMInfo* llvm_info = cu->llvm_info;
+ if (llvm_info == NULL) {
+ CompilerTls* tls = cu->compiler->GetTls();
+ CHECK(tls != NULL);
+ llvm_info = static_cast<LLVMInfo*>(tls->GetLLVMInfo());
+ if (llvm_info == NULL) {
+ llvm_info = new LLVMInfo();
+ tls->SetLLVMInfo(llvm_info);
+ }
+ }
+ cu->context = llvm_info->GetLLVMContext();
+ cu->module = llvm_info->GetLLVMModule();
+ cu->intrinsic_helper = llvm_info->GetIntrinsicHelper();
+ cu->irb = llvm_info->GetIRBuilder();
+}
+
+static const char* LlvmSSAName(CompilationUnit* cu, int ssa_reg) {
+ return GET_ELEM_N(cu->ssa_strings, char*, ssa_reg);
+}
+
+llvm::BasicBlock* FindCaseTarget(CompilationUnit* cu, uint32_t vaddr)
+{
+ BasicBlock* bb = FindBlock(cu, vaddr);
+ DCHECK(bb != NULL);
+ return GetLLVMBlock(cu, bb->id);
+}
+
+static void ConvertPackedSwitch(CompilationUnit* cu, BasicBlock* bb,
+ int32_t table_offset, RegLocation rl_src)
+{
+ const Instruction::PackedSwitchPayload* payload =
+ reinterpret_cast<const Instruction::PackedSwitchPayload*>(
+ cu->insns + cu->current_dalvik_offset + table_offset);
+
+ llvm::Value* value = GetLLVMValue(cu, rl_src.orig_sreg);
+
+ llvm::SwitchInst* sw =
+ cu->irb->CreateSwitch(value, GetLLVMBlock(cu, bb->fall_through->id),
+ payload->case_count);
+
+ for (uint16_t i = 0; i < payload->case_count; ++i) {
+ llvm::BasicBlock* llvm_bb =
+ FindCaseTarget(cu, cu->current_dalvik_offset + payload->targets[i]);
+ sw->addCase(cu->irb->getInt32(payload->first_key + i), llvm_bb);
+ }
+ llvm::MDNode* switch_node =
+ llvm::MDNode::get(*cu->context, cu->irb->getInt32(table_offset));
+ sw->setMetadata("SwitchTable", switch_node);
+ bb->taken = NULL;
+ bb->fall_through = NULL;
+}
+
+static void ConvertSparseSwitch(CompilationUnit* cu, BasicBlock* bb,
+ int32_t table_offset, RegLocation rl_src)
+{
+ const Instruction::SparseSwitchPayload* payload =
+ reinterpret_cast<const Instruction::SparseSwitchPayload*>(
+ cu->insns + cu->current_dalvik_offset + table_offset);
+
+ const int32_t* keys = payload->GetKeys();
+ const int32_t* targets = payload->GetTargets();
+
+ llvm::Value* value = GetLLVMValue(cu, rl_src.orig_sreg);
+
+ llvm::SwitchInst* sw =
+ cu->irb->CreateSwitch(value, GetLLVMBlock(cu, bb->fall_through->id),
+ payload->case_count);
+
+ for (size_t i = 0; i < payload->case_count; ++i) {
+ llvm::BasicBlock* llvm_bb =
+ FindCaseTarget(cu, cu->current_dalvik_offset + targets[i]);
+ sw->addCase(cu->irb->getInt32(keys[i]), llvm_bb);
+ }
+ llvm::MDNode* switch_node =
+ llvm::MDNode::get(*cu->context, cu->irb->getInt32(table_offset));
+ sw->setMetadata("SwitchTable", switch_node);
+ bb->taken = NULL;
+ bb->fall_through = NULL;
+}
+
+static void ConvertSget(CompilationUnit* cu, int32_t field_index,
+ greenland::IntrinsicHelper::IntrinsicId id, RegLocation rl_dest)
+{
+ llvm::Constant* field_idx = cu->irb->getInt32(field_index);
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::Value* res = cu->irb->CreateCall(intr, field_idx);
+ DefineValue(cu, res, rl_dest.orig_sreg);
+}
+
+static void ConvertSput(CompilationUnit* cu, int32_t field_index,
+ greenland::IntrinsicHelper::IntrinsicId id, RegLocation rl_src)
+{
+ llvm::SmallVector<llvm::Value*, 2> args;
+ args.push_back(cu->irb->getInt32(field_index));
+ args.push_back(GetLLVMValue(cu, rl_src.orig_sreg));
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ cu->irb->CreateCall(intr, args);
+}
+
+static void ConvertFillArrayData(CompilationUnit* cu, int32_t offset, RegLocation rl_array)
+{
+ greenland::IntrinsicHelper::IntrinsicId id;
+ id = greenland::IntrinsicHelper::HLFillArrayData;
+ llvm::SmallVector<llvm::Value*, 2> args;
+ args.push_back(cu->irb->getInt32(offset));
+ args.push_back(GetLLVMValue(cu, rl_array.orig_sreg));
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ cu->irb->CreateCall(intr, args);
+}
+
+static llvm::Value* EmitConst(CompilationUnit* cu, llvm::ArrayRef<llvm::Value*> src,
+ RegLocation loc)
+{
+ greenland::IntrinsicHelper::IntrinsicId id;
+ if (loc.wide) {
+ if (loc.fp) {
+ id = greenland::IntrinsicHelper::ConstDouble;
+ } else {
+ id = greenland::IntrinsicHelper::ConstLong;
+ }
+ } else {
+ if (loc.fp) {
+ id = greenland::IntrinsicHelper::ConstFloat;
+ } else if (loc.ref) {
+ id = greenland::IntrinsicHelper::ConstObj;
+ } else {
+ id = greenland::IntrinsicHelper::ConstInt;
+ }
+ }
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ return cu->irb->CreateCall(intr, src);
+}
+
+static void EmitPopShadowFrame(CompilationUnit* cu)
+{
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(
+ greenland::IntrinsicHelper::PopShadowFrame);
+ cu->irb->CreateCall(intr);
+}
+
+static llvm::Value* EmitCopy(CompilationUnit* cu, llvm::ArrayRef<llvm::Value*> src,
+ RegLocation loc)
+{
+ greenland::IntrinsicHelper::IntrinsicId id;
+ if (loc.wide) {
+ if (loc.fp) {
+ id = greenland::IntrinsicHelper::CopyDouble;
+ } else {
+ id = greenland::IntrinsicHelper::CopyLong;
+ }
+ } else {
+ if (loc.fp) {
+ id = greenland::IntrinsicHelper::CopyFloat;
+ } else if (loc.ref) {
+ id = greenland::IntrinsicHelper::CopyObj;
+ } else {
+ id = greenland::IntrinsicHelper::CopyInt;
+ }
+ }
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ return cu->irb->CreateCall(intr, src);
+}
+
+static void ConvertMoveException(CompilationUnit* cu, RegLocation rl_dest)
+{
+ llvm::Function* func = cu->intrinsic_helper->GetIntrinsicFunction(
+ greenland::IntrinsicHelper::GetException);
+ llvm::Value* res = cu->irb->CreateCall(func);
+ DefineValue(cu, res, rl_dest.orig_sreg);
+}
+
+static void ConvertThrow(CompilationUnit* cu, RegLocation rl_src)
+{
+ llvm::Value* src = GetLLVMValue(cu, rl_src.orig_sreg);
+ llvm::Function* func = cu->intrinsic_helper->GetIntrinsicFunction(
+ greenland::IntrinsicHelper::HLThrowException);
+ cu->irb->CreateCall(func, src);
+}
+
+static void ConvertMonitorEnterExit(CompilationUnit* cu, int opt_flags,
+ greenland::IntrinsicHelper::IntrinsicId id,
+ RegLocation rl_src)
+{
+ llvm::SmallVector<llvm::Value*, 2> args;
+ args.push_back(cu->irb->getInt32(opt_flags));
+ args.push_back(GetLLVMValue(cu, rl_src.orig_sreg));
+ llvm::Function* func = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ cu->irb->CreateCall(func, args);
+}
+
+static void ConvertArrayLength(CompilationUnit* cu, int opt_flags,
+ RegLocation rl_dest, RegLocation rl_src)
+{
+ llvm::SmallVector<llvm::Value*, 2> args;
+ args.push_back(cu->irb->getInt32(opt_flags));
+ args.push_back(GetLLVMValue(cu, rl_src.orig_sreg));
+ llvm::Function* func = cu->intrinsic_helper->GetIntrinsicFunction(
+ greenland::IntrinsicHelper::OptArrayLength);
+ llvm::Value* res = cu->irb->CreateCall(func, args);
+ DefineValue(cu, res, rl_dest.orig_sreg);
+}
+
+static void EmitSuspendCheck(CompilationUnit* cu)
+{
+ greenland::IntrinsicHelper::IntrinsicId id =
+ greenland::IntrinsicHelper::CheckSuspend;
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ cu->irb->CreateCall(intr);
+}
+
+static llvm::Value* ConvertCompare(CompilationUnit* cu, ConditionCode cc,
+ llvm::Value* src1, llvm::Value* src2)
+{
+ llvm::Value* res = NULL;
+ DCHECK_EQ(src1->getType(), src2->getType());
+ switch(cc) {
+ case kCondEq: res = cu->irb->CreateICmpEQ(src1, src2); break;
+ case kCondNe: res = cu->irb->CreateICmpNE(src1, src2); break;
+ case kCondLt: res = cu->irb->CreateICmpSLT(src1, src2); break;
+ case kCondGe: res = cu->irb->CreateICmpSGE(src1, src2); break;
+ case kCondGt: res = cu->irb->CreateICmpSGT(src1, src2); break;
+ case kCondLe: res = cu->irb->CreateICmpSLE(src1, src2); break;
+ default: LOG(FATAL) << "Unexpected cc value " << cc;
+ }
+ return res;
+}
+
+static void ConvertCompareAndBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+ ConditionCode cc, RegLocation rl_src1, RegLocation rl_src2)
+{
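+ // A taken-branch target at or before the current offset is a potential loop
+ // back-edge, so emit a suspend check before branching.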
+ if (bb->taken->start_offset <= mir->offset) {
+ EmitSuspendCheck(cu);
+ }
+ llvm::Value* src1 = GetLLVMValue(cu, rl_src1.orig_sreg);
+ llvm::Value* src2 = GetLLVMValue(cu, rl_src2.orig_sreg);
+ llvm::Value* cond_value = ConvertCompare(cu, cc, src1, src2);
+ cond_value->setName(StringPrintf("t%d", cu->temp_name++));
+ cu->irb->CreateCondBr(cond_value, GetLLVMBlock(cu, bb->taken->id),
+ GetLLVMBlock(cu, bb->fall_through->id));
+ // Don't redo the fallthrough branch in the BB driver
+ bb->fall_through = NULL;
+}
+
+static void ConvertCompareZeroAndBranch(CompilationUnit* cu, BasicBlock* bb,
+ MIR* mir, ConditionCode cc, RegLocation rl_src1)
+{
+ if (bb->taken->start_offset <= mir->offset) {
+ EmitSuspendCheck(cu);
+ }
+ llvm::Value* src1 = GetLLVMValue(cu, rl_src1.orig_sreg);
+ llvm::Value* src2;
+ if (rl_src1.ref) {
+ src2 = cu->irb->GetJNull();
+ } else {
+ src2 = cu->irb->getInt32(0);
+ }
+ llvm::Value* cond_value = ConvertCompare(cu, cc, src1, src2);
+ cu->irb->CreateCondBr(cond_value, GetLLVMBlock(cu, bb->taken->id),
+ GetLLVMBlock(cu, bb->fall_through->id));
+ // Don't redo the fallthrough branch in the BB driver
+ bb->fall_through = NULL;
+}
+
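+ // Integer divide and remainder are emitted as DivInt/RemInt/DivLong/RemLong
+ // intrinsic calls rather than LLVM sdiv/srem, so that lowering can supply the
+ // divide-by-zero check Dalvik semantics require.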
+static llvm::Value* GenDivModOp(CompilationUnit* cu, bool is_div, bool is_long,
+ llvm::Value* src1, llvm::Value* src2)
+{
+ greenland::IntrinsicHelper::IntrinsicId id;
+ if (is_long) {
+ if (is_div) {
+ id = greenland::IntrinsicHelper::DivLong;
+ } else {
+ id = greenland::IntrinsicHelper::RemLong;
+ }
+ } else {
+ if (is_div) {
+ id = greenland::IntrinsicHelper::DivInt;
+ } else {
+ id = greenland::IntrinsicHelper::RemInt;
+ }
+ }
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::SmallVector<llvm::Value*, 2> args;
+ args.push_back(src1);
+ args.push_back(src2);
+ return cu->irb->CreateCall(intr, args);
+}
+
+static llvm::Value* GenArithOp(CompilationUnit* cu, OpKind op, bool is_long,
+ llvm::Value* src1, llvm::Value* src2)
+{
+ llvm::Value* res = NULL;
+ switch(op) {
+ case kOpAdd: res = cu->irb->CreateAdd(src1, src2); break;
+ case kOpSub: res = cu->irb->CreateSub(src1, src2); break;
+ case kOpRsub: res = cu->irb->CreateSub(src2, src1); break;
+ case kOpMul: res = cu->irb->CreateMul(src1, src2); break;
+ case kOpOr: res = cu->irb->CreateOr(src1, src2); break;
+ case kOpAnd: res = cu->irb->CreateAnd(src1, src2); break;
+ case kOpXor: res = cu->irb->CreateXor(src1, src2); break;
+ case kOpDiv: res = GenDivModOp(cu, true, is_long, src1, src2); break;
+ case kOpRem: res = GenDivModOp(cu, false, is_long, src1, src2); break;
+ case kOpLsl: res = cu->irb->CreateShl(src1, src2); break;
+ case kOpLsr: res = cu->irb->CreateLShr(src1, src2); break;
+ case kOpAsr: res = cu->irb->CreateAShr(src1, src2); break;
+ default:
+ LOG(FATAL) << "Invalid op " << op;
+ }
+ return res;
+}
+
+static void ConvertFPArithOp(CompilationUnit* cu, OpKind op, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
+{
+ llvm::Value* src1 = GetLLVMValue(cu, rl_src1.orig_sreg);
+ llvm::Value* src2 = GetLLVMValue(cu, rl_src2.orig_sreg);
+ llvm::Value* res = NULL;
+ switch(op) {
+ case kOpAdd: res = cu->irb->CreateFAdd(src1, src2); break;
+ case kOpSub: res = cu->irb->CreateFSub(src1, src2); break;
+ case kOpMul: res = cu->irb->CreateFMul(src1, src2); break;
+ case kOpDiv: res = cu->irb->CreateFDiv(src1, src2); break;
+ case kOpRem: res = cu->irb->CreateFRem(src1, src2); break;
+ default:
+ LOG(FATAL) << "Invalid op " << op;
+ }
+ DefineValue(cu, res, rl_dest.orig_sreg);
+}
+
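+ // Shifts are emitted as intrinsics; Dalvik uses only the low 5 bits (int) or
+ // 6 bits (long) of the shift count, hence the explicit '& 0x1f' masking where
+ // ConvertShiftLit is called below.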
+static void ConvertShift(CompilationUnit* cu, greenland::IntrinsicHelper::IntrinsicId id,
+ RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
+{
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::SmallVector<llvm::Value*, 2> args;
+ args.push_back(GetLLVMValue(cu, rl_src1.orig_sreg));
+ args.push_back(GetLLVMValue(cu, rl_src2.orig_sreg));
+ llvm::Value* res = cu->irb->CreateCall(intr, args);
+ DefineValue(cu, res, rl_dest.orig_sreg);
+}
+
+static void ConvertShiftLit(CompilationUnit* cu, greenland::IntrinsicHelper::IntrinsicId id,
+ RegLocation rl_dest, RegLocation rl_src, int shift_amount)
+{
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::SmallVector<llvm::Value*, 2> args;
+ args.push_back(GetLLVMValue(cu, rl_src.orig_sreg));
+ args.push_back(cu->irb->getInt32(shift_amount));
+ llvm::Value* res = cu->irb->CreateCall(intr, args);
+ DefineValue(cu, res, rl_dest.orig_sreg);
+}
+
+static void ConvertArithOp(CompilationUnit* cu, OpKind op, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
+{
+ llvm::Value* src1 = GetLLVMValue(cu, rl_src1.orig_sreg);
+ llvm::Value* src2 = GetLLVMValue(cu, rl_src2.orig_sreg);
+ DCHECK_EQ(src1->getType(), src2->getType());
+ llvm::Value* res = GenArithOp(cu, op, rl_dest.wide, src1, src2);
+ DefineValue(cu, res, rl_dest.orig_sreg);
+}
+
+static void ConvertArithOpLit(CompilationUnit* cu, OpKind op, RegLocation rl_dest,
+ RegLocation rl_src1, int32_t imm)
+{
+ llvm::Value* src1 = GetLLVMValue(cu, rl_src1.orig_sreg);
+ llvm::Value* src2 = cu->irb->getInt32(imm);
+ llvm::Value* res = GenArithOp(cu, op, rl_dest.wide, src1, src2);
+ DefineValue(cu, res, rl_dest.orig_sreg);
+}
+
+/*
+ * Process arguments for invoke. Note: this code is also used to
+ * collect and process arguments for FILLED_NEW_ARRAY and FILLED_NEW_ARRAY_RANGE.
+ * The requirements are similar.
+ */
+static void ConvertInvoke(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+ InvokeType invoke_type, bool is_range, bool is_filled_new_array)
+{
+ Codegen* cg = cu->cg.get();
+ CallInfo* info = cg->NewMemCallInfo(cu, bb, mir, invoke_type, is_range);
+ llvm::SmallVector<llvm::Value*, 10> args;
+ // Insert the invoke_type
+ args.push_back(cu->irb->getInt32(static_cast<int>(invoke_type)));
+ // Insert the method_idx
+ args.push_back(cu->irb->getInt32(info->index));
+ // Insert the optimization flags
+ args.push_back(cu->irb->getInt32(info->opt_flags));
+ // Now, insert the actual arguments
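+ // A wide argument occupies two Dalvik register slots but only one SSA value,
+ // so step over the high half.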
+ for (int i = 0; i < info->num_arg_words;) {
+ llvm::Value* val = GetLLVMValue(cu, info->args[i].orig_sreg);
+ args.push_back(val);
+ i += info->args[i].wide ? 2 : 1;
+ }
+ /*
+ * Choose the invoke return type based on actual usage. Note: may
+ * be different than shorty. For example, if a function return value
+ * is not used, we'll treat this as a void invoke.
+ */
+ greenland::IntrinsicHelper::IntrinsicId id;
+ if (is_filled_new_array) {
+ id = greenland::IntrinsicHelper::HLFilledNewArray;
+ } else if (info->result.location == kLocInvalid) {
+ id = greenland::IntrinsicHelper::HLInvokeVoid;
+ } else {
+ if (info->result.wide) {
+ if (info->result.fp) {
+ id = greenland::IntrinsicHelper::HLInvokeDouble;
+ } else {
+ id = greenland::IntrinsicHelper::HLInvokeLong;
+ }
+ } else if (info->result.ref) {
+ id = greenland::IntrinsicHelper::HLInvokeObj;
+ } else if (info->result.fp) {
+ id = greenland::IntrinsicHelper::HLInvokeFloat;
+ } else {
+ id = greenland::IntrinsicHelper::HLInvokeInt;
+ }
+ }
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::Value* res = cu->irb->CreateCall(intr, args);
+ if (info->result.location != kLocInvalid) {
+ DefineValue(cu, res, info->result.orig_sreg);
+ }
+}
+
+static void ConvertConstObject(CompilationUnit* cu, uint32_t idx,
+ greenland::IntrinsicHelper::IntrinsicId id, RegLocation rl_dest)
+{
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::Value* index = cu->irb->getInt32(idx);
+ llvm::Value* res = cu->irb->CreateCall(intr, index);
+ DefineValue(cu, res, rl_dest.orig_sreg);
+}
+
+static void ConvertCheckCast(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_src)
+{
+ greenland::IntrinsicHelper::IntrinsicId id;
+ id = greenland::IntrinsicHelper::HLCheckCast;
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::SmallVector<llvm::Value*, 2> args;
+ args.push_back(cu->irb->getInt32(type_idx));
+ args.push_back(GetLLVMValue(cu, rl_src.orig_sreg));
+ cu->irb->CreateCall(intr, args);
+}
+
+static void ConvertNewInstance(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest)
+{
+ greenland::IntrinsicHelper::IntrinsicId id;
+ id = greenland::IntrinsicHelper::NewInstance;
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::Value* index = cu->irb->getInt32(type_idx);
+ llvm::Value* res = cu->irb->CreateCall(intr, index);
+ DefineValue(cu, res, rl_dest.orig_sreg);
+}
+
+static void ConvertNewArray(CompilationUnit* cu, uint32_t type_idx,
+ RegLocation rl_dest, RegLocation rl_src)
+{
+ greenland::IntrinsicHelper::IntrinsicId id;
+ id = greenland::IntrinsicHelper::NewArray;
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::SmallVector<llvm::Value*, 2> args;
+ args.push_back(cu->irb->getInt32(type_idx));
+ args.push_back(GetLLVMValue(cu, rl_src.orig_sreg));
+ llvm::Value* res = cu->irb->CreateCall(intr, args);
+ DefineValue(cu, res, rl_dest.orig_sreg);
+}
+
+static void ConvertAget(CompilationUnit* cu, int opt_flags,
+ greenland::IntrinsicHelper::IntrinsicId id,
+ RegLocation rl_dest, RegLocation rl_array, RegLocation rl_index)
+{
+ llvm::SmallVector<llvm::Value*, 3> args;
+ args.push_back(cu->irb->getInt32(opt_flags));
+ args.push_back(GetLLVMValue(cu, rl_array.orig_sreg));
+ args.push_back(GetLLVMValue(cu, rl_index.orig_sreg));
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::Value* res = cu->irb->CreateCall(intr, args);
+ DefineValue(cu, res, rl_dest.orig_sreg);
+}
+
+static void ConvertAput(CompilationUnit* cu, int opt_flags,
+ greenland::IntrinsicHelper::IntrinsicId id,
+ RegLocation rl_src, RegLocation rl_array, RegLocation rl_index)
+{
+ llvm::SmallVector<llvm::Value*, 4> args;
+ args.push_back(cu->irb->getInt32(opt_flags));
+ args.push_back(GetLLVMValue(cu, rl_src.orig_sreg));
+ args.push_back(GetLLVMValue(cu, rl_array.orig_sreg));
+ args.push_back(GetLLVMValue(cu, rl_index.orig_sreg));
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ cu->irb->CreateCall(intr, args);
+}
+
+static void ConvertIget(CompilationUnit* cu, int opt_flags,
+ greenland::IntrinsicHelper::IntrinsicId id,
+ RegLocation rl_dest, RegLocation rl_obj, int field_index)
+{
+ llvm::SmallVector<llvm::Value*, 3> args;
+ args.push_back(cu->irb->getInt32(opt_flags));
+ args.push_back(GetLLVMValue(cu, rl_obj.orig_sreg));
+ args.push_back(cu->irb->getInt32(field_index));
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::Value* res = cu->irb->CreateCall(intr, args);
+ DefineValue(cu, res, rl_dest.orig_sreg);
+}
+
+static void ConvertIput(CompilationUnit* cu, int opt_flags,
+ greenland::IntrinsicHelper::IntrinsicId id,
+ RegLocation rl_src, RegLocation rl_obj, int field_index)
+{
+ llvm::SmallVector<llvm::Value*, 4> args;
+ args.push_back(cu->irb->getInt32(opt_flags));
+ args.push_back(GetLLVMValue(cu, rl_src.orig_sreg));
+ args.push_back(GetLLVMValue(cu, rl_obj.orig_sreg));
+ args.push_back(cu->irb->getInt32(field_index));
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ cu->irb->CreateCall(intr, args);
+}
+
+static void ConvertInstanceOf(CompilationUnit* cu, uint32_t type_idx,
+ RegLocation rl_dest, RegLocation rl_src)
+{
+ greenland::IntrinsicHelper::IntrinsicId id;
+ id = greenland::IntrinsicHelper::InstanceOf;
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::SmallVector<llvm::Value*, 2> args;
+ args.push_back(cu->irb->getInt32(type_idx));
+ args.push_back(GetLLVMValue(cu, rl_src.orig_sreg));
+ llvm::Value* res = cu->irb->CreateCall(intr, args);
+ DefineValue(cu, res, rl_dest.orig_sreg);
+}
+
+static void ConvertIntToLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+{
+ llvm::Value* res = cu->irb->CreateSExt(GetLLVMValue(cu, rl_src.orig_sreg),
+ cu->irb->getInt64Ty());
+ DefineValue(cu, res, rl_dest.orig_sreg);
+}
+
+static void ConvertLongToInt(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+{
+ llvm::Value* src = GetLLVMValue(cu, rl_src.orig_sreg);
+ llvm::Value* res = cu->irb->CreateTrunc(src, cu->irb->getInt32Ty());
+ DefineValue(cu, res, rl_dest.orig_sreg);
+}
+
+static void ConvertFloatToDouble(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+{
+ llvm::Value* src = GetLLVMValue(cu, rl_src.orig_sreg);
+ llvm::Value* res = cu->irb->CreateFPExt(src, cu->irb->getDoubleTy());
+ DefineValue(cu, res, rl_dest.orig_sreg);
+}
+
+static void ConvertDoubleToFloat(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+{
+ llvm::Value* src = GetLLVMValue(cu, rl_src.orig_sreg);
+ llvm::Value* res = cu->irb->CreateFPTrunc(src, cu->irb->getFloatTy());
+ DefineValue(cu, res, rl_dest.orig_sreg);
+}
+
+static void ConvertWideComparison(CompilationUnit* cu,
+ greenland::IntrinsicHelper::IntrinsicId id,
+ RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2)
+{
+ DCHECK_EQ(rl_src1.fp, rl_src2.fp);
+ DCHECK_EQ(rl_src1.wide, rl_src2.wide);
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::SmallVector<llvm::Value*, 2> args;
+ args.push_back(GetLLVMValue(cu, rl_src1.orig_sreg));
+ args.push_back(GetLLVMValue(cu, rl_src2.orig_sreg));
+ llvm::Value* res = cu->irb->CreateCall(intr, args);
+ DefineValue(cu, res, rl_dest.orig_sreg);
+}
+
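+ // Sub-word values are kept in 32-bit containers (see RemapShorty below), so
+ // int-to-byte/char/short go through intrinsics here rather than raw LLVM trunc/ext.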
+static void ConvertIntNarrowing(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src,
+ greenland::IntrinsicHelper::IntrinsicId id)
+{
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::Value* res =
+ cu->irb->CreateCall(intr, GetLLVMValue(cu, rl_src.orig_sreg));
+ DefineValue(cu, res, rl_dest.orig_sreg);
+}
+
+static void ConvertNeg(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+{
+ llvm::Value* res = cu->irb->CreateNeg(GetLLVMValue(cu, rl_src.orig_sreg));
+ DefineValue(cu, res, rl_dest.orig_sreg);
+}
+
+static void ConvertIntToFP(CompilationUnit* cu, llvm::Type* ty, RegLocation rl_dest,
+ RegLocation rl_src)
+{
+ llvm::Value* res =
+ cu->irb->CreateSIToFP(GetLLVMValue(cu, rl_src.orig_sreg), ty);
+ DefineValue(cu, res, rl_dest.orig_sreg);
+}
+
+static void ConvertFPToInt(CompilationUnit* cu, greenland::IntrinsicHelper::IntrinsicId id,
+ RegLocation rl_dest,
+ RegLocation rl_src)
+{
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::Value* res = cu->irb->CreateCall(intr, GetLLVMValue(cu, rl_src.orig_sreg));
+ DefineValue(cu, res, rl_dest.orig_sreg);
+}
+
+
+static void ConvertNegFP(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+{
+ llvm::Value* res =
+ cu->irb->CreateFNeg(GetLLVMValue(cu, rl_src.orig_sreg));
+ DefineValue(cu, res, rl_dest.orig_sreg);
+}
+
+static void ConvertNot(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+{
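+ // LLVM has no 'not' instruction; xor with all-ones yields the bitwise
+ // complement for both int and long operands.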
+ llvm::Value* src = GetLLVMValue(cu, rl_src.orig_sreg);
+ llvm::Value* res = cu->irb->CreateXor(src, static_cast<uint64_t>(-1));
+ DefineValue(cu, res, rl_dest.orig_sreg);
+}
+
+/*
+ * Target-independent code generation. Use only high-level
+ * load/store utilities here, or target-dependent genXX() handlers
+ * when necessary.
+ */
+static bool ConvertMIRNode(CompilationUnit* cu, MIR* mir, BasicBlock* bb,
+ llvm::BasicBlock* llvm_bb, LIR* label_list)
+{
+ bool res = false; // Assume success; set to true if the opcode is not handled.
+ RegLocation rl_src[3];
+ RegLocation rl_dest = GetBadLoc();
+ Instruction::Code opcode = mir->dalvikInsn.opcode;
+ int op_val = opcode;
+ uint32_t vB = mir->dalvikInsn.vB;
+ uint32_t vC = mir->dalvikInsn.vC;
+ int opt_flags = mir->optimization_flags;
+
+ if (cu->verbose) {
+ if (op_val < kMirOpFirst) {
+ LOG(INFO) << ".. " << Instruction::Name(opcode) << " 0x" << std::hex << op_val;
+ } else {
+ LOG(INFO) << extended_mir_op_names[op_val - kMirOpFirst] << " 0x" << std::hex << op_val;
+ }
+ }
+
+ /* Prep Src and Dest locations */
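+ // The dataflow attributes record, per opcode, which of vA/vB/vC are uses or
+ // defs and whether each operand is wide; decode them to build the locations.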
+ int next_sreg = 0;
+ int next_loc = 0;
+ int attrs = oat_data_flow_attributes[opcode];
+ rl_src[0] = rl_src[1] = rl_src[2] = GetBadLoc();
+ if (attrs & DF_UA) {
+ if (attrs & DF_A_WIDE) {
+ rl_src[next_loc++] = GetSrcWide(cu, mir, next_sreg);
+ next_sreg += 2;
+ } else {
+ rl_src[next_loc++] = GetSrc(cu, mir, next_sreg);
+ next_sreg++;
+ }
+ }
+ if (attrs & DF_UB) {
+ if (attrs & DF_B_WIDE) {
+ rl_src[next_loc++] = GetSrcWide(cu, mir, next_sreg);
+ next_sreg += 2;
+ } else {
+ rl_src[next_loc++] = GetSrc(cu, mir, next_sreg);
+ next_sreg++;
+ }
+ }
+ if (attrs & DF_UC) {
+ if (attrs & DF_C_WIDE) {
+ rl_src[next_loc++] = GetSrcWide(cu, mir, next_sreg);
+ } else {
+ rl_src[next_loc++] = GetSrc(cu, mir, next_sreg);
+ }
+ }
+ if (attrs & DF_DA) {
+ if (attrs & DF_A_WIDE) {
+ rl_dest = GetDestWide(cu, mir);
+ } else {
+ rl_dest = GetDest(cu, mir);
+ }
+ }
+
+ switch (opcode) {
+ case Instruction::NOP:
+ break;
+
+ case Instruction::MOVE:
+ case Instruction::MOVE_OBJECT:
+ case Instruction::MOVE_16:
+ case Instruction::MOVE_OBJECT_16:
+ case Instruction::MOVE_OBJECT_FROM16:
+ case Instruction::MOVE_FROM16:
+ case Instruction::MOVE_WIDE:
+ case Instruction::MOVE_WIDE_16:
+ case Instruction::MOVE_WIDE_FROM16: {
+ /*
+ * Moves/copies are meaningless in pure SSA register form,
+ * but we need to preserve them for the conversion back into
+ * MIR (at least until we stop using the Dalvik register maps).
+ * Insert a dummy intrinsic copy call, which will be recognized
+ * by the quick path and removed by the portable path.
+ */
+ llvm::Value* src = GetLLVMValue(cu, rl_src[0].orig_sreg);
+ llvm::Value* res = EmitCopy(cu, src, rl_dest);
+ DefineValue(cu, res, rl_dest.orig_sreg);
+ }
+ break;
+
+ case Instruction::CONST:
+ case Instruction::CONST_4:
+ case Instruction::CONST_16: {
+ llvm::Constant* imm_value = cu->irb->GetJInt(vB);
+ llvm::Value* res = EmitConst(cu, imm_value, rl_dest);
+ DefineValue(cu, res, rl_dest.orig_sreg);
+ }
+ break;
+
+ case Instruction::CONST_WIDE_16:
+ case Instruction::CONST_WIDE_32: {
+ // Sign extend to 64 bits
+ int64_t imm = static_cast<int32_t>(vB);
+ llvm::Constant* imm_value = cu->irb->GetJLong(imm);
+ llvm::Value* res = EmitConst(cu, imm_value, rl_dest);
+ DefineValue(cu, res, rl_dest.orig_sreg);
+ }
+ break;
+
+ case Instruction::CONST_HIGH16: {
+ llvm::Constant* imm_value = cu->irb->GetJInt(vB << 16);
+ llvm::Value* res = EmitConst(cu, imm_value, rl_dest);
+ DefineValue(cu, res, rl_dest.orig_sreg);
+ }
+ break;
+
+ case Instruction::CONST_WIDE: {
+ llvm::Constant* imm_value =
+ cu->irb->GetJLong(mir->dalvikInsn.vB_wide);
+ llvm::Value* res = EmitConst(cu, imm_value, rl_dest);
+ DefineValue(cu, res, rl_dest.orig_sreg);
+ }
+ break;
+ case Instruction::CONST_WIDE_HIGH16: {
+ int64_t imm = static_cast<int64_t>(vB) << 48;
+ llvm::Constant* imm_value = cu->irb->GetJLong(imm);
+ llvm::Value* res = EmitConst(cu, imm_value, rl_dest);
+ DefineValue(cu, res, rl_dest.orig_sreg);
+ }
+ break;
+
+ case Instruction::SPUT_OBJECT:
+ ConvertSput(cu, vB, greenland::IntrinsicHelper::HLSputObject,
+ rl_src[0]);
+ break;
+ case Instruction::SPUT:
+ if (rl_src[0].fp) {
+ ConvertSput(cu, vB, greenland::IntrinsicHelper::HLSputFloat,
+ rl_src[0]);
+ } else {
+ ConvertSput(cu, vB, greenland::IntrinsicHelper::HLSput, rl_src[0]);
+ }
+ break;
+ case Instruction::SPUT_BOOLEAN:
+ ConvertSput(cu, vB, greenland::IntrinsicHelper::HLSputBoolean,
+ rl_src[0]);
+ break;
+ case Instruction::SPUT_BYTE:
+ ConvertSput(cu, vB, greenland::IntrinsicHelper::HLSputByte, rl_src[0]);
+ break;
+ case Instruction::SPUT_CHAR:
+ ConvertSput(cu, vB, greenland::IntrinsicHelper::HLSputChar, rl_src[0]);
+ break;
+ case Instruction::SPUT_SHORT:
+ ConvertSput(cu, vB, greenland::IntrinsicHelper::HLSputShort, rl_src[0]);
+ break;
+ case Instruction::SPUT_WIDE:
+ if (rl_src[0].fp) {
+ ConvertSput(cu, vB, greenland::IntrinsicHelper::HLSputDouble,
+ rl_src[0]);
+ } else {
+ ConvertSput(cu, vB, greenland::IntrinsicHelper::HLSputWide,
+ rl_src[0]);
+ }
+ break;
+
+ case Instruction::SGET_OBJECT:
+ ConvertSget(cu, vB, greenland::IntrinsicHelper::HLSgetObject, rl_dest);
+ break;
+ case Instruction::SGET:
+ if (rl_dest.fp) {
+ ConvertSget(cu, vB, greenland::IntrinsicHelper::HLSgetFloat, rl_dest);
+ } else {
+ ConvertSget(cu, vB, greenland::IntrinsicHelper::HLSget, rl_dest);
+ }
+ break;
+ case Instruction::SGET_BOOLEAN:
+ ConvertSget(cu, vB, greenland::IntrinsicHelper::HLSgetBoolean, rl_dest);
+ break;
+ case Instruction::SGET_BYTE:
+ ConvertSget(cu, vB, greenland::IntrinsicHelper::HLSgetByte, rl_dest);
+ break;
+ case Instruction::SGET_CHAR:
+ ConvertSget(cu, vB, greenland::IntrinsicHelper::HLSgetChar, rl_dest);
+ break;
+ case Instruction::SGET_SHORT:
+ ConvertSget(cu, vB, greenland::IntrinsicHelper::HLSgetShort, rl_dest);
+ break;
+ case Instruction::SGET_WIDE:
+ if (rl_dest.fp) {
+ ConvertSget(cu, vB, greenland::IntrinsicHelper::HLSgetDouble,
+ rl_dest);
+ } else {
+ ConvertSget(cu, vB, greenland::IntrinsicHelper::HLSgetWide, rl_dest);
+ }
+ break;
+
+ case Instruction::RETURN_WIDE:
+ case Instruction::RETURN:
+ case Instruction::RETURN_OBJECT: {
+ if (!(cu->attrs & METHOD_IS_LEAF)) {
+ EmitSuspendCheck(cu);
+ }
+ EmitPopShadowFrame(cu);
+ cu->irb->CreateRet(GetLLVMValue(cu, rl_src[0].orig_sreg));
+ DCHECK(bb->terminated_by_return);
+ }
+ break;
+
+ case Instruction::RETURN_VOID: {
+ if (!(cu->attrs & METHOD_IS_LEAF)) {
+ EmitSuspendCheck(cu);
+ }
+ EmitPopShadowFrame(cu);
+ cu->irb->CreateRetVoid();
+ DCHECK(bb->terminated_by_return);
+ }
+ break;
+
+ case Instruction::IF_EQ:
+ ConvertCompareAndBranch(cu, bb, mir, kCondEq, rl_src[0], rl_src[1]);
+ break;
+ case Instruction::IF_NE:
+ ConvertCompareAndBranch(cu, bb, mir, kCondNe, rl_src[0], rl_src[1]);
+ break;
+ case Instruction::IF_LT:
+ ConvertCompareAndBranch(cu, bb, mir, kCondLt, rl_src[0], rl_src[1]);
+ break;
+ case Instruction::IF_GE:
+ ConvertCompareAndBranch(cu, bb, mir, kCondGe, rl_src[0], rl_src[1]);
+ break;
+ case Instruction::IF_GT:
+ ConvertCompareAndBranch(cu, bb, mir, kCondGt, rl_src[0], rl_src[1]);
+ break;
+ case Instruction::IF_LE:
+ ConvertCompareAndBranch(cu, bb, mir, kCondLe, rl_src[0], rl_src[1]);
+ break;
+ case Instruction::IF_EQZ:
+ ConvertCompareZeroAndBranch(cu, bb, mir, kCondEq, rl_src[0]);
+ break;
+ case Instruction::IF_NEZ:
+ ConvertCompareZeroAndBranch(cu, bb, mir, kCondNe, rl_src[0]);
+ break;
+ case Instruction::IF_LTZ:
+ ConvertCompareZeroAndBranch(cu, bb, mir, kCondLt, rl_src[0]);
+ break;
+ case Instruction::IF_GEZ:
+ ConvertCompareZeroAndBranch(cu, bb, mir, kCondGe, rl_src[0]);
+ break;
+ case Instruction::IF_GTZ:
+ ConvertCompareZeroAndBranch(cu, bb, mir, kCondGt, rl_src[0]);
+ break;
+ case Instruction::IF_LEZ:
+ ConvertCompareZeroAndBranch(cu, bb, mir, kCondLe, rl_src[0]);
+ break;
+
+ case Instruction::GOTO:
+ case Instruction::GOTO_16:
+ case Instruction::GOTO_32: {
+ if (bb->taken->start_offset <= bb->start_offset) {
+ EmitSuspendCheck(cu);
+ }
+ cu->irb->CreateBr(GetLLVMBlock(cu, bb->taken->id));
+ }
+ break;
+
+ case Instruction::ADD_LONG:
+ case Instruction::ADD_LONG_2ADDR:
+ case Instruction::ADD_INT:
+ case Instruction::ADD_INT_2ADDR:
+ ConvertArithOp(cu, kOpAdd, rl_dest, rl_src[0], rl_src[1]);
+ break;
+ case Instruction::SUB_LONG:
+ case Instruction::SUB_LONG_2ADDR:
+ case Instruction::SUB_INT:
+ case Instruction::SUB_INT_2ADDR:
+ ConvertArithOp(cu, kOpSub, rl_dest, rl_src[0], rl_src[1]);
+ break;
+ case Instruction::MUL_LONG:
+ case Instruction::MUL_LONG_2ADDR:
+ case Instruction::MUL_INT:
+ case Instruction::MUL_INT_2ADDR:
+ ConvertArithOp(cu, kOpMul, rl_dest, rl_src[0], rl_src[1]);
+ break;
+ case Instruction::DIV_LONG:
+ case Instruction::DIV_LONG_2ADDR:
+ case Instruction::DIV_INT:
+ case Instruction::DIV_INT_2ADDR:
+ ConvertArithOp(cu, kOpDiv, rl_dest, rl_src[0], rl_src[1]);
+ break;
+ case Instruction::REM_LONG:
+ case Instruction::REM_LONG_2ADDR:
+ case Instruction::REM_INT:
+ case Instruction::REM_INT_2ADDR:
+ ConvertArithOp(cu, kOpRem, rl_dest, rl_src[0], rl_src[1]);
+ break;
+ case Instruction::AND_LONG:
+ case Instruction::AND_LONG_2ADDR:
+ case Instruction::AND_INT:
+ case Instruction::AND_INT_2ADDR:
+ ConvertArithOp(cu, kOpAnd, rl_dest, rl_src[0], rl_src[1]);
+ break;
+ case Instruction::OR_LONG:
+ case Instruction::OR_LONG_2ADDR:
+ case Instruction::OR_INT:
+ case Instruction::OR_INT_2ADDR:
+ ConvertArithOp(cu, kOpOr, rl_dest, rl_src[0], rl_src[1]);
+ break;
+ case Instruction::XOR_LONG:
+ case Instruction::XOR_LONG_2ADDR:
+ case Instruction::XOR_INT:
+ case Instruction::XOR_INT_2ADDR:
+ ConvertArithOp(cu, kOpXor, rl_dest, rl_src[0], rl_src[1]);
+ break;
+ case Instruction::SHL_LONG:
+ case Instruction::SHL_LONG_2ADDR:
+ ConvertShift(cu, greenland::IntrinsicHelper::SHLLong,
+ rl_dest, rl_src[0], rl_src[1]);
+ break;
+ case Instruction::SHL_INT:
+ case Instruction::SHL_INT_2ADDR:
+ ConvertShift(cu, greenland::IntrinsicHelper::SHLInt,
+ rl_dest, rl_src[0], rl_src[1]);
+ break;
+ case Instruction::SHR_LONG:
+ case Instruction::SHR_LONG_2ADDR:
+ ConvertShift(cu, greenland::IntrinsicHelper::SHRLong,
+ rl_dest, rl_src[0], rl_src[1]);
+ break;
+ case Instruction::SHR_INT:
+ case Instruction::SHR_INT_2ADDR:
+ ConvertShift(cu, greenland::IntrinsicHelper::SHRInt,
+ rl_dest, rl_src[0], rl_src[1]);
+ break;
+ case Instruction::USHR_LONG:
+ case Instruction::USHR_LONG_2ADDR:
+ ConvertShift(cu, greenland::IntrinsicHelper::USHRLong,
+ rl_dest, rl_src[0], rl_src[1]);
+ break;
+ case Instruction::USHR_INT:
+ case Instruction::USHR_INT_2ADDR:
+ ConvertShift(cu, greenland::IntrinsicHelper::USHRInt,
+ rl_dest, rl_src[0], rl_src[1]);
+ break;
+
+ case Instruction::ADD_INT_LIT16:
+ case Instruction::ADD_INT_LIT8:
+ ConvertArithOpLit(cu, kOpAdd, rl_dest, rl_src[0], vC);
+ break;
+ case Instruction::RSUB_INT:
+ case Instruction::RSUB_INT_LIT8:
+ ConvertArithOpLit(cu, kOpRsub, rl_dest, rl_src[0], vC);
+ break;
+ case Instruction::MUL_INT_LIT16:
+ case Instruction::MUL_INT_LIT8:
+ ConvertArithOpLit(cu, kOpMul, rl_dest, rl_src[0], vC);
+ break;
+ case Instruction::DIV_INT_LIT16:
+ case Instruction::DIV_INT_LIT8:
+ ConvertArithOpLit(cu, kOpDiv, rl_dest, rl_src[0], vC);
+ break;
+ case Instruction::REM_INT_LIT16:
+ case Instruction::REM_INT_LIT8:
+ ConvertArithOpLit(cu, kOpRem, rl_dest, rl_src[0], vC);
+ break;
+ case Instruction::AND_INT_LIT16:
+ case Instruction::AND_INT_LIT8:
+ ConvertArithOpLit(cu, kOpAnd, rl_dest, rl_src[0], vC);
+ break;
+ case Instruction::OR_INT_LIT16:
+ case Instruction::OR_INT_LIT8:
+ ConvertArithOpLit(cu, kOpOr, rl_dest, rl_src[0], vC);
+ break;
+ case Instruction::XOR_INT_LIT16:
+ case Instruction::XOR_INT_LIT8:
+ ConvertArithOpLit(cu, kOpXor, rl_dest, rl_src[0], vC);
+ break;
+ case Instruction::SHL_INT_LIT8:
+ ConvertShiftLit(cu, greenland::IntrinsicHelper::SHLInt,
+ rl_dest, rl_src[0], vC & 0x1f);
+ break;
+ case Instruction::SHR_INT_LIT8:
+ ConvertShiftLit(cu, greenland::IntrinsicHelper::SHRInt,
+ rl_dest, rl_src[0], vC & 0x1f);
+ break;
+ case Instruction::USHR_INT_LIT8:
+ ConvertShiftLit(cu, greenland::IntrinsicHelper::USHRInt,
+ rl_dest, rl_src[0], vC & 0x1f);
+ break;
+
+ case Instruction::ADD_FLOAT:
+ case Instruction::ADD_FLOAT_2ADDR:
+ case Instruction::ADD_DOUBLE:
+ case Instruction::ADD_DOUBLE_2ADDR:
+ ConvertFPArithOp(cu, kOpAdd, rl_dest, rl_src[0], rl_src[1]);
+ break;
+
+ case Instruction::SUB_FLOAT:
+ case Instruction::SUB_FLOAT_2ADDR:
+ case Instruction::SUB_DOUBLE:
+ case Instruction::SUB_DOUBLE_2ADDR:
+ ConvertFPArithOp(cu, kOpSub, rl_dest, rl_src[0], rl_src[1]);
+ break;
+
+ case Instruction::MUL_FLOAT:
+ case Instruction::MUL_FLOAT_2ADDR:
+ case Instruction::MUL_DOUBLE:
+ case Instruction::MUL_DOUBLE_2ADDR:
+ ConvertFPArithOp(cu, kOpMul, rl_dest, rl_src[0], rl_src[1]);
+ break;
+
+ case Instruction::DIV_FLOAT:
+ case Instruction::DIV_FLOAT_2ADDR:
+ case Instruction::DIV_DOUBLE:
+ case Instruction::DIV_DOUBLE_2ADDR:
+ ConvertFPArithOp(cu, kOpDiv, rl_dest, rl_src[0], rl_src[1]);
+ break;
+
+ case Instruction::REM_FLOAT:
+ case Instruction::REM_FLOAT_2ADDR:
+ case Instruction::REM_DOUBLE:
+ case Instruction::REM_DOUBLE_2ADDR:
+ ConvertFPArithOp(cu, kOpRem, rl_dest, rl_src[0], rl_src[1]);
+ break;
+
+ case Instruction::INVOKE_STATIC:
+ ConvertInvoke(cu, bb, mir, kStatic, false /*range*/,
+ false /* NewFilledArray */);
+ break;
+ case Instruction::INVOKE_STATIC_RANGE:
+ ConvertInvoke(cu, bb, mir, kStatic, true /*range*/,
+ false /* NewFilledArray */);
+ break;
+
+ case Instruction::INVOKE_DIRECT:
+ ConvertInvoke(cu, bb, mir, kDirect, false /*range*/,
+ false /* NewFilledArray */);
+ break;
+ case Instruction::INVOKE_DIRECT_RANGE:
+ ConvertInvoke(cu, bb, mir, kDirect, true /*range*/,
+ false /* NewFilledArray */);
+ break;
+
+ case Instruction::INVOKE_VIRTUAL:
+ ConvertInvoke(cu, bb, mir, kVirtual, false /*range*/,
+ false /* NewFilledArray */);
+ break;
+ case Instruction::INVOKE_VIRTUAL_RANGE:
+ ConvertInvoke(cu, bb, mir, kVirtual, true /*range*/,
+ false /* NewFilledArray */);
+ break;
+
+ case Instruction::INVOKE_SUPER:
+ ConvertInvoke(cu, bb, mir, kSuper, false /*range*/,
+ false /* NewFilledArray */);
+ break;
+ case Instruction::INVOKE_SUPER_RANGE:
+ ConvertInvoke(cu, bb, mir, kSuper, true /*range*/,
+ false /* NewFilledArray */);
+ break;
+
+ case Instruction::INVOKE_INTERFACE:
+ ConvertInvoke(cu, bb, mir, kInterface, false /*range*/,
+ false /* NewFilledArray */);
+ break;
+ case Instruction::INVOKE_INTERFACE_RANGE:
+ ConvertInvoke(cu, bb, mir, kInterface, true /*range*/,
+ false /* NewFilledArray */);
+ break;
+ case Instruction::FILLED_NEW_ARRAY:
+ ConvertInvoke(cu, bb, mir, kInterface, false /*range*/,
+ true /* NewFilledArray */);
+ break;
+ case Instruction::FILLED_NEW_ARRAY_RANGE:
+ ConvertInvoke(cu, bb, mir, kInterface, true /*range*/,
+ true /* NewFilledArray */);
+ break;
+
+ case Instruction::CONST_STRING:
+ case Instruction::CONST_STRING_JUMBO:
+ ConvertConstObject(cu, vB, greenland::IntrinsicHelper::ConstString,
+ rl_dest);
+ break;
+
+ case Instruction::CONST_CLASS:
+ ConvertConstObject(cu, vB, greenland::IntrinsicHelper::ConstClass,
+ rl_dest);
+ break;
+
+ case Instruction::CHECK_CAST:
+ ConvertCheckCast(cu, vB, rl_src[0]);
+ break;
+
+ case Instruction::NEW_INSTANCE:
+ ConvertNewInstance(cu, vB, rl_dest);
+ break;
+
+ case Instruction::MOVE_EXCEPTION:
+ ConvertMoveException(cu, rl_dest);
+ break;
+
+ case Instruction::THROW:
+ ConvertThrow(cu, rl_src[0]);
+ /*
+ * If this throw is standalone, terminate.
+ * If it might rethrow, force termination
+ * of the following block.
+ */
+ if (bb->fall_through == NULL) {
+ cu->irb->CreateUnreachable();
+ } else {
+ bb->fall_through->fall_through = NULL;
+ bb->fall_through->taken = NULL;
+ }
+ break;
+
+ case Instruction::MOVE_RESULT_WIDE:
+ case Instruction::MOVE_RESULT:
+ case Instruction::MOVE_RESULT_OBJECT:
+ /*
+ * All move_results should have been folded into the preceding invoke.
+ */
+ LOG(FATAL) << "Unexpected move_result";
+ break;
+
+ case Instruction::MONITOR_ENTER:
+ ConvertMonitorEnterExit(cu, opt_flags,
+ greenland::IntrinsicHelper::MonitorEnter,
+ rl_src[0]);
+ break;
+
+ case Instruction::MONITOR_EXIT:
+ ConvertMonitorEnterExit(cu, opt_flags,
+ greenland::IntrinsicHelper::MonitorExit,
+ rl_src[0]);
+ break;
+
+ case Instruction::ARRAY_LENGTH:
+ ConvertArrayLength(cu, opt_flags, rl_dest, rl_src[0]);
+ break;
+
+ case Instruction::NEW_ARRAY:
+ ConvertNewArray(cu, vC, rl_dest, rl_src[0]);
+ break;
+
+ case Instruction::INSTANCE_OF:
+ ConvertInstanceOf(cu, vC, rl_dest, rl_src[0]);
+ break;
+
+ case Instruction::AGET:
+ if (rl_dest.fp) {
+ ConvertAget(cu, opt_flags,
+ greenland::IntrinsicHelper::HLArrayGetFloat,
+ rl_dest, rl_src[0], rl_src[1]);
+ } else {
+ ConvertAget(cu, opt_flags, greenland::IntrinsicHelper::HLArrayGet,
+ rl_dest, rl_src[0], rl_src[1]);
+ }
+ break;
+ case Instruction::AGET_OBJECT:
+ ConvertAget(cu, opt_flags, greenland::IntrinsicHelper::HLArrayGetObject,
+ rl_dest, rl_src[0], rl_src[1]);
+ break;
+ case Instruction::AGET_BOOLEAN:
+ ConvertAget(cu, opt_flags,
+ greenland::IntrinsicHelper::HLArrayGetBoolean,
+ rl_dest, rl_src[0], rl_src[1]);
+ break;
+ case Instruction::AGET_BYTE:
+ ConvertAget(cu, opt_flags, greenland::IntrinsicHelper::HLArrayGetByte,
+ rl_dest, rl_src[0], rl_src[1]);
+ break;
+ case Instruction::AGET_CHAR:
+ ConvertAget(cu, opt_flags, greenland::IntrinsicHelper::HLArrayGetChar,
+ rl_dest, rl_src[0], rl_src[1]);
+ break;
+ case Instruction::AGET_SHORT:
+ ConvertAget(cu, opt_flags, greenland::IntrinsicHelper::HLArrayGetShort,
+ rl_dest, rl_src[0], rl_src[1]);
+ break;
+ case Instruction::AGET_WIDE:
+ if (rl_dest.fp) {
+ ConvertAget(cu, opt_flags,
+ greenland::IntrinsicHelper::HLArrayGetDouble,
+ rl_dest, rl_src[0], rl_src[1]);
+ } else {
+ ConvertAget(cu, opt_flags, greenland::IntrinsicHelper::HLArrayGetWide,
+ rl_dest, rl_src[0], rl_src[1]);
+ }
+ break;
+
+ case Instruction::APUT:
+ if (rl_src[0].fp) {
+ ConvertAput(cu, opt_flags,
+ greenland::IntrinsicHelper::HLArrayPutFloat,
+ rl_src[0], rl_src[1], rl_src[2]);
+ } else {
+ ConvertAput(cu, opt_flags, greenland::IntrinsicHelper::HLArrayPut,
+ rl_src[0], rl_src[1], rl_src[2]);
+ }
+ break;
+ case Instruction::APUT_OBJECT:
+ ConvertAput(cu, opt_flags, greenland::IntrinsicHelper::HLArrayPutObject,
+ rl_src[0], rl_src[1], rl_src[2]);
+ break;
+ case Instruction::APUT_BOOLEAN:
+ ConvertAput(cu, opt_flags,
+ greenland::IntrinsicHelper::HLArrayPutBoolean,
+ rl_src[0], rl_src[1], rl_src[2]);
+ break;
+ case Instruction::APUT_BYTE:
+ ConvertAput(cu, opt_flags, greenland::IntrinsicHelper::HLArrayPutByte,
+ rl_src[0], rl_src[1], rl_src[2]);
+ break;
+ case Instruction::APUT_CHAR:
+ ConvertAput(cu, opt_flags, greenland::IntrinsicHelper::HLArrayPutChar,
+ rl_src[0], rl_src[1], rl_src[2]);
+ break;
+ case Instruction::APUT_SHORT:
+ ConvertAput(cu, opt_flags, greenland::IntrinsicHelper::HLArrayPutShort,
+ rl_src[0], rl_src[1], rl_src[2]);
+ break;
+ case Instruction::APUT_WIDE:
+ if (rl_src[0].fp) {
+ ConvertAput(cu, opt_flags,
+ greenland::IntrinsicHelper::HLArrayPutDouble,
+ rl_src[0], rl_src[1], rl_src[2]);
+ } else {
+ ConvertAput(cu, opt_flags, greenland::IntrinsicHelper::HLArrayPutWide,
+ rl_src[0], rl_src[1], rl_src[2]);
+ }
+ break;
+
+ case Instruction::IGET:
+ if (rl_dest.fp) {
+ ConvertIget(cu, opt_flags, greenland::IntrinsicHelper::HLIGetFloat,
+ rl_dest, rl_src[0], vC);
+ } else {
+ ConvertIget(cu, opt_flags, greenland::IntrinsicHelper::HLIGet,
+ rl_dest, rl_src[0], vC);
+ }
+ break;
+ case Instruction::IGET_OBJECT:
+ ConvertIget(cu, opt_flags, greenland::IntrinsicHelper::HLIGetObject,
+ rl_dest, rl_src[0], vC);
+ break;
+ case Instruction::IGET_BOOLEAN:
+ ConvertIget(cu, opt_flags, greenland::IntrinsicHelper::HLIGetBoolean,
+ rl_dest, rl_src[0], vC);
+ break;
+ case Instruction::IGET_BYTE:
+ ConvertIget(cu, opt_flags, greenland::IntrinsicHelper::HLIGetByte,
+ rl_dest, rl_src[0], vC);
+ break;
+ case Instruction::IGET_CHAR:
+ ConvertIget(cu, opt_flags, greenland::IntrinsicHelper::HLIGetChar,
+ rl_dest, rl_src[0], vC);
+ break;
+ case Instruction::IGET_SHORT:
+ ConvertIget(cu, opt_flags, greenland::IntrinsicHelper::HLIGetShort,
+ rl_dest, rl_src[0], vC);
+ break;
+ case Instruction::IGET_WIDE:
+ if (rl_dest.fp) {
+ ConvertIget(cu, opt_flags, greenland::IntrinsicHelper::HLIGetDouble,
+ rl_dest, rl_src[0], vC);
+ } else {
+ ConvertIget(cu, opt_flags, greenland::IntrinsicHelper::HLIGetWide,
+ rl_dest, rl_src[0], vC);
+ }
+ break;
+ case Instruction::IPUT:
+ if (rl_src[0].fp) {
+ ConvertIput(cu, opt_flags, greenland::IntrinsicHelper::HLIPutFloat,
+ rl_src[0], rl_src[1], vC);
+ } else {
+ ConvertIput(cu, opt_flags, greenland::IntrinsicHelper::HLIPut,
+ rl_src[0], rl_src[1], vC);
+ }
+ break;
+ case Instruction::IPUT_OBJECT:
+ ConvertIput(cu, opt_flags, greenland::IntrinsicHelper::HLIPutObject,
+ rl_src[0], rl_src[1], vC);
+ break;
+ case Instruction::IPUT_BOOLEAN:
+ ConvertIput(cu, opt_flags, greenland::IntrinsicHelper::HLIPutBoolean,
+ rl_src[0], rl_src[1], vC);
+ break;
+ case Instruction::IPUT_BYTE:
+ ConvertIput(cu, opt_flags, greenland::IntrinsicHelper::HLIPutByte,
+ rl_src[0], rl_src[1], vC);
+ break;
+ case Instruction::IPUT_CHAR:
+ ConvertIput(cu, opt_flags, greenland::IntrinsicHelper::HLIPutChar,
+ rl_src[0], rl_src[1], vC);
+ break;
+ case Instruction::IPUT_SHORT:
+ ConvertIput(cu, opt_flags, greenland::IntrinsicHelper::HLIPutShort,
+ rl_src[0], rl_src[1], vC);
+ break;
+ case Instruction::IPUT_WIDE:
+ if (rl_src[0].fp) {
+ ConvertIput(cu, opt_flags, greenland::IntrinsicHelper::HLIPutDouble,
+ rl_src[0], rl_src[1], vC);
+ } else {
+ ConvertIput(cu, opt_flags, greenland::IntrinsicHelper::HLIPutWide,
+ rl_src[0], rl_src[1], vC);
+ }
+ break;
+
+ case Instruction::FILL_ARRAY_DATA:
+ ConvertFillArrayData(cu, vB, rl_src[0]);
+ break;
+
+ case Instruction::LONG_TO_INT:
+ ConvertLongToInt(cu, rl_dest, rl_src[0]);
+ break;
+
+ case Instruction::INT_TO_LONG:
+ ConvertIntToLong(cu, rl_dest, rl_src[0]);
+ break;
+
+ case Instruction::INT_TO_CHAR:
+ ConvertIntNarrowing(cu, rl_dest, rl_src[0],
+ greenland::IntrinsicHelper::IntToChar);
+ break;
+ case Instruction::INT_TO_BYTE:
+ ConvertIntNarrowing(cu, rl_dest, rl_src[0],
+ greenland::IntrinsicHelper::IntToByte);
+ break;
+ case Instruction::INT_TO_SHORT:
+ ConvertIntNarrowing(cu, rl_dest, rl_src[0],
+ greenland::IntrinsicHelper::IntToShort);
+ break;
+
+ case Instruction::INT_TO_FLOAT:
+ case Instruction::LONG_TO_FLOAT:
+ ConvertIntToFP(cu, cu->irb->getFloatTy(), rl_dest, rl_src[0]);
+ break;
+
+ case Instruction::INT_TO_DOUBLE:
+ case Instruction::LONG_TO_DOUBLE:
+ ConvertIntToFP(cu, cu->irb->getDoubleTy(), rl_dest, rl_src[0]);
+ break;
+
+ case Instruction::FLOAT_TO_DOUBLE:
+ ConvertFloatToDouble(cu, rl_dest, rl_src[0]);
+ break;
+
+ case Instruction::DOUBLE_TO_FLOAT:
+ ConvertDoubleToFloat(cu, rl_dest, rl_src[0]);
+ break;
+
+ case Instruction::NEG_LONG:
+ case Instruction::NEG_INT:
+ ConvertNeg(cu, rl_dest, rl_src[0]);
+ break;
+
+ case Instruction::NEG_FLOAT:
+ case Instruction::NEG_DOUBLE:
+ ConvertNegFP(cu, rl_dest, rl_src[0]);
+ break;
+
+ case Instruction::NOT_LONG:
+ case Instruction::NOT_INT:
+ ConvertNot(cu, rl_dest, rl_src[0]);
+ break;
+
+ case Instruction::FLOAT_TO_INT:
+ ConvertFPToInt(cu, greenland::IntrinsicHelper::F2I, rl_dest, rl_src[0]);
+ break;
+
+ case Instruction::DOUBLE_TO_INT:
+ ConvertFPToInt(cu, greenland::IntrinsicHelper::D2I, rl_dest, rl_src[0]);
+ break;
+
+ case Instruction::FLOAT_TO_LONG:
+ ConvertFPToInt(cu, greenland::IntrinsicHelper::F2L, rl_dest, rl_src[0]);
+ break;
+
+ case Instruction::DOUBLE_TO_LONG:
+ ConvertFPToInt(cu, greenland::IntrinsicHelper::D2L, rl_dest, rl_src[0]);
+ break;
+
+ case Instruction::CMPL_FLOAT:
+ ConvertWideComparison(cu, greenland::IntrinsicHelper::CmplFloat,
+ rl_dest, rl_src[0], rl_src[1]);
+ break;
+ case Instruction::CMPG_FLOAT:
+ ConvertWideComparison(cu, greenland::IntrinsicHelper::CmpgFloat,
+ rl_dest, rl_src[0], rl_src[1]);
+ break;
+ case Instruction::CMPL_DOUBLE:
+ ConvertWideComparison(cu, greenland::IntrinsicHelper::CmplDouble,
+ rl_dest, rl_src[0], rl_src[1]);
+ break;
+ case Instruction::CMPG_DOUBLE:
+ ConvertWideComparison(cu, greenland::IntrinsicHelper::CmpgDouble,
+ rl_dest, rl_src[0], rl_src[1]);
+ break;
+ case Instruction::CMP_LONG:
+ ConvertWideComparison(cu, greenland::IntrinsicHelper::CmpLong,
+ rl_dest, rl_src[0], rl_src[1]);
+ break;
+
+ case Instruction::PACKED_SWITCH:
+ ConvertPackedSwitch(cu, bb, vB, rl_src[0]);
+ break;
+
+ case Instruction::SPARSE_SWITCH:
+ ConvertSparseSwitch(cu, bb, vB, rl_src[0]);
+ break;
+
+ default:
+ UNIMPLEMENTED(FATAL) << "Unsupported Dex opcode 0x" << std::hex << opcode;
+ res = true;
+ }
+ return res;
+}
+
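+ // Record the current Dalvik offset and attach it as metadata to the
+ // instructions emitted from this point on.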
+static void SetDexOffset(CompilationUnit* cu, int32_t offset)
+{
+ cu->current_dalvik_offset = offset;
+ llvm::SmallVector<llvm::Value*, 1> array_ref;
+ array_ref.push_back(cu->irb->getInt32(offset));
+ llvm::MDNode* node = llvm::MDNode::get(*cu->context, array_ref);
+ cu->irb->SetDexOffset(node);
+}
+
+// Attach method info as metadata to special intrinsic
+static void SetMethodInfo(CompilationUnit* cu)
+{
+ // We don't want dex offset on this
+ cu->irb->SetDexOffset(NULL);
+ greenland::IntrinsicHelper::IntrinsicId id;
+ id = greenland::IntrinsicHelper::MethodInfo;
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::Instruction* inst = cu->irb->CreateCall(intr);
+ llvm::SmallVector<llvm::Value*, 2> reg_info;
+ reg_info.push_back(cu->irb->getInt32(cu->num_ins));
+ reg_info.push_back(cu->irb->getInt32(cu->num_regs));
+ reg_info.push_back(cu->irb->getInt32(cu->num_outs));
+ reg_info.push_back(cu->irb->getInt32(cu->num_compiler_temps));
+ reg_info.push_back(cu->irb->getInt32(cu->num_ssa_regs));
+ llvm::MDNode* reg_info_node = llvm::MDNode::get(*cu->context, reg_info);
+ inst->setMetadata("RegInfo", reg_info_node);
+ int promo_size = cu->num_dalvik_registers + cu->num_compiler_temps + 1;
+ llvm::SmallVector<llvm::Value*, 50> pmap;
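+ // Pack each PromotionMap entry into a single word:
+ // first_in_pair(31:24) | FpReg(23:16) | core_reg(15:8) | fp_location(7:4) | core_location(3:0).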
+ for (int i = 0; i < promo_size; i++) {
+ PromotionMap* p = &cu->promotion_map[i];
+ int32_t map_data = ((p->first_in_pair & 0xff) << 24) |
+ ((p->FpReg & 0xff) << 16) |
+ ((p->core_reg & 0xff) << 8) |
+ ((p->fp_location & 0xf) << 4) |
+ (p->core_location & 0xf);
+ pmap.push_back(cu->irb->getInt32(map_data));
+ }
+ llvm::MDNode* map_node = llvm::MDNode::get(*cu->context, pmap);
+ inst->setMetadata("PromotionMap", map_node);
+ SetDexOffset(cu, cu->current_dalvik_offset);
+}
+
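+ // Emit an LLVM PHI node for each MIR Phi pseudo-op in this block. Wide values
+ // get a single PHI, keyed off the low word; the high-word Phi is discarded.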
+static void HandlePhiNodes(CompilationUnit* cu, BasicBlock* bb, llvm::BasicBlock* llvm_bb)
+{
+ SetDexOffset(cu, bb->start_offset);
+ for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ int opcode = mir->dalvikInsn.opcode;
+ if (opcode < kMirOpFirst) {
+ // Skip non-pseudo MIR ops.
+ continue;
+ }
+ if (opcode != kMirOpPhi) {
+ // Skip MIR pseudo ops other than Phi.
+ continue;
+ }
+ RegLocation rl_dest = cu->reg_location[mir->ssa_rep->defs[0]];
+ /*
+ * The Art compiler's Phi nodes only handle 32-bit operands,
+ * representing wide values using a matched set of Phi nodes
+ * for the lower and upper halves. In the llvm world, we only
+ * want a single Phi for wides. Here we will simply discard
+ * the Phi node representing the high word.
+ */
+ if (rl_dest.high_word) {
+ continue; // No Phi node - handled via low word
+ }
+ int* incoming = reinterpret_cast<int*>(mir->dalvikInsn.vB);
+ llvm::Type* phi_type =
+ LlvmTypeFromLocRec(cu, rl_dest);
+ llvm::PHINode* phi = cu->irb->CreatePHI(phi_type, mir->ssa_rep->num_uses);
+ for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
+ RegLocation loc;
+ // Don't check width here.
+ loc = GetRawSrc(cu, mir, i);
+ DCHECK_EQ(rl_dest.wide, loc.wide);
+ DCHECK_EQ(rl_dest.wide & rl_dest.high_word, loc.wide & loc.high_word);
+ DCHECK_EQ(rl_dest.fp, loc.fp);
+ DCHECK_EQ(rl_dest.core, loc.core);
+ DCHECK_EQ(rl_dest.ref, loc.ref);
+ SafeMap<unsigned int, unsigned int>::iterator it;
+ it = cu->block_id_map.find(incoming[i]);
+ DCHECK(it != cu->block_id_map.end());
+ DCHECK(GetLLVMValue(cu, loc.orig_sreg) != NULL);
+ DCHECK(GetLLVMBlock(cu, it->second) != NULL);
+ phi->addIncoming(GetLLVMValue(cu, loc.orig_sreg),
+ GetLLVMBlock(cu, it->second));
+ }
+ DefineValueOnly(cu, phi, rl_dest.orig_sreg);
+ }
+}
+
+/* Extended MIR instructions like PHI */
+static void ConvertExtendedMIR(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+ llvm::BasicBlock* llvm_bb)
+{
+
+ switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
+ case kMirOpPhi: {
+ // The llvm Phi node was already emitted in HandlePhiNodes - just set the vreg here.
+ RegLocation rl_dest = cu->reg_location[mir->ssa_rep->defs[0]];
+ if (!rl_dest.high_word) {
+ // Only consider low word of pairs.
+ DCHECK(GetLLVMValue(cu, rl_dest.orig_sreg) != NULL);
+ llvm::Value* phi = GetLLVMValue(cu, rl_dest.orig_sreg);
+ SetVregOnValue(cu, phi, rl_dest.orig_sreg);
+ }
+ break;
+ }
+ case kMirOpCopy: {
+ UNIMPLEMENTED(WARNING) << "unimp kMirOpPhi";
+ break;
+ }
+ case kMirOpNop:
+ if ((mir == bb->last_mir_insn) && (bb->taken == NULL) &&
+ (bb->fall_through == NULL)) {
+ cu->irb->CreateUnreachable();
+ }
+ break;
+
+ // TODO: need GBC intrinsic to take advantage of fused operations
+ case kMirOpFusedCmplFloat:
+ UNIMPLEMENTED(FATAL) << "kMirOpFusedCmpFloat unsupported";
+ break;
+ case kMirOpFusedCmpgFloat:
+ UNIMPLEMENTED(FATAL) << "kMirOpFusedCmgFloat unsupported";
+ break;
+ case kMirOpFusedCmplDouble:
+ UNIMPLEMENTED(FATAL) << "kMirOpFusedCmplDouble unsupported";
+ break;
+ case kMirOpFusedCmpgDouble:
+ UNIMPLEMENTED(FATAL) << "kMirOpFusedCmpgDouble unsupported";
+ break;
+ case kMirOpFusedCmpLong:
+ UNIMPLEMENTED(FATAL) << "kMirOpLongCmpBranch unsupported";
+ break;
+ default:
+ break;
+ }
+}
+
+/* Handle the content in each basic block */
+static bool BlockBitcodeConversion(CompilationUnit* cu, BasicBlock* bb)
+{
+ if (bb->block_type == kDead) return false;
+ llvm::BasicBlock* llvm_bb = GetLLVMBlock(cu, bb->id);
+ if (llvm_bb == NULL) {
+ CHECK(bb->block_type == kExitBlock);
+ } else {
+ cu->irb->SetInsertPoint(llvm_bb);
+ SetDexOffset(cu, bb->start_offset);
+ }
+
+ if (cu->verbose) {
+ LOG(INFO) << "................................";
+ LOG(INFO) << "Block id " << bb->id;
+ if (llvm_bb != NULL) {
+ LOG(INFO) << "label " << llvm_bb->getName().str().c_str();
+ } else {
+ LOG(INFO) << "llvm_bb is NULL";
+ }
+ }
+
+ if (bb->block_type == kEntryBlock) {
+ SetMethodInfo(cu);
+
+ { // Allocate shadowframe.
+ greenland::IntrinsicHelper::IntrinsicId id =
+ greenland::IntrinsicHelper::AllocaShadowFrame;
+ llvm::Function* func = cu->intrinsic_helper->GetIntrinsicFunction(id);
+ llvm::Value* entries = cu->irb->getInt32(cu->num_dalvik_registers);
+ cu->irb->CreateCall(func, entries);
+ }
+
+ { // Store arguments to vregs.
+ uint16_t arg_reg = cu->num_regs;
+
+ llvm::Function::arg_iterator arg_iter(cu->func->arg_begin());
+ llvm::Function::arg_iterator arg_end(cu->func->arg_end());
+
+ const char* shorty = cu->shorty;
+ uint32_t shorty_size = strlen(shorty);
+ CHECK_GE(shorty_size, 1u);
+
+ ++arg_iter; // skip method object
+
+ if ((cu->access_flags & kAccStatic) == 0) {
+ SetVregOnValue(cu, arg_iter, arg_reg);
+ ++arg_iter;
+ ++arg_reg;
+ }
+
+ for (uint32_t i = 1; i < shorty_size; ++i, ++arg_iter) {
+ SetVregOnValue(cu, arg_iter, arg_reg);
+
+ ++arg_reg;
+ if (shorty[i] == 'J' || shorty[i] == 'D') {
+ // Wide types, such as long and double, use a pair of registers to store
+ // the value, so we have to advance arg_reg one extra slot.
+ ++arg_reg;
+ }
+ }
+ }
+ } else if (bb->block_type == kExitBlock) {
+ /*
+ * Because of the differences between how MIR/LIR and llvm handle exit
+ * blocks, we won't explicitly convert them. On the llvm-to-lir
+ * path, the exit block will need to be regenerated.
+ */
+ return false;
+ } else if (bb->block_type == kExceptionHandling) {
+ /*
+ * Because we're deferring null checking, delete the associated empty
+ * exception block.
+ */
+ llvm_bb->eraseFromParent();
+ return false;
+ }
+
+ HandlePhiNodes(cu, bb, llvm_bb);
+
+ for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+
+ SetDexOffset(cu, mir->offset);
+
+ int opcode = mir->dalvikInsn.opcode;
+ Instruction::Format dalvik_format =
+ Instruction::FormatOf(mir->dalvikInsn.opcode);
+
+ if (opcode == kMirOpCheck) {
+ // Combine check and work halves of throwing instruction.
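+ // The check half adopts the work half's opcode and SSA info, and the work
+ // half becomes a NOP so the instruction is not converted twice.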
+ MIR* work_half = mir->meta.throw_insn;
+ mir->dalvikInsn.opcode = work_half->dalvikInsn.opcode;
+ opcode = mir->dalvikInsn.opcode;
+ SSARepresentation* ssa_rep = work_half->ssa_rep;
+ work_half->ssa_rep = mir->ssa_rep;
+ mir->ssa_rep = ssa_rep;
+ work_half->meta.original_opcode = work_half->dalvikInsn.opcode;
+ work_half->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
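+ // For blocks with catch successors, emit a CatchTargets switch fed the current
+ // dex offset; its cases branch to the handler blocks, and a fresh block holding
+ // the work half serves as the default target.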
+ if (bb->successor_block_list.block_list_type == kCatch) {
+ llvm::Function* intr = cu->intrinsic_helper->GetIntrinsicFunction(
+ greenland::IntrinsicHelper::CatchTargets);
+ llvm::Value* switch_key =
+ cu->irb->CreateCall(intr, cu->irb->getInt32(mir->offset));
+ GrowableListIterator iter;
+ GrowableListIteratorInit(&bb->successor_block_list.blocks, &iter);
+ // New basic block to use for work half
+ llvm::BasicBlock* work_bb =
+ llvm::BasicBlock::Create(*cu->context, "", cu->func);
+ llvm::SwitchInst* sw =
+ cu->irb->CreateSwitch(switch_key, work_bb,
+ bb->successor_block_list.blocks.num_used);
+ while (true) {
+ SuccessorBlockInfo *successor_block_info =
+ reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iter));
+ if (successor_block_info == NULL) break;
+ llvm::BasicBlock *target =
+ GetLLVMBlock(cu, successor_block_info->block->id);
+ int type_index = successor_block_info->key;
+ sw->addCase(cu->irb->getInt32(type_index), target);
+ }
+ llvm_bb = work_bb;
+ cu->irb->SetInsertPoint(llvm_bb);
+ }
+ }
+
+ if (opcode >= kMirOpFirst) {
+ ConvertExtendedMIR(cu, bb, mir, llvm_bb);
+ continue;
+ }
+
+ bool not_handled = ConvertMIRNode(cu, mir, bb, llvm_bb,
+ NULL /* label_list */);
+ if (not_handled) {
+ Instruction::Code dalvik_opcode = static_cast<Instruction::Code>(opcode);
+ LOG(WARNING) << StringPrintf("%#06x: Op %#x (%s) / Fmt %d not handled",
+ mir->offset, opcode,
+ Instruction::Name(dalvik_opcode),
+ dalvik_format);
+ }
+ }
+
+ if (bb->block_type == kEntryBlock) {
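+ // The entry block's branch is emitted later, in MethodMIR2Bitcode, once we
+ // know whether the placeholder block survives.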
+ cu->entryTarget_bb = GetLLVMBlock(cu, bb->fall_through->id);
+ } else if ((bb->fall_through != NULL) && !bb->terminated_by_return) {
+ cu->irb->CreateBr(GetLLVMBlock(cu, bb->fall_through->id));
+ }
+
+ return false;
+}
+
+char RemapShorty(char shorty_type) {
+ /*
+ * TODO: might want to revisit this. Dalvik registers are 32 bits wide,
+ * and longs/doubles are represented as a pair of registers. When sub-word
+ * arguments (and method results) are passed, they are extended to Dalvik
+ * virtual register containers. Because llvm is picky about type consistency,
+ * we must either cast between the "real" sub-word types and their 32-bit
+ * containers at every use, or always work with the expanded values.
+ * Here, we're doing the latter. We map the shorty signature to container
+ * types (which is valid so long as we always do a real expansion of passed
+ * arguments and field loads).
+ */
+ switch(shorty_type) {
+ case 'Z' : shorty_type = 'I'; break;
+ case 'B' : shorty_type = 'I'; break;
+ case 'S' : shorty_type = 'I'; break;
+ case 'C' : shorty_type = 'I'; break;
+ default: break;
+ }
+ return shorty_type;
+}
+
+static llvm::FunctionType* GetFunctionType(CompilationUnit* cu) {
+
+ // Get return type
+ llvm::Type* ret_type = cu->irb->GetJType(RemapShorty(cu->shorty[0]),
+ greenland::kAccurate);
+
+ // Get argument type
+ std::vector<llvm::Type*> args_type;
+
+ // method object
+ args_type.push_back(cu->irb->GetJMethodTy());
+
+ // Do we have a "this"?
+ if ((cu->access_flags & kAccStatic) == 0) {
+ args_type.push_back(cu->irb->GetJObjectTy());
+ }
+
+ for (uint32_t i = 1; i < strlen(cu->shorty); ++i) {
+ args_type.push_back(cu->irb->GetJType(RemapShorty(cu->shorty[i]),
+ greenland::kAccurate));
+ }
+
+ return llvm::FunctionType::get(ret_type, args_type, false);
+}
+
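+ // Create the llvm::Function for this method. The first argument is the method
+ // object; the remaining arguments are named after their Dalvik vregs (v<n>_0).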
+static bool CreateFunction(CompilationUnit* cu) {
+ std::string func_name(PrettyMethod(cu->method_idx, *cu->dex_file,
+ /* with_signature */ false));
+ llvm::FunctionType* func_type = GetFunctionType(cu);
+
+ if (func_type == NULL) {
+ return false;
+ }
+
+ cu->func = llvm::Function::Create(func_type,
+ llvm::Function::ExternalLinkage,
+ func_name, cu->module);
+
+ llvm::Function::arg_iterator arg_iter(cu->func->arg_begin());
+ llvm::Function::arg_iterator arg_end(cu->func->arg_end());
+
+ arg_iter->setName("method");
+ ++arg_iter;
+
+ int start_sreg = cu->num_regs;
+
+ for (unsigned i = 0; arg_iter != arg_end; ++i, ++arg_iter) {
+ arg_iter->setName(StringPrintf("v%i_0", start_sreg));
+ start_sreg += cu->reg_location[start_sreg].wide ? 2 : 1;
+ }
+
+ return true;
+}
+
+static bool CreateLLVMBasicBlock(CompilationUnit* cu, BasicBlock* bb)
+{
+ // No LLVM block is created for dead blocks or the exit block.
+ if ((bb->block_type == kDead) || (bb->block_type == kExitBlock)) {
+ cu->id_to_block_map.Put(bb->id, NULL);
+ } else {
+ int offset = bb->start_offset;
+ bool entry_block = (bb->block_type == kEntryBlock);
+ llvm::BasicBlock* llvm_bb =
+ llvm::BasicBlock::Create(*cu->context, entry_block ? "entry" :
+ StringPrintf(kLabelFormat, bb->catch_entry ? kCatchBlock :
+ kNormalBlock, offset, bb->id), cu->func);
+ if (entry_block) {
+ cu->entry_bb = llvm_bb;
+ cu->placeholder_bb =
+ llvm::BasicBlock::Create(*cu->context, "placeholder",
+ cu->func);
+ }
+ cu->id_to_block_map.Put(bb->id, llvm_bb);
+ }
+ return false;
+}
+
+
+/*
+ * Convert MIR to LLVM IR.
+ * o For each ssa name, create LLVM named value. Type these
+ * appropriately, and ignore high half of wide and double operands.
+ * o For each MIR basic block, create an LLVM basic block.
+ * o Iterate through the MIR a basic block at a time, converting each
+ * instruction and wiring its operands to the recovered ssa names.
+ */
+void MethodMIR2Bitcode(CompilationUnit* cu)
+{
+ InitIR(cu);
+ CompilerInitGrowableList(cu, &cu->llvm_values, cu->num_ssa_regs);
+
+ // Create the function
+ CreateFunction(cu);
+
+ // Create an LLVM basic block for each MIR block in dfs preorder
+ DataFlowAnalysisDispatcher(cu, CreateLLVMBasicBlock,
+ kPreOrderDFSTraversal, false /* is_iterative */);
+ /*
+ * Create an llvm named value for each MIR SSA name. Note: we'll use
+ * placeholders for all non-argument values (because we haven't seen
+ * the definition yet).
+ */
+ cu->irb->SetInsertPoint(cu->placeholder_bb);
+ llvm::Function::arg_iterator arg_iter(cu->func->arg_begin());
+ arg_iter++; /* Skip past the method object */
+ for (int i = 0; i < cu->num_ssa_regs; i++) {
+ llvm::Value* val;
+ RegLocation rl_temp = cu->reg_location[i];
+ if ((SRegToVReg(cu, i) < 0) || rl_temp.high_word) {
+ InsertGrowableList(cu, &cu->llvm_values, 0);
+ } else if ((i < cu->num_regs) ||
+ (i >= (cu->num_regs + cu->num_ins))) {
+ llvm::Constant* imm_value = cu->reg_location[i].wide ?
+ cu->irb->GetJLong(0) : cu->irb->GetJInt(0);
+ val = EmitConst(cu, imm_value, cu->reg_location[i]);
+ val->setName(LlvmSSAName(cu, i));
+ InsertGrowableList(cu, &cu->llvm_values, reinterpret_cast<uintptr_t>(val));
+ } else {
+ // Recover previously-created argument values
+ llvm::Value* arg_val = arg_iter++;
+ InsertGrowableList(cu, &cu->llvm_values, reinterpret_cast<uintptr_t>(arg_val));
+ }
+ }
+
+ DataFlowAnalysisDispatcher(cu, BlockBitcodeConversion,
+ kPreOrderDFSTraversal, false /* Iterative */);
+
+ /*
+ * In a few rare cases of verification failure, the verifier will
+ * replace one or more Dalvik opcodes with the special
+ * throw-verification-failure opcode. This can leave the SSA graph
+ * in an invalid state, as definitions may be lost while their uses are retained.
+ * To work around this problem, we insert placeholder definitions for
+ * all Dalvik SSA regs in the "placeholder" block. Here, after
+ * bitcode conversion is complete, we examine those placeholder definitions
+ * and delete any with no references (which normally is all of them).
+ *
+ * If any definitions remain, we link the placeholder block into the
+ * CFG. Otherwise, it is deleted.
+ */
+ for (llvm::BasicBlock::iterator it = cu->placeholder_bb->begin(),
+ it_end = cu->placeholder_bb->end(); it != it_end;) {
+ llvm::Instruction* inst = llvm::dyn_cast<llvm::Instruction>(it++);
+ DCHECK(inst != NULL);
+ llvm::Value* val = llvm::dyn_cast<llvm::Value>(inst);
+ DCHECK(val != NULL);
+ if (val->getNumUses() == 0) {
+ inst->eraseFromParent();
+ }
+ }
+ SetDexOffset(cu, 0);
+ if (cu->placeholder_bb->empty()) {
+ cu->placeholder_bb->eraseFromParent();
+ } else {
+ cu->irb->SetInsertPoint(cu->placeholder_bb);
+ cu->irb->CreateBr(cu->entryTarget_bb);
+ cu->entryTarget_bb = cu->placeholder_bb;
+ }
+ cu->irb->SetInsertPoint(cu->entry_bb);
+ cu->irb->CreateBr(cu->entryTarget_bb);
+
+ if (cu->enable_debug & (1 << kDebugVerifyBitcode)) {
+ if (llvm::verifyFunction(*cu->func, llvm::PrintMessageAction)) {
+ LOG(INFO) << "Bitcode verification FAILED for "
+ << PrettyMethod(cu->method_idx, *cu->dex_file)
+ << " of size " << cu->insns_size;
+ cu->enable_debug |= (1 << kDebugDumpBitcodeFile);
+ }
+ }
+
+ if (cu->enable_debug & (1 << kDebugDumpBitcodeFile)) {
+ // Write bitcode to file
+ std::string errmsg;
+ std::string fname(PrettyMethod(cu->method_idx, *cu->dex_file));
+ ReplaceSpecialChars(fname);
+ // TODO: make configurable; change the naming mechanism to avoid fname length issues.
+ fname = StringPrintf("/sdcard/Bitcode/%s.bc", fname.c_str());
+
+ if (fname.size() > 240) {
+ LOG(INFO) << "Warning: bitcode filename too long. Truncated.";
+ fname.resize(240);
+ }
+
+ llvm::OwningPtr<llvm::tool_output_file> out_file(
+ new llvm::tool_output_file(fname.c_str(), errmsg,
+ llvm::raw_fd_ostream::F_Binary));
+
+ if (!errmsg.empty()) {
+ LOG(ERROR) << "Failed to create bitcode output file: " << errmsg;
+ }
+
+ llvm::WriteBitcodeToFile(cu->module, out_file->os());
+ out_file->keep();
+ }
+}
+
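+ // Look up (or lazily create and cache) the RegLocation for an llvm::Value.
+ // Named values decode their SSA reg from the "v<n>_" name; unnamed temps get a
+ // freshly allocated physical-register location.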
+static RegLocation GetLoc(CompilationUnit* cu, llvm::Value* val) {
+ RegLocation res;
+ DCHECK(val != NULL);
+ SafeMap<llvm::Value*, RegLocation>::iterator it = cu->loc_map.find(val);
+ if (it == cu->loc_map.end()) {
+ std::string val_name = val->getName().str();
+ if (val_name.empty()) {
+ // FIXME: need to be more robust, handle FP and be in a position to
+ // manage unnamed temps whose lifetimes span basic block boundaries
+ UNIMPLEMENTED(WARNING) << "Need to handle unnamed llvm temps";
+ memset(&res, 0, sizeof(res));
+ res.location = kLocPhysReg;
+ res.low_reg = AllocTemp(cu);
+ res.home = true;
+ res.s_reg_low = INVALID_SREG;
+ res.orig_sreg = INVALID_SREG;
+ llvm::Type* ty = val->getType();
+ res.wide = ((ty == cu->irb->getInt64Ty()) ||
+ (ty == cu->irb->getDoubleTy()));
+ if (res.wide) {
+ res.high_reg = AllocTemp(cu);
+ }
+ cu->loc_map.Put(val, res);
+ } else {
+ DCHECK_EQ(val_name[0], 'v');
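+ // Named values follow the "v<sreg>_..." pattern produced by LlvmSSAName(), so
+ // the leading number indexes reg_location directly.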
+ int base_sreg = INVALID_SREG;
+ sscanf(val_name.c_str(), "v%d_", &base_sreg);
+ res = cu->reg_location[base_sreg];
+ cu->loc_map.Put(val, res);
+ }
+ } else {
+ res = it->second;
+ }
+ return res;
+}
+
+static Instruction::Code GetDalvikOpcode(OpKind op, bool is_const, bool is_wide)
+{
+ Instruction::Code res = Instruction::NOP;
+ if (is_wide) {
+ switch(op) {
+ case kOpAdd: res = Instruction::ADD_LONG; break;
+ case kOpSub: res = Instruction::SUB_LONG; break;
+ case kOpMul: res = Instruction::MUL_LONG; break;
+ case kOpDiv: res = Instruction::DIV_LONG; break;
+ case kOpRem: res = Instruction::REM_LONG; break;
+ case kOpAnd: res = Instruction::AND_LONG; break;
+ case kOpOr: res = Instruction::OR_LONG; break;
+ case kOpXor: res = Instruction::XOR_LONG; break;
+ case kOpLsl: res = Instruction::SHL_LONG; break;
+ case kOpLsr: res = Instruction::USHR_LONG; break;
+ case kOpAsr: res = Instruction::SHR_LONG; break;
+ default: LOG(FATAL) << "Unexpected OpKind " << op;
+ }
+ } else if (is_const) {
+ switch(op) {
+ case kOpAdd: res = Instruction::ADD_INT_LIT16; break;
+ case kOpSub: res = Instruction::RSUB_INT_LIT8; break;
+ case kOpMul: res = Instruction::MUL_INT_LIT16; break;
+ case kOpDiv: res = Instruction::DIV_INT_LIT16; break;
+ case kOpRem: res = Instruction::REM_INT_LIT16; break;
+ case kOpAnd: res = Instruction::AND_INT_LIT16; break;
+ case kOpOr: res = Instruction::OR_INT_LIT16; break;
+ case kOpXor: res = Instruction::XOR_INT_LIT16; break;
+ case kOpLsl: res = Instruction::SHL_INT_LIT8; break;
+ case kOpLsr: res = Instruction::USHR_INT_LIT8; break;
+ case kOpAsr: res = Instruction::SHR_INT_LIT8; break;
+ default: LOG(FATAL) << "Unexpected OpKind " << op;
+ }
+ } else {
+ switch(op) {
+ case kOpAdd: res = Instruction::ADD_INT; break;
+ case kOpSub: res = Instruction::SUB_INT; break;
+ case kOpMul: res = Instruction::MUL_INT; break;
+ case kOpDiv: res = Instruction::DIV_INT; break;
+ case kOpRem: res = Instruction::REM_INT; break;
+ case kOpAnd: res = Instruction::AND_INT; break;
+ case kOpOr: res = Instruction::OR_INT; break;
+ case kOpXor: res = Instruction::XOR_INT; break;
+ case kOpLsl: res = Instruction::SHL_INT; break;
+ case kOpLsr: res = Instruction::USHR_INT; break;
+ case kOpAsr: res = Instruction::SHR_INT; break;
+ default: LOG(FATAL) << "Unexpected OpKind " << op;
+ }
+ }
+ return res;
+}
+
+static Instruction::Code GetDalvikFPOpcode(OpKind op, bool is_const, bool is_wide)
+{
+ Instruction::Code res = Instruction::NOP;
+ if (is_wide) {
+ switch(op) {
+ case kOpAdd: res = Instruction::ADD_DOUBLE; break;
+ case kOpSub: res = Instruction::SUB_DOUBLE; break;
+ case kOpMul: res = Instruction::MUL_DOUBLE; break;
+ case kOpDiv: res = Instruction::DIV_DOUBLE; break;
+ case kOpRem: res = Instruction::REM_DOUBLE; break;
+ default: LOG(FATAL) << "Unexpected OpKind " << op;
+ }
+ } else {
+ switch(op) {
+ case kOpAdd: res = Instruction::ADD_FLOAT; break;
+ case kOpSub: res = Instruction::SUB_FLOAT; break;
+ case kOpMul: res = Instruction::MUL_FLOAT; break;
+ case kOpDiv: res = Instruction::DIV_FLOAT; break;
+ case kOpRem: res = Instruction::REM_FLOAT; break;
+ default: LOG(FATAL) << "Unexpected OpKind " << op;
+ }
+ }
+ return res;
+}
+
+static void CvtBinFPOp(CompilationUnit* cu, OpKind op, llvm::Instruction* inst)
+{
+ Codegen* cg = cu->cg.get();
+ RegLocation rl_dest = GetLoc(cu, inst);
+ /*
+ * Normally, we won't ever generate an FP operation with an immediate
+ * operand (not supported in the Dex instruction set). However, the IR
+ * builder may insert one - in particular for create_neg_fp. Recognize this
+ * case and deal with it.
+ */
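+ // Illustrative: a Dalvik neg-double may arrive here as an FSub whose first
+ // operand is a floating-point constant (e.g. -0.0); that constant-lhs shape is
+ // what the branch below recognizes and lowers to NEG_DOUBLE/NEG_FLOAT.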
+ llvm::ConstantFP* op1C = llvm::dyn_cast<llvm::ConstantFP>(inst->getOperand(0));
+ llvm::ConstantFP* op2C = llvm::dyn_cast<llvm::ConstantFP>(inst->getOperand(1));
+ DCHECK(op2C == NULL);
+ if ((op1C != NULL) && (op == kOpSub)) {
+ RegLocation rl_src = GetLoc(cu, inst->getOperand(1));
+ if (rl_dest.wide) {
+ cg->GenArithOpDouble(cu, Instruction::NEG_DOUBLE, rl_dest, rl_src, rl_src);
+ } else {
+ cg->GenArithOpFloat(cu, Instruction::NEG_FLOAT, rl_dest, rl_src, rl_src);
+ }
+ } else {
+ DCHECK(op1C == NULL);
+ RegLocation rl_src1 = GetLoc(cu, inst->getOperand(0));
+ RegLocation rl_src2 = GetLoc(cu, inst->getOperand(1));
+ Instruction::Code dalvik_op = GetDalvikFPOpcode(op, false, rl_dest.wide);
+ if (rl_dest.wide) {
+ cg->GenArithOpDouble(cu, dalvik_op, rl_dest, rl_src1, rl_src2);
+ } else {
+ cg->GenArithOpFloat(cu, dalvik_op, rl_dest, rl_src1, rl_src2);
+ }
+ }
+}
+
+static void CvtIntNarrowing(CompilationUnit* cu, llvm::Instruction* inst,
+ Instruction::Code opcode)
+{
+ Codegen* cg = cu->cg.get();
+ RegLocation rl_dest = GetLoc(cu, inst);
+ RegLocation rl_src = GetLoc(cu, inst->getOperand(0));
+ cg->GenIntNarrowing(cu, opcode, rl_dest, rl_src);
+}
+
+static void CvtIntToFP(CompilationUnit* cu, llvm::Instruction* inst)
+{
+ Codegen* cg = cu->cg.get();
+ RegLocation rl_dest = GetLoc(cu, inst);
+ RegLocation rl_src = GetLoc(cu, inst->getOperand(0));
+ Instruction::Code opcode;
+ if (rl_dest.wide) {
+ if (rl_src.wide) {
+ opcode = Instruction::LONG_TO_DOUBLE;
+ } else {
+ opcode = Instruction::INT_TO_DOUBLE;
+ }
+ } else {
+ if (rl_src.wide) {
+ opcode = Instruction::LONG_TO_FLOAT;
+ } else {
+ opcode = Instruction::INT_TO_FLOAT;
+ }
+ }
+ cg->GenConversion(cu, opcode, rl_dest, rl_src);
+}
+
+static void CvtFPToInt(CompilationUnit* cu, llvm::CallInst* call_inst)
+{
+ Codegen* cg = cu->cg.get();
+ RegLocation rl_dest = GetLoc(cu, call_inst);
+ RegLocation rl_src = GetLoc(cu, call_inst->getOperand(0));
+ Instruction::Code opcode;
+ if (rl_dest.wide) {
+ if (rl_src.wide) {
+ opcode = Instruction::DOUBLE_TO_LONG;
+ } else {
+ opcode = Instruction::FLOAT_TO_LONG;
+ }
+ } else {
+ if (rl_src.wide) {
+ opcode = Instruction::DOUBLE_TO_INT;
+ } else {
+ opcode = Instruction::FLOAT_TO_INT;
+ }
+ }
+ cg->GenConversion(cu, opcode, rl_dest, rl_src);
+}
+
+static void CvtFloatToDouble(CompilationUnit* cu, llvm::Instruction* inst)
+{
+ Codegen* cg = cu->cg.get();
+ RegLocation rl_dest = GetLoc(cu, inst);
+ RegLocation rl_src = GetLoc(cu, inst->getOperand(0));
+ cg->GenConversion(cu, Instruction::FLOAT_TO_DOUBLE, rl_dest, rl_src);
+}
+
+static void CvtTrunc(CompilationUnit* cu, llvm::Instruction* inst)
+{
+ Codegen* cg = cu->cg.get();
+ RegLocation rl_dest = GetLoc(cu, inst);
+ RegLocation rl_src = GetLoc(cu, inst->getOperand(0));
+ rl_src = UpdateLocWide(cu, rl_src);
+ rl_src = WideToNarrow(cu, rl_src);
+ cg->StoreValue(cu, rl_dest, rl_src);
+}
+
+static void CvtDoubleToFloat(CompilationUnit* cu, llvm::Instruction* inst)
+{
+ Codegen* cg = cu->cg.get();
+ RegLocation rl_dest = GetLoc(cu, inst);
+ RegLocation rl_src = GetLoc(cu, inst->getOperand(0));
+ cg->GenConversion(cu, Instruction::DOUBLE_TO_FLOAT, rl_dest, rl_src);
+}
+
+
+static void CvtIntExt(CompilationUnit* cu, llvm::Instruction* inst, bool is_signed)
+{
+ Codegen* cg = cu->cg.get();
+ // TODO: evaluate src/tgt types and add general support for more than int to long
+ RegLocation rl_dest = GetLoc(cu, inst);
+ RegLocation rl_src = GetLoc(cu, inst->getOperand(0));
+ DCHECK(rl_dest.wide);
+ DCHECK(!rl_src.wide);
+ DCHECK(!rl_dest.fp);
+ DCHECK(!rl_src.fp);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ if (rl_src.location == kLocPhysReg) {
+ cg->OpRegCopy(cu, rl_result.low_reg, rl_src.low_reg);
+ } else {
+ cg->LoadValueDirect(cu, rl_src, rl_result.low_reg);
+ }
+ if (is_signed) {
+ cg->OpRegRegImm(cu, kOpAsr, rl_result.high_reg, rl_result.low_reg, 31);
+ } else {
+ cg->LoadConstant(cu, rl_result.high_reg, 0);
+ }
+ cg->StoreValueWide(cu, rl_dest, rl_result);
+}
+
+static void CvtBinOp(CompilationUnit* cu, OpKind op, llvm::Instruction* inst)
+{
+ Codegen* cg = cu->cg.get();
+ RegLocation rl_dest = GetLoc(cu, inst);
+ llvm::Value* lhs = inst->getOperand(0);
+ // Special-case RSUB/NEG
+ llvm::ConstantInt* lhs_imm = llvm::dyn_cast<llvm::ConstantInt>(lhs);
+ if ((op == kOpSub) && (lhs_imm != NULL)) {
+ RegLocation rl_src1 = GetLoc(cu, inst->getOperand(1));
+ if (rl_src1.wide) {
+ DCHECK_EQ(lhs_imm->getSExtValue(), 0);
+ cg->GenArithOpLong(cu, Instruction::NEG_LONG, rl_dest, rl_src1, rl_src1);
+ } else {
+ cg->GenArithOpIntLit(cu, Instruction::RSUB_INT, rl_dest, rl_src1,
+ lhs_imm->getSExtValue());
+ }
+ return;
+ }
+ DCHECK(lhs_imm == NULL);
+ RegLocation rl_src1 = GetLoc(cu, inst->getOperand(0));
+ llvm::Value* rhs = inst->getOperand(1);
+ llvm::ConstantInt* const_rhs = llvm::dyn_cast<llvm::ConstantInt>(rhs);
+ if (!rl_dest.wide && (const_rhs != NULL)) {
+ Instruction::Code dalvik_op = GetDalvikOpcode(op, true, false);
+ cg->GenArithOpIntLit(cu, dalvik_op, rl_dest, rl_src1, const_rhs->getSExtValue());
+ } else {
+ Instruction::Code dalvik_op = GetDalvikOpcode(op, false, rl_dest.wide);
+ RegLocation rl_src2;
+ if (const_rhs != NULL) {
+ // ir_builder converts NOT_LONG to xor src, -1. Restore
+ DCHECK_EQ(dalvik_op, Instruction::XOR_LONG);
+ DCHECK_EQ(-1L, const_rhs->getSExtValue());
+ dalvik_op = Instruction::NOT_LONG;
+ rl_src2 = rl_src1;
+ } else {
+ rl_src2 = GetLoc(cu, rhs);
+ }
+ if (rl_dest.wide) {
+ cg->GenArithOpLong(cu, dalvik_op, rl_dest, rl_src1, rl_src2);
+ } else {
+ cg->GenArithOpInt(cu, dalvik_op, rl_dest, rl_src1, rl_src2);
+ }
+ }
+}
+
+static void CvtShiftOp(CompilationUnit* cu, Instruction::Code opcode, llvm::CallInst* call_inst)
+{
+ Codegen* cg = cu->cg.get();
+ DCHECK_EQ(call_inst->getNumArgOperands(), 2U);
+ RegLocation rl_dest = GetLoc(cu, call_inst);
+ RegLocation rl_src = GetLoc(cu, call_inst->getArgOperand(0));
+ llvm::Value* rhs = call_inst->getArgOperand(1);
+ if (llvm::ConstantInt* src2 = llvm::dyn_cast<llvm::ConstantInt>(rhs)) {
+ DCHECK(!rl_dest.wide);
+ cg->GenArithOpIntLit(cu, opcode, rl_dest, rl_src, src2->getSExtValue());
+ } else {
+ RegLocation rl_shift = GetLoc(cu, rhs);
+ if (call_inst->getType() == cu->irb->getInt64Ty()) {
+ cg->GenShiftOpLong(cu, opcode, rl_dest, rl_src, rl_shift);
+ } else {
+ cg->GenArithOpInt(cu, opcode, rl_dest, rl_src, rl_shift);
+ }
+ }
+}
+
+static void CvtBr(CompilationUnit* cu, llvm::Instruction* inst)
+{
+ Codegen* cg = cu->cg.get();
+ llvm::BranchInst* br_inst = llvm::dyn_cast<llvm::BranchInst>(inst);
+ DCHECK(br_inst != NULL);
+ DCHECK(br_inst->isUnconditional()); // May change - but this is all we use now
+ llvm::BasicBlock* target_bb = br_inst->getSuccessor(0);
+ cg->OpUnconditionalBranch(cu, cu->block_to_label_map.Get(target_bb));
+}
+
+static void CvtPhi(CompilationUnit* cu, llvm::Instruction* inst)
+{
+ // Nop - these have already been processed
+}
+
+static void CvtRet(CompilationUnit* cu, llvm::Instruction* inst)
+{
+ Codegen* cg = cu->cg.get();
+ llvm::ReturnInst* ret_inst = llvm::dyn_cast<llvm::ReturnInst>(inst);
+ llvm::Value* ret_val = ret_inst->getReturnValue();
+ if (ret_val != NULL) {
+ RegLocation rl_src = GetLoc(cu, ret_val);
+ if (rl_src.wide) {
+ cg->StoreValueWide(cu, GetReturnWide(cu, rl_src.fp), rl_src);
+ } else {
+ cg->StoreValue(cu, GetReturn(cu, rl_src.fp), rl_src);
+ }
+ }
+ cg->GenExitSequence(cu);
+}
+
+static ConditionCode GetCond(llvm::ICmpInst::Predicate llvm_cond)
+{
+ ConditionCode res = kCondAl;
+ switch(llvm_cond) {
+ case llvm::ICmpInst::ICMP_EQ: res = kCondEq; break;
+ case llvm::ICmpInst::ICMP_NE: res = kCondNe; break;
+ case llvm::ICmpInst::ICMP_SLT: res = kCondLt; break;
+ case llvm::ICmpInst::ICMP_SGE: res = kCondGe; break;
+ case llvm::ICmpInst::ICMP_SGT: res = kCondGt; break;
+ case llvm::ICmpInst::ICMP_SLE: res = kCondLe; break;
+ default: LOG(FATAL) << "Unexpected llvm condition";
+ }
+ return res;
+}
+
+static void CvtICmp(CompilationUnit* cu, llvm::Instruction* inst)
+{
+ // cg->GenCmpLong(cu, rl_dest, rl_src1, rl_src2)
+ UNIMPLEMENTED(FATAL);
+}
+
+static void CvtICmpBr(CompilationUnit* cu, llvm::Instruction* inst,
+ llvm::BranchInst* br_inst)
+{
+ Codegen* cg = cu->cg.get();
+ // Get targets
+ llvm::BasicBlock* taken_bb = br_inst->getSuccessor(0);
+ LIR* taken = cu->block_to_label_map.Get(taken_bb);
+ llvm::BasicBlock* fallthrough_bb = br_inst->getSuccessor(1);
+ LIR* fall_through = cu->block_to_label_map.Get(fallthrough_bb);
+ // Get comparison operands
+ llvm::ICmpInst* i_cmp_inst = llvm::dyn_cast<llvm::ICmpInst>(inst);
+ ConditionCode cond = GetCond(i_cmp_inst->getPredicate());
+ llvm::Value* lhs = i_cmp_inst->getOperand(0);
+ // Not expecting a constant as 1st operand
+ DCHECK(llvm::dyn_cast<llvm::ConstantInt>(lhs) == NULL);
+ RegLocation rl_src1 = GetLoc(cu, inst->getOperand(0));
+ rl_src1 = cg->LoadValue(cu, rl_src1, kCoreReg);
+ llvm::Value* rhs = inst->getOperand(1);
+ if (cu->instruction_set == kMips) {
+ // Compare and branch in one shot
+ UNIMPLEMENTED(FATAL);
+ }
+ // Compare, then branch
+ // TODO: handle fused CMP_LONG/IF_xxZ case
+ if (llvm::ConstantInt* src2 = llvm::dyn_cast<llvm::ConstantInt>(rhs)) {
+ cg->OpRegImm(cu, kOpCmp, rl_src1.low_reg, src2->getSExtValue());
+ } else if (llvm::dyn_cast<llvm::ConstantPointerNull>(rhs) != NULL) {
+ cg->OpRegImm(cu, kOpCmp, rl_src1.low_reg, 0);
+ } else {
+ RegLocation rl_src2 = GetLoc(cu, rhs);
+ rl_src2 = cg->LoadValue(cu, rl_src2, kCoreReg);
+ cg->OpRegReg(cu, kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
+ }
+ cg->OpCondBranch(cu, cond, taken);
+ // Fallthrough
+ cg->OpUnconditionalBranch(cu, fall_through);
+}
+
+static void CvtCopy(CompilationUnit* cu, llvm::CallInst* call_inst)
+{
+ Codegen* cg = cu->cg.get();
+ DCHECK_EQ(call_inst->getNumArgOperands(), 1U);
+ RegLocation rl_src = GetLoc(cu, call_inst->getArgOperand(0));
+ RegLocation rl_dest = GetLoc(cu, call_inst);
+ DCHECK_EQ(rl_src.wide, rl_dest.wide);
+ DCHECK_EQ(rl_src.fp, rl_dest.fp);
+ if (rl_src.wide) {
+ cg->StoreValueWide(cu, rl_dest, rl_src);
+ } else {
+ cg->StoreValue(cu, rl_dest, rl_src);
+ }
+}
+
+// Note: Immediate arg is a ConstantInt regardless of result type
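+// (For example, a ConstDouble's value arrives as a 64-bit ConstantInt holding the
+// raw bits; they are materialized below and the destination location supplies the type.)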
+static void CvtConst(CompilationUnit* cu, llvm::CallInst* call_inst)
+{
+ Codegen* cg = cu->cg.get();
+ DCHECK_EQ(call_inst->getNumArgOperands(), 1U);
+ llvm::ConstantInt* src =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
+ uint64_t immval = src->getZExtValue();
+ RegLocation rl_dest = GetLoc(cu, call_inst);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kAnyReg, true);
+ if (rl_dest.wide) {
+ cg->LoadConstantWide(cu, rl_result.low_reg, rl_result.high_reg, immval);
+ cg->StoreValueWide(cu, rl_dest, rl_result);
+ } else {
+ int immediate = immval & 0xffffffff;
+ cg->LoadConstantNoClobber(cu, rl_result.low_reg, immediate);
+ cg->StoreValue(cu, rl_dest, rl_result);
+ if (immediate == 0) {
+ cg->Workaround7250540(cu, rl_dest, rl_result.low_reg);
+ }
+ }
+}
+
+static void CvtConstObject(CompilationUnit* cu, llvm::CallInst* call_inst, bool is_string)
+{
+ Codegen* cg = cu->cg.get();
+ DCHECK_EQ(call_inst->getNumArgOperands(), 1U);
+ llvm::ConstantInt* idx_val =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
+ uint32_t index = idx_val->getZExtValue();
+ RegLocation rl_dest = GetLoc(cu, call_inst);
+ if (is_string) {
+ cg->GenConstString(cu, index, rl_dest);
+ } else {
+ cg->GenConstClass(cu, index, rl_dest);
+ }
+}
+
+static void CvtFillArrayData(CompilationUnit* cu, llvm::CallInst* call_inst)
+{
+ Codegen* cg = cu->cg.get();
+ DCHECK_EQ(call_inst->getNumArgOperands(), 2U);
+ llvm::ConstantInt* offset_val =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
+ RegLocation rl_src = GetLoc(cu, call_inst->getArgOperand(1));
+ cg->GenFillArrayData(cu, offset_val->getSExtValue(), rl_src);
+}
+
+static void CvtNewInstance(CompilationUnit* cu, llvm::CallInst* call_inst)
+{
+ Codegen* cg = cu->cg.get();
+ DCHECK_EQ(call_inst->getNumArgOperands(), 1U);
+ llvm::ConstantInt* type_idx_val =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
+ uint32_t type_idx = type_idx_val->getZExtValue();
+ RegLocation rl_dest = GetLoc(cu, call_inst);
+ cg->GenNewInstance(cu, type_idx, rl_dest);
+}
+
+static void CvtNewArray(CompilationUnit* cu, llvm::CallInst* call_inst)
+{
+ Codegen* cg = cu->cg.get();
+ DCHECK_EQ(call_inst->getNumArgOperands(), 2U);
+ llvm::ConstantInt* type_idx_val =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
+ uint32_t type_idx = type_idx_val->getZExtValue();
+ llvm::Value* len = call_inst->getArgOperand(1);
+ RegLocation rl_len = GetLoc(cu, len);
+ RegLocation rl_dest = GetLoc(cu, call_inst);
+ cg->GenNewArray(cu, type_idx, rl_dest, rl_len);
+}
+
+static void CvtInstanceOf(CompilationUnit* cu, llvm::CallInst* call_inst)
+{
+ Codegen* cg = cu->cg.get();
+ DCHECK_EQ(call_inst->getNumArgOperands(), 2U);
+ llvm::ConstantInt* type_idx_val =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
+ uint32_t type_idx = type_idx_val->getZExtValue();
+ llvm::Value* src = call_inst->getArgOperand(1);
+ RegLocation rl_src = GetLoc(cu, src);
+ RegLocation rl_dest = GetLoc(cu, call_inst);
+ cg->GenInstanceof(cu, type_idx, rl_dest, rl_src);
+}
+
+static void CvtThrow(CompilationUnit* cu, llvm::CallInst* call_inst)
+{
+ Codegen* cg = cu->cg.get();
+ DCHECK_EQ(call_inst->getNumArgOperands(), 1U);
+ llvm::Value* src = call_inst->getArgOperand(0);
+ RegLocation rl_src = GetLoc(cu, src);
+ cg->GenThrow(cu, rl_src);
+}
+
+static void CvtMonitorEnterExit(CompilationUnit* cu, bool is_enter,
+ llvm::CallInst* call_inst)
+{
+ Codegen* cg = cu->cg.get();
+ DCHECK_EQ(call_inst->getNumArgOperands(), 2U);
+ llvm::ConstantInt* opt_flags =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
+ llvm::Value* src = call_inst->getArgOperand(1);
+ RegLocation rl_src = GetLoc(cu, src);
+ if (is_enter) {
+ cg->GenMonitorEnter(cu, opt_flags->getZExtValue(), rl_src);
+ } else {
+ cg->GenMonitorExit(cu, opt_flags->getZExtValue(), rl_src);
+ }
+}
+
+static void CvtArrayLength(CompilationUnit* cu, llvm::CallInst* call_inst)
+{
+ Codegen* cg = cu->cg.get();
+ DCHECK_EQ(call_inst->getNumArgOperands(), 2U);
+ llvm::ConstantInt* opt_flags =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
+ llvm::Value* src = call_inst->getArgOperand(1);
+ RegLocation rl_src = GetLoc(cu, src);
+ rl_src = cg->LoadValue(cu, rl_src, kCoreReg);
+ cg->GenNullCheck(cu, rl_src.s_reg_low, rl_src.low_reg, opt_flags->getZExtValue());
+ RegLocation rl_dest = GetLoc(cu, call_inst);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ int len_offset = mirror::Array::LengthOffset().Int32Value();
+ cg->LoadWordDisp(cu, rl_src.low_reg, len_offset, rl_result.low_reg);
+ cg->StoreValue(cu, rl_dest, rl_result);
+}
+
+static void CvtMoveException(CompilationUnit* cu, llvm::CallInst* call_inst)
+{
+ Codegen* cg = cu->cg.get();
+ RegLocation rl_dest = GetLoc(cu, call_inst);
+ cg->GenMoveException(cu, rl_dest);
+}
+
+static void CvtSget(CompilationUnit* cu, llvm::CallInst* call_inst, bool is_wide, bool is_object)
+{
+ Codegen* cg = cu->cg.get();
+ DCHECK_EQ(call_inst->getNumArgOperands(), 1U);
+ llvm::ConstantInt* type_idx_val =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
+ uint32_t type_idx = type_idx_val->getZExtValue();
+ RegLocation rl_dest = GetLoc(cu, call_inst);
+ cg->GenSget(cu, type_idx, rl_dest, is_wide, is_object);
+}
+
+static void CvtSput(CompilationUnit* cu, llvm::CallInst* call_inst, bool is_wide, bool is_object)
+{
+ Codegen* cg = cu->cg.get();
+ DCHECK_EQ(call_inst->getNumArgOperands(), 2U);
+ llvm::ConstantInt* type_idx_val =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
+ uint32_t type_idx = type_idx_val->getZExtValue();
+ llvm::Value* src = call_inst->getArgOperand(1);
+ RegLocation rl_src = GetLoc(cu, src);
+ cg->GenSput(cu, type_idx, rl_src, is_wide, is_object);
+}
+
+static void CvtAget(CompilationUnit* cu, llvm::CallInst* call_inst, OpSize size, int scale)
+{
+ Codegen* cg = cu->cg.get();
+ DCHECK_EQ(call_inst->getNumArgOperands(), 3U);
+ llvm::ConstantInt* opt_flags =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
+ RegLocation rl_array = GetLoc(cu, call_inst->getArgOperand(1));
+ RegLocation rl_index = GetLoc(cu, call_inst->getArgOperand(2));
+ RegLocation rl_dest = GetLoc(cu, call_inst);
+ cg->GenArrayGet(cu, opt_flags->getZExtValue(), size, rl_array, rl_index,
+ rl_dest, scale);
+}
+
+static void CvtAput(CompilationUnit* cu, llvm::CallInst* call_inst, OpSize size,
+ int scale, bool is_object)
+{
+ Codegen* cg = cu->cg.get();
+ DCHECK_EQ(call_inst->getNumArgOperands(), 4U);
+ llvm::ConstantInt* opt_flags =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
+ RegLocation rl_src = GetLoc(cu, call_inst->getArgOperand(1));
+ RegLocation rl_array = GetLoc(cu, call_inst->getArgOperand(2));
+ RegLocation rl_index = GetLoc(cu, call_inst->getArgOperand(3));
+ if (is_object) {
+ cg->GenArrayObjPut(cu, opt_flags->getZExtValue(), rl_array, rl_index,
+ rl_src, scale);
+ } else {
+ cg->GenArrayPut(cu, opt_flags->getZExtValue(), size, rl_array, rl_index,
+ rl_src, scale);
+ }
+}
+
+static void CvtAputObj(CompilationUnit* cu, llvm::CallInst* call_inst)
+{
+ CvtAput(cu, call_inst, kWord, 2, true /* is_object */);
+}
+
+static void CvtAputPrimitive(CompilationUnit* cu, llvm::CallInst* call_inst,
+ OpSize size, int scale)
+{
+ CvtAput(cu, call_inst, size, scale, false /* is_object */);
+}
+
+static void CvtIget(CompilationUnit* cu, llvm::CallInst* call_inst, OpSize size,
+ bool is_wide, bool is_obj)
+{
+ Codegen* cg = cu->cg.get();
+ DCHECK_EQ(call_inst->getNumArgOperands(), 3U);
+ llvm::ConstantInt* opt_flags =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
+ RegLocation rl_obj = GetLoc(cu, call_inst->getArgOperand(1));
+ llvm::ConstantInt* field_idx =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(2));
+ RegLocation rl_dest = GetLoc(cu, call_inst);
+ cg->GenIGet(cu, field_idx->getZExtValue(), opt_flags->getZExtValue(),
+ size, rl_dest, rl_obj, is_wide, is_obj);
+}
+
+static void CvtIput(CompilationUnit* cu, llvm::CallInst* call_inst, OpSize size,
+ bool is_wide, bool is_obj)
+{
+ Codegen* cg = cu->cg.get();
+ DCHECK_EQ(call_inst->getNumArgOperands(), 4U);
+ llvm::ConstantInt* opt_flags =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
+ RegLocation rl_src = GetLoc(cu, call_inst->getArgOperand(1));
+ RegLocation rl_obj = GetLoc(cu, call_inst->getArgOperand(2));
+ llvm::ConstantInt* field_idx =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(3));
+ cg->GenIPut(cu, field_idx->getZExtValue(), opt_flags->getZExtValue(),
+ size, rl_src, rl_obj, is_wide, is_obj);
+}
+
+static void CvtCheckCast(CompilationUnit* cu, llvm::CallInst* call_inst)
+{
+ Codegen* cg = cu->cg.get();
+ DCHECK_EQ(call_inst->getNumArgOperands(), 2U);
+ llvm::ConstantInt* type_idx =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
+ RegLocation rl_src = GetLoc(cu, call_inst->getArgOperand(1));
+ cg->GenCheckCast(cu, type_idx->getZExtValue(), rl_src);
+}
+
+static void CvtFPCompare(CompilationUnit* cu, llvm::CallInst* call_inst,
+ Instruction::Code opcode)
+{
+ Codegen* cg = cu->cg.get();
+ RegLocation rl_src1 = GetLoc(cu, call_inst->getArgOperand(0));
+ RegLocation rl_src2 = GetLoc(cu, call_inst->getArgOperand(1));
+ RegLocation rl_dest = GetLoc(cu, call_inst);
+ cg->GenCmpFP(cu, opcode, rl_dest, rl_src1, rl_src2);
+}
+
+static void CvtLongCompare(CompilationUnit* cu, llvm::CallInst* call_inst)
+{
+ Codegen* cg = cu->cg.get();
+ RegLocation rl_src1 = GetLoc(cu, call_inst->getArgOperand(0));
+ RegLocation rl_src2 = GetLoc(cu, call_inst->getArgOperand(1));
+ RegLocation rl_dest = GetLoc(cu, call_inst);
+ cg->GenCmpLong(cu, rl_dest, rl_src1, rl_src2);
+}
+
+static void CvtSwitch(CompilationUnit* cu, llvm::Instruction* inst)
+{
+ Codegen* cg = cu->cg.get();
+ llvm::SwitchInst* sw_inst = llvm::dyn_cast<llvm::SwitchInst>(inst);
+ DCHECK(sw_inst != NULL);
+ llvm::Value* test_val = sw_inst->getCondition();
+ llvm::MDNode* table_offset_node = sw_inst->getMetadata("SwitchTable");
+ DCHECK(table_offset_node != NULL);
+ llvm::ConstantInt* table_offset_value =
+ static_cast<llvm::ConstantInt*>(table_offset_node->getOperand(0));
+ int32_t table_offset = table_offset_value->getSExtValue();
+ RegLocation rl_src = GetLoc(cu, test_val);
+ const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
+ uint16_t table_magic = *table;
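+ // Dalvik switch payload ident codes: 0x0100 = packed-switch, 0x0200 = sparse-switch.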
+ if (table_magic == 0x100) {
+ cg->GenPackedSwitch(cu, table_offset, rl_src);
+ } else {
+ DCHECK_EQ(table_magic, 0x200);
+ cg->GenSparseSwitch(cu, table_offset, rl_src);
+ }
+}
+
+static void CvtInvoke(CompilationUnit* cu, llvm::CallInst* call_inst, bool is_void,
+ bool is_filled_new_array)
+{
+ Codegen* cg = cu->cg.get();
+ CallInfo* info = static_cast<CallInfo*>(NewMem(cu, sizeof(CallInfo), true, kAllocMisc));
+ if (is_void) {
+ info->result.location = kLocInvalid;
+ } else {
+ info->result = GetLoc(cu, call_inst);
+ }
+ llvm::ConstantInt* invoke_type_val =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(0));
+ llvm::ConstantInt* method_index_val =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(1));
+ llvm::ConstantInt* opt_flags_val =
+ llvm::dyn_cast<llvm::ConstantInt>(call_inst->getArgOperand(2));
+ info->type = static_cast<InvokeType>(invoke_type_val->getZExtValue());
+ info->index = method_index_val->getZExtValue();
+ info->opt_flags = opt_flags_val->getZExtValue();
+ info->offset = cu->current_dalvik_offset;
+
+ // Count the argument words, and then build the argument array.
+ info->num_arg_words = 0;
+ for (unsigned int i = 3; i < call_inst->getNumArgOperands(); i++) {
+ RegLocation t_loc = GetLoc(cu, call_inst->getArgOperand(i));
+ info->num_arg_words += t_loc.wide ? 2 : 1;
+ }
+ info->args = (info->num_arg_words == 0) ? NULL : static_cast<RegLocation*>
+ (NewMem(cu, sizeof(RegLocation) * info->num_arg_words, false, kAllocMisc));
+ // Now, fill in the location records, synthesizing high loc of wide vals
+ for (int i = 3, next = 0; next < info->num_arg_words;) {
+ info->args[next] = GetLoc(cu, call_inst->getArgOperand(i++));
+ if (info->args[next].wide) {
+ next++;
+ // TODO: Might make sense to mark this as an invalid loc
+ info->args[next].orig_sreg = info->args[next-1].orig_sreg+1;
+ info->args[next].s_reg_low = info->args[next-1].s_reg_low+1;
+ }
+ next++;
+ }
+ // TODO - rework such that we no longer need is_range
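+ // (A non-range Dalvik invoke can encode at most five argument words.)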
+ info->is_range = (info->num_arg_words > 5);
+
+ if (is_filled_new_array) {
+ cg->GenFilledNewArray(cu, info);
+ } else {
+ cg->GenInvoke(cu, info);
+ }
+}
+
+/* Look up the RegLocation associated with a Value. Must already be defined */
+static RegLocation ValToLoc(CompilationUnit* cu, llvm::Value* val)
+{
+ SafeMap<llvm::Value*, RegLocation>::iterator it = cu->loc_map.find(val);
+ DCHECK(it != cu->loc_map.end()) << "Missing definition";
+ return it->second;
+}
+
+static bool BitcodeBlockCodeGen(CompilationUnit* cu, llvm::BasicBlock* bb)
+{
+ Codegen* cg = cu->cg.get();
+ while (cu->llvm_blocks.find(bb) == cu->llvm_blocks.end()) {
+ llvm::BasicBlock* next_bb = NULL;
+ cu->llvm_blocks.insert(bb);
+ bool is_entry = (bb == &cu->func->getEntryBlock());
+ // Define the starting label
+ LIR* block_label = cu->block_to_label_map.Get(bb);
+ // Extract the type and starting offset from the block's name
+ char block_type = kInvalidBlock;
+ if (is_entry) {
+ block_type = kNormalBlock;
+ block_label->operands[0] = 0;
+ } else if (!bb->hasName()) {
+ block_type = kNormalBlock;
+ block_label->operands[0] = DexFile::kDexNoIndex;
+ } else {
+ std::string block_name = bb->getName().str();
+ int dummy;
+ sscanf(block_name.c_str(), kLabelFormat, &block_type, &block_label->operands[0], &dummy);
+ cu->current_dalvik_offset = block_label->operands[0];
+ }
+ DCHECK((block_type == kNormalBlock) || (block_type == kCatchBlock));
+ cu->current_dalvik_offset = block_label->operands[0];
+ // Set the label kind
+ block_label->opcode = kPseudoNormalBlockLabel;
+ // Insert the label
+ AppendLIR(cu, block_label);
+
+ LIR* head_lir = NULL;
+
+ if (block_type == kCatchBlock) {
+ head_lir = NewLIR0(cu, kPseudoExportedPC);
+ }
+
+ // Free temp registers and reset redundant store tracking
+ ResetRegPool(cu);
+ ResetDefTracking(cu);
+
+ // TODO: restore oat incoming liveness optimization
+ ClobberAllRegs(cu);
+
+ if (is_entry) {
+ RegLocation* ArgLocs = static_cast<RegLocation*>
+ (NewMem(cu, sizeof(RegLocation) * cu->num_ins, true, kAllocMisc));
+ llvm::Function::arg_iterator it(cu->func->arg_begin());
+ llvm::Function::arg_iterator it_end(cu->func->arg_end());
+ // Skip past Method*
+ it++;
+ for (unsigned i = 0; it != it_end; ++it) {
+ llvm::Value* val = it;
+ ArgLocs[i++] = ValToLoc(cu, val);
+ llvm::Type* ty = val->getType();
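+ // A wide (long/double) argument occupies two location slots; synthesize the
+ // high half from the low half's record.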
+ if ((ty == cu->irb->getInt64Ty()) || (ty == cu->irb->getDoubleTy())) {
+ ArgLocs[i] = ArgLocs[i-1];
+ ArgLocs[i].low_reg = ArgLocs[i].high_reg;
+ ArgLocs[i].orig_sreg++;
+ ArgLocs[i].s_reg_low = INVALID_SREG;
+ ArgLocs[i].high_word = true;
+ i++;
+ }
+ }
+ cg->GenEntrySequence(cu, ArgLocs, cu->method_loc);
+ }
+
+ // Visit all of the instructions in the block
+ for (llvm::BasicBlock::iterator it = bb->begin(), e = bb->end(); it != e;) {
+ llvm::Instruction* inst = it;
+ llvm::BasicBlock::iterator next_it = ++it;
+ // Extract the Dalvik offset from the instruction
+ uint32_t opcode = inst->getOpcode();
+ llvm::MDNode* dex_offset_node = inst->getMetadata("DexOff");
+ if (dex_offset_node != NULL) {
+ llvm::ConstantInt* dex_offset_value =
+ static_cast<llvm::ConstantInt*>(dex_offset_node->getOperand(0));
+ cu->current_dalvik_offset = dex_offset_value->getZExtValue();
+ }
+
+ ResetRegPool(cu);
+ if (cu->disable_opt & (1 << kTrackLiveTemps)) {
+ ClobberAllRegs(cu);
+ }
+
+ if (cu->disable_opt & (1 << kSuppressLoads)) {
+ ResetDefTracking(cu);
+ }
+
+ #ifndef NDEBUG
+ /* Reset temp tracking sanity check */
+ cu->live_sreg = INVALID_SREG;
+ #endif
+
+ // TODO: use llvm opcode name here instead of "boundary" if verbose
+ LIR* boundary_lir = MarkBoundary(cu, cu->current_dalvik_offset, "boundary");
+
+ /* Remember the first LIR for this block */
+ if (head_lir == NULL) {
+ head_lir = boundary_lir;
+ head_lir->def_mask = ENCODE_ALL;
+ }
+
+ switch(opcode) {
+
+ case llvm::Instruction::ICmp: {
+ llvm::Instruction* next_inst = next_it;
+ llvm::BranchInst* br_inst = llvm::dyn_cast<llvm::BranchInst>(next_inst);
+ if (br_inst != NULL /* and... */) {
+ CvtICmpBr(cu, inst, br_inst);
+ ++it;
+ } else {
+ CvtICmp(cu, inst);
+ }
+ }
+ break;
+
+ case llvm::Instruction::Call: {
+ llvm::CallInst* call_inst = llvm::dyn_cast<llvm::CallInst>(inst);
+ llvm::Function* callee = call_inst->getCalledFunction();
+ greenland::IntrinsicHelper::IntrinsicId id =
+ cu->intrinsic_helper->GetIntrinsicId(callee);
+ switch (id) {
+ case greenland::IntrinsicHelper::AllocaShadowFrame:
+ case greenland::IntrinsicHelper::PopShadowFrame:
+ case greenland::IntrinsicHelper::SetVReg:
+ // Ignore shadow frame stuff for quick compiler
+ break;
+ case greenland::IntrinsicHelper::CopyInt:
+ case greenland::IntrinsicHelper::CopyObj:
+ case greenland::IntrinsicHelper::CopyFloat:
+ case greenland::IntrinsicHelper::CopyLong:
+ case greenland::IntrinsicHelper::CopyDouble:
+ CvtCopy(cu, call_inst);
+ break;
+ case greenland::IntrinsicHelper::ConstInt:
+ case greenland::IntrinsicHelper::ConstObj:
+ case greenland::IntrinsicHelper::ConstLong:
+ case greenland::IntrinsicHelper::ConstFloat:
+ case greenland::IntrinsicHelper::ConstDouble:
+ CvtConst(cu, call_inst);
+ break;
+ case greenland::IntrinsicHelper::DivInt:
+ case greenland::IntrinsicHelper::DivLong:
+ CvtBinOp(cu, kOpDiv, inst);
+ break;
+ case greenland::IntrinsicHelper::RemInt:
+ case greenland::IntrinsicHelper::RemLong:
+ CvtBinOp(cu, kOpRem, inst);
+ break;
+ case greenland::IntrinsicHelper::MethodInfo:
+ // Already dealt with - just ignore it here.
+ break;
+ case greenland::IntrinsicHelper::CheckSuspend:
+ cg->GenSuspendTest(cu, 0 /* opt_flags already applied */);
+ break;
+ case greenland::IntrinsicHelper::HLInvokeObj:
+ case greenland::IntrinsicHelper::HLInvokeFloat:
+ case greenland::IntrinsicHelper::HLInvokeDouble:
+ case greenland::IntrinsicHelper::HLInvokeLong:
+ case greenland::IntrinsicHelper::HLInvokeInt:
+ CvtInvoke(cu, call_inst, false /* is_void */, false /* new_array */);
+ break;
+ case greenland::IntrinsicHelper::HLInvokeVoid:
+ CvtInvoke(cu, call_inst, true /* is_void */, false /* new_array */);
+ break;
+ case greenland::IntrinsicHelper::HLFilledNewArray:
+ CvtInvoke(cu, call_inst, false /* is_void */, true /* new_array */);
+ break;
+ case greenland::IntrinsicHelper::HLFillArrayData:
+ CvtFillArrayData(cu, call_inst);
+ break;
+ case greenland::IntrinsicHelper::ConstString:
+ CvtConstObject(cu, call_inst, true /* is_string */);
+ break;
+ case greenland::IntrinsicHelper::ConstClass:
+ CvtConstObject(cu, call_inst, false /* is_string */);
+ break;
+ case greenland::IntrinsicHelper::HLCheckCast:
+ CvtCheckCast(cu, call_inst);
+ break;
+ case greenland::IntrinsicHelper::NewInstance:
+ CvtNewInstance(cu, call_inst);
+ break;
+ case greenland::IntrinsicHelper::HLSgetObject:
+ CvtSget(cu, call_inst, false /* wide */, true /* Object */);
+ break;
+ case greenland::IntrinsicHelper::HLSget:
+ case greenland::IntrinsicHelper::HLSgetFloat:
+ case greenland::IntrinsicHelper::HLSgetBoolean:
+ case greenland::IntrinsicHelper::HLSgetByte:
+ case greenland::IntrinsicHelper::HLSgetChar:
+ case greenland::IntrinsicHelper::HLSgetShort:
+ CvtSget(cu, call_inst, false /* wide */, false /* Object */);
+ break;
+ case greenland::IntrinsicHelper::HLSgetWide:
+ case greenland::IntrinsicHelper::HLSgetDouble:
+ CvtSget(cu, call_inst, true /* wide */, false /* Object */);
+ break;
+ case greenland::IntrinsicHelper::HLSput:
+ case greenland::IntrinsicHelper::HLSputFloat:
+ case greenland::IntrinsicHelper::HLSputBoolean:
+ case greenland::IntrinsicHelper::HLSputByte:
+ case greenland::IntrinsicHelper::HLSputChar:
+ case greenland::IntrinsicHelper::HLSputShort:
+ CvtSput(cu, call_inst, false /* wide */, false /* Object */);
+ break;
+ case greenland::IntrinsicHelper::HLSputWide:
+ case greenland::IntrinsicHelper::HLSputDouble:
+ CvtSput(cu, call_inst, true /* wide */, false /* Object */);
+ break;
+ case greenland::IntrinsicHelper::HLSputObject:
+ CvtSput(cu, call_inst, false /* wide */, true /* Object */);
+ break;
+ case greenland::IntrinsicHelper::GetException:
+ CvtMoveException(cu, call_inst);
+ break;
+ case greenland::IntrinsicHelper::HLThrowException:
+ CvtThrow(cu, call_inst);
+ break;
+ case greenland::IntrinsicHelper::MonitorEnter:
+ CvtMonitorEnterExit(cu, true /* is_enter */, call_inst);
+ break;
+ case greenland::IntrinsicHelper::MonitorExit:
+ CvtMonitorEnterExit(cu, false /* is_enter */, call_inst);
+ break;
+ case greenland::IntrinsicHelper::OptArrayLength:
+ CvtArrayLength(cu, call_inst);
+ break;
+ case greenland::IntrinsicHelper::NewArray:
+ CvtNewArray(cu, call_inst);
+ break;
+ case greenland::IntrinsicHelper::InstanceOf:
+ CvtInstanceOf(cu, call_inst);
+ break;
+
+ case greenland::IntrinsicHelper::HLArrayGet:
+ case greenland::IntrinsicHelper::HLArrayGetObject:
+ case greenland::IntrinsicHelper::HLArrayGetFloat:
+ CvtAget(cu, call_inst, kWord, 2);
+ break;
+ case greenland::IntrinsicHelper::HLArrayGetWide:
+ case greenland::IntrinsicHelper::HLArrayGetDouble:
+ CvtAget(cu, call_inst, kLong, 3);
+ break;
+ case greenland::IntrinsicHelper::HLArrayGetBoolean:
+ CvtAget(cu, call_inst, kUnsignedByte, 0);
+ break;
+ case greenland::IntrinsicHelper::HLArrayGetByte:
+ CvtAget(cu, call_inst, kSignedByte, 0);
+ break;
+ case greenland::IntrinsicHelper::HLArrayGetChar:
+ CvtAget(cu, call_inst, kUnsignedHalf, 1);
+ break;
+ case greenland::IntrinsicHelper::HLArrayGetShort:
+ CvtAget(cu, call_inst, kSignedHalf, 1);
+ break;
+
+ case greenland::IntrinsicHelper::HLArrayPut:
+ case greenland::IntrinsicHelper::HLArrayPutFloat:
+ CvtAputPrimitive(cu, call_inst, kWord, 2);
+ break;
+ case greenland::IntrinsicHelper::HLArrayPutObject:
+ CvtAputObj(cu, call_inst);
+ break;
+ case greenland::IntrinsicHelper::HLArrayPutWide:
+ case greenland::IntrinsicHelper::HLArrayPutDouble:
+ CvtAputPrimitive(cu, call_inst, kLong, 3);
+ break;
+ case greenland::IntrinsicHelper::HLArrayPutBoolean:
+ CvtAputPrimitive(cu, call_inst, kUnsignedByte, 0);
+ break;
+ case greenland::IntrinsicHelper::HLArrayPutByte:
+ CvtAputPrimitive(cu, call_inst, kSignedByte, 0);
+ break;
+ case greenland::IntrinsicHelper::HLArrayPutChar:
+ CvtAputPrimitive(cu, call_inst, kUnsignedHalf, 1);
+ break;
+ case greenland::IntrinsicHelper::HLArrayPutShort:
+ CvtAputPrimitive(cu, call_inst, kSignedHalf, 1);
+ break;
+
+ case greenland::IntrinsicHelper::HLIGet:
+ case greenland::IntrinsicHelper::HLIGetFloat:
+ CvtIget(cu, call_inst, kWord, false /* is_wide */, false /* obj */);
+ break;
+ case greenland::IntrinsicHelper::HLIGetObject:
+ CvtIget(cu, call_inst, kWord, false /* is_wide */, true /* obj */);
+ break;
+ case greenland::IntrinsicHelper::HLIGetWide:
+ case greenland::IntrinsicHelper::HLIGetDouble:
+ CvtIget(cu, call_inst, kLong, true /* is_wide */, false /* obj */);
+ break;
+ case greenland::IntrinsicHelper::HLIGetBoolean:
+ CvtIget(cu, call_inst, kUnsignedByte, false /* is_wide */,
+ false /* obj */);
+ break;
+ case greenland::IntrinsicHelper::HLIGetByte:
+ CvtIget(cu, call_inst, kSignedByte, false /* is_wide */,
+ false /* obj */);
+ break;
+ case greenland::IntrinsicHelper::HLIGetChar:
+ CvtIget(cu, call_inst, kUnsignedHalf, false /* is_wide */,
+ false /* obj */);
+ break;
+ case greenland::IntrinsicHelper::HLIGetShort:
+ CvtIget(cu, call_inst, kSignedHalf, false /* is_wide */,
+ false /* obj */);
+ break;
+
+ case greenland::IntrinsicHelper::HLIPut:
+ case greenland::IntrinsicHelper::HLIPutFloat:
+ CvtIput(cu, call_inst, kWord, false /* is_wide */, false /* obj */);
+ break;
+ case greenland::IntrinsicHelper::HLIPutObject:
+ CvtIput(cu, call_inst, kWord, false /* is_wide */, true /* obj */);
+ break;
+ case greenland::IntrinsicHelper::HLIPutWide:
+ case greenland::IntrinsicHelper::HLIPutDouble:
+ CvtIput(cu, call_inst, kLong, true /* is_wide */, false /* obj */);
+ break;
+ case greenland::IntrinsicHelper::HLIPutBoolean:
+ CvtIput(cu, call_inst, kUnsignedByte, false /* is_wide */,
+ false /* obj */);
+ break;
+ case greenland::IntrinsicHelper::HLIPutByte:
+ CvtIput(cu, call_inst, kSignedByte, false /* is_wide */,
+ false /* obj */);
+ break;
+ case greenland::IntrinsicHelper::HLIPutChar:
+ CvtIput(cu, call_inst, kUnsignedHalf, false /* is_wide */,
+ false /* obj */);
+ break;
+ case greenland::IntrinsicHelper::HLIPutShort:
+ CvtIput(cu, call_inst, kSignedHalf, false /* is_wide */,
+ false /* obj */);
+ break;
+
+ case greenland::IntrinsicHelper::IntToChar:
+ CvtIntNarrowing(cu, call_inst, Instruction::INT_TO_CHAR);
+ break;
+ case greenland::IntrinsicHelper::IntToShort:
+ CvtIntNarrowing(cu, call_inst, Instruction::INT_TO_SHORT);
+ break;
+ case greenland::IntrinsicHelper::IntToByte:
+ CvtIntNarrowing(cu, call_inst, Instruction::INT_TO_BYTE);
+ break;
+
+ case greenland::IntrinsicHelper::F2I:
+ case greenland::IntrinsicHelper::D2I:
+ case greenland::IntrinsicHelper::F2L:
+ case greenland::IntrinsicHelper::D2L:
+ CvtFPToInt(cu, call_inst);
+ break;
+
+ case greenland::IntrinsicHelper::CmplFloat:
+ CvtFPCompare(cu, call_inst, Instruction::CMPL_FLOAT);
+ break;
+ case greenland::IntrinsicHelper::CmpgFloat:
+ CvtFPCompare(cu, call_inst, Instruction::CMPG_FLOAT);
+ break;
+ case greenland::IntrinsicHelper::CmplDouble:
+ CvtFPCompare(cu, call_inst, Instruction::CMPL_DOUBLE);
+ break;
+ case greenland::IntrinsicHelper::CmpgDouble:
+ CvtFPCompare(cu, call_inst, Instruction::CMPG_DOUBLE);
+ break;
+
+ case greenland::IntrinsicHelper::CmpLong:
+ CvtLongCompare(cu, call_inst);
+ break;
+
+ case greenland::IntrinsicHelper::SHLLong:
+ CvtShiftOp(cu, Instruction::SHL_LONG, call_inst);
+ break;
+ case greenland::IntrinsicHelper::SHRLong:
+ CvtShiftOp(cu, Instruction::SHR_LONG, call_inst);
+ break;
+ case greenland::IntrinsicHelper::USHRLong:
+ CvtShiftOp(cu, Instruction::USHR_LONG, call_inst);
+ break;
+ case greenland::IntrinsicHelper::SHLInt:
+ CvtShiftOp(cu, Instruction::SHL_INT, call_inst);
+ break;
+ case greenland::IntrinsicHelper::SHRInt:
+ CvtShiftOp(cu, Instruction::SHR_INT, call_inst);
+ break;
+ case greenland::IntrinsicHelper::USHRInt:
+ CvtShiftOp(cu, Instruction::USHR_INT, call_inst);
+ break;
+
+ case greenland::IntrinsicHelper::CatchTargets: {
+ llvm::SwitchInst* sw_inst =
+ llvm::dyn_cast<llvm::SwitchInst>(next_it);
+ DCHECK(sw_inst != NULL);
+ /*
+ * Discard the edges and the following conditional branch.
+ * Do a direct branch to the default target (which is the
+ * "work" portion of the pair.
+ * TODO: awful code layout - rework
+ */
+ llvm::BasicBlock* target_bb = sw_inst->getDefaultDest();
+ DCHECK(target_bb != NULL);
+ cg->OpUnconditionalBranch(cu, cu->block_to_label_map.Get(target_bb));
+ ++it;
+ // Set next bb to default target - improves code layout
+ next_bb = target_bb;
+ }
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected intrinsic " << cu->intrinsic_helper->GetName(id);
+ }
+ }
+ break;
+
+ case llvm::Instruction::Br: CvtBr(cu, inst); break;
+ case llvm::Instruction::Add: CvtBinOp(cu, kOpAdd, inst); break;
+ case llvm::Instruction::Sub: CvtBinOp(cu, kOpSub, inst); break;
+ case llvm::Instruction::Mul: CvtBinOp(cu, kOpMul, inst); break;
+ case llvm::Instruction::SDiv: CvtBinOp(cu, kOpDiv, inst); break;
+ case llvm::Instruction::SRem: CvtBinOp(cu, kOpRem, inst); break;
+ case llvm::Instruction::And: CvtBinOp(cu, kOpAnd, inst); break;
+ case llvm::Instruction::Or: CvtBinOp(cu, kOpOr, inst); break;
+ case llvm::Instruction::Xor: CvtBinOp(cu, kOpXor, inst); break;
+ case llvm::Instruction::PHI: CvtPhi(cu, inst); break;
+ case llvm::Instruction::Ret: CvtRet(cu, inst); break;
+ case llvm::Instruction::FAdd: CvtBinFPOp(cu, kOpAdd, inst); break;
+ case llvm::Instruction::FSub: CvtBinFPOp(cu, kOpSub, inst); break;
+ case llvm::Instruction::FMul: CvtBinFPOp(cu, kOpMul, inst); break;
+ case llvm::Instruction::FDiv: CvtBinFPOp(cu, kOpDiv, inst); break;
+ case llvm::Instruction::FRem: CvtBinFPOp(cu, kOpRem, inst); break;
+ case llvm::Instruction::SIToFP: CvtIntToFP(cu, inst); break;
+ case llvm::Instruction::FPTrunc: CvtDoubleToFloat(cu, inst); break;
+ case llvm::Instruction::FPExt: CvtFloatToDouble(cu, inst); break;
+ case llvm::Instruction::Trunc: CvtTrunc(cu, inst); break;
+
+ case llvm::Instruction::ZExt: CvtIntExt(cu, inst, false /* signed */);
+ break;
+ case llvm::Instruction::SExt: CvtIntExt(cu, inst, true /* signed */);
+ break;
+
+ case llvm::Instruction::Switch: CvtSwitch(cu, inst); break;
+
+ case llvm::Instruction::Unreachable:
+ break; // FIXME: can we really ignore these?
+
+ case llvm::Instruction::Shl:
+ case llvm::Instruction::LShr:
+ case llvm::Instruction::AShr:
+ case llvm::Instruction::Invoke:
+ case llvm::Instruction::FPToUI:
+ case llvm::Instruction::FPToSI:
+ case llvm::Instruction::UIToFP:
+ case llvm::Instruction::PtrToInt:
+ case llvm::Instruction::IntToPtr:
+ case llvm::Instruction::FCmp:
+ case llvm::Instruction::URem:
+ case llvm::Instruction::UDiv:
+ case llvm::Instruction::Resume:
+ case llvm::Instruction::Alloca:
+ case llvm::Instruction::GetElementPtr:
+ case llvm::Instruction::Fence:
+ case llvm::Instruction::AtomicCmpXchg:
+ case llvm::Instruction::AtomicRMW:
+ case llvm::Instruction::BitCast:
+ case llvm::Instruction::VAArg:
+ case llvm::Instruction::Select:
+ case llvm::Instruction::UserOp1:
+ case llvm::Instruction::UserOp2:
+ case llvm::Instruction::ExtractElement:
+ case llvm::Instruction::InsertElement:
+ case llvm::Instruction::ShuffleVector:
+ case llvm::Instruction::ExtractValue:
+ case llvm::Instruction::InsertValue:
+ case llvm::Instruction::LandingPad:
+ case llvm::Instruction::IndirectBr:
+ case llvm::Instruction::Load:
+ case llvm::Instruction::Store:
+ LOG(FATAL) << "Unexpected llvm opcode: " << opcode; break;
+
+ default:
+ LOG(FATAL) << "Unknown llvm opcode: " << inst->getOpcodeName();
+ break;
+ }
+ }
+
+ if (head_lir != NULL) {
+ ApplyLocalOptimizations(cu, head_lir, cu->last_lir_insn);
+ }
+ if (next_bb != NULL) {
+ bb = next_bb;
+ next_bb = NULL;
+ }
+ }
+ return false;
+}
+
+/*
+ * Convert LLVM_IR to MIR:
+ * o Iterate through the LLVM_IR and construct a graph using
+ * standard MIR building blocks.
+ * o Perform a basic-block optimization pass to remove unnecessary
+ * store/load sequences.
+ * o Convert the LLVM Value operands into RegLocations where applicable.
+ * o Create ssa_rep def/use operand arrays for each converted LLVM opcode
+ * o Perform register promotion
+ * o Iterate through the graph a basic block at a time, generating
+ * LIR.
+ * o Assemble LIR as usual.
+ * o Profit.
+ */
+void MethodBitcode2LIR(CompilationUnit* cu)
+{
+ Codegen* cg = cu->cg.get();
+ llvm::Function* func = cu->func;
+ int num_basic_blocks = func->getBasicBlockList().size();
+ // Allocate a list for LIR basic block labels
+ cu->block_label_list =
+ static_cast<LIR*>(NewMem(cu, sizeof(LIR) * num_basic_blocks, true, kAllocLIR));
+ LIR* label_list = cu->block_label_list;
+ int next_label = 0;
+ for (llvm::Function::iterator i = func->begin(), e = func->end(); i != e; ++i) {
+ cu->block_to_label_map.Put(static_cast<llvm::BasicBlock*>(i),
+ &label_list[next_label++]);
+ }
+
+ /*
+ * Keep honest - clear reg_locations, Value => RegLocation,
+ * promotion map and VmapTables.
+ */
+ cu->loc_map.clear(); // Start fresh
+ cu->reg_location = NULL;
+ for (int i = 0; i < cu->num_dalvik_registers + cu->num_compiler_temps + 1; i++) {
+ cu->promotion_map[i].core_location = kLocDalvikFrame;
+ cu->promotion_map[i].fp_location = kLocDalvikFrame;
+ }
+ cu->core_spill_mask = 0;
+ cu->num_core_spills = 0;
+ cu->fp_spill_mask = 0;
+ cu->num_fp_spills = 0;
+ cu->core_vmap_table.clear();
+ cu->fp_vmap_table.clear();
+
+ /*
+ * At this point, we've lost all knowledge of register promotion.
+ * Rebuild that info from the MethodInfo intrinsic (if it
+ * exists - not required for correctness). Normally, this will
+ * be the first instruction we encounter, so we won't have to iterate
+ * through everything.
+ */
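+ // Metadata consumed here (when present): a "RegInfo" node carrying the
+ // Ins/Regs/Outs/CTemps/SSARegs counts, and a "PromotionMap" node with one
+ // packed word per promotion-map entry, decoded in the loop below.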
+ for (llvm::inst_iterator i = llvm::inst_begin(func), e = llvm::inst_end(func); i != e; ++i) {
+ llvm::CallInst* call_inst = llvm::dyn_cast<llvm::CallInst>(&*i);
+ if (call_inst != NULL) {
+ llvm::Function* callee = call_inst->getCalledFunction();
+ greenland::IntrinsicHelper::IntrinsicId id =
+ cu->intrinsic_helper->GetIntrinsicId(callee);
+ if (id == greenland::IntrinsicHelper::MethodInfo) {
+ if (cu->verbose) {
+ LOG(INFO) << "Found MethodInfo";
+ }
+ llvm::MDNode* reg_info_node = call_inst->getMetadata("RegInfo");
+ if (reg_info_node != NULL) {
+ llvm::ConstantInt* num_ins_value =
+ static_cast<llvm::ConstantInt*>(reg_info_node->getOperand(0));
+ llvm::ConstantInt* num_regs_value =
+ static_cast<llvm::ConstantInt*>(reg_info_node->getOperand(1));
+ llvm::ConstantInt* num_outs_value =
+ static_cast<llvm::ConstantInt*>(reg_info_node->getOperand(2));
+ llvm::ConstantInt* num_compiler_temps_value =
+ static_cast<llvm::ConstantInt*>(reg_info_node->getOperand(3));
+ llvm::ConstantInt* num_ssa_regs_value =
+ static_cast<llvm::ConstantInt*>(reg_info_node->getOperand(4));
+ if (cu->verbose) {
+ LOG(INFO) << "RegInfo - Ins:" << num_ins_value->getZExtValue()
+ << ", Regs:" << num_regs_value->getZExtValue()
+ << ", Outs:" << num_outs_value->getZExtValue()
+ << ", CTemps:" << num_compiler_temps_value->getZExtValue()
+ << ", SSARegs:" << num_ssa_regs_value->getZExtValue();
+ }
+ }
+ llvm::MDNode* pmap_info_node = call_inst->getMetadata("PromotionMap");
+ if (pmap_info_node != NULL) {
+ int elems = pmap_info_node->getNumOperands();
+ if (cu->verbose) {
+ LOG(INFO) << "PMap size: " << elems;
+ }
+ for (int i = 0; i < elems; i++) {
+ llvm::ConstantInt* raw_map_data =
+ static_cast<llvm::ConstantInt*>(pmap_info_node->getOperand(i));
+ uint32_t map_data = raw_map_data->getZExtValue();
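+ // Packed layout, per the shifts below: [31:24] first_in_pair, [23:16] FpReg,
+ // [15:8] core_reg, [7:4] fp_location, [3:0] core_location.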
+ PromotionMap* p = &cu->promotion_map[i];
+ p->first_in_pair = (map_data >> 24) & 0xff;
+ p->FpReg = (map_data >> 16) & 0xff;
+ p->core_reg = (map_data >> 8) & 0xff;
+ p->fp_location = static_cast<RegLocationType>((map_data >> 4) & 0xf);
+ if (p->fp_location == kLocPhysReg) {
+ RecordFpPromotion(cu, p->FpReg, i);
+ }
+ p->core_location = static_cast<RegLocationType>(map_data & 0xf);
+ if (p->core_location == kLocPhysReg) {
+ RecordCorePromotion(cu, p->core_reg, i);
+ }
+ }
+ if (cu->verbose) {
+ DumpPromotionMap(cu);
+ }
+ }
+ break;
+ }
+ }
+ }
+ cg->AdjustSpillMask(cu);
+ cu->frame_size = ComputeFrameSize(cu);
+
+ // Create RegLocations for arguments
+ llvm::Function::arg_iterator it(cu->func->arg_begin());
+ llvm::Function::arg_iterator it_end(cu->func->arg_end());
+ for (; it != it_end; ++it) {
+ llvm::Value* val = it;
+ CreateLocFromValue(cu, val);
+ }
+ // Create RegLocations for all non-argument definitions
+ for (llvm::inst_iterator i = llvm::inst_begin(func), e = llvm::inst_end(func); i != e; ++i) {
+ llvm::Value* val = &*i;
+ if (val->hasName() && (val->getName().str().c_str()[0] == 'v')) {
+ CreateLocFromValue(cu, val);
+ }
+ }
+
+ // Walk the blocks, generating code.
+ for (llvm::Function::iterator i = cu->func->begin(), e = cu->func->end(); i != e; ++i) {
+ BitcodeBlockCodeGen(cu, static_cast<llvm::BasicBlock*>(i));
+ }
+
+ cg->HandleSuspendLaunchPads(cu);
+
+ cg->HandleThrowLaunchPads(cu);
+
+ cg->HandleIntrinsicLaunchPads(cu);
+
+ cu->func->eraseFromParent();
+ cu->func = NULL;
+}
+
+
+} // namespace art
diff --git a/src/compiler/dex/portable/mir_to_gbc.h b/src/compiler/dex/portable/mir_to_gbc.h
new file mode 100644
index 0000000..a3b5b31
--- /dev/null
+++ b/src/compiler/dex/portable/mir_to_gbc.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_PORTABLE_MIRTOGBC_H_
+#define ART_SRC_COMPILER_DEX_PORTABLE_MIRTOGBC_H_
+
+namespace art {
+
+void MethodMIR2Bitcode(CompilationUnit* cu);
+void MethodBitcode2LIR(CompilationUnit* cu);
+
+} // namespace art
+
+#endif // ART_SRC_COMPILER_DEX_PORTABLE_MIRTOGBC_H_
diff --git a/src/compiler/dex/quick/arm/arm_lir.h b/src/compiler/dex/quick/arm/arm_lir.h
new file mode 100644
index 0000000..abcaacc
--- /dev/null
+++ b/src/compiler/dex/quick/arm/arm_lir.h
@@ -0,0 +1,499 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_QUICK_ARM_ARMLIR_H_
+#define ART_SRC_COMPILER_DEX_QUICK_ARM_ARMLIR_H_
+
+#include "compiler/dex/compiler_internals.h"
+
+namespace art {
+
+/*
+ * Runtime register usage conventions.
+ *
+ * r0-r3: Argument registers in both Dalvik and C/C++ conventions.
+ * However, for Dalvik->Dalvik calls we'll pass the target's Method*
+ * pointer in r0 as a hidden arg0. Otherwise used as codegen scratch
+ * registers.
+ * r0-r1: As in C/C++ r0 is 32-bit return register and r0/r1 is 64-bit
+ * r4 : (rARM_SUSPEND) is reserved (suspend check/debugger assist)
+ * r5 : Callee save (promotion target)
+ * r6 : Callee save (promotion target)
+ * r7 : Callee save (promotion target)
+ * r8 : Callee save (promotion target)
+ * r9 : (rARM_SELF) is reserved (pointer to thread-local storage)
+ * r10 : Callee save (promotion target)
+ * r11 : Callee save (promotion target)
+ * r12 : Scratch, may be trashed by linkage stubs
+ * r13 : (sp) is reserved
+ * r14 : (lr) is reserved
+ * r15 : (pc) is reserved
+ *
+ * 5 core temps that codegen can use (r0, r1, r2, r3, r12)
+ * 7 core registers that can be used for promotion
+ *
+ * Floating point registers
+ * s0-s31
+ * d0-d15, where d0={s0,s1}, d1={s2,s3}, ... , d15={s30,s31}
+ *
+ * s16-s31 (d8-d15) preserved across C calls
+ * s0-s15 (d0-d7) trashed across C calls
+ *
+ * s0-s15/d0-d7 used as codegen temp/scratch
+ * s16-s31/d8-d15 can be used for promotion.
+ *
+ * Calling convention
+ * o On a call to a Dalvik method, pass target's Method* in r0
+ * o r1-r3 will be used for up to the first 3 words of arguments
+ * o Arguments past the first 3 words will be placed in appropriate
+ * out slots by the caller.
+ * o If a 64-bit argument would span the register/memory argument
+ * boundary, it will instead be fully passed in the frame.
+ * o Maintain a 16-byte stack alignment
+ *
+ * Stack frame diagram (stack grows down, higher addresses at top):
+ *
+ * +------------------------+
+ * | IN[ins-1] | {Note: resides in caller's frame}
+ * | . |
+ * | IN[0] |
+ * | caller's Method* |
+ * +========================+ {Note: start of callee's frame}
+ * | spill region | {variable sized - will include lr if non-leaf.}
+ * +------------------------+
+ * | ...filler word... | {Note: used as 2nd word of V[locals-1] if long}
+ * +------------------------+
+ * | V[locals-1] |
+ * | V[locals-2] |
+ * | . |
+ * | . |
+ * | V[1] |
+ * | V[0] |
+ * +------------------------+
+ * | 0 to 3 words padding |
+ * +------------------------+
+ * | OUT[outs-1] |
+ * | OUT[outs-2] |
+ * | . |
+ * | OUT[0] |
+ * | cur_method* | <<== sp w/ 16-byte alignment
+ * +========================+
+ */
+
+// Offset to distinguish FP regs.
+#define ARM_FP_REG_OFFSET 32
+// Offset to distinguish DP FP regs.
+#define ARM_FP_DOUBLE 64
+// First FP callee save.
+#define ARM_FP_CALLEE_SAVE_BASE 16
+// Reg types.
+#define ARM_REGTYPE(x) (x & (ARM_FP_REG_OFFSET | ARM_FP_DOUBLE))
+#define ARM_FPREG(x) ((x & ARM_FP_REG_OFFSET) == ARM_FP_REG_OFFSET)
+#define ARM_LOWREG(x) ((x & 0x7) == x)
+#define ARM_DOUBLEREG(x) ((x & ARM_FP_DOUBLE) == ARM_FP_DOUBLE)
+#define ARM_SINGLEREG(x) (ARM_FPREG(x) && !ARM_DOUBLEREG(x))
+
+/*
+ * Note: the low register of a floating point pair is sufficient to
+ * create the name of a double, but we require both names to be passed so
+ * that asserts can verify the pair is consecutive if significant rework
+ * is done in this area. It also serves as a reminder in the calling
+ * code that reg locations always describe doubles as a pair of singles.
+ */
+#define ARM_S2D(x,y) ((x) | ARM_FP_DOUBLE)
+// Mask to strip off fp flags.
+#define ARM_FP_REG_MASK (ARM_FP_REG_OFFSET-1)
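+// For example, with the register pool defined below: fr3 == 3 + ARM_FP_REG_OFFSET,
+// so ARM_FPREG(fr3) holds and (fr3 & ARM_FP_REG_MASK) recovers the raw index 3;
+// ARM_S2D(fr6, fr7) yields the same encoding as the double alias dr3.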
+
+// RegisterLocation templates for return values (r0, or r0/r1).
+#define ARM_LOC_C_RETURN {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, r0, INVALID_REG,\
+ INVALID_SREG, INVALID_SREG}
+#define ARM_LOC_C_RETURN_WIDE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1, \
+ INVALID_SREG, INVALID_SREG}
+#define ARM_LOC_C_RETURN_FLOAT ARM_LOC_C_RETURN
+#define ARM_LOC_C_RETURN_DOUBLE ARM_LOC_C_RETURN_WIDE
+
+enum ArmResourceEncodingPos {
+ kArmGPReg0 = 0,
+ kArmRegSP = 13,
+ kArmRegLR = 14,
+ kArmRegPC = 15,
+ kArmFPReg0 = 16,
+ kArmFPReg16 = 32,
+ kArmRegEnd = 48,
+};
+
+#define ENCODE_ARM_REG_LIST(N) (static_cast<uint64_t>(N))
+#define ENCODE_ARM_REG_SP (1ULL << kArmRegSP)
+#define ENCODE_ARM_REG_LR (1ULL << kArmRegLR)
+#define ENCODE_ARM_REG_PC (1ULL << kArmRegPC)
+#define ENCODE_ARM_REG_FPCS_LIST(N) (static_cast<uint64_t>(N) << kArmFPReg16)
+
+enum ArmNativeRegisterPool {
+ r0 = 0,
+ r1 = 1,
+ r2 = 2,
+ r3 = 3,
+ rARM_SUSPEND = 4,
+ r5 = 5,
+ r6 = 6,
+ r7 = 7,
+ r8 = 8,
+ rARM_SELF = 9,
+ r10 = 10,
+ r11 = 11,
+ r12 = 12,
+ r13sp = 13,
+ rARM_SP = 13,
+ r14lr = 14,
+ rARM_LR = 14,
+ r15pc = 15,
+ rARM_PC = 15,
+ fr0 = 0 + ARM_FP_REG_OFFSET,
+ fr1 = 1 + ARM_FP_REG_OFFSET,
+ fr2 = 2 + ARM_FP_REG_OFFSET,
+ fr3 = 3 + ARM_FP_REG_OFFSET,
+ fr4 = 4 + ARM_FP_REG_OFFSET,
+ fr5 = 5 + ARM_FP_REG_OFFSET,
+ fr6 = 6 + ARM_FP_REG_OFFSET,
+ fr7 = 7 + ARM_FP_REG_OFFSET,
+ fr8 = 8 + ARM_FP_REG_OFFSET,
+ fr9 = 9 + ARM_FP_REG_OFFSET,
+ fr10 = 10 + ARM_FP_REG_OFFSET,
+ fr11 = 11 + ARM_FP_REG_OFFSET,
+ fr12 = 12 + ARM_FP_REG_OFFSET,
+ fr13 = 13 + ARM_FP_REG_OFFSET,
+ fr14 = 14 + ARM_FP_REG_OFFSET,
+ fr15 = 15 + ARM_FP_REG_OFFSET,
+ fr16 = 16 + ARM_FP_REG_OFFSET,
+ fr17 = 17 + ARM_FP_REG_OFFSET,
+ fr18 = 18 + ARM_FP_REG_OFFSET,
+ fr19 = 19 + ARM_FP_REG_OFFSET,
+ fr20 = 20 + ARM_FP_REG_OFFSET,
+ fr21 = 21 + ARM_FP_REG_OFFSET,
+ fr22 = 22 + ARM_FP_REG_OFFSET,
+ fr23 = 23 + ARM_FP_REG_OFFSET,
+ fr24 = 24 + ARM_FP_REG_OFFSET,
+ fr25 = 25 + ARM_FP_REG_OFFSET,
+ fr26 = 26 + ARM_FP_REG_OFFSET,
+ fr27 = 27 + ARM_FP_REG_OFFSET,
+ fr28 = 28 + ARM_FP_REG_OFFSET,
+ fr29 = 29 + ARM_FP_REG_OFFSET,
+ fr30 = 30 + ARM_FP_REG_OFFSET,
+ fr31 = 31 + ARM_FP_REG_OFFSET,
+ dr0 = fr0 + ARM_FP_DOUBLE,
+ dr1 = fr2 + ARM_FP_DOUBLE,
+ dr2 = fr4 + ARM_FP_DOUBLE,
+ dr3 = fr6 + ARM_FP_DOUBLE,
+ dr4 = fr8 + ARM_FP_DOUBLE,
+ dr5 = fr10 + ARM_FP_DOUBLE,
+ dr6 = fr12 + ARM_FP_DOUBLE,
+ dr7 = fr14 + ARM_FP_DOUBLE,
+ dr8 = fr16 + ARM_FP_DOUBLE,
+ dr9 = fr18 + ARM_FP_DOUBLE,
+ dr10 = fr20 + ARM_FP_DOUBLE,
+ dr11 = fr22 + ARM_FP_DOUBLE,
+ dr12 = fr24 + ARM_FP_DOUBLE,
+ dr13 = fr26 + ARM_FP_DOUBLE,
+ dr14 = fr28 + ARM_FP_DOUBLE,
+ dr15 = fr30 + ARM_FP_DOUBLE,
+};
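// Compile-time sketch of how the FP flag bits and ARM_S2D compose (illustrative
// only; assumes C++11 static_assert, which the original file does not use):
static_assert(fr2 == (2 | ARM_FP_REG_OFFSET), "s2 carries the FP flag");
static_assert(ARM_S2D(fr2, fr3) == dr1, "the s2/s3 pair names double d1");
static_assert(ARM_SINGLEREG(fr2) && !ARM_SINGLEREG(dr1), "singles vs. doubles classify correctly");
static_assert((dr1 & ARM_FP_REG_MASK) == 2, "ARM_FP_REG_MASK strips the FP flags");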
+
+// Target-independent aliases.
+#define rARM_ARG0 r0
+#define rARM_ARG1 r1
+#define rARM_ARG2 r2
+#define rARM_ARG3 r3
+#define rARM_FARG0 r0
+#define rARM_FARG1 r1
+#define rARM_FARG2 r2
+#define rARM_FARG3 r3
+#define rARM_RET0 r0
+#define rARM_RET1 r1
+#define rARM_INVOKE_TGT rARM_LR
+#define rARM_COUNT INVALID_REG
+
+enum ArmShiftEncodings {
+ kArmLsl = 0x0,
+ kArmLsr = 0x1,
+ kArmAsr = 0x2,
+ kArmRor = 0x3
+};
+
+/*
+ * The following enum defines the list of supported Thumb instructions by the
+ * assembler. Their corresponding EncodingMap positions will be defined in
+ * Assemble.cc.
+ */
+enum ArmOpcode {
+ kArmFirst = 0,
+ kArm16BitData = kArmFirst, // DATA [0] rd[15..0].
+ kThumbAdcRR, // adc [0100000101] rm[5..3] rd[2..0].
+ kThumbAddRRI3, // add(1) [0001110] imm_3[8..6] rn[5..3] rd[2..0].
+ kThumbAddRI8, // add(2) [00110] rd[10..8] imm_8[7..0].
+ kThumbAddRRR, // add(3) [0001100] rm[8..6] rn[5..3] rd[2..0].
+ kThumbAddRRLH, // add(4) [01000100] H12[01] rm[5..3] rd[2..0].
+ kThumbAddRRHL, // add(4) [01001000] H12[10] rm[5..3] rd[2..0].
+ kThumbAddRRHH, // add(4) [01001100] H12[11] rm[5..3] rd[2..0].
+ kThumbAddPcRel, // add(5) [10100] rd[10..8] imm_8[7..0].
+ kThumbAddSpRel, // add(6) [10101] rd[10..8] imm_8[7..0].
+ kThumbAddSpI7, // add(7) [101100000] imm_7[6..0].
+ kThumbAndRR, // and [0100000000] rm[5..3] rd[2..0].
+ kThumbAsrRRI5, // asr(1) [00010] imm_5[10..6] rm[5..3] rd[2..0].
+ kThumbAsrRR, // asr(2) [0100000100] rs[5..3] rd[2..0].
+ kThumbBCond, // b(1) [1101] cond[11..8] offset_8[7..0].
+ kThumbBUncond, // b(2) [11100] offset_11[10..0].
+ kThumbBicRR, // bic [0100001110] rm[5..3] rd[2..0].
+ kThumbBkpt, // bkpt [10111110] imm_8[7..0].
+ kThumbBlx1, // blx(1) [111] H[10] offset_11[10..0].
+ kThumbBlx2, // blx(2) [111] H[01] offset_11[10..0].
+ kThumbBl1, // bl(1) [111] H[10] offset_11[10..0].
+ kThumbBl2, // bl(2) [111] H[11] offset_11[10..0].
+ kThumbBlxR, // blx(2) [010001111] rm[6..3] [000].
+ kThumbBx, // bx [010001110] H2[6..6] rm[5..3] SBZ[000].
+ kThumbCmnRR, // cmn [0100001011] rm[5..3] rd[2..0].
+ kThumbCmpRI8, // cmp(1) [00101] rn[10..8] imm_8[7..0].
+ kThumbCmpRR, // cmp(2) [0100001010] rm[5..3] rd[2..0].
+ kThumbCmpLH, // cmp(3) [01000101] H12[01] rm[5..3] rd[2..0].
+ kThumbCmpHL, // cmp(3) [01000110] H12[10] rm[5..3] rd[2..0].
+ kThumbCmpHH, // cmp(3) [01000111] H12[11] rm[5..3] rd[2..0].
+ kThumbEorRR, // eor [0100000001] rm[5..3] rd[2..0].
+ kThumbLdmia, // ldmia [11001] rn[10..8] reglist [7..0].
+ kThumbLdrRRI5, // ldr(1) [01101] imm_5[10..6] rn[5..3] rd[2..0].
+ kThumbLdrRRR, // ldr(2) [0101100] rm[8..6] rn[5..3] rd[2..0].
+ kThumbLdrPcRel, // ldr(3) [01001] rd[10..8] imm_8[7..0].
+ kThumbLdrSpRel, // ldr(4) [10011] rd[10..8] imm_8[7..0].
+ kThumbLdrbRRI5, // ldrb(1) [01111] imm_5[10..6] rn[5..3] rd[2..0].
+ kThumbLdrbRRR, // ldrb(2) [0101110] rm[8..6] rn[5..3] rd[2..0].
+ kThumbLdrhRRI5, // ldrh(1) [10001] imm_5[10..6] rn[5..3] rd[2..0].
+ kThumbLdrhRRR, // ldrh(2) [0101101] rm[8..6] rn[5..3] rd[2..0].
+ kThumbLdrsbRRR, // ldrsb [0101011] rm[8..6] rn[5..3] rd[2..0].
+ kThumbLdrshRRR, // ldrsh [0101111] rm[8..6] rn[5..3] rd[2..0].
+ kThumbLslRRI5, // lsl(1) [00000] imm_5[10..6] rm[5..3] rd[2..0].
+ kThumbLslRR, // lsl(2) [0100000010] rs[5..3] rd[2..0].
+ kThumbLsrRRI5, // lsr(1) [00001] imm_5[10..6] rm[5..3] rd[2..0].
+ kThumbLsrRR, // lsr(2) [0100000011] rs[5..3] rd[2..0].
+ kThumbMovImm, // mov(1) [00100] rd[10..8] imm_8[7..0].
+ kThumbMovRR, // mov(2) [0001110000] rn[5..3] rd[2..0].
+ kThumbMovRR_H2H, // mov(3) [01000111] H12[11] rm[5..3] rd[2..0].
+ kThumbMovRR_H2L, // mov(3) [01000110] H12[01] rm[5..3] rd[2..0].
+ kThumbMovRR_L2H, // mov(3) [01000101] H12[10] rm[5..3] rd[2..0].
+ kThumbMul, // mul [0100001101] rm[5..3] rd[2..0].
+ kThumbMvn, // mvn [0100001111] rm[5..3] rd[2..0].
+ kThumbNeg, // neg [0100001001] rm[5..3] rd[2..0].
+ kThumbOrr, // orr [0100001100] rm[5..3] rd[2..0].
+ kThumbPop, // pop [1011110] r[8..8] rl[7..0].
+ kThumbPush, // push [1011010] r[8..8] rl[7..0].
+ kThumbRorRR, // ror [0100000111] rs[5..3] rd[2..0].
+ kThumbSbc, // sbc [0100000110] rm[5..3] rd[2..0].
+ kThumbStmia, // stmia [11000] rn[10..8] reglist [7.. 0].
+ kThumbStrRRI5, // str(1) [01100] imm_5[10..6] rn[5..3] rd[2..0].
+ kThumbStrRRR, // str(2) [0101000] rm[8..6] rn[5..3] rd[2..0].
+ kThumbStrSpRel, // str(3) [10010] rd[10..8] imm_8[7..0].
+ kThumbStrbRRI5, // strb(1) [01110] imm_5[10..6] rn[5..3] rd[2..0].
+ kThumbStrbRRR, // strb(2) [0101010] rm[8..6] rn[5..3] rd[2..0].
+ kThumbStrhRRI5, // strh(1) [10000] imm_5[10..6] rn[5..3] rd[2..0].
+ kThumbStrhRRR, // strh(2) [0101001] rm[8..6] rn[5..3] rd[2..0].
+ kThumbSubRRI3, // sub(1) [0001111] imm_3[8..6] rn[5..3] rd[2..0].
+ kThumbSubRI8, // sub(2) [00111] rd[10..8] imm_8[7..0].
+ kThumbSubRRR, // sub(3) [0001101] rm[8..6] rn[5..3] rd[2..0].
+ kThumbSubSpI7, // sub(4) [101100001] imm_7[6..0].
+ kThumbSwi, // swi [11011111] imm_8[7..0].
+ kThumbTst, // tst [0100001000] rm[5..3] rn[2..0].
+ kThumb2Vldrs, // vldr low sx [111011011001] rn[19..16] rd[15-12] [1010] imm_8[7..0].
+ kThumb2Vldrd, // vldr low dx [111011011001] rn[19..16] rd[15-12] [1011] imm_8[7..0].
+ kThumb2Vmuls, // vmul vd, vn, vm [111011100010] rn[19..16] rd[15-12] [10100000] rm[3..0].
+ kThumb2Vmuld, // vmul vd, vn, vm [111011100010] rn[19..16] rd[15-12] [10110000] rm[3..0].
+ kThumb2Vstrs, // vstr low sx [111011011000] rn[19..16] rd[15-12] [1010] imm_8[7..0].
+ kThumb2Vstrd, // vstr low dx [111011011000] rn[19..16] rd[15-12] [1011] imm_8[7..0].
+ kThumb2Vsubs, // vsub vd, vn, vm [111011100011] rn[19..16] rd[15-12] [10100100] rm[3..0].
+ kThumb2Vsubd, // vsub vd, vn, vm [111011100011] rn[19..16] rd[15-12] [10110100] rm[3..0].
+ kThumb2Vadds, // vadd vd, vn, vm [111011100011] rn[19..16] rd[15-12] [10100000] rm[3..0].
+ kThumb2Vaddd, // vadd vd, vn, vm [111011100011] rn[19..16] rd[15-12] [10110000] rm[3..0].
+ kThumb2Vdivs, // vdiv vd, vn, vm [111011101000] rn[19..16] rd[15-12] [10100000] rm[3..0].
+ kThumb2Vdivd, // vdiv vd, vn, vm [111011101000] rn[19..16] rd[15-12] [10110000] rm[3..0].
+ kThumb2VcvtIF, // vcvt.F32 vd, vm [1110111010111000] vd[15..12] [10101100] vm[3..0].
+ kThumb2VcvtID, // vcvt.F64 vd, vm [1110111010111000] vd[15..12] [10111100] vm[3..0].
+ kThumb2VcvtFI, // vcvt.S32.F32 vd, vm [1110111010111101] vd[15..12] [10101100] vm[3..0].
+ kThumb2VcvtDI, // vcvt.S32.F64 vd, vm [1110111010111101] vd[15..12] [10111100] vm[3..0].
+ kThumb2VcvtFd, // vcvt.F64.F32 vd, vm [1110111010110111] vd[15..12] [10101100] vm[3..0].
+ kThumb2VcvtDF, // vcvt.F32.F64 vd, vm [1110111010110111] vd[15..12] [10111100] vm[3..0].
+ kThumb2Vsqrts, // vsqrt.f32 vd, vm [1110111010110001] vd[15..12] [10101100] vm[3..0].
+ kThumb2Vsqrtd, // vsqrt.f64 vd, vm [1110111010110001] vd[15..12] [10111100] vm[3..0].
+ kThumb2MovImmShift,// mov(T2) rd, #<const> [11110] i [00001001111] imm3 rd[11..8] imm8.
+ kThumb2MovImm16, // mov(T3) rd, #<const> [11110] i [0010100] imm4 [0] imm3 rd[11..8] imm8.
+ kThumb2StrRRI12, // str(Imm,T3) rd,[rn,#imm12] [111110001100] rn[19..16] rt[15..12] imm12[11..0].
+ kThumb2LdrRRI12, // ldr(Imm,T3) rd,[rn,#imm12] [111110001101] rn[19..16] rt[15..12] imm12[11..0].
+ kThumb2StrRRI8Predec, // str(Imm,T4) rd,[rn,#-imm8] [111110000100] rn[19..16] rt[15..12] [1100] imm[7..0].
+ kThumb2LdrRRI8Predec, // ldr(Imm,T4) rd,[rn,#-imm8] [111110000101] rn[19..16] rt[15..12] [1100] imm[7..0].
+ kThumb2Cbnz, // cbnz rd,<label> [101110] i [1] imm5[7..3] rn[2..0].
+ kThumb2Cbz, // cbz rd,<label> [101100] i [1] imm5[7..3] rn[2..0].
+ kThumb2AddRRI12, // add rd, rn, #imm12 [11110] i [100000] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
+ kThumb2MovRR, // mov rd, rm [11101010010011110000] rd[11..8] [0000] rm[3..0].
+ kThumb2Vmovs, // vmov.f32 vd, vm [111011101] D [110000] vd[15..12] [101001] M [0] vm[3..0].
+ kThumb2Vmovd, // vmov.f64 vd, vm [111011101] D [110000] vd[15..12] [101101] M [0] vm[3..0].
+ kThumb2Ldmia, // ldmia [111010001001] rn[19..16] mask[15..0].
+ kThumb2Stmia, // stmia [111010001000] rn[19..16] mask[15..0].
+ kThumb2AddRRR, // add [111010110000] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
+ kThumb2SubRRR, // sub [111010111010] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
+ kThumb2SbcRRR, // sbc [111010110110] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
+ kThumb2CmpRR, // cmp [111010111011] rn[19..16] [0000] [1111] [0000] rm[3..0].
+ kThumb2SubRRI12, // sub rd, rn, #imm12 [11110] i [01010] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
+ kThumb2MvnImm12, // mov(T2) rd, #<const> [11110] i [00011011110] imm3 rd[11..8] imm8.
+ kThumb2Sel, // sel rd, rn, rm [111110101010] rn[19-16] rd[11-8] rm[3-0].
+ kThumb2Ubfx, // ubfx rd,rn,#lsb,#width [111100111100] rn[19..16] [0] imm3[14-12] rd[11-8] w[4-0].
+ kThumb2Sbfx, // sbfx rd,rn,#lsb,#width [111100110100] rn[19..16] [0] imm3[14-12] rd[11-8] w[4-0].
+ kThumb2LdrRRR, // ldr rt,[rn,rm,LSL #imm] [111110000101] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
+ kThumb2LdrhRRR, // ldrh rt,[rn,rm,LSL #imm] [111110000011] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
+ kThumb2LdrshRRR, // ldrsh rt,[rn,rm,LSL #imm] [111110010011] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
+ kThumb2LdrbRRR, // ldrb rt,[rn,rm,LSL #imm] [111110000001] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
+ kThumb2LdrsbRRR, // ldrsb rt,[rn,rm,LSL #imm] [111110010001] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
+ kThumb2StrRRR, // str rt,[rn,rm,LSL #imm] [111110000100] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
+ kThumb2StrhRRR, // strh rt,[rn,rm,LSL #imm] [111110000010] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
+ kThumb2StrbRRR, // strb rt,[rn,rm,LSL #imm] [111110000000] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
+ kThumb2LdrhRRI12, // ldrh rt,[rn,#imm12] [111110001011] rt[15..12] rn[19..16] imm12[11..0].
+ kThumb2LdrshRRI12, // ldrsh rt,[rn,#imm12] [111110011011] rt[15..12] rn[19..16] imm12[11..0].
+ kThumb2LdrbRRI12, // ldrb rt,[rn,#imm12] [111110001001] rt[15..12] rn[19..16] imm12[11..0].
+ kThumb2LdrsbRRI12, // ldrsb rt,[rn,#imm12] [111110011001] rt[15..12] rn[19..16] imm12[11..0].
+ kThumb2StrhRRI12, // strh rt,[rn,#imm12] [111110001010] rt[15..12] rn[19..16] imm12[11..0].
+ kThumb2StrbRRI12, // strb rt,[rn,#imm12] [111110001000] rt[15..12] rn[19..16] imm12[11..0].
+ kThumb2Pop, // pop [1110100010111101] list[15-0].
+ kThumb2Push, // push [1110100100101101] list[15-0].
+ kThumb2CmpRI12, // cmp rn, #<const> [11110] i [011011] rn[19-16] [0] imm3 [1111] imm8[7..0].
+ kThumb2AdcRRR, // adc [111010110101] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
+ kThumb2AndRRR, // and [111010100000] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
+ kThumb2BicRRR, // bic [111010100010] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
+ kThumb2CmnRR, // cmn [111010110001] rn[19..16] [0000] [1111] [0000] rm[3..0].
+ kThumb2EorRRR, // eor [111010101000] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
+ kThumb2MulRRR, // mul [111110110000] rn[19..16] [1111] rd[11..8] [0000] rm[3..0].
+ kThumb2MnvRR, // mvn [11101010011011110] rd[11-8] [0000] rm[3..0].
+ kThumb2RsubRRI8, // rsub [111100011100] rn[19..16] [0000] rd[11..8] imm8[7..0].
+ kThumb2NegRR, // actually rsub rd, rn, #0.
+ kThumb2OrrRRR, // orr [111010100100] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
+ kThumb2TstRR, // tst [111010100001] rn[19..16] [0000] [1111] [0000] rm[3..0].
+ kThumb2LslRRR, // lsl [111110100000] rn[19..16] [1111] rd[11..8] [0000] rm[3..0].
+ kThumb2LsrRRR, // lsr [111110100010] rn[19..16] [1111] rd[11..8] [0000] rm[3..0].
+ kThumb2AsrRRR, // asr [111110100100] rn[19..16] [1111] rd[11..8] [0000] rm[3..0].
+ kThumb2RorRRR, // ror [111110100110] rn[19..16] [1111] rd[11..8] [0000] rm[3..0].
+ kThumb2LslRRI5, // lsl [11101010010011110] imm[14..12] rd[11..8] [00] rm[3..0].
+ kThumb2LsrRRI5, // lsr [11101010010011110] imm[14..12] rd[11..8] [01] rm[3..0].
+ kThumb2AsrRRI5, // asr [11101010010011110] imm[14..12] rd[11..8] [10] rm[3..0].
+ kThumb2RorRRI5, // ror [11101010010011110] imm[14..12] rd[11..8] [11] rm[3..0].
+ kThumb2BicRRI8, // bic [111100000010] rn[19..16] [0] imm3 rd[11..8] imm8.
+ kThumb2AndRRI8, // and [111100000000] rn[19..16] [0] imm3 rd[11..8] imm8.
+ kThumb2OrrRRI8, // orr [111100000100] rn[19..16] [0] imm3 rd[11..8] imm8.
+ kThumb2EorRRI8, // eor [111100001000] rn[19..16] [0] imm3 rd[11..8] imm8.
+ kThumb2AddRRI8, // add [111100010001] rn[19..16] [0] imm3 rd[11..8] imm8.
+ kThumb2AdcRRI8, // adc [111100010101] rn[19..16] [0] imm3 rd[11..8] imm8.
+ kThumb2SubRRI8, // sub [111100011011] rn[19..16] [0] imm3 rd[11..8] imm8.
+ kThumb2SbcRRI8, // sbc [111100010111] rn[19..16] [0] imm3 rd[11..8] imm8.
+ kThumb2It, // it [10111111] firstcond[7-4] mask[3-0].
+ kThumb2Fmstat, // fmstat [11101110111100011111101000010000].
+ kThumb2Vcmpd, // vcmp [111011101] D [11011] rd[15-12] [1011] E [1] M [0] rm[3-0].
+ kThumb2Vcmps, // vcmp [111011101] D [11010] rd[15-12] [1011] E [1] M [0] rm[3-0].
+ kThumb2LdrPcRel12, // ldr rd,[pc,#imm12] [1111100011011111] rt[15-12] imm12[11-0].
+ kThumb2BCond, // b<c> [1110] S cond[25-22] imm6[21-16] [10] J1 [0] J2 imm11[10..0].
+ kThumb2Vmovd_RR, // vmov [111011101] D [110000] vd[15-12 [101101] M [0] vm[3-0].
+ kThumb2Vmovs_RR, // vmov [111011101] D [110000] vd[15-12 [101001] M [0] vm[3-0].
+ kThumb2Fmrs, // vmov [111011100000] vn[19-16] rt[15-12] [1010] N [0010000].
+ kThumb2Fmsr, // vmov [111011100001] vn[19-16] rt[15-12] [1010] N [0010000].
+ kThumb2Fmrrd, // vmov [111011000100] rt2[19-16] rt[15-12] [101100] M [1] vm[3-0].
+ kThumb2Fmdrr, // vmov [111011000101] rt2[19-16] rt[15-12] [101100] M [1] vm[3-0].
+ kThumb2Vabsd, // vabs.f64 [111011101] D [110000] rd[15-12] [1011110] M [0] vm[3-0].
+ kThumb2Vabss, // vabs.f32 [111011101] D [110000] rd[15-12] [1010110] M [0] vm[3-0].
+ kThumb2Vnegd, // vneg.f64 [111011101] D [110000] rd[15-12] [1011110] M [0] vm[3-0].
+ kThumb2Vnegs, // vneg.f32 [111011101] D [110000] rd[15-12] [1010110] M [0] vm[3-0].
+ kThumb2Vmovs_IMM8, // vmov.f32 [111011101] D [11] imm4h[19-16] vd[15-12] [10100000] imm4l[3-0].
+ kThumb2Vmovd_IMM8, // vmov.f64 [111011101] D [11] imm4h[19-16] vd[15-12] [10110000] imm4l[3-0].
+ kThumb2Mla, // mla [111110110000] rn[19-16] ra[15-12] rd[7-4] [0000] rm[3-0].
+ kThumb2Umull, // umull [111110111010] rn[19-16], rdlo[15-12] rdhi[11-8] [0000] rm[3-0].
+ kThumb2Ldrex, // ldrex [111010000101] rn[19-16] rt[11-8] [1111] imm8[7-0].
+ kThumb2Strex, // strex [111010000100] rn[19-16] rt[11-8] rd[11-8] imm8[7-0].
+ kThumb2Clrex, // clrex [111100111011111110000111100101111].
+ kThumb2Bfi, // bfi [111100110110] rn[19-16] [0] imm3[14-12] rd[11-8] imm2[7-6] [0] msb[4-0].
+ kThumb2Bfc, // bfc [11110011011011110] [0] imm3[14-12] rd[11-8] imm2[7-6] [0] msb[4-0].
+ kThumb2Dmb, // dmb [1111001110111111100011110101] option[3-0].
+ kThumb2LdrPcReln12,// ldr rd,[pc,-#imm12] [1111100011011111] rt[15-12] imm12[11-0].
+ kThumb2Stm, // stm <list> [111010010000] rn[19-16] 000 rl[12-0].
+ kThumbUndefined, // undefined [11011110xxxxxxxx].
+ kThumb2VPopCS, // vpop <list of callee save fp singles (s16+)>.
+ kThumb2VPushCS, // vpush <list of callee save fp singles (s16+)>.
+ kThumb2Vldms, // vldms rd, <list>.
+ kThumb2Vstms, // vstms rd, <list>.
+ kThumb2BUncond, // b <label>.
+ kThumb2MovImm16H, // similar to kThumb2MovImm16, but target high hw.
+ kThumb2AddPCR, // Thumb2 2-operand add with hard-coded PC target.
+ kThumb2Adr, // Special purpose encoding of ADR for switch tables.
+ kThumb2MovImm16LST,// Special purpose version for switch table use.
+ kThumb2MovImm16HST,// Special purpose version for switch table use.
+ kThumb2LdmiaWB, // ldmia [111010011001] rn[19..16] mask[15..0].
+ kThumb2SubsRRI12, // setflags encoding.
+ kThumb2OrrRRRs, // orrx [111010100101] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
+ kThumb2Push1, // t3 encoding of push.
+ kThumb2Pop1, // t3 encoding of pop.
+ kThumb2RsubRRR, // rsb [111010111101] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
+ kThumb2Smull, // smull [111110111000] rn[19-16], rdlo[15-12] rdhi[11-8] [0000] rm[3-0].
+ kThumb2LdrdPcRel8, // ldrd rt, rt2, pc +-/1024.
+ kThumb2LdrdI8, // ldrd rt, rt2, [rn +-/1024].
+ kThumb2StrdI8, // strd rt, rt2, [rn +-/1024].
+ kArmLast,
+};
+
+enum ArmOpDmbOptions {
+ kSY = 0xf,
+ kST = 0xe,
+ kISH = 0xb,
+ kISHST = 0xa,
+ kNSH = 0x7,
+ kNSHST = 0x6
+};
+
+// Instruction assembly field_loc kind.
+enum ArmEncodingKind {
+ kFmtUnused,
+ kFmtBitBlt, // Bit string using end/start.
+ kFmtDfp, // Double FP reg.
+ kFmtSfp, // Single FP reg.
+ kFmtModImm, // Shifted 8-bit immed using [26,14..12,7..0].
+ kFmtImm16, // Zero-extended immed using [26,19..16,14..12,7..0].
+ kFmtImm6, // Encoded branch target using [9,7..3]0.
+ kFmtImm12, // Zero-extended immediate using [26,14..12,7..0].
+ kFmtShift, // Shift descriptor, [14..12,7..4].
+ kFmtLsb, // least significant bit using [14..12][7..6].
+ kFmtBWidth, // bit-field width, encoded as width-1.
+ kFmtShift5, // Shift count, [14..12,7..6].
+ kFmtBrOffset, // Signed extended [26,11,13,21-16,10-0]:0.
+ kFmtFPImm, // Encoded floating point immediate.
+ kFmtOff24, // 24-bit Thumb2 unconditional branch encoding.
+};
+
+// Struct used to define the snippet positions for each Thumb opcode.
+struct ArmEncodingMap {
+ uint32_t skeleton;
+ struct {
+ ArmEncodingKind kind;
+ int end; // end for kFmtBitBlt, 1-bit slice end for FP regs.
+ int start; // start for kFmtBitBlt, 4-bit slice end for FP regs.
+ } field_loc[4];
+ ArmOpcode opcode;
+ uint64_t flags;
+ const char* name;
+ const char* fmt;
+ int size; // Note: size is in bytes.
+};
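// A minimal sketch of how the field_loc descriptors drive assembly for the
// common kFmtBitBlt case (ExampleInsertBitBlt is a hypothetical helper; the
// real operand insertion lives in assemble_arm.cc and also handles the other
// ArmEncodingKind values):
static inline uint32_t ExampleInsertBitBlt(uint32_t skeleton, int end, int start,
                                           uint32_t value) {
  uint32_t width = end - start + 1;
  uint32_t mask = (width >= 32) ? ~0u : ((1u << width) - 1u);
  return skeleton | ((value & mask) << start);
}
// e.g. kThumbAddRRR splices its three registers into bits [2..0], [5..3] and
// [8..6] of the 0x1800 skeleton.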
+
+} // namespace art
+
+#endif // ART_SRC_COMPILER_DEX_QUICK_ARM_ARMLIR_H_
diff --git a/src/compiler/dex/quick/arm/assemble_arm.cc b/src/compiler/dex/quick/arm/assemble_arm.cc
new file mode 100644
index 0000000..32bb5df
--- /dev/null
+++ b/src/compiler/dex/quick/arm/assemble_arm.cc
@@ -0,0 +1,1393 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "arm_lir.h"
+#include "codegen_arm.h"
+#include "compiler/dex/quick/codegen_util.h"
+
+namespace art {
+
+/*
+ * opcode: ArmOpcode enum
+ * skeleton: pre-designated bit-pattern for this opcode
+ * k0: key to applying ds/de
+ * ds: dest start bit position
+ * de: dest end bit position
+ * k1: key to applying s1s/s1e
+ * s1s: src1 start bit position
+ * s1e: src1 end bit position
+ * k2: key to applying s2s/s2e
+ * s2s: src2 start bit position
+ * s2e: src2 end bit position
+ * k3/k3s/k3e: key and bit positions for the fourth operand field
+ * flags: instruction attribute flags (def/use, branch, load/store, ...)
+ * name: mnemonic name
+ * fmt: for pretty-printing
+ * size: instruction size in bytes
+ */
+#define ENCODING_MAP(opcode, skeleton, k0, ds, de, k1, s1s, s1e, k2, s2s, s2e, \
+ k3, k3s, k3e, flags, name, fmt, size) \
+ {skeleton, {{k0, ds, de}, {k1, s1s, s1e}, {k2, s2s, s2e}, \
+ {k3, k3s, k3e}}, opcode, flags, name, fmt, size}
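+
// For illustration, the first table entry below expands roughly to the
// following ArmEncodingMap initializer (field order per arm_lir.h):
//   {0x0000,
//    {{kFmtBitBlt, 15, 0}, {kFmtUnused, -1, -1},
//     {kFmtUnused, -1, -1}, {kFmtUnused, -1, -1}},
//    kArm16BitData, IS_UNARY_OP, "data", "0x!0h(!0d)", 2}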
+
+/* Instruction dump string format keys: !pf, where "!" is the start
+ * of the key, "p" is which numeric operand to use and "f" is the
+ * print format.
+ *
+ * [p]ositions:
+ * 0 -> operands[0] (dest)
+ * 1 -> operands[1] (src1)
+ * 2 -> operands[2] (src2)
+ * 3 -> operands[3] (extra)
+ *
+ * [f]ormats:
+ * h -> 4-digit hex
+ * d -> decimal
+ * E -> decimal*4
+ * F -> decimal*2
+ * c -> branch condition (beq, bne, etc.)
+ * t -> pc-relative target
+ * u -> 1st half of bl[x] target
+ * v -> 2nd half of bl[x] target
+ * R -> register list
+ * s -> single precision floating point register
+ * S -> double precision floating point register
+ * m -> Thumb2 modified immediate
+ * n -> complemented Thumb2 modified immediate
+ * M -> Thumb2 16-bit zero-extended immediate
+ * b -> 4-digit binary
+ * B -> dmb option string (sy, st, ish, ishst, nsh, nshst)
+ * H -> operand shift
+ * C -> core register name
+ * P -> fp cs register list (base of s16)
+ * Q -> fp cs register list (base of s0)
+ *
+ * [!] escape. To insert "!", use "!!"
+ */
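/*
 * Worked example of the keys above (illustrative): kThumbLdrRRI5 uses the fmt
 * string "!0C, [!1C, #!2E]". With operands {r0, r5, 3} it pretty-prints as
 *   ldr r0, [r5, #12]
 * since "C" names a core register and "E" prints the decimal operand times 4.
 */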
+/* NOTE: must be kept in sync with enum ArmOpcode from arm_lir.h */
+const ArmEncodingMap ArmCodegen::EncodingMap[kArmLast] = {
+ ENCODING_MAP(kArm16BitData, 0x0000,
+ kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP, "data", "0x!0h(!0d)", 2),
+ ENCODING_MAP(kThumbAdcRR, 0x4140,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES | USES_CCODES,
+ "adcs", "!0C, !1C", 2),
+ ENCODING_MAP(kThumbAddRRI3, 0x1c00,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
+ kFmtUnused, -1, -1,
+ IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+ "adds", "!0C, !1C, #!2d", 2),
+ ENCODING_MAP(kThumbAddRI8, 0x3000,
+ kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_BINARY_OP | REG_DEF0_USE0 | SETS_CCODES,
+ "adds", "!0C, !0C, #!1d", 2),
+ ENCODING_MAP(kThumbAddRRR, 0x1800,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
+ kFmtUnused, -1, -1,
+ IS_TERTIARY_OP | REG_DEF0_USE12 | SETS_CCODES,
+ "adds", "!0C, !1C, !2C", 2),
+ ENCODING_MAP(kThumbAddRRLH, 0x4440,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE01,
+ "add", "!0C, !1C", 2),
+ ENCODING_MAP(kThumbAddRRHL, 0x4480,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE01,
+ "add", "!0C, !1C", 2),
+ ENCODING_MAP(kThumbAddRRHH, 0x44c0,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE01,
+ "add", "!0C, !1C", 2),
+ ENCODING_MAP(kThumbAddPcRel, 0xa000,
+ kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | IS_BRANCH | NEEDS_FIXUP,
+ "add", "!0C, pc, #!1E", 2),
+ ENCODING_MAP(kThumbAddSpRel, 0xa800,
+ kFmtBitBlt, 10, 8, kFmtUnused, -1, -1, kFmtBitBlt, 7, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF_SP | REG_USE_SP,
+ "add", "!0C, sp, #!2E", 2),
+ ENCODING_MAP(kThumbAddSpI7, 0xb000,
+ kFmtBitBlt, 6, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | REG_DEF_SP | REG_USE_SP,
+ "add", "sp, #!0d*4", 2),
+ ENCODING_MAP(kThumbAndRR, 0x4000,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
+ "ands", "!0C, !1C", 2),
+ ENCODING_MAP(kThumbAsrRRI5, 0x1000,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1,
+ IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+ "asrs", "!0C, !1C, #!2d", 2),
+ ENCODING_MAP(kThumbAsrRR, 0x4100,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
+ "asrs", "!0C, !1C", 2),
+ ENCODING_MAP(kThumbBCond, 0xd000,
+ kFmtBitBlt, 7, 0, kFmtBitBlt, 11, 8, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | USES_CCODES |
+ NEEDS_FIXUP, "b!1c", "!0t", 2),
+ ENCODING_MAP(kThumbBUncond, 0xe000,
+ kFmtBitBlt, 10, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | NEEDS_FIXUP,
+ "b", "!0t", 2),
+ ENCODING_MAP(kThumbBicRR, 0x4380,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
+ "bics", "!0C, !1C", 2),
+ ENCODING_MAP(kThumbBkpt, 0xbe00,
+ kFmtBitBlt, 7, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH,
+ "bkpt", "!0d", 2),
+ ENCODING_MAP(kThumbBlx1, 0xf000,
+ kFmtBitBlt, 10, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_DEF_LR |
+ NEEDS_FIXUP, "blx_1", "!0u", 2),
+ ENCODING_MAP(kThumbBlx2, 0xe800,
+ kFmtBitBlt, 10, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_DEF_LR |
+ NEEDS_FIXUP, "blx_2", "!0v", 2),
+ ENCODING_MAP(kThumbBl1, 0xf000,
+ kFmtBitBlt, 10, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR,
+ "bl_1", "!0u", 2),
+ ENCODING_MAP(kThumbBl2, 0xf800,
+ kFmtBitBlt, 10, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR,
+ "bl_2", "!0v", 2),
+ ENCODING_MAP(kThumbBlxR, 0x4780,
+ kFmtBitBlt, 6, 3, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_UNARY_OP | REG_USE0 | IS_BRANCH | REG_DEF_LR,
+ "blx", "!0C", 2),
+ ENCODING_MAP(kThumbBx, 0x4700,
+ kFmtBitBlt, 6, 3, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH,
+ "bx", "!0C", 2),
+ ENCODING_MAP(kThumbCmnRR, 0x42c0,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01 | SETS_CCODES,
+ "cmn", "!0C, !1C", 2),
+ ENCODING_MAP(kThumbCmpRI8, 0x2800,
+ kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | SETS_CCODES,
+ "cmp", "!0C, #!1d", 2),
+ ENCODING_MAP(kThumbCmpRR, 0x4280,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01 | SETS_CCODES,
+ "cmp", "!0C, !1C", 2),
+ ENCODING_MAP(kThumbCmpLH, 0x4540,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01 | SETS_CCODES,
+ "cmp", "!0C, !1C", 2),
+ ENCODING_MAP(kThumbCmpHL, 0x4580,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01 | SETS_CCODES,
+ "cmp", "!0C, !1C", 2),
+ ENCODING_MAP(kThumbCmpHH, 0x45c0,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01 | SETS_CCODES,
+ "cmp", "!0C, !1C", 2),
+ ENCODING_MAP(kThumbEorRR, 0x4040,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
+ "eors", "!0C, !1C", 2),
+ ENCODING_MAP(kThumbLdmia, 0xc800,
+ kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_BINARY_OP | REG_DEF0_USE0 | REG_DEF_LIST1 | IS_LOAD,
+ "ldmia", "!0C!!, <!1R>", 2),
+ ENCODING_MAP(kThumbLdrRRI5, 0x6800,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ "ldr", "!0C, [!1C, #!2E]", 2),
+ ENCODING_MAP(kThumbLdrRRR, 0x5800,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_LOAD,
+ "ldr", "!0C, [!1C, !2C]", 2),
+ ENCODING_MAP(kThumbLdrPcRel, 0x4800,
+ kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0 | REG_USE_PC
+ | IS_LOAD | NEEDS_FIXUP, "ldr", "!0C, [pc, #!1E]", 2),
+ ENCODING_MAP(kThumbLdrSpRel, 0x9800,
+ kFmtBitBlt, 10, 8, kFmtUnused, -1, -1, kFmtBitBlt, 7, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0 | REG_USE_SP
+ | IS_LOAD, "ldr", "!0C, [sp, #!2E]", 2),
+ ENCODING_MAP(kThumbLdrbRRI5, 0x7800,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ "ldrb", "!0C, [!1C, #2d]", 2),
+ ENCODING_MAP(kThumbLdrbRRR, 0x5c00,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_LOAD,
+ "ldrb", "!0C, [!1C, !2C]", 2),
+ ENCODING_MAP(kThumbLdrhRRI5, 0x8800,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ "ldrh", "!0C, [!1C, #!2F]", 2),
+ ENCODING_MAP(kThumbLdrhRRR, 0x5a00,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_LOAD,
+ "ldrh", "!0C, [!1C, !2C]", 2),
+ ENCODING_MAP(kThumbLdrsbRRR, 0x5600,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_LOAD,
+ "ldrsb", "!0C, [!1C, !2C]", 2),
+ ENCODING_MAP(kThumbLdrshRRR, 0x5e00,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_LOAD,
+ "ldrsh", "!0C, [!1C, !2C]", 2),
+ ENCODING_MAP(kThumbLslRRI5, 0x0000,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1,
+ IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+ "lsls", "!0C, !1C, #!2d", 2),
+ ENCODING_MAP(kThumbLslRR, 0x4080,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
+ "lsls", "!0C, !1C", 2),
+ ENCODING_MAP(kThumbLsrRRI5, 0x0800,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1,
+ IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+ "lsrs", "!0C, !1C, #!2d", 2),
+ ENCODING_MAP(kThumbLsrRR, 0x40c0,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
+ "lsrs", "!0C, !1C", 2),
+ ENCODING_MAP(kThumbMovImm, 0x2000,
+ kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_BINARY_OP | REG_DEF0 | SETS_CCODES,
+ "movs", "!0C, #!1d", 2),
+ ENCODING_MAP(kThumbMovRR, 0x1c00,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_BINARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+ "movs", "!0C, !1C", 2),
+ ENCODING_MAP(kThumbMovRR_H2H, 0x46c0,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "mov", "!0C, !1C", 2),
+ ENCODING_MAP(kThumbMovRR_H2L, 0x4640,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "mov", "!0C, !1C", 2),
+ ENCODING_MAP(kThumbMovRR_L2H, 0x4680,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "mov", "!0C, !1C", 2),
+ ENCODING_MAP(kThumbMul, 0x4340,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
+ "muls", "!0C, !1C", 2),
+ ENCODING_MAP(kThumbMvn, 0x43c0,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_BINARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+ "mvns", "!0C, !1C", 2),
+ ENCODING_MAP(kThumbNeg, 0x4240,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_BINARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+ "negs", "!0C, !1C", 2),
+ ENCODING_MAP(kThumbOrr, 0x4300,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
+ "orrs", "!0C, !1C", 2),
+ ENCODING_MAP(kThumbPop, 0xbc00,
+ kFmtBitBlt, 8, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_DEF_LIST0
+ | IS_LOAD, "pop", "<!0R>", 2),
+ ENCODING_MAP(kThumbPush, 0xb400,
+ kFmtBitBlt, 8, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_USE_LIST0
+ | IS_STORE, "push", "<!0R>", 2),
+ ENCODING_MAP(kThumbRorRR, 0x41c0,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_BINARY_OP | REG_DEF0_USE01 | SETS_CCODES,
+ "rors", "!0C, !1C", 2),
+ ENCODING_MAP(kThumbSbc, 0x4180,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_BINARY_OP | REG_DEF0_USE01 | USES_CCODES | SETS_CCODES,
+ "sbcs", "!0C, !1C", 2),
+ ENCODING_MAP(kThumbStmia, 0xc000,
+ kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_BINARY_OP | REG_DEF0 | REG_USE0 | REG_USE_LIST1 | IS_STORE,
+ "stmia", "!0C!!, <!1R>", 2),
+ ENCODING_MAP(kThumbStrRRI5, 0x6000,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ "str", "!0C, [!1C, #!2E]", 2),
+ ENCODING_MAP(kThumbStrRRR, 0x5000,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE012 | IS_STORE,
+ "str", "!0C, [!1C, !2C]", 2),
+ ENCODING_MAP(kThumbStrSpRel, 0x9000,
+ kFmtBitBlt, 10, 8, kFmtUnused, -1, -1, kFmtBitBlt, 7, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE0 | REG_USE_SP
+ | IS_STORE, "str", "!0C, [sp, #!2E]", 2),
+ ENCODING_MAP(kThumbStrbRRI5, 0x7000,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ "strb", "!0C, [!1C, #!2d]", 2),
+ ENCODING_MAP(kThumbStrbRRR, 0x5400,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE012 | IS_STORE,
+ "strb", "!0C, [!1C, !2C]", 2),
+ ENCODING_MAP(kThumbStrhRRI5, 0x8000,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ "strh", "!0C, [!1C, #!2F]", 2),
+ ENCODING_MAP(kThumbStrhRRR, 0x5200,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE012 | IS_STORE,
+ "strh", "!0C, [!1C, !2C]", 2),
+ ENCODING_MAP(kThumbSubRRI3, 0x1e00,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
+ kFmtUnused, -1, -1,
+ IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+ "subs", "!0C, !1C, #!2d", 2),
+ ENCODING_MAP(kThumbSubRI8, 0x3800,
+ kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_BINARY_OP | REG_DEF0_USE0 | SETS_CCODES,
+ "subs", "!0C, #!1d", 2),
+ ENCODING_MAP(kThumbSubRRR, 0x1a00,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
+ kFmtUnused, -1, -1,
+ IS_TERTIARY_OP | REG_DEF0_USE12 | SETS_CCODES,
+ "subs", "!0C, !1C, !2C", 2),
+ ENCODING_MAP(kThumbSubSpI7, 0xb080,
+ kFmtBitBlt, 6, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_UNARY_OP | REG_DEF_SP | REG_USE_SP,
+ "sub", "sp, #!0d*4", 2),
+ ENCODING_MAP(kThumbSwi, 0xdf00,
+ kFmtBitBlt, 7, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH,
+ "swi", "!0d", 2),
+ ENCODING_MAP(kThumbTst, 0x4200,
+ kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | REG_USE01 | SETS_CCODES,
+ "tst", "!0C, !1C", 2),
+ ENCODING_MAP(kThumb2Vldrs, 0xed900a00,
+ kFmtSfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD |
+ REG_DEF_LR | NEEDS_FIXUP, "vldr", "!0s, [!1C, #!2E]", 4),
+ ENCODING_MAP(kThumb2Vldrd, 0xed900b00,
+ kFmtDfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD |
+ REG_DEF_LR | NEEDS_FIXUP, "vldr", "!0S, [!1C, #!2E]", 4),
+ ENCODING_MAP(kThumb2Vmuls, 0xee200a00,
+ kFmtSfp, 22, 12, kFmtSfp, 7, 16, kFmtSfp, 5, 0,
+ kFmtUnused, -1, -1,
+ IS_TERTIARY_OP | REG_DEF0_USE12,
+ "vmuls", "!0s, !1s, !2s", 4),
+ ENCODING_MAP(kThumb2Vmuld, 0xee200b00,
+ kFmtDfp, 22, 12, kFmtDfp, 7, 16, kFmtDfp, 5, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "vmuld", "!0S, !1S, !2S", 4),
+ ENCODING_MAP(kThumb2Vstrs, 0xed800a00,
+ kFmtSfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ "vstr", "!0s, [!1C, #!2E]", 4),
+ ENCODING_MAP(kThumb2Vstrd, 0xed800b00,
+ kFmtDfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ "vstr", "!0S, [!1C, #!2E]", 4),
+ ENCODING_MAP(kThumb2Vsubs, 0xee300a40,
+ kFmtSfp, 22, 12, kFmtSfp, 7, 16, kFmtSfp, 5, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "vsub", "!0s, !1s, !2s", 4),
+ ENCODING_MAP(kThumb2Vsubd, 0xee300b40,
+ kFmtDfp, 22, 12, kFmtDfp, 7, 16, kFmtDfp, 5, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "vsub", "!0S, !1S, !2S", 4),
+ ENCODING_MAP(kThumb2Vadds, 0xee300a00,
+ kFmtSfp, 22, 12, kFmtSfp, 7, 16, kFmtSfp, 5, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "vadd", "!0s, !1s, !2s", 4),
+ ENCODING_MAP(kThumb2Vaddd, 0xee300b00,
+ kFmtDfp, 22, 12, kFmtDfp, 7, 16, kFmtDfp, 5, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "vadd", "!0S, !1S, !2S", 4),
+ ENCODING_MAP(kThumb2Vdivs, 0xee800a00,
+ kFmtSfp, 22, 12, kFmtSfp, 7, 16, kFmtSfp, 5, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "vdivs", "!0s, !1s, !2s", 4),
+ ENCODING_MAP(kThumb2Vdivd, 0xee800b00,
+ kFmtDfp, 22, 12, kFmtDfp, 7, 16, kFmtDfp, 5, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "vdivd", "!0S, !1S, !2S", 4),
+ ENCODING_MAP(kThumb2VcvtIF, 0xeeb80ac0,
+ kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "vcvt.f32", "!0s, !1s", 4),
+ ENCODING_MAP(kThumb2VcvtID, 0xeeb80bc0,
+ kFmtDfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "vcvt.f64", "!0S, !1s", 4),
+ ENCODING_MAP(kThumb2VcvtFI, 0xeebd0ac0,
+ kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "vcvt.s32.f32 ", "!0s, !1s", 4),
+ ENCODING_MAP(kThumb2VcvtDI, 0xeebd0bc0,
+ kFmtSfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "vcvt.s32.f64 ", "!0s, !1S", 4),
+ ENCODING_MAP(kThumb2VcvtFd, 0xeeb70ac0,
+ kFmtDfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "vcvt.f64.f32 ", "!0S, !1s", 4),
+ ENCODING_MAP(kThumb2VcvtDF, 0xeeb70bc0,
+ kFmtSfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "vcvt.f32.f64 ", "!0s, !1S", 4),
+ ENCODING_MAP(kThumb2Vsqrts, 0xeeb10ac0,
+ kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "vsqrt.f32 ", "!0s, !1s", 4),
+ ENCODING_MAP(kThumb2Vsqrtd, 0xeeb10bc0,
+ kFmtDfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "vsqrt.f64 ", "!0S, !1S", 4),
+ ENCODING_MAP(kThumb2MovImmShift, 0xf04f0000, /* no setflags encoding */
+ kFmtBitBlt, 11, 8, kFmtModImm, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
+ "mov", "!0C, #!1m", 4),
+ ENCODING_MAP(kThumb2MovImm16, 0xf2400000,
+ kFmtBitBlt, 11, 8, kFmtImm16, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
+ "mov", "!0C, #!1M", 4),
+ ENCODING_MAP(kThumb2StrRRI12, 0xf8c00000,
+ kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ "str", "!0C, [!1C, #!2d]", 4),
+ ENCODING_MAP(kThumb2LdrRRI12, 0xf8d00000,
+ kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ "ldr", "!0C, [!1C, #!2d]", 4),
+ ENCODING_MAP(kThumb2StrRRI8Predec, 0xf8400c00,
+ kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 8, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ "str", "!0C, [!1C, #-!2d]", 4),
+ ENCODING_MAP(kThumb2LdrRRI8Predec, 0xf8500c00,
+ kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 8, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ "ldr", "!0C, [!1C, #-!2d]", 4),
+ ENCODING_MAP(kThumb2Cbnz, 0xb900, /* Note: does not affect flags */
+ kFmtBitBlt, 2, 0, kFmtImm6, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | IS_BRANCH |
+ NEEDS_FIXUP, "cbnz", "!0C,!1t", 2),
+ ENCODING_MAP(kThumb2Cbz, 0xb100, /* Note: does not affect flags */
+ kFmtBitBlt, 2, 0, kFmtImm6, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | IS_BRANCH |
+ NEEDS_FIXUP, "cbz", "!0C,!1t", 2),
+ ENCODING_MAP(kThumb2AddRRI12, 0xf2000000,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtImm12, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_TERTIARY_OP | REG_DEF0_USE1,/* Note: doesn't affect flags */
+ "add", "!0C,!1C,#!2d", 4),
+ ENCODING_MAP(kThumb2MovRR, 0xea4f0000, /* no setflags encoding */
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 3, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "mov", "!0C, !1C", 4),
+ ENCODING_MAP(kThumb2Vmovs, 0xeeb00a40,
+ kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "vmov.f32 ", " !0s, !1s", 4),
+ ENCODING_MAP(kThumb2Vmovd, 0xeeb00b40,
+ kFmtDfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "vmov.f64 ", " !0S, !1S", 4),
+ ENCODING_MAP(kThumb2Ldmia, 0xe8900000,
+ kFmtBitBlt, 19, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_BINARY_OP | REG_DEF0_USE0 | REG_DEF_LIST1 | IS_LOAD,
+ "ldmia", "!0C!!, <!1R>", 4),
+ ENCODING_MAP(kThumb2Stmia, 0xe8800000,
+ kFmtBitBlt, 19, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_BINARY_OP | REG_DEF0_USE0 | REG_USE_LIST1 | IS_STORE,
+ "stmia", "!0C!!, <!1R>", 4),
+ ENCODING_MAP(kThumb2AddRRR, 0xeb100000, /* setflags encoding */
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+ kFmtShift, -1, -1,
+ IS_QUAD_OP | REG_DEF0_USE12 | SETS_CCODES,
+ "adds", "!0C, !1C, !2C!3H", 4),
+ ENCODING_MAP(kThumb2SubRRR, 0xebb00000, /* setflags encoding */
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+ kFmtShift, -1, -1,
+ IS_QUAD_OP | REG_DEF0_USE12 | SETS_CCODES,
+ "subs", "!0C, !1C, !2C!3H", 4),
+ ENCODING_MAP(kThumb2SbcRRR, 0xeb700000, /* setflags encoding */
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+ kFmtShift, -1, -1,
+ IS_QUAD_OP | REG_DEF0_USE12 | USES_CCODES | SETS_CCODES,
+ "sbcs", "!0C, !1C, !2C!3H", 4),
+ ENCODING_MAP(kThumb2CmpRR, 0xebb00f00,
+ kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0, kFmtShift, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_TERTIARY_OP | REG_USE01 | SETS_CCODES,
+ "cmp", "!0C, !1C", 4),
+ ENCODING_MAP(kThumb2SubRRI12, 0xf2a00000,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtImm12, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_TERTIARY_OP | REG_DEF0_USE1,/* Note: doesn't affect flags */
+ "sub", "!0C,!1C,#!2d", 4),
+ ENCODING_MAP(kThumb2MvnImm12, 0xf06f0000, /* no setflags encoding */
+ kFmtBitBlt, 11, 8, kFmtImm12, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
+ "mvn", "!0C, #!1n", 4),
+ ENCODING_MAP(kThumb2Sel, 0xfaa0f080,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+ kFmtUnused, -1, -1,
+ IS_TERTIARY_OP | REG_DEF0_USE12 | USES_CCODES,
+ "sel", "!0C, !1C, !2C", 4),
+ ENCODING_MAP(kThumb2Ubfx, 0xf3c00000,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtLsb, -1, -1,
+ kFmtBWidth, 4, 0, IS_QUAD_OP | REG_DEF0_USE1,
+ "ubfx", "!0C, !1C, #!2d, #!3d", 4),
+ ENCODING_MAP(kThumb2Sbfx, 0xf3400000,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtLsb, -1, -1,
+ kFmtBWidth, 4, 0, IS_QUAD_OP | REG_DEF0_USE1,
+ "sbfx", "!0C, !1C, #!2d, #!3d", 4),
+ ENCODING_MAP(kThumb2LdrRRR, 0xf8500000,
+ kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+ kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+ "ldr", "!0C, [!1C, !2C, LSL #!3d]", 4),
+ ENCODING_MAP(kThumb2LdrhRRR, 0xf8300000,
+ kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+ kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+ "ldrh", "!0C, [!1C, !2C, LSL #!3d]", 4),
+ ENCODING_MAP(kThumb2LdrshRRR, 0xf9300000,
+ kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+ kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+ "ldrsh", "!0C, [!1C, !2C, LSL #!3d]", 4),
+ ENCODING_MAP(kThumb2LdrbRRR, 0xf8100000,
+ kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+ kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+ "ldrb", "!0C, [!1C, !2C, LSL #!3d]", 4),
+ ENCODING_MAP(kThumb2LdrsbRRR, 0xf9100000,
+ kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+ kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+ "ldrsb", "!0C, [!1C, !2C, LSL #!3d]", 4),
+ ENCODING_MAP(kThumb2StrRRR, 0xf8400000,
+ kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+ kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE,
+ "str", "!0C, [!1C, !2C, LSL #!3d]", 4),
+ ENCODING_MAP(kThumb2StrhRRR, 0xf8200000,
+ kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+ kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE,
+ "strh", "!0C, [!1C, !2C, LSL #!3d]", 4),
+ ENCODING_MAP(kThumb2StrbRRR, 0xf8000000,
+ kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+ kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE,
+ "strb", "!0C, [!1C, !2C, LSL #!3d]", 4),
+ ENCODING_MAP(kThumb2LdrhRRI12, 0xf8b00000,
+ kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ "ldrh", "!0C, [!1C, #!2d]", 4),
+ ENCODING_MAP(kThumb2LdrshRRI12, 0xf9b00000,
+ kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ "ldrsh", "!0C, [!1C, #!2d]", 4),
+ ENCODING_MAP(kThumb2LdrbRRI12, 0xf8900000,
+ kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ "ldrb", "!0C, [!1C, #!2d]", 4),
+ ENCODING_MAP(kThumb2LdrsbRRI12, 0xf9900000,
+ kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ "ldrsb", "!0C, [!1C, #!2d]", 4),
+ ENCODING_MAP(kThumb2StrhRRI12, 0xf8a00000,
+ kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ "strh", "!0C, [!1C, #!2d]", 4),
+ ENCODING_MAP(kThumb2StrbRRI12, 0xf8800000,
+ kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ "strb", "!0C, [!1C, #!2d]", 4),
+ ENCODING_MAP(kThumb2Pop, 0xe8bd0000,
+ kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_DEF_LIST0
+ | IS_LOAD | NEEDS_FIXUP, "pop", "<!0R>", 4),
+ ENCODING_MAP(kThumb2Push, 0xe92d0000,
+ kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_USE_LIST0
+ | IS_STORE | NEEDS_FIXUP, "push", "<!0R>", 4),
+ ENCODING_MAP(kThumb2CmpRI12, 0xf1b00f00,
+ kFmtBitBlt, 19, 16, kFmtModImm, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_BINARY_OP | REG_USE0 | SETS_CCODES,
+ "cmp", "!0C, #!1m", 4),
+ ENCODING_MAP(kThumb2AdcRRR, 0xeb500000, /* setflags encoding */
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+ kFmtShift, -1, -1,
+ IS_QUAD_OP | REG_DEF0_USE12 | SETS_CCODES,
+ "adcs", "!0C, !1C, !2C!3H", 4),
+ ENCODING_MAP(kThumb2AndRRR, 0xea000000,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+ kFmtShift, -1, -1, IS_QUAD_OP | REG_DEF0_USE12,
+ "and", "!0C, !1C, !2C!3H", 4),
+ ENCODING_MAP(kThumb2BicRRR, 0xea200000,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+ kFmtShift, -1, -1, IS_QUAD_OP | REG_DEF0_USE12,
+ "bic", "!0C, !1C, !2C!3H", 4),
+ ENCODING_MAP(kThumb2CmnRR, 0xeb000000,
+ kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0, kFmtShift, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+ "cmn", "!0C, !1C, shift !2d", 4),
+ ENCODING_MAP(kThumb2EorRRR, 0xea800000,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+ kFmtShift, -1, -1, IS_QUAD_OP | REG_DEF0_USE12,
+ "eor", "!0C, !1C, !2C!3H", 4),
+ ENCODING_MAP(kThumb2MulRRR, 0xfb00f000,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "mul", "!0C, !1C, !2C", 4),
+ ENCODING_MAP(kThumb2MnvRR, 0xea6f0000,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 3, 0, kFmtShift, -1, -1,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "mvn", "!0C, !1C, shift !2d", 4),
+ ENCODING_MAP(kThumb2RsubRRI8, 0xf1d00000,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+ "rsb", "!0C,!1C,#!2m", 4),
+ ENCODING_MAP(kThumb2NegRR, 0xf1d00000, /* instance of rsub */
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_BINARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+ "neg", "!0C,!1C", 4),
+ ENCODING_MAP(kThumb2OrrRRR, 0xea400000,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+ kFmtShift, -1, -1, IS_QUAD_OP | REG_DEF0_USE12,
+ "orr", "!0C, !1C, !2C!3H", 4),
+ ENCODING_MAP(kThumb2TstRR, 0xea100f00,
+ kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0, kFmtShift, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_TERTIARY_OP | REG_USE01 | SETS_CCODES,
+ "tst", "!0C, !1C, shift !2d", 4),
+ ENCODING_MAP(kThumb2LslRRR, 0xfa00f000,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "lsl", "!0C, !1C, !2C", 4),
+ ENCODING_MAP(kThumb2LsrRRR, 0xfa20f000,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "lsr", "!0C, !1C, !2C", 4),
+ ENCODING_MAP(kThumb2AsrRRR, 0xfa40f000,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "asr", "!0C, !1C, !2C", 4),
+ ENCODING_MAP(kThumb2RorRRR, 0xfa60f000,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "ror", "!0C, !1C, !2C", 4),
+ ENCODING_MAP(kThumb2LslRRI5, 0xea4f0000,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 3, 0, kFmtShift5, -1, -1,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "lsl", "!0C, !1C, #!2d", 4),
+ ENCODING_MAP(kThumb2LsrRRI5, 0xea4f0010,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 3, 0, kFmtShift5, -1, -1,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "lsr", "!0C, !1C, #!2d", 4),
+ ENCODING_MAP(kThumb2AsrRRI5, 0xea4f0020,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 3, 0, kFmtShift5, -1, -1,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "asr", "!0C, !1C, #!2d", 4),
+ ENCODING_MAP(kThumb2RorRRI5, 0xea4f0030,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 3, 0, kFmtShift5, -1, -1,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "ror", "!0C, !1C, #!2d", 4),
+ ENCODING_MAP(kThumb2BicRRI8, 0xf0200000,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "bic", "!0C, !1C, #!2m", 4),
+ ENCODING_MAP(kThumb2AndRRI8, 0xf0000000,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "and", "!0C, !1C, #!2m", 4),
+ ENCODING_MAP(kThumb2OrrRRI8, 0xf0400000,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "orr", "!0C, !1C, #!2m", 4),
+ ENCODING_MAP(kThumb2EorRRI8, 0xf0800000,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "eor", "!0C, !1C, #!2m", 4),
+ ENCODING_MAP(kThumb2AddRRI8, 0xf1100000,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+ "adds", "!0C, !1C, #!2m", 4),
+ ENCODING_MAP(kThumb2AdcRRI8, 0xf1500000,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES | USES_CCODES,
+ "adcs", "!0C, !1C, #!2m", 4),
+ ENCODING_MAP(kThumb2SubRRI8, 0xf1b00000,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+ "subs", "!0C, !1C, #!2m", 4),
+ ENCODING_MAP(kThumb2SbcRRI8, 0xf1700000,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtModImm, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES | USES_CCODES,
+ "sbcs", "!0C, !1C, #!2m", 4),
+ ENCODING_MAP(kThumb2It, 0xbf00,
+ kFmtBitBlt, 7, 4, kFmtBitBlt, 3, 0, kFmtModImm, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | IS_IT | USES_CCODES,
+ "it:!1b", "!0c", 2),
+ ENCODING_MAP(kThumb2Fmstat, 0xeef1fa10,
+ kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, NO_OPERAND | SETS_CCODES,
+ "fmstat", "", 4),
+ ENCODING_MAP(kThumb2Vcmpd, 0xeeb40b40,
+ kFmtDfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01,
+ "vcmp.f64", "!0S, !1S", 4),
+ ENCODING_MAP(kThumb2Vcmps, 0xeeb40a40,
+ kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01,
+ "vcmp.f32", "!0s, !1s", 4),
+ ENCODING_MAP(kThumb2LdrPcRel12, 0xf8df0000,
+ kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_TERTIARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD | NEEDS_FIXUP,
+ "ldr", "!0C, [r15pc, #!1d]", 4),
+ ENCODING_MAP(kThumb2BCond, 0xf0008000,
+ kFmtBrOffset, -1, -1, kFmtBitBlt, 25, 22, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_BINARY_OP | IS_BRANCH | USES_CCODES | NEEDS_FIXUP,
+ "b!1c", "!0t", 4),
+ ENCODING_MAP(kThumb2Vmovd_RR, 0xeeb00b40,
+ kFmtDfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "vmov.f64", "!0S, !1S", 4),
+ ENCODING_MAP(kThumb2Vmovs_RR, 0xeeb00a40,
+ kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "vmov.f32", "!0s, !1s", 4),
+ ENCODING_MAP(kThumb2Fmrs, 0xee100a10,
+ kFmtBitBlt, 15, 12, kFmtSfp, 7, 16, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "fmrs", "!0C, !1s", 4),
+ ENCODING_MAP(kThumb2Fmsr, 0xee000a10,
+ kFmtSfp, 7, 16, kFmtBitBlt, 15, 12, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "fmsr", "!0s, !1C", 4),
+ ENCODING_MAP(kThumb2Fmrrd, 0xec500b10,
+ kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtDfp, 5, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF01_USE2,
+ "fmrrd", "!0C, !1C, !2S", 4),
+ ENCODING_MAP(kThumb2Fmdrr, 0xec400b10,
+ kFmtDfp, 5, 0, kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "fmdrr", "!0S, !1C, !2C", 4),
+ ENCODING_MAP(kThumb2Vabsd, 0xeeb00bc0,
+ kFmtDfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "vabs.f64", "!0S, !1S", 4),
+ ENCODING_MAP(kThumb2Vabss, 0xeeb00ac0,
+ kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "vabs.f32", "!0s, !1s", 4),
+ ENCODING_MAP(kThumb2Vnegd, 0xeeb10b40,
+ kFmtDfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "vneg.f64", "!0S, !1S", 4),
+ ENCODING_MAP(kThumb2Vnegs, 0xeeb10a40,
+ kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "vneg.f32", "!0s, !1s", 4),
+ ENCODING_MAP(kThumb2Vmovs_IMM8, 0xeeb00a00,
+ kFmtSfp, 22, 12, kFmtFPImm, 16, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
+ "vmov.f32", "!0s, #0x!1h", 4),
+ ENCODING_MAP(kThumb2Vmovd_IMM8, 0xeeb00b00,
+ kFmtDfp, 22, 12, kFmtFPImm, 16, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
+ "vmov.f64", "!0S, #0x!1h", 4),
+ ENCODING_MAP(kThumb2Mla, 0xfb000000,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+ kFmtBitBlt, 15, 12,
+ IS_QUAD_OP | REG_DEF0 | REG_USE1 | REG_USE2 | REG_USE3,
+ "mla", "!0C, !1C, !2C, !3C", 4),
+ ENCODING_MAP(kThumb2Umull, 0xfba00000,
+ kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16,
+ kFmtBitBlt, 3, 0,
+ IS_QUAD_OP | REG_DEF0 | REG_DEF1 | REG_USE2 | REG_USE3,
+ "umull", "!0C, !1C, !2C, !3C", 4),
+ ENCODING_MAP(kThumb2Ldrex, 0xe8500f00,
+ kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ "ldrex", "!0C, [!1C, #!2E]", 4),
+ ENCODING_MAP(kThumb2Strex, 0xe8400000,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16,
+ kFmtBitBlt, 7, 0, IS_QUAD_OP | REG_DEF0_USE12 | IS_STORE,
+ "strex", "!0C,!1C, [!2C, #!2E]", 4),
+ ENCODING_MAP(kThumb2Clrex, 0xf3bf8f2f,
+ kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, NO_OPERAND,
+ "clrex", "", 4),
+ ENCODING_MAP(kThumb2Bfi, 0xf3600000,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtShift5, -1, -1,
+ kFmtBitBlt, 4, 0, IS_QUAD_OP | REG_DEF0_USE1,
+ "bfi", "!0C,!1C,#!2d,#!3d", 4),
+ ENCODING_MAP(kThumb2Bfc, 0xf36f0000,
+ kFmtBitBlt, 11, 8, kFmtShift5, -1, -1, kFmtBitBlt, 4, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0,
+ "bfc", "!0C,#!1d,#!2d", 4),
+ ENCODING_MAP(kThumb2Dmb, 0xf3bf8f50,
+ kFmtBitBlt, 3, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP,
+ "dmb","#!0B",4),
+ ENCODING_MAP(kThumb2LdrPcReln12, 0xf85f0000,
+ kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_BINARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD,
+ "ldr", "!0C, [r15pc, -#!1d]", 4),
+ ENCODING_MAP(kThumb2Stm, 0xe9000000,
+ kFmtBitBlt, 19, 16, kFmtBitBlt, 12, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_BINARY_OP | REG_USE0 | REG_USE_LIST1 | IS_STORE,
+ "stm", "!0C, <!1R>", 4),
+ ENCODING_MAP(kThumbUndefined, 0xde00,
+ kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, NO_OPERAND,
+ "undefined", "", 2),
+ // NOTE: vpop, vpush hard-encoded for s16+ reg list
+ ENCODING_MAP(kThumb2VPopCS, 0xecbd8a00,
+ kFmtBitBlt, 7, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_DEF_FPCS_LIST0
+ | IS_LOAD, "vpop", "<!0P>", 4),
+ ENCODING_MAP(kThumb2VPushCS, 0xed2d8a00,
+ kFmtBitBlt, 7, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_USE_FPCS_LIST0
+ | IS_STORE, "vpush", "<!0P>", 4),
+ ENCODING_MAP(kThumb2Vldms, 0xec900a00,
+ kFmtBitBlt, 19, 16, kFmtSfp, 22, 12, kFmtBitBlt, 7, 0,
+ kFmtUnused, -1, -1,
+ IS_TERTIARY_OP | REG_USE0 | REG_DEF_FPCS_LIST2
+ | IS_LOAD, "vldms", "!0C, <!2Q>", 4),
+ ENCODING_MAP(kThumb2Vstms, 0xec800a00,
+ kFmtBitBlt, 19, 16, kFmtSfp, 22, 12, kFmtBitBlt, 7, 0,
+ kFmtUnused, -1, -1,
+ IS_TERTIARY_OP | REG_USE0 | REG_USE_FPCS_LIST2
+ | IS_STORE, "vstms", "!0C, <!2Q>", 4),
+ ENCODING_MAP(kThumb2BUncond, 0xf0009000,
+ kFmtOff24, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, NO_OPERAND | IS_BRANCH,
+ "b", "!0t", 4),
+ ENCODING_MAP(kThumb2MovImm16H, 0xf2c00000,
+ kFmtBitBlt, 11, 8, kFmtImm16, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0 | REG_USE0,
+ "movt", "!0C, #!1M", 4),
+ ENCODING_MAP(kThumb2AddPCR, 0x4487,
+ kFmtBitBlt, 6, 3, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_UNARY_OP | REG_USE0 | IS_BRANCH,
+ "add", "rPC, !0C", 2),
+ ENCODING_MAP(kThumb2Adr, 0xf20f0000,
+ kFmtBitBlt, 11, 8, kFmtImm12, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ /* Note: doesn't affect flags */
+ IS_TERTIARY_OP | REG_DEF0 | NEEDS_FIXUP,
+ "adr", "!0C,#!1d", 4),
+ ENCODING_MAP(kThumb2MovImm16LST, 0xf2400000,
+ kFmtBitBlt, 11, 8, kFmtImm16, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0 | NEEDS_FIXUP,
+ "mov", "!0C, #!1M", 4),
+ ENCODING_MAP(kThumb2MovImm16HST, 0xf2c00000,
+ kFmtBitBlt, 11, 8, kFmtImm16, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0 | REG_USE0 | NEEDS_FIXUP,
+ "movt", "!0C, #!1M", 4),
+ ENCODING_MAP(kThumb2LdmiaWB, 0xe8b00000,
+ kFmtBitBlt, 19, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_BINARY_OP | REG_DEF0_USE0 | REG_DEF_LIST1 | IS_LOAD,
+ "ldmia", "!0C!!, <!1R>", 4),
+ ENCODING_MAP(kThumb2SubsRRI12, 0xf1b00000,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtImm12, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+ "subs", "!0C,!1C,#!2d", 4),
+ ENCODING_MAP(kThumb2OrrRRRs, 0xea500000,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+ kFmtShift, -1, -1, IS_QUAD_OP | REG_DEF0_USE12 | SETS_CCODES,
+ "orrs", "!0C, !1C, !2C!3H", 4),
+ ENCODING_MAP(kThumb2Push1, 0xf84d0d04,
+ kFmtBitBlt, 15, 12, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_USE0
+ | IS_STORE, "push1", "!0C", 4),
+ ENCODING_MAP(kThumb2Pop1, 0xf85d0b04,
+ kFmtBitBlt, 15, 12, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1,
+ IS_UNARY_OP | REG_DEF_SP | REG_USE_SP | REG_DEF0
+ | IS_LOAD, "pop1", "!0C", 4),
+ ENCODING_MAP(kThumb2RsubRRR, 0xebd00000, /* setflags encoding */
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+ kFmtShift, -1, -1,
+ IS_QUAD_OP | REG_DEF0_USE12 | SETS_CCODES,
+ "rsbs", "!0C, !1C, !2C!3H", 4),
+ ENCODING_MAP(kThumb2Smull, 0xfb800000,
+ kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16,
+ kFmtBitBlt, 3, 0,
+ IS_QUAD_OP | REG_DEF0 | REG_DEF1 | REG_USE2 | REG_USE3,
+ "smull", "!0C, !1C, !2C, !3C", 4),
+ ENCODING_MAP(kThumb2LdrdPcRel8, 0xe9df0000,
+ kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 7, 0,
+ kFmtUnused, -1, -1,
+ IS_TERTIARY_OP | REG_DEF0 | REG_DEF1 | REG_USE_PC | IS_LOAD | NEEDS_FIXUP,
+ "ldrd", "!0C, !1C, [pc, #!2E]", 4),
+ ENCODING_MAP(kThumb2LdrdI8, 0xe9d00000,
+ kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16,
+ kFmtBitBlt, 7, 0,
+ IS_QUAD_OP | REG_DEF0 | REG_DEF1 | REG_USE2 | IS_LOAD,
+ "ldrd", "!0C, !1C, [!2C, #!3E]", 4),
+ ENCODING_MAP(kThumb2StrdI8, 0xe9c00000,
+ kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16,
+ kFmtBitBlt, 7, 0,
+ IS_QUAD_OP | REG_USE0 | REG_USE1 | REG_USE2 | IS_STORE,
+ "strd", "!0C, !1C, [!2C, #!3E]", 4),
+};
+
+/*
+ * A fake NOP of moving r0 to r0 would incur data stalls whenever r0 is
+ * not ready. Since r5FP is not updated often, moving r5 to r5 is less
+ * likely to generate unnecessary stall cycles.
+ * TUNING: No longer true - find new NOP pattern.
+ */
+#define PADDING_MOV_R5_R5 0x1C2D
+
+/*
+ * Assemble the LIR into binary instruction format. Note that we may
+ * discover that pc-relative displacements may not fit the selected
+ * instruction.
+ */
+AssemblerStatus ArmCodegen::AssembleInstructions(CompilationUnit* cu, uintptr_t start_addr)
+{
+ LIR* lir;
+ AssemblerStatus res = kSuccess; // Assume success
+
+ for (lir = cu->first_lir_insn; lir != NULL; lir = NEXT_LIR(lir)) {
+
+ if (lir->opcode < 0) {
+ /* 1 means padding is needed */
+ if ((lir->opcode == kPseudoPseudoAlign4) && (lir->operands[0] == 1)) {
+ cu->code_buffer.push_back(PADDING_MOV_R5_R5 & 0xFF);
+ cu->code_buffer.push_back((PADDING_MOV_R5_R5 >> 8) & 0xFF);
+ }
+ continue;
+ }
+
+ if (lir->flags.is_nop) {
+ continue;
+ }
+
+ /*
+ * For PC-relative displacements we won't know if the
+ * selected instruction will work until late (i.e. - now).
+ * If something doesn't fit, we must replace the short-form
+ * operation with a longer-form one. Note, though, that this
+ * can change code we've already processed, so we'll need to
+ * re-calculate offsets and restart. To limit the number of
+ * restarts, the entire list will be scanned and patched.
+ * Of course, the patching itself may cause new overflows so this
+ * is an iterative process.
+ */
+ if (lir->flags.pcRelFixup) {
+ if (lir->opcode == kThumbLdrPcRel ||
+ lir->opcode == kThumb2LdrPcRel12 ||
+ lir->opcode == kThumbAddPcRel ||
+ lir->opcode == kThumb2LdrdPcRel8 ||
+ ((lir->opcode == kThumb2Vldrd) && (lir->operands[1] == r15pc)) ||
+ ((lir->opcode == kThumb2Vldrs) && (lir->operands[1] == r15pc))) {
+ /*
+ * PC-relative loads are mostly used to load immediates
+ * that are too large to materialize directly in one shot.
+ * However, if the load displacement exceeds the limit,
+ * we revert to a multiple-instruction materialization sequence.
+ */
+ LIR *lir_target = lir->target;
+ uintptr_t pc = (lir->offset + 4) & ~3;
+ uintptr_t target = lir_target->offset;
+ int delta = target - pc;
+ if (delta & 0x3) {
+ LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
+ }
+ // First, a sanity check for cases we shouldn't see now
+ if (((lir->opcode == kThumbAddPcRel) && (delta > 1020)) ||
+ ((lir->opcode == kThumbLdrPcRel) && (delta > 1020))) {
+ // Shouldn't happen in current codegen.
+ LOG(FATAL) << "Unexpected pc-rel offset " << delta;
+ }
+ // Now, check for the difficult cases
+ if (((lir->opcode == kThumb2LdrPcRel12) && (delta > 4091)) ||
+ ((lir->opcode == kThumb2LdrdPcRel8) && (delta > 1020)) ||
+ ((lir->opcode == kThumb2Vldrs) && (delta > 1020)) ||
+ ((lir->opcode == kThumb2Vldrd) && (delta > 1020))) {
+ /*
+ * Note: because rARM_LR may be used to fix up out-of-range
+ * vldrs/vldrd we include REG_DEF_LR in the resource
+ * masks for these instructions.
+ */
+ int base_reg = ((lir->opcode == kThumb2LdrdPcRel8) || (lir->opcode == kThumb2LdrPcRel12))
+ ? lir->operands[0] : rARM_LR;
+
+ // Add new Adr to generate the address.
+ LIR* new_adr = RawLIR(cu, lir->dalvik_offset, kThumb2Adr,
+ base_reg, 0, 0, 0, 0, lir->target);
+ InsertLIRBefore(lir, new_adr);
+
+ // Convert to normal load.
+ if (lir->opcode == kThumb2LdrPcRel12) {
+ lir->opcode = kThumb2LdrRRI12;
+ } else if (lir->opcode == kThumb2LdrdPcRel8) {
+ lir->opcode = kThumb2LdrdI8;
+ }
+ // Change the load to be relative to the new Adr base.
+ if (lir->opcode == kThumb2LdrdI8) {
+ lir->operands[3] = 0;
+ lir->operands[2] = base_reg;
+ } else {
+ lir->operands[2] = 0;
+ lir->operands[1] = base_reg;
+ }
+ SetupResourceMasks(cu, lir);
+ res = kRetryAll;
+ } else {
+ if ((lir->opcode == kThumb2Vldrs) ||
+ (lir->opcode == kThumb2Vldrd) ||
+ (lir->opcode == kThumb2LdrdPcRel8)) {
+ lir->operands[2] = delta >> 2;
+ } else {
+ lir->operands[1] = (lir->opcode == kThumb2LdrPcRel12) ? delta :
+ delta >> 2;
+ }
+ }
+ } else if (lir->opcode == kThumb2Cbnz || lir->opcode == kThumb2Cbz) {
+ LIR *target_lir = lir->target;
+ uintptr_t pc = lir->offset + 4;
+ uintptr_t target = target_lir->offset;
+ int delta = target - pc;
+ if (delta > 126 || delta < 0) {
+ /*
+ * Convert to cmp rx,#0 / b[eq/ne] tgt pair
+ * Make new branch instruction and insert after
+ */
+ LIR* new_inst =
+ RawLIR(cu, lir->dalvik_offset, kThumbBCond, 0,
+ (lir->opcode == kThumb2Cbz) ? kArmCondEq : kArmCondNe,
+ 0, 0, 0, lir->target);
+ InsertLIRAfter(lir, new_inst);
+ /* Convert the cb[n]z to a cmp rx, #0 */
+ lir->opcode = kThumbCmpRI8;
+ /* operand[0] is src1 in both cb[n]z & CmpRI8 */
+ lir->operands[1] = 0;
+ lir->target = 0;
+ SetupResourceMasks(cu, lir);
+ res = kRetryAll;
+ } else {
+ lir->operands[1] = delta >> 1;
+ }
+ } else if (lir->opcode == kThumb2Push || lir->opcode == kThumb2Pop) {
+ if (__builtin_popcount(lir->operands[0]) == 1) {
+ /*
+ * The standard push/pop multiple instruction
+ * requires at least two registers in the list.
+ * If we've got just one, switch to the single-reg
+ * encoding.
+ */
+ lir->opcode = (lir->opcode == kThumb2Push) ? kThumb2Push1 :
+ kThumb2Pop1;
+ int reg = 0;
+ while (lir->operands[0]) {
+ if (lir->operands[0] & 0x1) {
+ break;
+ } else {
+ reg++;
+ lir->operands[0] >>= 1;
+ }
+ }
+ lir->operands[0] = reg;
+ SetupResourceMasks(cu, lir);
+ res = kRetryAll;
+ }
+ } else if (lir->opcode == kThumbBCond || lir->opcode == kThumb2BCond) {
+ LIR *target_lir = lir->target;
+ int delta = 0;
+ DCHECK(target_lir);
+ uintptr_t pc = lir->offset + 4;
+ uintptr_t target = target_lir->offset;
+ delta = target - pc;
+ if ((lir->opcode == kThumbBCond) && (delta > 254 || delta < -256)) {
+ lir->opcode = kThumb2BCond;
+ SetupResourceMasks(cu, lir);
+ res = kRetryAll;
+ }
+ lir->operands[0] = delta >> 1;
+ } else if (lir->opcode == kThumb2BUncond) {
+ LIR *target_lir = lir->target;
+ uintptr_t pc = lir->offset + 4;
+ uintptr_t target = target_lir->offset;
+ int delta = target - pc;
+ lir->operands[0] = delta >> 1;
+ if (!(cu->disable_opt & (1 << kSafeOptimizations)) &&
+ lir->operands[0] == 0) { // Useless branch
+ lir->flags.is_nop = true;
+ res = kRetryAll;
+ }
+ } else if (lir->opcode == kThumbBUncond) {
+ LIR *target_lir = lir->target;
+ uintptr_t pc = lir->offset + 4;
+ uintptr_t target = target_lir->offset;
+ int delta = target - pc;
+ if (delta > 2046 || delta < -2048) {
+ // Convert to Thumb2BCond w/ kArmCondAl
+ lir->opcode = kThumb2BUncond;
+ lir->operands[0] = 0;
+ SetupResourceMasks(cu, lir);
+ res = kRetryAll;
+ } else {
+ lir->operands[0] = delta >> 1;
+ if (!(cu->disable_opt & (1 << kSafeOptimizations)) &&
+ lir->operands[0] == -1) { // Useless branch
+ lir->flags.is_nop = true;
+ res = kRetryAll;
+ }
+ }
+ } else if (lir->opcode == kThumbBlx1) {
+ DCHECK(NEXT_LIR(lir)->opcode == kThumbBlx2);
+ /* cur_pc is Thumb */
+ uintptr_t cur_pc = (start_addr + lir->offset + 4) & ~3;
+ uintptr_t target = lir->operands[1];
+
+ /* Match bit[1] in target with base */
+ if (cur_pc & 0x2) {
+ target |= 0x2;
+ }
+ int delta = target - cur_pc;
+ DCHECK((delta >= -(1<<22)) && (delta <= ((1<<22)-2)));
+
+ lir->operands[0] = (delta >> 12) & 0x7ff;
+ NEXT_LIR(lir)->operands[0] = (delta>> 1) & 0x7ff;
+ } else if (lir->opcode == kThumbBl1) {
+ DCHECK(NEXT_LIR(lir)->opcode == kThumbBl2);
+ /* Both cur_pc and target are Thumb */
+ uintptr_t cur_pc = start_addr + lir->offset + 4;
+ uintptr_t target = lir->operands[1];
+
+ int delta = target - cur_pc;
+ DCHECK((delta >= -(1<<22)) && (delta <= ((1<<22)-2)));
+
+ lir->operands[0] = (delta >> 12) & 0x7ff;
+ NEXT_LIR(lir)->operands[0] = (delta>> 1) & 0x7ff;
+ } else if (lir->opcode == kThumb2Adr) {
+ SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[2]);
+ LIR* target = lir->target;
+ int target_disp = tab_rec ? tab_rec->offset
+ : target->offset;
+ int disp = target_disp - ((lir->offset + 4) & ~3);
+ if (disp < 4096) {
+ lir->operands[1] = disp;
+ } else {
+ // convert to ldimm16l, ldimm16h, add tgt, pc, operands[0]
+ LIR *new_mov16L =
+ RawLIR(cu, lir->dalvik_offset, kThumb2MovImm16LST,
+ lir->operands[0], 0, reinterpret_cast<uintptr_t>(lir),
+ reinterpret_cast<uintptr_t>(tab_rec), 0, lir->target);
+ InsertLIRBefore(lir, new_mov16L);
+ LIR *new_mov16H =
+ RawLIR(cu, lir->dalvik_offset, kThumb2MovImm16HST,
+ lir->operands[0], 0, reinterpret_cast<uintptr_t>(lir),
+ reinterpret_cast<uintptr_t>(tab_rec), 0, lir->target);
+ InsertLIRBefore(lir, new_mov16H);
+ lir->opcode = kThumb2AddRRR;
+ lir->operands[1] = rARM_PC;
+ lir->operands[2] = lir->operands[0];
+ SetupResourceMasks(cu, lir);
+ res = kRetryAll;
+ }
+ } else if (lir->opcode == kThumb2MovImm16LST) {
+ // operands[1] should hold disp, [2] has add, [3] has tab_rec
+ LIR *addPCInst = reinterpret_cast<LIR*>(lir->operands[2]);
+ SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+ // If tab_rec is null, this is a literal load. Use target
+ LIR* target = lir->target;
+ int target_disp = tab_rec ? tab_rec->offset : target->offset;
+ lir->operands[1] = (target_disp - (addPCInst->offset + 4)) & 0xffff;
+ } else if (lir->opcode == kThumb2MovImm16HST) {
+ // operands[1] should hold disp, [2] has add, [3] has tab_rec
+ LIR *addPCInst = reinterpret_cast<LIR*>(lir->operands[2]);
+ SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+ // If tab_rec is null, this is a literal load. Use target
+ LIR* target = lir->target;
+ int target_disp = tab_rec ? tab_rec->offset : target->offset;
+ lir->operands[1] =
+ ((target_disp - (addPCInst->offset + 4)) >> 16) & 0xffff;
+ }
+ }
+ /*
+ * If one of the pc-relative instructions expanded we'll have
+ * to make another pass. Don't bother to fully assemble the
+ * instruction.
+ */
+ if (res != kSuccess) {
+ continue;
+ }
+ const ArmEncodingMap *encoder = &EncodingMap[lir->opcode];
+ uint32_t bits = encoder->skeleton;
+ int i;
+ for (i = 0; i < 4; i++) {
+ uint32_t operand;
+ uint32_t value;
+ operand = lir->operands[i];
+ switch (encoder->field_loc[i].kind) {
+ case kFmtUnused:
+ break;
+ case kFmtFPImm:
+ value = ((operand & 0xF0) >> 4) << encoder->field_loc[i].end;
+ value |= (operand & 0x0F) << encoder->field_loc[i].start;
+ bits |= value;
+ break;
+ case kFmtBrOffset:
+ value = ((operand & 0x80000) >> 19) << 26;
+ value |= ((operand & 0x40000) >> 18) << 11;
+ value |= ((operand & 0x20000) >> 17) << 13;
+ value |= ((operand & 0x1f800) >> 11) << 16;
+ value |= (operand & 0x007ff);
+ bits |= value;
+ break;
+ case kFmtShift5:
+ value = ((operand & 0x1c) >> 2) << 12;
+ value |= (operand & 0x03) << 6;
+ bits |= value;
+ break;
+ case kFmtShift:
+ value = ((operand & 0x70) >> 4) << 12;
+ value |= (operand & 0x0f) << 4;
+ bits |= value;
+ break;
+ case kFmtBWidth:
+ value = operand - 1;
+ bits |= value;
+ break;
+ case kFmtLsb:
+ value = ((operand & 0x1c) >> 2) << 12;
+ value |= (operand & 0x03) << 6;
+ bits |= value;
+ break;
+ case kFmtImm6:
+ value = ((operand & 0x20) >> 5) << 9;
+ value |= (operand & 0x1f) << 3;
+ bits |= value;
+ break;
+ case kFmtBitBlt:
+ value = (operand << encoder->field_loc[i].start) &
+ ((1 << (encoder->field_loc[i].end + 1)) - 1);
+ bits |= value;
+ break;
+ case kFmtDfp: {
+ DCHECK(ARM_DOUBLEREG(operand));
+ DCHECK_EQ((operand & 0x1), 0U);
+ int reg_name = (operand & ARM_FP_REG_MASK) >> 1;
+ /* Snag the 1-bit slice and position it */
+ value = ((reg_name & 0x10) >> 4) << encoder->field_loc[i].end;
+ /* Extract and position the 4-bit slice */
+ value |= (reg_name & 0x0f) << encoder->field_loc[i].start;
+ bits |= value;
+ break;
+ }
+ case kFmtSfp:
+ DCHECK(ARM_SINGLEREG(operand));
+ /* Snag the 1-bit slice and position it */
+ value = (operand & 0x1) << encoder->field_loc[i].end;
+ /* Extract and position the 4-bit slice */
+ value |= ((operand & 0x1e) >> 1) << encoder->field_loc[i].start;
+ bits |= value;
+ break;
+ case kFmtImm12:
+ case kFmtModImm:
+ value = ((operand & 0x800) >> 11) << 26;
+ value |= ((operand & 0x700) >> 8) << 12;
+ value |= operand & 0x0ff;
+ bits |= value;
+ break;
+ case kFmtImm16:
+ value = ((operand & 0x0800) >> 11) << 26;
+ value |= ((operand & 0xf000) >> 12) << 16;
+ value |= ((operand & 0x0700) >> 8) << 12;
+ value |= operand & 0x0ff;
+ bits |= value;
+ break;
+ case kFmtOff24: {
+ uint32_t signbit = (operand >> 31) & 0x1;
+ uint32_t i1 = (operand >> 22) & 0x1;
+ uint32_t i2 = (operand >> 21) & 0x1;
+ uint32_t imm10 = (operand >> 11) & 0x03ff;
+ uint32_t imm11 = operand & 0x07ff;
+ uint32_t j1 = (i1 ^ signbit) ? 0 : 1;
+ uint32_t j2 = (i2 ^ signbit) ? 0 : 1;
+ value = (signbit << 26) | (j1 << 13) | (j2 << 11) | (imm10 << 16) |
+ imm11;
+ bits |= value;
+ }
+ break;
+ default:
+ LOG(FATAL) << "Bad fmt:" << encoder->field_loc[i].kind;
+ }
+ }
+ if (encoder->size == 4) {
+ cu->code_buffer.push_back((bits >> 16) & 0xff);
+ cu->code_buffer.push_back((bits >> 24) & 0xff);
+ }
+ cu->code_buffer.push_back(bits & 0xff);
+ cu->code_buffer.push_back((bits >> 8) & 0xff);
+ }
+ return res;
+}
+
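The kThumbBl1/kThumbBl2 fixup above splits a signed 23-bit byte offset (low bit implied zero) into two 11-bit halves, one per 16-bit instruction. The standalone snippet below is illustrative only, not part of the patch, and simply checks that the shift/mask arithmetic round-trips:

  // Illustrative check of the BL offset split used for kThumbBl1/kThumbBl2.
  #include <cassert>
  #include <cstdint>

  int main() {
    int32_t delta = 0x12344;                // even byte offset within +/- 2^22
    uint32_t high = (delta >> 12) & 0x7ff;  // operand of the first halfword (Bl1)
    uint32_t low  = (delta >> 1)  & 0x7ff;  // operand of the second halfword (Bl2)
    int32_t rebuilt = static_cast<int32_t>((high << 12) | (low << 1));
    rebuilt = (rebuilt << 9) >> 9;          // sign-extend from 23 bits (arithmetic shift assumed)
    assert(rebuilt == delta);
    return 0;
  }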
+int ArmCodegen::GetInsnSize(LIR* lir)
+{
+ return EncodingMap[lir->opcode].size;
+}
+
+} // namespace art
diff --git a/src/compiler/dex/quick/arm/call_arm.cc b/src/compiler/dex/quick/arm/call_arm.cc
new file mode 100644
index 0000000..09e8f3f
--- /dev/null
+++ b/src/compiler/dex/quick/arm/call_arm.cc
@@ -0,0 +1,655 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This file contains codegen for the Thumb2 ISA. */
+
+#include "arm_lir.h"
+#include "codegen_arm.h"
+#include "compiler/dex/quick/codegen_util.h"
+#include "compiler/dex/quick/ralloc_util.h"
+#include "oat/runtime/oat_support_entrypoints.h"
+#include "oat_compilation_unit.h"
+
+namespace art {
+
+
+/* Return the position of an ssa name within the argument list */
+static int InPosition(CompilationUnit* cu, int s_reg)
+{
+ int v_reg = SRegToVReg(cu, s_reg);
+ return v_reg - cu->num_regs;
+}
+
+/*
+ * Describe an argument. If it's already in an arg register, just leave it
+ * there. NOTE: all live arg registers must be locked prior to this call
+ * to avoid having them allocated as a temp by downstream utilities.
+ */
+RegLocation ArmCodegen::ArgLoc(CompilationUnit* cu, RegLocation loc)
+{
+ int arg_num = InPosition(cu, loc.s_reg_low);
+ if (loc.wide) {
+ if (arg_num == 2) {
+ // Bad case - half in register, half in frame. Just punt
+ loc.location = kLocInvalid;
+ } else if (arg_num < 2) {
+ loc.low_reg = rARM_ARG1 + arg_num;
+ loc.high_reg = loc.low_reg + 1;
+ loc.location = kLocPhysReg;
+ } else {
+ loc.location = kLocDalvikFrame;
+ }
+ } else {
+ if (arg_num < 3) {
+ loc.low_reg = rARM_ARG1 + arg_num;
+ loc.location = kLocPhysReg;
+ } else {
+ loc.location = kLocDalvikFrame;
+ }
+ }
+ return loc;
+}
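For illustration, assuming the usual ARM target aliases where rARM_ARG1..rARM_ARG3 are r1..r3 and r0 carries the Method*, ArgLoc resolves argument positions as:

  position 0, narrow -> r1          position 0, wide -> r1/r2
  position 1, narrow -> r2          position 1, wide -> r2/r3
  position 2, narrow -> r3          position 2, wide -> kLocInvalid (half in reg, half in frame)
  position 3 and up   -> Dalvik frame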
+
+/*
+ * Load an argument. If already in a register, just return. If in
+ * the frame, we can't use the normal LoadValue() because it assumes
+ * a proper frame - and we're frameless.
+ */
+static RegLocation LoadArg(CompilationUnit* cu, RegLocation loc)
+{
+ Codegen* cg = cu->cg.get();
+ if (loc.location == kLocDalvikFrame) {
+ int start = (InPosition(cu, loc.s_reg_low) + 1) * sizeof(uint32_t);
+ loc.low_reg = AllocTemp(cu);
+ cg->LoadWordDisp(cu, rARM_SP, start, loc.low_reg);
+ if (loc.wide) {
+ loc.high_reg = AllocTemp(cu);
+ cg->LoadWordDisp(cu, rARM_SP, start + sizeof(uint32_t), loc.high_reg);
+ }
+ loc.location = kLocPhysReg;
+ }
+ return loc;
+}
+
+/* Lock any referenced arguments that arrive in registers */
+static void LockLiveArgs(CompilationUnit* cu, MIR* mir)
+{
+ int first_in = cu->num_regs;
+ const int num_arg_regs = 3; // TODO: generalize & move to RegUtil.cc
+ for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
+ int v_reg = SRegToVReg(cu, mir->ssa_rep->uses[i]);
+ int InPosition = v_reg - first_in;
+ if (InPosition < num_arg_regs) {
+ LockTemp(cu, rARM_ARG1 + InPosition);
+ }
+ }
+}
+
+/* Find the next MIR, which may be in a following basic block */
+static MIR* GetNextMir(CompilationUnit* cu, BasicBlock** p_bb, MIR* mir)
+{
+ BasicBlock* bb = *p_bb;
+ MIR* orig_mir = mir;
+ while (bb != NULL) {
+ if (mir != NULL) {
+ mir = mir->next;
+ }
+ if (mir != NULL) {
+ return mir;
+ } else {
+ bb = bb->fall_through;
+ *p_bb = bb;
+ if (bb) {
+ mir = bb->first_mir_insn;
+ if (mir != NULL) {
+ return mir;
+ }
+ }
+ }
+ }
+ return orig_mir;
+}
+
+/* Used for the "verbose" listing */
+// TODO: move to common code
+void ArmCodegen::GenPrintLabel(CompilationUnit *cu, MIR* mir)
+{
+ /* Mark the beginning of a Dalvik instruction for line tracking */
+ char* inst_str = cu->verbose ?
+ GetDalvikDisassembly(cu, mir) : NULL;
+ MarkBoundary(cu, mir->offset, inst_str);
+}
+
+static MIR* SpecialIGet(CompilationUnit* cu, BasicBlock** bb, MIR* mir,
+ OpSize size, bool long_or_double, bool is_object)
+{
+ Codegen* cg = cu->cg.get();
+ int field_offset;
+ bool is_volatile;
+ uint32_t field_idx = mir->dalvikInsn.vC;
+ bool fast_path = FastInstance(cu, field_idx, field_offset, is_volatile, false);
+ if (!fast_path || !(mir->optimization_flags & MIR_IGNORE_NULL_CHECK)) {
+ return NULL;
+ }
+ RegLocation rl_obj = GetSrc(cu, mir, 0);
+ LockLiveArgs(cu, mir);
+ rl_obj = ArmCodegen::ArgLoc(cu, rl_obj);
+ RegLocation rl_dest;
+ if (long_or_double) {
+ rl_dest = GetReturnWide(cu, false);
+ } else {
+ rl_dest = GetReturn(cu, false);
+ }
+ // Point of no return - no aborts after this
+ ArmCodegen::GenPrintLabel(cu, mir);
+ rl_obj = LoadArg(cu, rl_obj);
+ cg->GenIGet(cu, field_idx, mir->optimization_flags, size, rl_dest, rl_obj,
+ long_or_double, is_object);
+ return GetNextMir(cu, bb, mir);
+}
+
+static MIR* SpecialIPut(CompilationUnit* cu, BasicBlock** bb, MIR* mir,
+ OpSize size, bool long_or_double, bool is_object)
+{
+ Codegen* cg = cu->cg.get();
+ int field_offset;
+ bool is_volatile;
+ uint32_t field_idx = mir->dalvikInsn.vC;
+ bool fast_path = FastInstance(cu, field_idx, field_offset, is_volatile, false);
+ if (!fast_path || !(mir->optimization_flags & MIR_IGNORE_NULL_CHECK)) {
+ return NULL;
+ }
+ RegLocation rl_src;
+ RegLocation rl_obj;
+ LockLiveArgs(cu, mir);
+ if (long_or_double) {
+ rl_src = GetSrcWide(cu, mir, 0);
+ rl_obj = GetSrc(cu, mir, 2);
+ } else {
+ rl_src = GetSrc(cu, mir, 0);
+ rl_obj = GetSrc(cu, mir, 1);
+ }
+ rl_src = ArmCodegen::ArgLoc(cu, rl_src);
+ rl_obj = ArmCodegen::ArgLoc(cu, rl_obj);
+ // Reject if source is split across registers & frame
+ if (rl_obj.location == kLocInvalid) {
+ ResetRegPool(cu);
+ return NULL;
+ }
+ // Point of no return - no aborts after this
+ ArmCodegen::GenPrintLabel(cu, mir);
+ rl_obj = LoadArg(cu, rl_obj);
+ rl_src = LoadArg(cu, rl_src);
+ cg->GenIPut(cu, field_idx, mir->optimization_flags, size, rl_src, rl_obj,
+ long_or_double, is_object);
+ return GetNextMir(cu, bb, mir);
+}
+
+static MIR* SpecialIdentity(CompilationUnit* cu, MIR* mir)
+{
+ Codegen* cg = cu->cg.get();
+ RegLocation rl_src;
+ RegLocation rl_dest;
+ bool wide = (mir->ssa_rep->num_uses == 2);
+ if (wide) {
+ rl_src = GetSrcWide(cu, mir, 0);
+ rl_dest = GetReturnWide(cu, false);
+ } else {
+ rl_src = GetSrc(cu, mir, 0);
+ rl_dest = GetReturn(cu, false);
+ }
+ LockLiveArgs(cu, mir);
+ rl_src = ArmCodegen::ArgLoc(cu, rl_src);
+ if (rl_src.location == kLocInvalid) {
+ ResetRegPool(cu);
+ return NULL;
+ }
+ // Point of no return - no aborts after this
+ ArmCodegen::GenPrintLabel(cu, mir);
+ rl_src = LoadArg(cu, rl_src);
+ if (wide) {
+ cg->StoreValueWide(cu, rl_dest, rl_src);
+ } else {
+ cg->StoreValue(cu, rl_dest, rl_src);
+ }
+ return mir;
+}
+
+/*
+ * Special-case code generation for simple non-throwing leaf methods.
+ */
+void ArmCodegen::GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+ SpecialCaseHandler special_case)
+{
+ cu->current_dalvik_offset = mir->offset;
+ MIR* next_mir = NULL;
+ switch (special_case) {
+ case kNullMethod:
+ DCHECK(mir->dalvikInsn.opcode == Instruction::RETURN_VOID);
+ next_mir = mir;
+ break;
+ case kConstFunction:
+ ArmCodegen::GenPrintLabel(cu, mir);
+ LoadConstant(cu, rARM_RET0, mir->dalvikInsn.vB);
+ next_mir = GetNextMir(cu, &bb, mir);
+ break;
+ case kIGet:
+ next_mir = SpecialIGet(cu, &bb, mir, kWord, false, false);
+ break;
+ case kIGetBoolean:
+ case kIGetByte:
+ next_mir = SpecialIGet(cu, &bb, mir, kUnsignedByte, false, false);
+ break;
+ case kIGetObject:
+ next_mir = SpecialIGet(cu, &bb, mir, kWord, false, true);
+ break;
+ case kIGetChar:
+ next_mir = SpecialIGet(cu, &bb, mir, kUnsignedHalf, false, false);
+ break;
+ case kIGetShort:
+ next_mir = SpecialIGet(cu, &bb, mir, kSignedHalf, false, false);
+ break;
+ case kIGetWide:
+ next_mir = SpecialIGet(cu, &bb, mir, kLong, true, false);
+ break;
+ case kIPut:
+ next_mir = SpecialIPut(cu, &bb, mir, kWord, false, false);
+ break;
+ case kIPutBoolean:
+ case kIPutByte:
+ next_mir = SpecialIPut(cu, &bb, mir, kUnsignedByte, false, false);
+ break;
+ case kIPutObject:
+ next_mir = SpecialIPut(cu, &bb, mir, kWord, false, true);
+ break;
+ case kIPutChar:
+ next_mir = SpecialIPut(cu, &bb, mir, kUnsignedHalf, false, false);
+ break;
+ case kIPutShort:
+ next_mir = SpecialIPut(cu, &bb, mir, kSignedHalf, false, false);
+ break;
+ case kIPutWide:
+ next_mir = SpecialIPut(cu, &bb, mir, kLong, true, false);
+ break;
+ case kIdentity:
+ next_mir = SpecialIdentity(cu, mir);
+ break;
+ default:
+ return;
+ }
+ if (next_mir != NULL) {
+ cu->current_dalvik_offset = next_mir->offset;
+ if (special_case != kIdentity) {
+ ArmCodegen::GenPrintLabel(cu, next_mir);
+ }
+ NewLIR1(cu, kThumbBx, rARM_LR);
+ cu->core_spill_mask = 0;
+ cu->num_core_spills = 0;
+ cu->fp_spill_mask = 0;
+ cu->num_fp_spills = 0;
+ cu->frame_size = 0;
+ cu->core_vmap_table.clear();
+ cu->fp_vmap_table.clear();
+ }
+}
+
+/*
+ * The sparse table in the literal pool is an array of <key,displacement>
+ * pairs. We load each entry as a key/displacement pair using ldmia.
+ * This means that the register number of the temp we use for the key
+ * must be lower than the reg for the displacement.
+ *
+ * The test loop will look something like:
+ *
+ * adr rBase, <table>
+ * ldr r_val, [rARM_SP, v_reg_off]
+ * mov r_idx, #table_size
+ * lp:
+ * ldmia rBase!, {r_key, r_disp}
+ * sub r_idx, #1
+ * cmp r_val, r_key
+ * ifeq
+ * add rARM_PC, r_disp ; This is the branch from which we compute displacement
+ * cbnz r_idx, lp
+ */
+void ArmCodegen::GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
+{
+ const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
+ if (cu->verbose) {
+ DumpSparseSwitchTable(table);
+ }
+ // Add the table to the list - we'll process it later
+ SwitchTable *tab_rec =
+ static_cast<SwitchTable*>(NewMem(cu, sizeof(SwitchTable), true, kAllocData));
+ tab_rec->table = table;
+ tab_rec->vaddr = cu->current_dalvik_offset;
+ int size = table[1];
+ tab_rec->targets = static_cast<LIR**>(NewMem(cu, size * sizeof(LIR*), true, kAllocLIR));
+ InsertGrowableList(cu, &cu->switch_tables, reinterpret_cast<uintptr_t>(tab_rec));
+
+ // Get the switch value
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+ int rBase = AllocTemp(cu);
+ /* Allocate key and disp temps */
+ int r_key = AllocTemp(cu);
+ int r_disp = AllocTemp(cu);
+ // Make sure r_key's register number is less than r_disp's number for ldmia
+ if (r_key > r_disp) {
+ int tmp = r_disp;
+ r_disp = r_key;
+ r_key = tmp;
+ }
+ // Materialize a pointer to the switch table
+ NewLIR3(cu, kThumb2Adr, rBase, 0, reinterpret_cast<uintptr_t>(tab_rec));
+ // Set up r_idx
+ int r_idx = AllocTemp(cu);
+ LoadConstant(cu, r_idx, size);
+ // Establish loop branch target
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+ // Load next key/disp
+ NewLIR2(cu, kThumb2LdmiaWB, rBase, (1 << r_key) | (1 << r_disp));
+ OpRegReg(cu, kOpCmp, r_key, rl_src.low_reg);
+ // Go if match. NOTE: No instruction set switch here - must stay Thumb2
+ OpIT(cu, kCondEq, "");
+ LIR* switch_branch = NewLIR1(cu, kThumb2AddPCR, r_disp);
+ tab_rec->anchor = switch_branch;
+ // Needs to use setflags encoding here
+ NewLIR3(cu, kThumb2SubsRRI12, r_idx, r_idx, 1);
+ OpCondBranch(cu, kCondNe, target);
+}
+
+
+void ArmCodegen::GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
+{
+ const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
+ if (cu->verbose) {
+ DumpPackedSwitchTable(table);
+ }
+ // Add the table to the list - we'll process it later
+ SwitchTable *tab_rec =
+ static_cast<SwitchTable*>(NewMem(cu, sizeof(SwitchTable), true, kAllocData));
+ tab_rec->table = table;
+ tab_rec->vaddr = cu->current_dalvik_offset;
+ int size = table[1];
+ tab_rec->targets = static_cast<LIR**>(NewMem(cu, size * sizeof(LIR*), true, kAllocLIR));
+ InsertGrowableList(cu, &cu->switch_tables, reinterpret_cast<uintptr_t>(tab_rec));
+
+ // Get the switch value
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+ int table_base = AllocTemp(cu);
+ // Materialize a pointer to the switch table
+ NewLIR3(cu, kThumb2Adr, table_base, 0, reinterpret_cast<uintptr_t>(tab_rec));
+ int low_key = s4FromSwitchData(&table[2]);
+ int keyReg;
+ // Remove the bias, if necessary
+ if (low_key == 0) {
+ keyReg = rl_src.low_reg;
+ } else {
+ keyReg = AllocTemp(cu);
+ OpRegRegImm(cu, kOpSub, keyReg, rl_src.low_reg, low_key);
+ }
+ // Bounds check - if < 0 or >= size continue following switch
+ OpRegImm(cu, kOpCmp, keyReg, size-1);
+ LIR* branch_over = OpCondBranch(cu, kCondHi, NULL);
+
+ // Load the displacement from the switch table
+ int disp_reg = AllocTemp(cu);
+ LoadBaseIndexed(cu, table_base, keyReg, disp_reg, 2, kWord);
+
+ // ..and go! NOTE: No instruction set switch here - must stay Thumb2
+ LIR* switch_branch = NewLIR1(cu, kThumb2AddPCR, disp_reg);
+ tab_rec->anchor = switch_branch;
+
+ /* branch_over target here */
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+ branch_over->target = target;
+}
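Both switch generators read the entry count from table[1], and the packed case reads the first key from &table[2]; that follows from the Dex switch payload layouts, summarized here for convenience (16-bit code units, branch targets relative to the switch opcode):

  // packed-switch-payload                 // sparse-switch-payload
  //   ushort ident;        // 0x0100      //   ushort ident;       // 0x0200
  //   ushort size;                        //   ushort size;
  //   int    first_key;                   //   int    keys[size];
  //   int    targets[size];               //   int    targets[size];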
+
+/*
+ * Array data table format:
+ * ushort ident = 0x0300 magic value
+ * ushort width width of each element in the table
+ * uint size number of elements in the table
+ * ubyte data[size*width] table of data values (may contain a single-byte
+ * padding at the end)
+ *
+ * Total size is 4+(width * size + 1)/2 16-bit code units.
+ */
+void ArmCodegen::GenFillArrayData(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
+{
+ const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
+ // Add the table to the list - we'll process it later
+ FillArrayData *tab_rec =
+ static_cast<FillArrayData*>(NewMem(cu, sizeof(FillArrayData), true, kAllocData));
+ tab_rec->table = table;
+ tab_rec->vaddr = cu->current_dalvik_offset;
+ uint16_t width = tab_rec->table[1];
+ uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
+ tab_rec->size = (size * width) + 8;
+
+ InsertGrowableList(cu, &cu->fill_array_data, reinterpret_cast<uintptr_t>(tab_rec));
+
+ // Making a call - use explicit registers
+ FlushAllRegs(cu); /* Everything to home location */
+ LoadValueDirectFixed(cu, rl_src, r0);
+ LoadWordDisp(cu, rARM_SELF, ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode),
+ rARM_LR);
+ // Materialize a pointer to the fill data image
+ NewLIR3(cu, kThumb2Adr, r1, 0, reinterpret_cast<uintptr_t>(tab_rec));
+ ClobberCalleeSave(cu);
+ LIR* call_inst = OpReg(cu, kOpBlx, rARM_LR);
+ MarkSafepointPC(cu, call_inst);
+}
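As a quick consistency check (not part of the source), the byte count stored in tab_rec->size above agrees with the code-unit formula in the comment preceding GenFillArrayData, up to the optional single padding byte:

  // Standalone arithmetic check; the width/size values are arbitrary examples.
  #include <cassert>
  #include <cstdint>

  int main() {
    const uint16_t width = 1;   // element width in bytes (e.g. a byte[] payload)
    const uint32_t size  = 3;   // number of elements
    const uint32_t table_bytes = size * width + 8;           // what tab_rec->size stores
    const uint32_t code_units  = 4 + (width * size + 1) / 2; // formula from the comment
    // Two bytes per code unit; the payload may end with one padding byte.
    assert(2 * code_units == ((table_bytes + 1) & ~1u));
    return 0;
  }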
+
+/*
+ * Handle simple case (thin lock) inline. If it's complicated, bail
+ * out to the heavyweight lock/unlock routines. We'll use dedicated
+ * registers here in order to be in the right position in case we need
+ * to bail to oat[Lock/Unlock]Object(self, object).
+ *
+ * r0 -> self pointer [arg0 for oat[Lock/Unlock]Object]
+ * r1 -> object [arg1 for oat[Lock/Unlock]Object]
+ * r2 -> initial contents of object->lock, later result of strex
+ * r3 -> self->thread_id
+ * r12 -> allow to be used by utilities as general temp
+ *
+ * The result of the strex is 0 if we acquire the lock.
+ *
+ * See comments in monitor.cc for the layout of the lock word.
+ * Of particular interest to this code is the test for the
+ * simple case - which we handle inline. For monitor enter, the
+ * simple case is thin lock, held by no-one. For monitor exit,
+ * the simple case is thin lock, held by the unlocking thread with
+ * a recurse count of 0.
+ *
+ * A minor complication is that there is a field in the lock word
+ * unrelated to locking: the hash state. This field must be ignored, but
+ * preserved.
+ *
+ */
+void ArmCodegen::GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
+{
+ FlushAllRegs(cu);
+ DCHECK_EQ(LW_SHAPE_THIN, 0);
+ LoadValueDirectFixed(cu, rl_src, r0); // Get obj
+ LockCallTemps(cu); // Prepare for explicit register usage
+ GenNullCheck(cu, rl_src.s_reg_low, r0, opt_flags);
+ LoadWordDisp(cu, rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
+ NewLIR3(cu, kThumb2Ldrex, r1, r0,
+ mirror::Object::MonitorOffset().Int32Value() >> 2); // Get object->lock
+ // Align owner
+ OpRegImm(cu, kOpLsl, r2, LW_LOCK_OWNER_SHIFT);
+ // Is lock unheld on lock or held by us (==thread_id) on unlock?
+ NewLIR4(cu, kThumb2Bfi, r2, r1, 0, LW_LOCK_OWNER_SHIFT - 1);
+ NewLIR3(cu, kThumb2Bfc, r1, LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1);
+ OpRegImm(cu, kOpCmp, r1, 0);
+ OpIT(cu, kCondEq, "");
+ NewLIR4(cu, kThumb2Strex, r1, r2, r0,
+ mirror::Object::MonitorOffset().Int32Value() >> 2);
+ OpRegImm(cu, kOpCmp, r1, 0);
+ OpIT(cu, kCondNe, "T");
+ // Go expensive route - artLockObjectFromCode(self, obj);
+ LoadWordDisp(cu, rARM_SELF, ENTRYPOINT_OFFSET(pLockObjectFromCode), rARM_LR);
+ ClobberCalleeSave(cu);
+ LIR* call_inst = OpReg(cu, kOpBlx, rARM_LR);
+ MarkSafepointPC(cu, call_inst);
+ GenMemBarrier(cu, kLoadLoad);
+}
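The instruction sequence above is easier to follow against a high-level model of the thin-lock fast path it implements. The sketch below is illustrative only: the constants are stand-ins for the runtime's LW_* definitions, the ldrex/strex pair is modeled with a compare-and-swap, and SlowPathLock stands in for the artLockObjectFromCode call.

  #include <atomic>
  #include <cstdint>

  constexpr uint32_t kOwnerShift    = 3;          // stand-in for LW_LOCK_OWNER_SHIFT
  constexpr uint32_t kHashStateBits = 0x3u << 1;  // stand-in for the hash-state field

  inline void SlowPathLock(void*) { /* models the call to artLockObjectFromCode */ }

  void MonitorEnterFastPath(std::atomic<uint32_t>* lock_word, uint32_t thread_id,
                            void* obj) {
    uint32_t old_word = lock_word->load(std::memory_order_relaxed);     // ldrex
    // New word: our thread id in the owner field, hash state preserved.
    uint32_t new_word = (thread_id << kOwnerShift) | (old_word & kHashStateBits);
    bool unheld = (old_word & ~kHashStateBits) == 0;                    // thin, owner-free
    if (!unheld ||
        !lock_word->compare_exchange_strong(old_word, new_word)) {      // strex
      SlowPathLock(obj);                                                // expensive route
    }
    std::atomic_thread_fence(std::memory_order_acquire);                // the trailing barrier
  }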
+
+/*
+ * For monitor unlock, we don't have to use ldrex/strex. Once
+ * we've determined that the lock is thin and that we own it with
+ * a zero recursion count, it's safe to punch it back to the
+ * initial, unlocked thin state with a store word.
+ */
+void ArmCodegen::GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
+{
+ DCHECK_EQ(LW_SHAPE_THIN, 0);
+ FlushAllRegs(cu);
+ LoadValueDirectFixed(cu, rl_src, r0); // Get obj
+ LockCallTemps(cu); // Prepare for explicit register usage
+ GenNullCheck(cu, rl_src.s_reg_low, r0, opt_flags);
+ LoadWordDisp(cu, r0, mirror::Object::MonitorOffset().Int32Value(), r1); // Get lock
+ LoadWordDisp(cu, rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
+ // Is lock unheld on lock or held by us (==thread_id) on unlock?
+ OpRegRegImm(cu, kOpAnd, r3, r1,
+ (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT));
+ // Align owner
+ OpRegImm(cu, kOpLsl, r2, LW_LOCK_OWNER_SHIFT);
+ NewLIR3(cu, kThumb2Bfc, r1, LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1);
+ OpRegReg(cu, kOpSub, r1, r2);
+ OpIT(cu, kCondEq, "EE");
+ StoreWordDisp(cu, r0, mirror::Object::MonitorOffset().Int32Value(), r3);
+ // Go expensive route - UnlockObjectFromCode(obj);
+ LoadWordDisp(cu, rARM_SELF, ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARM_LR);
+ ClobberCalleeSave(cu);
+ LIR* call_inst = OpReg(cu, kOpBlx, rARM_LR);
+ MarkSafepointPC(cu, call_inst);
+ GenMemBarrier(cu, kStoreLoad);
+}
+
+void ArmCodegen::GenMoveException(CompilationUnit* cu, RegLocation rl_dest)
+{
+ int ex_offset = Thread::ExceptionOffset().Int32Value();
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ int reset_reg = AllocTemp(cu);
+ LoadWordDisp(cu, rARM_SELF, ex_offset, rl_result.low_reg);
+ LoadConstant(cu, reset_reg, 0);
+ StoreWordDisp(cu, rARM_SELF, ex_offset, reset_reg);
+ FreeTemp(cu, reset_reg);
+ StoreValue(cu, rl_dest, rl_result);
+}
+
+/*
+ * Mark garbage collection card. Skip if the value we're storing is null.
+ */
+void ArmCodegen::MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg)
+{
+ int reg_card_base = AllocTemp(cu);
+ int reg_card_no = AllocTemp(cu);
+ LIR* branch_over = OpCmpImmBranch(cu, kCondEq, val_reg, 0, NULL);
+ LoadWordDisp(cu, rARM_SELF, Thread::CardTableOffset().Int32Value(), reg_card_base);
+ OpRegRegImm(cu, kOpLsr, reg_card_no, tgt_addr_reg, CardTable::kCardShift);
+ StoreBaseIndexed(cu, reg_card_base, reg_card_no, reg_card_base, 0,
+ kUnsignedByte);
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+ branch_over->target = target;
+ FreeTemp(cu, reg_card_base);
+ FreeTemp(cu, reg_card_no);
+}
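MarkGCCard above stores the low byte of the (biased) card-table base at base + (address >> kCardShift), skipping the store when the value being written is null. A compact model of that behaviour, with the shift passed in explicitly since CardTable::kCardShift is defined in the runtime and not reproduced here:

  #include <cstdint>

  // Illustrative model of the emitted card-marking sequence, not the real helper.
  void MarkCard(uint8_t* biased_card_table, uintptr_t stored_to_addr,
                uintptr_t stored_value, unsigned card_shift) {
    if (stored_value == 0) {
      return;  // branch_over: storing null dirties no card
    }
    // The byte written is the low 8 bits of the biased table base itself,
    // mirroring StoreBaseIndexed(..., reg_card_base, ..., kUnsignedByte).
    biased_card_table[stored_to_addr >> card_shift] =
        static_cast<uint8_t>(reinterpret_cast<uintptr_t>(biased_card_table));
  }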
+
+void ArmCodegen::GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs, RegLocation rl_method)
+{
+ int spill_count = cu->num_core_spills + cu->num_fp_spills;
+ /*
+ * On entry, r0, r1, r2 & r3 are live. Let the register allocation
+ * mechanism know so it doesn't try to use any of them when
+ * expanding the frame or flushing. This leaves the utility
+ * code with a single temp: r12. This should be enough.
+ */
+ LockTemp(cu, r0);
+ LockTemp(cu, r1);
+ LockTemp(cu, r2);
+ LockTemp(cu, r3);
+
+ /*
+ * We can safely skip the stack overflow check if we're
+ * a leaf *and* our frame size < fudge factor.
+ */
+ bool skip_overflow_check = ((cu->attrs & METHOD_IS_LEAF) &&
+ (static_cast<size_t>(cu->frame_size) <
+ Thread::kStackOverflowReservedBytes));
+ NewLIR0(cu, kPseudoMethodEntry);
+ if (!skip_overflow_check) {
+ /* Load stack limit */
+ LoadWordDisp(cu, rARM_SELF, Thread::StackEndOffset().Int32Value(), r12);
+ }
+ /* Spill core callee saves */
+ NewLIR1(cu, kThumb2Push, cu->core_spill_mask);
+ /* Need to spill any FP regs? */
+ if (cu->num_fp_spills) {
+ /*
+ * NOTE: fp spills are a little different from core spills in that
+ * they are pushed as a contiguous block. When promoting from
+ * the fp set, we must allocate all singles from s16..highest-promoted
+ */
+ NewLIR1(cu, kThumb2VPushCS, cu->num_fp_spills);
+ }
+ if (!skip_overflow_check) {
+ OpRegRegImm(cu, kOpSub, rARM_LR, rARM_SP, cu->frame_size - (spill_count * 4));
+ GenRegRegCheck(cu, kCondCc, rARM_LR, r12, kThrowStackOverflow);
+ OpRegCopy(cu, rARM_SP, rARM_LR); // Establish stack
+ } else {
+ OpRegImm(cu, kOpSub, rARM_SP, cu->frame_size - (spill_count * 4));
+ }
+
+ FlushIns(cu, ArgLocs, rl_method);
+
+ FreeTemp(cu, r0);
+ FreeTemp(cu, r1);
+ FreeTemp(cu, r2);
+ FreeTemp(cu, r3);
+}
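One detail worth spelling out in the prologue above: the core and FP pushes have already dropped sp by spill_count * 4 bytes, so the explicit subtraction only covers the remainder, and the net stack adjustment still equals cu->frame_size. A trivial standalone check with example values:

  #include <cassert>

  int main() {
    const int frame_size   = 64;                   // example value of cu->frame_size, in bytes
    const int spill_count  = 5;                    // core + fp spills
    const int pushed       = spill_count * 4;      // bytes consumed by push / vpush
    const int explicit_sub = frame_size - pushed;  // the kOpSub on rARM_SP
    assert(pushed + explicit_sub == frame_size);
    return 0;
  }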
+
+void ArmCodegen::GenExitSequence(CompilationUnit* cu)
+{
+ int spill_count = cu->num_core_spills + cu->num_fp_spills;
+ /*
+ * In the exit path, r0/r1 are live - make sure they aren't
+ * allocated by the register utilities as temps.
+ */
+ LockTemp(cu, r0);
+ LockTemp(cu, r1);
+
+ NewLIR0(cu, kPseudoMethodExit);
+ OpRegImm(cu, kOpAdd, rARM_SP, cu->frame_size - (spill_count * 4));
+ /* Need to restore any FP callee saves? */
+ if (cu->num_fp_spills) {
+ NewLIR1(cu, kThumb2VPopCS, cu->num_fp_spills);
+ }
+ if (cu->core_spill_mask & (1 << rARM_LR)) {
+ /* Unspill rARM_LR to rARM_PC */
+ cu->core_spill_mask &= ~(1 << rARM_LR);
+ cu->core_spill_mask |= (1 << rARM_PC);
+ }
+ NewLIR1(cu, kThumb2Pop, cu->core_spill_mask);
+ if (!(cu->core_spill_mask & (1 << rARM_PC))) {
+ /* We didn't pop to rARM_PC, so must do a bx rARM_LR */
+ NewLIR1(cu, kThumbBx, rARM_LR);
+ }
+}
+
+} // namespace art
diff --git a/src/compiler/dex/quick/arm/codegen_arm.h b/src/compiler/dex/quick/arm/codegen_arm.h
new file mode 100644
index 0000000..e77394c
--- /dev/null
+++ b/src/compiler/dex/quick/arm/codegen_arm.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_QUICK_ARM_CODEGENARM_H_
+#define ART_SRC_COMPILER_DEX_QUICK_ARM_CODEGENARM_H_
+
+#include "compiler/dex/compiler_internals.h"
+
+namespace art {
+
+class ArmCodegen : public Codegen {
+ public:
+ // Required for target - codegen helpers.
+ virtual bool SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
+ RegLocation rl_src, RegLocation rl_dest, int lit);
+ virtual int LoadHelper(CompilationUnit* cu, int offset);
+ virtual LIR* LoadBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_dest,
+ OpSize size, int s_reg);
+ virtual LIR* LoadBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_dest_lo,
+ int r_dest_hi, int s_reg);
+ virtual LIR* LoadBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_dest, int scale,
+ OpSize size);
+ virtual LIR* LoadBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
+ int displacement, int r_dest, int r_dest_hi, OpSize size,
+ int s_reg);
+ virtual LIR* LoadConstantNoClobber(CompilationUnit* cu, int r_dest, int value);
+ virtual LIR* LoadConstantWide(CompilationUnit* cu, int r_dest_lo, int r_dest_hi, int64_t value);
+ virtual LIR* StoreBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_src,
+ OpSize size);
+ virtual LIR* StoreBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_src_lo,
+ int r_src_hi);
+ virtual LIR* StoreBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_src, int scale,
+ OpSize size);
+ virtual LIR* StoreBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
+ int displacement, int r_src, int r_src_hi, OpSize size,
+ int s_reg);
+ virtual void MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg);
+
+ // Required for target - register utilities.
+ virtual bool IsFpReg(int reg);
+ virtual bool SameRegType(int reg1, int reg2);
+ virtual int AllocTypedTemp(CompilationUnit* cu, bool fp_hint, int reg_class);
+ virtual int AllocTypedTempPair(CompilationUnit* cu, bool fp_hint, int reg_class);
+ virtual int S2d(int low_reg, int high_reg);
+ virtual int TargetReg(SpecialTargetRegister reg);
+ virtual RegisterInfo* GetRegInfo(CompilationUnit* cu, int reg);
+ virtual RegLocation GetReturnAlt(CompilationUnit* cu);
+ virtual RegLocation GetReturnWideAlt(CompilationUnit* cu);
+ virtual RegLocation LocCReturn();
+ virtual RegLocation LocCReturnDouble();
+ virtual RegLocation LocCReturnFloat();
+ virtual RegLocation LocCReturnWide();
+ virtual uint32_t FpRegMask();
+ virtual uint64_t GetRegMaskCommon(CompilationUnit* cu, int reg);
+ virtual void AdjustSpillMask(CompilationUnit* cu);
+ virtual void ClobberCalleeSave(CompilationUnit *cu);
+ virtual void FlushReg(CompilationUnit* cu, int reg);
+ virtual void FlushRegWide(CompilationUnit* cu, int reg1, int reg2);
+ virtual void FreeCallTemps(CompilationUnit* cu);
+ virtual void FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep, RegLocation rl_free);
+ virtual void LockCallTemps(CompilationUnit* cu);
+ virtual void MarkPreservedSingle(CompilationUnit* cu, int v_reg, int reg);
+ virtual void CompilerInitializeRegAlloc(CompilationUnit* cu);
+
+ // Required for target - miscellaneous.
+ virtual AssemblerStatus AssembleInstructions(CompilationUnit* cu, uintptr_t start_addr);
+ virtual void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
+ virtual void SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir);
+ virtual const char* GetTargetInstFmt(int opcode);
+ virtual const char* GetTargetInstName(int opcode);
+ virtual std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
+ virtual uint64_t GetPCUseDefEncoding();
+ virtual uint64_t GetTargetInstFlags(int opcode);
+ virtual int GetInsnSize(LIR* lir);
+ virtual bool IsUnconditionalBranch(LIR* lir);
+
+ // Required for target - Dalvik-level generators.
+ virtual void GenArithImmOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2);
+ virtual void GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_src, int scale);
+ virtual void GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_dest, int scale);
+ virtual void GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_src, int scale);
+ virtual void GenShiftImmOpLong(CompilationUnit* cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_shift);
+ virtual void GenMulLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ virtual void GenAddLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ virtual void GenAndLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ virtual void GenArithOpDouble(CompilationUnit* cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ virtual void GenArithOpFloat(CompilationUnit *cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2);
+ virtual void GenCmpFP(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2);
+ virtual void GenConversion(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src);
+ virtual bool GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier);
+ virtual bool GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min);
+ virtual bool GenInlinedSqrt(CompilationUnit* cu, CallInfo* info);
+ virtual void GenNegLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+ virtual void GenOrLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ virtual void GenSubLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ virtual void GenXorLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ virtual LIR* GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code, int reg1, int base,
+ int offset, ThrowKind kind);
+ virtual RegLocation GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int reg_hi,
+ bool is_div);
+ virtual RegLocation GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int lit,
+ bool is_div);
+ virtual void GenCmpLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ virtual void GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi);
+ virtual void GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs,
+ RegLocation rl_method);
+ virtual void GenExitSequence(CompilationUnit* cu);
+ virtual void GenFillArrayData(CompilationUnit* cu, uint32_t table_offset,
+ RegLocation rl_src);
+ virtual void GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir, bool gt_bias,
+ bool is_double);
+ virtual void GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir);
+ virtual void GenSelect(CompilationUnit* cu, BasicBlock* bb, MIR* mir);
+ virtual void GenMemBarrier(CompilationUnit* cu, MemBarrierKind barrier_kind);
+ virtual void GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src);
+ virtual void GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src);
+ virtual void GenMoveException(CompilationUnit* cu, RegLocation rl_dest);
+ virtual void GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src,
+ RegLocation rl_result, int lit, int first_bit,
+ int second_bit);
+ virtual void GenNegDouble(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+ virtual void GenNegFloat(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+ virtual void GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset,
+ RegLocation rl_src);
+ virtual void GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset,
+ RegLocation rl_src);
+ virtual void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+ SpecialCaseHandler special_case);
+
+ // Required for target - single operation generators.
+ virtual LIR* OpUnconditionalBranch(CompilationUnit* cu, LIR* target);
+ virtual LIR* OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1, int src2,
+ LIR* target);
+ virtual LIR* OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg, int check_value,
+ LIR* target);
+ virtual LIR* OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target);
+ virtual LIR* OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg,
+ LIR* target);
+ virtual LIR* OpFpRegCopy(CompilationUnit* cu, int r_dest, int r_src);
+ virtual LIR* OpIT(CompilationUnit* cu, ConditionCode cond, const char* guide);
+ virtual LIR* OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp);
+ virtual LIR* OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target);
+ virtual LIR* OpReg(CompilationUnit* cu, OpKind op, int r_dest_src);
+ virtual LIR* OpRegCopy(CompilationUnit* cu, int r_dest, int r_src);
+ virtual LIR* OpRegCopyNoInsert(CompilationUnit* cu, int r_dest, int r_src);
+ virtual LIR* OpRegImm(CompilationUnit* cu, OpKind op, int r_dest_src1, int value);
+ virtual LIR* OpRegMem(CompilationUnit* cu, OpKind op, int r_dest, int rBase, int offset);
+ virtual LIR* OpRegReg(CompilationUnit* cu, OpKind op, int r_dest_src1, int r_src2);
+ virtual LIR* OpRegRegImm(CompilationUnit* cu, OpKind op, int r_dest, int r_src1, int value);
+ virtual LIR* OpRegRegReg(CompilationUnit* cu, OpKind op, int r_dest, int r_src1,
+ int r_src2);
+ virtual LIR* OpTestSuspend(CompilationUnit* cu, LIR* target);
+ virtual LIR* OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset);
+ virtual LIR* OpVldm(CompilationUnit* cu, int rBase, int count);
+ virtual LIR* OpVstm(CompilationUnit* cu, int rBase, int count);
+ virtual void OpLea(CompilationUnit* cu, int rBase, int reg1, int reg2, int scale,
+ int offset);
+ virtual void OpRegCopyWide(CompilationUnit* cu, int dest_lo, int dest_hi, int src_lo,
+ int src_hi);
+ virtual void OpTlsCmp(CompilationUnit* cu, int offset, int val);
+
+ static RegLocation ArgLoc(CompilationUnit* cu, RegLocation loc);
+ LIR* LoadBaseDispBody(CompilationUnit* cu, int rBase, int displacement, int r_dest,
+ int r_dest_hi, OpSize size, int s_reg);
+ LIR* StoreBaseDispBody(CompilationUnit* cu, int rBase, int displacement, int r_src,
+ int r_src_hi, OpSize size);
+ static void GenPrintLabel(CompilationUnit *cu, MIR* mir);
+ static LIR* OpRegRegRegShift(CompilationUnit* cu, OpKind op, int r_dest, int r_src1,
+ int r_src2, int shift);
+ static LIR* OpRegRegShift(CompilationUnit* cu, OpKind op, int r_dest_src1, int r_src2,
+ int shift);
+ static const ArmEncodingMap EncodingMap[kArmLast];
+ static int EncodeShift(int code, int amount);
+ static int ModifiedImmediate(uint32_t value);
+ static ArmConditionCode ArmConditionEncoding(ConditionCode code);
+ bool InexpensiveConstantInt(int32_t value);
+ bool InexpensiveConstantFloat(int32_t value);
+ bool InexpensiveConstantLong(int64_t value);
+ bool InexpensiveConstantDouble(int64_t value);
+
+ private:
+ void GenFusedLongCmpImmBranch(CompilationUnit* cu, BasicBlock* bb, RegLocation rl_src1,
+ int64_t val, ConditionCode ccode);
+};
+
+} // namespace art
+
+#endif // ART_SRC_COMPILER_DEX_QUICK_ARM_CODEGENARM_H_
diff --git a/src/compiler/dex/quick/arm/fp_arm.cc b/src/compiler/dex/quick/arm/fp_arm.cc
new file mode 100644
index 0000000..cc65217
--- /dev/null
+++ b/src/compiler/dex/quick/arm/fp_arm.cc
@@ -0,0 +1,337 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "arm_lir.h"
+#include "codegen_arm.h"
+#include "compiler/dex/quick/codegen_util.h"
+#include "compiler/dex/quick/ralloc_util.h"
+
+namespace art {
+
+void ArmCodegen::GenArithOpFloat(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
+{
+ int op = kThumbBkpt;
+ RegLocation rl_result;
+
+ /*
+ * Don't attempt to optimize register usage since these opcodes call out to
+ * the handlers.
+ */
+ switch (opcode) {
+ case Instruction::ADD_FLOAT_2ADDR:
+ case Instruction::ADD_FLOAT:
+ op = kThumb2Vadds;
+ break;
+ case Instruction::SUB_FLOAT_2ADDR:
+ case Instruction::SUB_FLOAT:
+ op = kThumb2Vsubs;
+ break;
+ case Instruction::DIV_FLOAT_2ADDR:
+ case Instruction::DIV_FLOAT:
+ op = kThumb2Vdivs;
+ break;
+ case Instruction::MUL_FLOAT_2ADDR:
+ case Instruction::MUL_FLOAT:
+ op = kThumb2Vmuls;
+ break;
+ case Instruction::REM_FLOAT_2ADDR:
+ case Instruction::REM_FLOAT:
+ FlushAllRegs(cu); // Send everything to home location
+ CallRuntimeHelperRegLocationRegLocation(cu, ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, false);
+ rl_result = GetReturn(cu, true);
+ StoreValue(cu, rl_dest, rl_result);
+ return;
+ case Instruction::NEG_FLOAT:
+ GenNegFloat(cu, rl_dest, rl_src1);
+ return;
+ default:
+ LOG(FATAL) << "Unexpected opcode: " << opcode;
+ }
+ rl_src1 = LoadValue(cu, rl_src1, kFPReg);
+ rl_src2 = LoadValue(cu, rl_src2, kFPReg);
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ NewLIR3(cu, op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
+ StoreValue(cu, rl_dest, rl_result);
+}
+
+void ArmCodegen::GenArithOpDouble(CompilationUnit* cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
+{
+ int op = kThumbBkpt;
+ RegLocation rl_result;
+
+ switch (opcode) {
+ case Instruction::ADD_DOUBLE_2ADDR:
+ case Instruction::ADD_DOUBLE:
+ op = kThumb2Vaddd;
+ break;
+ case Instruction::SUB_DOUBLE_2ADDR:
+ case Instruction::SUB_DOUBLE:
+ op = kThumb2Vsubd;
+ break;
+ case Instruction::DIV_DOUBLE_2ADDR:
+ case Instruction::DIV_DOUBLE:
+ op = kThumb2Vdivd;
+ break;
+ case Instruction::MUL_DOUBLE_2ADDR:
+ case Instruction::MUL_DOUBLE:
+ op = kThumb2Vmuld;
+ break;
+ case Instruction::REM_DOUBLE_2ADDR:
+ case Instruction::REM_DOUBLE:
+ FlushAllRegs(cu); // Send everything to home location
+ CallRuntimeHelperRegLocationRegLocation(cu, ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, false);
+ rl_result = GetReturnWide(cu, true);
+ StoreValueWide(cu, rl_dest, rl_result);
+ return;
+ case Instruction::NEG_DOUBLE:
+ GenNegDouble(cu, rl_dest, rl_src1);
+ return;
+ default:
+ LOG(FATAL) << "Unexpected opcode: " << opcode;
+ }
+
+ rl_src1 = LoadValueWide(cu, rl_src1, kFPReg);
+ DCHECK(rl_src1.wide);
+ rl_src2 = LoadValueWide(cu, rl_src2, kFPReg);
+ DCHECK(rl_src2.wide);
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ DCHECK(rl_dest.wide);
+ DCHECK(rl_result.wide);
+ NewLIR3(cu, op, S2d(rl_result.low_reg, rl_result.high_reg), S2d(rl_src1.low_reg, rl_src1.high_reg),
+ S2d(rl_src2.low_reg, rl_src2.high_reg));
+ StoreValueWide(cu, rl_dest, rl_result);
+}
+
+void ArmCodegen::GenConversion(CompilationUnit* cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src)
+{
+ int op = kThumbBkpt;
+ int src_reg;
+ RegLocation rl_result;
+
+ switch (opcode) {
+ case Instruction::INT_TO_FLOAT:
+ op = kThumb2VcvtIF;
+ break;
+ case Instruction::FLOAT_TO_INT:
+ op = kThumb2VcvtFI;
+ break;
+ case Instruction::DOUBLE_TO_FLOAT:
+ op = kThumb2VcvtDF;
+ break;
+ case Instruction::FLOAT_TO_DOUBLE:
+ op = kThumb2VcvtFd;
+ break;
+ case Instruction::INT_TO_DOUBLE:
+ op = kThumb2VcvtID;
+ break;
+ case Instruction::DOUBLE_TO_INT:
+ op = kThumb2VcvtDI;
+ break;
+ case Instruction::LONG_TO_DOUBLE:
+ GenConversionCall(cu, ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src);
+ return;
+ case Instruction::FLOAT_TO_LONG:
+ GenConversionCall(cu, ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src);
+ return;
+ case Instruction::LONG_TO_FLOAT:
+ GenConversionCall(cu, ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src);
+ return;
+ case Instruction::DOUBLE_TO_LONG:
+ GenConversionCall(cu, ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src);
+ return;
+ default:
+ LOG(FATAL) << "Unexpected opcode: " << opcode;
+ }
+ if (rl_src.wide) {
+ rl_src = LoadValueWide(cu, rl_src, kFPReg);
+ src_reg = S2d(rl_src.low_reg, rl_src.high_reg);
+ } else {
+ rl_src = LoadValue(cu, rl_src, kFPReg);
+ src_reg = rl_src.low_reg;
+ }
+ if (rl_dest.wide) {
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ NewLIR2(cu, op, S2d(rl_result.low_reg, rl_result.high_reg), src_reg);
+ StoreValueWide(cu, rl_dest, rl_result);
+ } else {
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ NewLIR2(cu, op, rl_result.low_reg, src_reg);
+ StoreValue(cu, rl_dest, rl_result);
+ }
+}
+
+void ArmCodegen::GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir, bool gt_bias,
+ bool is_double)
+{
+ LIR* label_list = cu->block_label_list;
+ LIR* target = &label_list[bb->taken->id];
+ RegLocation rl_src1;
+ RegLocation rl_src2;
+ if (is_double) {
+ rl_src1 = GetSrcWide(cu, mir, 0);
+ rl_src2 = GetSrcWide(cu, mir, 2);
+ rl_src1 = LoadValueWide(cu, rl_src1, kFPReg);
+ rl_src2 = LoadValueWide(cu, rl_src2, kFPReg);
+    NewLIR2(cu, kThumb2Vcmpd, S2d(rl_src1.low_reg, rl_src1.high_reg),
+ S2d(rl_src2.low_reg, rl_src2.high_reg));
+ } else {
+ rl_src1 = GetSrc(cu, mir, 0);
+ rl_src2 = GetSrc(cu, mir, 1);
+ rl_src1 = LoadValue(cu, rl_src1, kFPReg);
+ rl_src2 = LoadValue(cu, rl_src2, kFPReg);
+ NewLIR2(cu, kThumb2Vcmps, rl_src1.low_reg, rl_src2.low_reg);
+ }
+ NewLIR0(cu, kThumb2Fmstat);
+ ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
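+  /*
+   * An unordered VFP compare (either operand NaN) sets the flags to N=0, Z=0, C=1, V=1.
+   * The plain signed conditions already give the "less" bias of cmpl, so for gt_bias
+   * (cmpg semantics) lt/le/gt/ge are remapped to flag tests that treat the unordered
+   * case as "greater": e.g. MI is true only for an ordered less-than, while HI and CS
+   * are also true when the compare was unordered.
+   */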
+ switch(ccode) {
+ case kCondEq:
+ case kCondNe:
+ break;
+ case kCondLt:
+ if (gt_bias) {
+ ccode = kCondMi;
+ }
+ break;
+ case kCondLe:
+ if (gt_bias) {
+ ccode = kCondLs;
+ }
+ break;
+ case kCondGt:
+ if (gt_bias) {
+ ccode = kCondHi;
+ }
+ break;
+ case kCondGe:
+ if (gt_bias) {
+ ccode = kCondCs;
+ }
+ break;
+ default:
+ LOG(FATAL) << "Unexpected ccode: " << ccode;
+ }
+ OpCondBranch(cu, ccode, target);
+}
+
+
+void ArmCodegen::GenCmpFP(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
+{
+ bool is_double = false;
+ int default_result = -1;
+ RegLocation rl_result;
+
+ switch (opcode) {
+ case Instruction::CMPL_FLOAT:
+ is_double = false;
+ default_result = -1;
+ break;
+ case Instruction::CMPG_FLOAT:
+ is_double = false;
+ default_result = 1;
+ break;
+ case Instruction::CMPL_DOUBLE:
+ is_double = true;
+ default_result = -1;
+ break;
+ case Instruction::CMPG_DOUBLE:
+ is_double = true;
+ default_result = 1;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected opcode: " << opcode;
+ }
+ if (is_double) {
+ rl_src1 = LoadValueWide(cu, rl_src1, kFPReg);
+ rl_src2 = LoadValueWide(cu, rl_src2, kFPReg);
+ // In case result vreg is also a src vreg, break association to avoid useless copy by EvalLoc()
+ ClobberSReg(cu, rl_dest.s_reg_low);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ LoadConstant(cu, rl_result.low_reg, default_result);
+    NewLIR2(cu, kThumb2Vcmpd, S2d(rl_src1.low_reg, rl_src1.high_reg),
+ S2d(rl_src2.low_reg, rl_src2.high_reg));
+ } else {
+ rl_src1 = LoadValue(cu, rl_src1, kFPReg);
+ rl_src2 = LoadValue(cu, rl_src2, kFPReg);
+    // In case result vreg is also a src vreg, break association to avoid useless copy by EvalLoc()
+ ClobberSReg(cu, rl_dest.s_reg_low);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ LoadConstant(cu, rl_result.low_reg, default_result);
+ NewLIR2(cu, kThumb2Vcmps, rl_src1.low_reg, rl_src2.low_reg);
+ }
+ DCHECK(!ARM_FPREG(rl_result.low_reg));
+ NewLIR0(cu, kThumb2Fmstat);
+
+ OpIT(cu, (default_result == -1) ? kCondGt : kCondMi, "");
+ NewLIR2(cu, kThumb2MovImmShift, rl_result.low_reg,
+ ModifiedImmediate(-default_result)); // Must not alter ccodes
+ GenBarrier(cu);
+
+ OpIT(cu, kCondEq, "");
+ LoadConstant(cu, rl_result.low_reg, 0);
+ GenBarrier(cu);
+
+ StoreValue(cu, rl_dest, rl_result);
+}
+
+void ArmCodegen::GenNegFloat(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+{
+ RegLocation rl_result;
+ rl_src = LoadValue(cu, rl_src, kFPReg);
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ NewLIR2(cu, kThumb2Vnegs, rl_result.low_reg, rl_src.low_reg);
+ StoreValue(cu, rl_dest, rl_result);
+}
+
+void ArmCodegen::GenNegDouble(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+{
+ RegLocation rl_result;
+ rl_src = LoadValueWide(cu, rl_src, kFPReg);
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ NewLIR2(cu, kThumb2Vnegd, S2d(rl_result.low_reg, rl_result.high_reg),
+ S2d(rl_src.low_reg, rl_src.high_reg));
+ StoreValueWide(cu, rl_dest, rl_result);
+}
+
+bool ArmCodegen::GenInlinedSqrt(CompilationUnit* cu, CallInfo* info) {
+ DCHECK_EQ(cu->instruction_set, kThumb2);
+ LIR *branch;
+ RegLocation rl_src = info->args[0];
+ RegLocation rl_dest = InlineTargetWide(cu, info); // double place for result
+ rl_src = LoadValueWide(cu, rl_src, kFPReg);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ NewLIR2(cu, kThumb2Vsqrtd, S2d(rl_result.low_reg, rl_result.high_reg),
+ S2d(rl_src.low_reg, rl_src.high_reg));
+ NewLIR2(cu, kThumb2Vcmpd, S2d(rl_result.low_reg, rl_result.high_reg),
+ S2d(rl_result.low_reg, rl_result.high_reg));
+ NewLIR0(cu, kThumb2Fmstat);
+ branch = NewLIR2(cu, kThumbBCond, 0, kArmCondEq);
+ ClobberCalleeSave(cu);
+ LockCallTemps(cu); // Using fixed registers
+ int r_tgt = LoadHelper(cu, ENTRYPOINT_OFFSET(pSqrt));
+ NewLIR3(cu, kThumb2Fmrrd, r0, r1, S2d(rl_src.low_reg, rl_src.high_reg));
+ NewLIR1(cu, kThumbBlxR, r_tgt);
+ NewLIR3(cu, kThumb2Fmdrr, S2d(rl_result.low_reg, rl_result.high_reg), r0, r1);
+ branch->target = NewLIR0(cu, kPseudoTargetLabel);
+ StoreValueWide(cu, rl_dest, rl_result);
+ return true;
+}
+
+
+} // namespace art
diff --git a/src/compiler/dex/quick/arm/int_arm.cc b/src/compiler/dex/quick/arm/int_arm.cc
new file mode 100644
index 0000000..0018d44
--- /dev/null
+++ b/src/compiler/dex/quick/arm/int_arm.cc
@@ -0,0 +1,1190 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This file contains codegen for the Thumb2 ISA. */
+
+#include "arm_lir.h"
+#include "codegen_arm.h"
+#include "compiler/dex/quick/codegen_util.h"
+#include "compiler/dex/quick/ralloc_util.h"
+#include "oat/runtime/oat_support_entrypoints.h"
+#include "oat_compilation_unit.h"
+
+namespace art {
+
+LIR* ArmCodegen::OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1,
+ int src2, LIR* target)
+{
+ OpRegReg(cu, kOpCmp, src1, src2);
+ return OpCondBranch(cu, cond, target);
+}
+
+/*
+ * Generate a Thumb2 IT instruction, which can nullify up to
+ * four subsequent instructions based on a condition and its
+ * inverse. The condition applies to the first instruction, which
+ * is executed if the condition is met. The string "guide" consists
+ * of 0 to 3 chars, and applies to the 2nd through 4th instruction.
+ * A "T" means the instruction is executed if the condition is
+ * met, and an "E" means the instruction is executed if the condition
+ * is not met.
+ */
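+/*
+ * For example (per the encoding above), OpIT(cu, kCondEq, "E") emits an ITE EQ:
+ * EQ encodes as 0b0000, so cond_bit = 0 and alt_bit = 1; the single 'E' makes mask3 = 1
+ * and mask = (1 << 3) | (1 << 2) = 0b1100, covering two conditional instructions: the
+ * first executed on EQ, the second on NE.
+ */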
+LIR* ArmCodegen::OpIT(CompilationUnit* cu, ConditionCode ccode, const char* guide)
+{
+ int mask;
+ int mask3 = 0;
+ int mask2 = 0;
+ int mask1 = 0;
+ ArmConditionCode code = ArmConditionEncoding(ccode);
+ int cond_bit = code & 1;
+ int alt_bit = cond_bit ^ 1;
+
+  // Note: case fall-throughs are intentional.
+ switch (strlen(guide)) {
+ case 3:
+ mask1 = (guide[2] == 'T') ? cond_bit : alt_bit;
+ case 2:
+ mask2 = (guide[1] == 'T') ? cond_bit : alt_bit;
+ case 1:
+ mask3 = (guide[0] == 'T') ? cond_bit : alt_bit;
+ break;
+ case 0:
+ break;
+ default:
+ LOG(FATAL) << "OAT: bad case in OpIT";
+ }
+ mask = (mask3 << 3) | (mask2 << 2) | (mask1 << 1) |
+ (1 << (3 - strlen(guide)));
+ return NewLIR2(cu, kThumb2It, code, mask);
+}
+
+/*
+ * 64-bit 3way compare function.
+ * mov rX, #-1
+ * cmp op1hi, op2hi
+ * blt done
+ * bgt flip
+ * sub rX, op1lo, op2lo (treat as unsigned)
+ * beq done
+ * ite hi
+ * mov(hi) rX, #-1
+ * mov(!hi) rX, #1
+ * flip:
+ * neg rX
+ * done:
+ */
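+/*
+ * For example, comparing 0x0000000000000005 with 0x0000000000000003: the high words are
+ * equal, so neither early branch is taken; the unsigned low-word subtract is nonzero with
+ * HI set (5 > 3 unsigned), rX becomes -1, and the fall-through negation at "flip" gives
+ * the final result +1.
+ */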
+void ArmCodegen::GenCmpLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2)
+{
+ LIR* target1;
+ LIR* target2;
+ rl_src1 = LoadValueWide(cu, rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(cu, rl_src2, kCoreReg);
+ int t_reg = AllocTemp(cu);
+ LoadConstant(cu, t_reg, -1);
+ OpRegReg(cu, kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
+ LIR* branch1 = OpCondBranch(cu, kCondLt, NULL);
+ LIR* branch2 = OpCondBranch(cu, kCondGt, NULL);
+ OpRegRegReg(cu, kOpSub, t_reg, rl_src1.low_reg, rl_src2.low_reg);
+ LIR* branch3 = OpCondBranch(cu, kCondEq, NULL);
+
+ OpIT(cu, kCondHi, "E");
+ NewLIR2(cu, kThumb2MovImmShift, t_reg, ModifiedImmediate(-1));
+ LoadConstant(cu, t_reg, 1);
+ GenBarrier(cu);
+
+ target2 = NewLIR0(cu, kPseudoTargetLabel);
+ OpRegReg(cu, kOpNeg, t_reg, t_reg);
+
+ target1 = NewLIR0(cu, kPseudoTargetLabel);
+
+ RegLocation rl_temp = LocCReturn(); // Just using as template, will change
+ rl_temp.low_reg = t_reg;
+ StoreValue(cu, rl_dest, rl_temp);
+ FreeTemp(cu, t_reg);
+
+ branch1->target = target1;
+ branch2->target = target2;
+ branch3->target = branch1->target;
+}
+
+void ArmCodegen::GenFusedLongCmpImmBranch(CompilationUnit* cu, BasicBlock* bb, RegLocation rl_src1,
+ int64_t val, ConditionCode ccode)
+{
+ int32_t val_lo = Low32Bits(val);
+ int32_t val_hi = High32Bits(val);
+ DCHECK(ModifiedImmediate(val_lo) >= 0);
+ DCHECK(ModifiedImmediate(val_hi) >= 0);
+ LIR* label_list = cu->block_label_list;
+ LIR* taken = &label_list[bb->taken->id];
+ LIR* not_taken = &label_list[bb->fall_through->id];
+ rl_src1 = LoadValueWide(cu, rl_src1, kCoreReg);
+ int32_t low_reg = rl_src1.low_reg;
+ int32_t high_reg = rl_src1.high_reg;
+
+ switch(ccode) {
+ case kCondEq:
+ case kCondNe:
+ LIR* target;
+ ConditionCode condition;
+ if (ccode == kCondEq) {
+ target = not_taken;
+ condition = kCondEq;
+ } else {
+ target = taken;
+ condition = kCondNe;
+ }
+ if (val == 0) {
+ int t_reg = AllocTemp(cu);
+ NewLIR4(cu, kThumb2OrrRRRs, t_reg, low_reg, high_reg, 0);
+ FreeTemp(cu, t_reg);
+ OpCondBranch(cu, condition, taken);
+ return;
+ }
+ OpCmpImmBranch(cu, kCondNe, high_reg, val_hi, target);
+ break;
+ case kCondLt:
+ OpCmpImmBranch(cu, kCondLt, high_reg, val_hi, taken);
+ OpCmpImmBranch(cu, kCondGt, high_reg, val_hi, not_taken);
+ ccode = kCondCc;
+ break;
+ case kCondLe:
+ OpCmpImmBranch(cu, kCondLt, high_reg, val_hi, taken);
+ OpCmpImmBranch(cu, kCondGt, high_reg, val_hi, not_taken);
+ ccode = kCondLs;
+ break;
+ case kCondGt:
+ OpCmpImmBranch(cu, kCondGt, high_reg, val_hi, taken);
+ OpCmpImmBranch(cu, kCondLt, high_reg, val_hi, not_taken);
+ ccode = kCondHi;
+ break;
+ case kCondGe:
+ OpCmpImmBranch(cu, kCondGt, high_reg, val_hi, taken);
+ OpCmpImmBranch(cu, kCondLt, high_reg, val_hi, not_taken);
+ ccode = kCondCs;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected ccode: " << ccode;
+ }
+ OpCmpImmBranch(cu, ccode, low_reg, val_lo, taken);
+}
+
+void ArmCodegen::GenSelect(CompilationUnit* cu, BasicBlock* bb, MIR* mir)
+{
+ RegLocation rl_result;
+ RegLocation rl_src = GetSrc(cu, mir, 0);
+ // Temporary debugging code
+ int dest_sreg = mir->ssa_rep->defs[0];
+ if ((dest_sreg < 0) || (dest_sreg >= cu->num_ssa_regs)) {
+ LOG(INFO) << "Bad target sreg: " << dest_sreg << ", in "
+ << PrettyMethod(cu->method_idx,*cu->dex_file);
+ LOG(INFO) << "at dex offset 0x" << std::hex << mir->offset;
+ LOG(INFO) << "vreg = " << SRegToVReg(cu, dest_sreg);
+ LOG(INFO) << "num uses = " << mir->ssa_rep->num_uses;
+ if (mir->ssa_rep->num_uses == 1) {
+ LOG(INFO) << "CONST case, vals = " << mir->dalvikInsn.vB << ", " << mir->dalvikInsn.vC;
+ } else {
+ LOG(INFO) << "MOVE case, operands = " << mir->ssa_rep->uses[1] << ", "
+ << mir->ssa_rep->uses[2];
+ }
+ CHECK(false) << "Invalid target sreg on Select.";
+ }
+ // End temporary debugging code
+ RegLocation rl_dest = GetDest(cu, mir);
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+ if (mir->ssa_rep->num_uses == 1) {
+ // CONST case
+ int true_val = mir->dalvikInsn.vB;
+ int false_val = mir->dalvikInsn.vC;
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ if ((true_val == 1) && (false_val == 0)) {
+ OpRegRegImm(cu, kOpRsub, rl_result.low_reg, rl_src.low_reg, 1);
+ OpIT(cu, kCondCc, "");
+ LoadConstant(cu, rl_result.low_reg, 0);
+ GenBarrier(cu); // Add a scheduling barrier to keep the IT shadow intact
+ } else if (InexpensiveConstantInt(true_val) && InexpensiveConstantInt(false_val)) {
+ OpRegImm(cu, kOpCmp, rl_src.low_reg, 0);
+ OpIT(cu, kCondEq, "E");
+ LoadConstant(cu, rl_result.low_reg, true_val);
+ LoadConstant(cu, rl_result.low_reg, false_val);
+ GenBarrier(cu); // Add a scheduling barrier to keep the IT shadow intact
+ } else {
+ // Unlikely case - could be tuned.
+ int t_reg1 = AllocTemp(cu);
+ int t_reg2 = AllocTemp(cu);
+ LoadConstant(cu, t_reg1, true_val);
+ LoadConstant(cu, t_reg2, false_val);
+ OpRegImm(cu, kOpCmp, rl_src.low_reg, 0);
+ OpIT(cu, kCondEq, "E");
+ OpRegCopy(cu, rl_result.low_reg, t_reg1);
+ OpRegCopy(cu, rl_result.low_reg, t_reg2);
+ GenBarrier(cu); // Add a scheduling barrier to keep the IT shadow intact
+ }
+ } else {
+ // MOVE case
+ RegLocation rl_true = cu->reg_location[mir->ssa_rep->uses[1]];
+ RegLocation rl_false = cu->reg_location[mir->ssa_rep->uses[2]];
+ rl_true = LoadValue(cu, rl_true, kCoreReg);
+ rl_false = LoadValue(cu, rl_false, kCoreReg);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ OpRegImm(cu, kOpCmp, rl_src.low_reg, 0);
+ OpIT(cu, kCondEq, "E");
+ LIR* l1 = OpRegCopy(cu, rl_result.low_reg, rl_true.low_reg);
+ l1->flags.is_nop = false; // Make sure this instruction isn't optimized away
+ LIR* l2 = OpRegCopy(cu, rl_result.low_reg, rl_false.low_reg);
+ l2->flags.is_nop = false; // Make sure this instruction isn't optimized away
+ GenBarrier(cu); // Add a scheduling barrier to keep the IT shadow intact
+ }
+ StoreValue(cu, rl_dest, rl_result);
+}
+
+void ArmCodegen::GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir)
+{
+ RegLocation rl_src1 = GetSrcWide(cu, mir, 0);
+ RegLocation rl_src2 = GetSrcWide(cu, mir, 2);
+ // Normalize such that if either operand is constant, src2 will be constant.
+ ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
+ if (rl_src1.is_const) {
+ RegLocation rl_temp = rl_src1;
+ rl_src1 = rl_src2;
+ rl_src2 = rl_temp;
+ ccode = FlipComparisonOrder(ccode);
+ }
+ if (rl_src2.is_const) {
+ RegLocation rl_temp = UpdateLocWide(cu, rl_src2);
+ // Do special compare/branch against simple const operand if not already in registers.
+ int64_t val = ConstantValueWide(cu, rl_src2);
+ if ((rl_temp.location != kLocPhysReg) &&
+ ((ModifiedImmediate(Low32Bits(val)) >= 0) && (ModifiedImmediate(High32Bits(val)) >= 0))) {
+ GenFusedLongCmpImmBranch(cu, bb, rl_src1, val, ccode);
+ return;
+ }
+ }
+ LIR* label_list = cu->block_label_list;
+ LIR* taken = &label_list[bb->taken->id];
+ LIR* not_taken = &label_list[bb->fall_through->id];
+ rl_src1 = LoadValueWide(cu, rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(cu, rl_src2, kCoreReg);
+ OpRegReg(cu, kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
+ switch(ccode) {
+ case kCondEq:
+ OpCondBranch(cu, kCondNe, not_taken);
+ break;
+ case kCondNe:
+ OpCondBranch(cu, kCondNe, taken);
+ break;
+ case kCondLt:
+ OpCondBranch(cu, kCondLt, taken);
+ OpCondBranch(cu, kCondGt, not_taken);
+ ccode = kCondCc;
+ break;
+ case kCondLe:
+ OpCondBranch(cu, kCondLt, taken);
+ OpCondBranch(cu, kCondGt, not_taken);
+ ccode = kCondLs;
+ break;
+ case kCondGt:
+ OpCondBranch(cu, kCondGt, taken);
+ OpCondBranch(cu, kCondLt, not_taken);
+ ccode = kCondHi;
+ break;
+ case kCondGe:
+ OpCondBranch(cu, kCondGt, taken);
+ OpCondBranch(cu, kCondLt, not_taken);
+ ccode = kCondCs;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected ccode: " << ccode;
+ }
+ OpRegReg(cu, kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
+ OpCondBranch(cu, ccode, taken);
+}
+
+/*
+ * Generate a register comparison to an immediate and branch. Caller
+ * is responsible for setting branch target field.
+ */
+LIR* ArmCodegen::OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg, int check_value,
+ LIR* target)
+{
+ LIR* branch;
+ int mod_imm;
+ ArmConditionCode arm_cond = ArmConditionEncoding(cond);
+ if ((ARM_LOWREG(reg)) && (check_value == 0) &&
+ ((arm_cond == kArmCondEq) || (arm_cond == kArmCondNe))) {
+ branch = NewLIR2(cu, (arm_cond == kArmCondEq) ? kThumb2Cbz : kThumb2Cbnz,
+ reg, 0);
+ } else {
+ mod_imm = ModifiedImmediate(check_value);
+ if (ARM_LOWREG(reg) && ((check_value & 0xff) == check_value)) {
+ NewLIR2(cu, kThumbCmpRI8, reg, check_value);
+ } else if (mod_imm >= 0) {
+ NewLIR2(cu, kThumb2CmpRI12, reg, mod_imm);
+ } else {
+ int t_reg = AllocTemp(cu);
+ LoadConstant(cu, t_reg, check_value);
+ OpRegReg(cu, kOpCmp, reg, t_reg);
+ }
+ branch = NewLIR2(cu, kThumbBCond, 0, arm_cond);
+ }
+ branch->target = target;
+ return branch;
+}
+
+LIR* ArmCodegen::OpRegCopyNoInsert(CompilationUnit* cu, int r_dest, int r_src)
+{
+ LIR* res;
+ int opcode;
+ if (ARM_FPREG(r_dest) || ARM_FPREG(r_src))
+ return OpFpRegCopy(cu, r_dest, r_src);
+ if (ARM_LOWREG(r_dest) && ARM_LOWREG(r_src))
+ opcode = kThumbMovRR;
+ else if (!ARM_LOWREG(r_dest) && !ARM_LOWREG(r_src))
+ opcode = kThumbMovRR_H2H;
+ else if (ARM_LOWREG(r_dest))
+ opcode = kThumbMovRR_H2L;
+ else
+ opcode = kThumbMovRR_L2H;
+ res = RawLIR(cu, cu->current_dalvik_offset, opcode, r_dest, r_src);
+ if (!(cu->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
+ res->flags.is_nop = true;
+ }
+ return res;
+}
+
+LIR* ArmCodegen::OpRegCopy(CompilationUnit* cu, int r_dest, int r_src)
+{
+ LIR* res = OpRegCopyNoInsert(cu, r_dest, r_src);
+ AppendLIR(cu, res);
+ return res;
+}
+
+void ArmCodegen::OpRegCopyWide(CompilationUnit* cu, int dest_lo, int dest_hi, int src_lo,
+ int src_hi)
+{
+ bool dest_fp = ARM_FPREG(dest_lo) && ARM_FPREG(dest_hi);
+ bool src_fp = ARM_FPREG(src_lo) && ARM_FPREG(src_hi);
+ DCHECK_EQ(ARM_FPREG(src_lo), ARM_FPREG(src_hi));
+ DCHECK_EQ(ARM_FPREG(dest_lo), ARM_FPREG(dest_hi));
+ if (dest_fp) {
+ if (src_fp) {
+ OpRegCopy(cu, S2d(dest_lo, dest_hi), S2d(src_lo, src_hi));
+ } else {
+ NewLIR3(cu, kThumb2Fmdrr, S2d(dest_lo, dest_hi), src_lo, src_hi);
+ }
+ } else {
+ if (src_fp) {
+ NewLIR3(cu, kThumb2Fmrrd, dest_lo, dest_hi, S2d(src_lo, src_hi));
+ } else {
+ // Handle overlap
+ if (src_hi == dest_lo) {
+ OpRegCopy(cu, dest_hi, src_hi);
+ OpRegCopy(cu, dest_lo, src_lo);
+ } else {
+ OpRegCopy(cu, dest_lo, src_lo);
+ OpRegCopy(cu, dest_hi, src_hi);
+ }
+ }
+ }
+}
+
+// Table of magic divisors
+struct MagicTable {
+ uint32_t magic;
+ uint32_t shift;
+ DividePattern pattern;
+};
+
+static const MagicTable magic_table[] = {
+ {0, 0, DivideNone}, // 0
+ {0, 0, DivideNone}, // 1
+ {0, 0, DivideNone}, // 2
+ {0x55555556, 0, Divide3}, // 3
+ {0, 0, DivideNone}, // 4
+ {0x66666667, 1, Divide5}, // 5
+ {0x2AAAAAAB, 0, Divide3}, // 6
+ {0x92492493, 2, Divide7}, // 7
+ {0, 0, DivideNone}, // 8
+ {0x38E38E39, 1, Divide5}, // 9
+ {0x66666667, 2, Divide5}, // 10
+ {0x2E8BA2E9, 1, Divide5}, // 11
+ {0x2AAAAAAB, 1, Divide5}, // 12
+ {0x4EC4EC4F, 2, Divide5}, // 13
+ {0x92492493, 3, Divide7}, // 14
+ {0x88888889, 3, Divide7}, // 15
+};
+
+// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
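+// For example, division by 3 uses magic 0x55555556: for x = 9 the signed 64-bit product
+// 9 * 0x55555556 = 0x300000006, so the high word is 3 and Divide3 yields 3 - (9 >> 31) = 3;
+// for x = -7 the high word is -3 and -3 - (-7 >> 31) = -2, matching truncating division.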
+bool ArmCodegen::SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
+ RegLocation rl_src, RegLocation rl_dest, int lit)
+{
+ if ((lit < 0) || (lit >= static_cast<int>(sizeof(magic_table)/sizeof(magic_table[0])))) {
+ return false;
+ }
+ DividePattern pattern = magic_table[lit].pattern;
+ if (pattern == DivideNone) {
+ return false;
+ }
+ // Tuning: add rem patterns
+ if (dalvik_opcode != Instruction::DIV_INT_LIT8) {
+ return false;
+ }
+
+ int r_magic = AllocTemp(cu);
+ LoadConstant(cu, r_magic, magic_table[lit].magic);
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ int r_hi = AllocTemp(cu);
+ int r_lo = AllocTemp(cu);
+ NewLIR4(cu, kThumb2Smull, r_lo, r_hi, r_magic, rl_src.low_reg);
+ switch(pattern) {
+ case Divide3:
+ OpRegRegRegShift(cu, kOpSub, rl_result.low_reg, r_hi,
+ rl_src.low_reg, EncodeShift(kArmAsr, 31));
+ break;
+ case Divide5:
+ OpRegRegImm(cu, kOpAsr, r_lo, rl_src.low_reg, 31);
+ OpRegRegRegShift(cu, kOpRsub, rl_result.low_reg, r_lo, r_hi,
+ EncodeShift(kArmAsr, magic_table[lit].shift));
+ break;
+ case Divide7:
+ OpRegReg(cu, kOpAdd, r_hi, rl_src.low_reg);
+ OpRegRegImm(cu, kOpAsr, r_lo, rl_src.low_reg, 31);
+ OpRegRegRegShift(cu, kOpRsub, rl_result.low_reg, r_lo, r_hi,
+ EncodeShift(kArmAsr, magic_table[lit].shift));
+ break;
+ default:
+ LOG(FATAL) << "Unexpected pattern: " << pattern;
+ }
+ StoreValue(cu, rl_dest, rl_result);
+ return true;
+}
+
+LIR* ArmCodegen::GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code,
+ int reg1, int base, int offset, ThrowKind kind)
+{
+ LOG(FATAL) << "Unexpected use of GenRegMemCheck for Arm";
+ return NULL;
+}
+
+RegLocation ArmCodegen::GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg1, int lit,
+ bool is_div)
+{
+ LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm";
+ return rl_dest;
+}
+
+RegLocation ArmCodegen::GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg1, int reg2,
+ bool is_div)
+{
+ LOG(FATAL) << "Unexpected use of GenDivRem for Arm";
+ return rl_dest;
+}
+
+bool ArmCodegen::GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min)
+{
+ DCHECK_EQ(cu->instruction_set, kThumb2);
+ RegLocation rl_src1 = info->args[0];
+ RegLocation rl_src2 = info->args[1];
+ rl_src1 = LoadValue(cu, rl_src1, kCoreReg);
+ rl_src2 = LoadValue(cu, rl_src2, kCoreReg);
+ RegLocation rl_dest = InlineTarget(cu, info);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ OpRegReg(cu, kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
+ OpIT(cu, (is_min) ? kCondGt : kCondLt, "E");
+ OpRegReg(cu, kOpMov, rl_result.low_reg, rl_src2.low_reg);
+ OpRegReg(cu, kOpMov, rl_result.low_reg, rl_src1.low_reg);
+ GenBarrier(cu);
+ StoreValue(cu, rl_dest, rl_result);
+ return true;
+}
+
+void ArmCodegen::OpLea(CompilationUnit* cu, int rBase, int reg1, int reg2, int scale, int offset)
+{
+ LOG(FATAL) << "Unexpected use of OpLea for Arm";
+}
+
+void ArmCodegen::OpTlsCmp(CompilationUnit* cu, int offset, int val)
+{
+ LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm";
+}
+
+bool ArmCodegen::GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier) {
+ DCHECK_EQ(cu->instruction_set, kThumb2);
+ // Unused - RegLocation rl_src_unsafe = info->args[0];
+  RegLocation rl_src_obj = info->args[1];  // Object - known non-null
+  RegLocation rl_src_offset = info->args[2];  // long low
+  rl_src_offset.wide = 0;  // ignore high half in info->args[3]
+  RegLocation rl_src_expected = info->args[4];  // int or Object
+  RegLocation rl_src_new_value = info->args[5];  // int or Object
+ RegLocation rl_dest = InlineTarget(cu, info); // boolean place for result
+
+
+ // Release store semantics, get the barrier out of the way. TODO: revisit
+ GenMemBarrier(cu, kStoreLoad);
+
+ RegLocation rl_object = LoadValue(cu, rl_src_obj, kCoreReg);
+ RegLocation rl_new_value = LoadValue(cu, rl_src_new_value, kCoreReg);
+
+ if (need_write_barrier && !IsConstantNullRef(cu, rl_new_value)) {
+ // Mark card for object assuming new value is stored.
+ MarkGCCard(cu, rl_new_value.low_reg, rl_object.low_reg);
+ }
+
+ RegLocation rl_offset = LoadValue(cu, rl_src_offset, kCoreReg);
+
+ int r_ptr = AllocTemp(cu);
+ OpRegRegReg(cu, kOpAdd, r_ptr, rl_object.low_reg, rl_offset.low_reg);
+
+ // Free now unneeded rl_object and rl_offset to give more temps.
+ ClobberSReg(cu, rl_object.s_reg_low);
+ FreeTemp(cu, rl_object.low_reg);
+ ClobberSReg(cu, rl_offset.s_reg_low);
+ FreeTemp(cu, rl_offset.low_reg);
+
+ int r_old_value = AllocTemp(cu);
+ NewLIR3(cu, kThumb2Ldrex, r_old_value, r_ptr, 0); // r_old_value := [r_ptr]
+
+ RegLocation rl_expected = LoadValue(cu, rl_src_expected, kCoreReg);
+
+ // if (r_old_value == rExpected) {
+ // [r_ptr] <- r_new_value && r_result := success ? 0 : 1
+ // r_result ^= 1
+ // } else {
+ // r_result := 0
+ // }
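+  // STREX writes 0 to its result register on success and 1 on failure, so the XOR with 1
+  // in the equal path turns that into a boolean "stored" flag, while the not-equal path
+  // zeroes the result.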
+ OpRegReg(cu, kOpCmp, r_old_value, rl_expected.low_reg);
+ FreeTemp(cu, r_old_value); // Now unneeded.
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ OpIT(cu, kCondEq, "TE");
+ NewLIR4(cu, kThumb2Strex, rl_result.low_reg, rl_new_value.low_reg, r_ptr, 0);
+ FreeTemp(cu, r_ptr); // Now unneeded.
+ OpRegImm(cu, kOpXor, rl_result.low_reg, 1);
+ OpRegReg(cu, kOpXor, rl_result.low_reg, rl_result.low_reg);
+
+ StoreValue(cu, rl_dest, rl_result);
+
+ return true;
+}
+
+LIR* ArmCodegen::OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target)
+{
+ return RawLIR(cu, cu->current_dalvik_offset, kThumb2LdrPcRel12, reg, 0, 0, 0, 0, target);
+}
+
+LIR* ArmCodegen::OpVldm(CompilationUnit* cu, int rBase, int count)
+{
+ return NewLIR3(cu, kThumb2Vldms, rBase, fr0, count);
+}
+
+LIR* ArmCodegen::OpVstm(CompilationUnit* cu, int rBase, int count)
+{
+ return NewLIR3(cu, kThumb2Vstms, rBase, fr0, count);
+}
+
+void ArmCodegen::GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src,
+ RegLocation rl_result, int lit,
+ int first_bit, int second_bit)
+{
+ OpRegRegRegShift(cu, kOpAdd, rl_result.low_reg, rl_src.low_reg, rl_src.low_reg,
+ EncodeShift(kArmLsl, second_bit - first_bit));
+ if (first_bit != 0) {
+ OpRegRegImm(cu, kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
+ }
+}
+
+void ArmCodegen::GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi)
+{
+ int t_reg = AllocTemp(cu);
+ NewLIR4(cu, kThumb2OrrRRRs, t_reg, reg_lo, reg_hi, 0);
+ FreeTemp(cu, t_reg);
+ GenCheck(cu, kCondEq, kThrowDivZero);
+}
+
+// Test suspend flag, return target of taken suspend branch
+LIR* ArmCodegen::OpTestSuspend(CompilationUnit* cu, LIR* target)
+{
+ NewLIR2(cu, kThumbSubRI8, rARM_SUSPEND, 1);
+ return OpCondBranch(cu, (target == NULL) ? kCondEq : kCondNe, target);
+}
+
+// Decrement register and branch on condition
+LIR* ArmCodegen::OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg, LIR* target)
+{
+ // Combine sub & test using sub setflags encoding here
+ NewLIR3(cu, kThumb2SubsRRI12, reg, reg, 1);
+ return OpCondBranch(cu, c_code, target);
+}
+
+void ArmCodegen::GenMemBarrier(CompilationUnit* cu, MemBarrierKind barrier_kind)
+{
+#if ANDROID_SMP != 0
+ int dmb_flavor;
+ // TODO: revisit Arm barrier kinds
+ switch (barrier_kind) {
+ case kLoadStore: dmb_flavor = kSY; break;
+ case kLoadLoad: dmb_flavor = kSY; break;
+ case kStoreStore: dmb_flavor = kST; break;
+ case kStoreLoad: dmb_flavor = kSY; break;
+ default:
+ LOG(FATAL) << "Unexpected MemBarrierKind: " << barrier_kind;
+ dmb_flavor = kSY; // quiet gcc.
+ break;
+ }
+ LIR* dmb = NewLIR1(cu, kThumb2Dmb, dmb_flavor);
+ dmb->def_mask = ENCODE_ALL;
+#endif
+}
+
+void ArmCodegen::GenNegLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+{
+ rl_src = LoadValueWide(cu, rl_src, kCoreReg);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ int z_reg = AllocTemp(cu);
+ LoadConstantNoClobber(cu, z_reg, 0);
+ // Check for destructive overlap
+ if (rl_result.low_reg == rl_src.high_reg) {
+ int t_reg = AllocTemp(cu);
+ OpRegRegReg(cu, kOpSub, rl_result.low_reg, z_reg, rl_src.low_reg);
+ OpRegRegReg(cu, kOpSbc, rl_result.high_reg, z_reg, t_reg);
+ FreeTemp(cu, t_reg);
+ } else {
+ OpRegRegReg(cu, kOpSub, rl_result.low_reg, z_reg, rl_src.low_reg);
+ OpRegRegReg(cu, kOpSbc, rl_result.high_reg, z_reg, rl_src.high_reg);
+ }
+ FreeTemp(cu, z_reg);
+ StoreValueWide(cu, rl_dest, rl_result);
+}
+
+
+/*
+ * Check to see if a result pair has a misaligned overlap with an operand pair. This
+ * is not usual for dx to generate, but it is legal (for now). In a future rev of
+ * dex, we'll want to make this case illegal.
+ */
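+/*
+ * For example, a wide source in Dalvik registers v5/v6 and a wide destination in v4/v5
+ * (or v6/v7) overlap by exactly one vreg and are flagged here.
+ */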
+static bool BadOverlap(CompilationUnit* cu, RegLocation rl_src, RegLocation rl_dest)
+{
+ DCHECK(rl_src.wide);
+ DCHECK(rl_dest.wide);
+ return (abs(SRegToVReg(cu, rl_src.s_reg_low) - SRegToVReg(cu, rl_dest.s_reg_low)) == 1);
+}
+
+void ArmCodegen::GenMulLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2)
+{
+ /*
+ * To pull off inline multiply, we have a worst-case requirement of 8 temporary
+ * registers. Normally for Arm, we get 5. We can get to 6 by including
+ * lr in the temp set. The only problematic case is all operands and result are
+ * distinct, and none have been promoted. In that case, we can succeed by aggressively
+ * freeing operand temp registers after they are no longer needed. All other cases
+ * can proceed normally. We'll just punt on the case of the result having a misaligned
+ * overlap with either operand and send that case to a runtime handler.
+ */
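+  /*
+   * The low 64 bits of the product are (x_lo * y_lo) + ((x_lo * y_hi + x_hi * y_lo) << 32):
+   * UMULL supplies the full x_lo * y_lo, and the two cross terms are folded into the high
+   * result word (doubled with a left shift by one when both operands are the same value).
+   */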
+ RegLocation rl_result;
+ if (BadOverlap(cu, rl_src1, rl_dest) || (BadOverlap(cu, rl_src2, rl_dest))) {
+ int func_offset = ENTRYPOINT_OFFSET(pLmul);
+ FlushAllRegs(cu);
+ CallRuntimeHelperRegLocationRegLocation(cu, func_offset, rl_src1, rl_src2, false);
+ rl_result = GetReturnWide(cu, false);
+ StoreValueWide(cu, rl_dest, rl_result);
+ return;
+ }
+ // Temporarily add LR to the temp pool, and assign it to tmp1
+ MarkTemp(cu, rARM_LR);
+ FreeTemp(cu, rARM_LR);
+ int tmp1 = rARM_LR;
+ LockTemp(cu, rARM_LR);
+
+ rl_src1 = LoadValueWide(cu, rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(cu, rl_src2, kCoreReg);
+
+ bool special_case = true;
+ // If operands are the same, or any pair has been promoted we're not the special case.
+ if ((rl_src1.s_reg_low == rl_src2.s_reg_low) ||
+ (!IsTemp(cu, rl_src1.low_reg) && !IsTemp(cu, rl_src1.high_reg)) ||
+ (!IsTemp(cu, rl_src2.low_reg) && !IsTemp(cu, rl_src2.high_reg))) {
+ special_case = false;
+ }
+ // Tuning: if rl_dest has been promoted and is *not* either operand, could use directly.
+ int res_lo = AllocTemp(cu);
+ int res_hi;
+ if (rl_src1.low_reg == rl_src2.low_reg) {
+ res_hi = AllocTemp(cu);
+ NewLIR3(cu, kThumb2MulRRR, tmp1, rl_src1.low_reg, rl_src1.high_reg);
+ NewLIR4(cu, kThumb2Umull, res_lo, res_hi, rl_src1.low_reg, rl_src1.low_reg);
+ OpRegRegRegShift(cu, kOpAdd, res_hi, res_hi, tmp1, EncodeShift(kArmLsl, 1));
+ } else {
+ // In the special case, all temps are now allocated
+ NewLIR3(cu, kThumb2MulRRR, tmp1, rl_src2.low_reg, rl_src1.high_reg);
+ if (special_case) {
+ DCHECK_NE(rl_src1.low_reg, rl_src2.low_reg);
+ DCHECK_NE(rl_src1.high_reg, rl_src2.high_reg);
+ FreeTemp(cu, rl_src1.high_reg);
+ }
+ res_hi = AllocTemp(cu);
+
+ NewLIR4(cu, kThumb2Umull, res_lo, res_hi, rl_src2.low_reg, rl_src1.low_reg);
+ NewLIR4(cu, kThumb2Mla, tmp1, rl_src1.low_reg, rl_src2.high_reg, tmp1);
+ NewLIR4(cu, kThumb2AddRRR, res_hi, tmp1, res_hi, 0);
+ if (special_case) {
+ FreeTemp(cu, rl_src1.low_reg);
+ Clobber(cu, rl_src1.low_reg);
+ Clobber(cu, rl_src1.high_reg);
+ }
+ }
+ FreeTemp(cu, tmp1);
+ rl_result = GetReturnWide(cu, false); // Just using as a template.
+ rl_result.low_reg = res_lo;
+ rl_result.high_reg = res_hi;
+ StoreValueWide(cu, rl_dest, rl_result);
+ // Now, restore lr to its non-temp status.
+ Clobber(cu, rARM_LR);
+ UnmarkTemp(cu, rARM_LR);
+}
+
+void ArmCodegen::GenAddLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2)
+{
+ LOG(FATAL) << "Unexpected use of GenAddLong for Arm";
+}
+
+void ArmCodegen::GenSubLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2)
+{
+ LOG(FATAL) << "Unexpected use of GenSubLong for Arm";
+}
+
+void ArmCodegen::GenAndLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2)
+{
+ LOG(FATAL) << "Unexpected use of GenAndLong for Arm";
+}
+
+void ArmCodegen::GenOrLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2)
+{
+ LOG(FATAL) << "Unexpected use of GenOrLong for Arm";
+}
+
+void ArmCodegen::GenXorLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2)
+{
+ LOG(FATAL) << "Unexpected use of genXoLong for Arm";
+}
+
+/*
+ * Generate array load
+ */
+void ArmCodegen::GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_dest, int scale)
+{
+ RegisterClass reg_class = oat_reg_class_by_size(size);
+ int len_offset = mirror::Array::LengthOffset().Int32Value();
+ int data_offset;
+ RegLocation rl_result;
+ bool constant_index = rl_index.is_const;
+ rl_array = LoadValue(cu, rl_array, kCoreReg);
+ if (!constant_index) {
+ rl_index = LoadValue(cu, rl_index, kCoreReg);
+ }
+
+ if (rl_dest.wide) {
+ data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
+ } else {
+ data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
+ }
+
+ // If index is constant, just fold it into the data offset
+ if (constant_index) {
+ data_offset += ConstantValue(cu, rl_index) << scale;
+ }
+
+ /* null object? */
+ GenNullCheck(cu, rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+
+ bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
+ int reg_len = INVALID_REG;
+ if (needs_range_check) {
+ reg_len = AllocTemp(cu);
+ /* Get len */
+ LoadWordDisp(cu, rl_array.low_reg, len_offset, reg_len);
+ }
+ if (rl_dest.wide || rl_dest.fp || constant_index) {
+ int reg_ptr;
+ if (constant_index) {
+ reg_ptr = rl_array.low_reg; // NOTE: must not alter reg_ptr in constant case.
+ } else {
+ // No special indexed operation, lea + load w/ displacement
+ reg_ptr = AllocTemp(cu);
+ OpRegRegRegShift(cu, kOpAdd, reg_ptr, rl_array.low_reg, rl_index.low_reg,
+ EncodeShift(kArmLsl, scale));
+ FreeTemp(cu, rl_index.low_reg);
+ }
+ rl_result = EvalLoc(cu, rl_dest, reg_class, true);
+
+ if (needs_range_check) {
+ if (constant_index) {
+ GenImmedCheck(cu, kCondLs, reg_len, ConstantValue(cu, rl_index), kThrowConstantArrayBounds);
+ } else {
+ GenRegRegCheck(cu, kCondLs, reg_len, rl_index.low_reg, kThrowArrayBounds);
+ }
+ FreeTemp(cu, reg_len);
+ }
+ if (rl_dest.wide) {
+ LoadBaseDispWide(cu, reg_ptr, data_offset, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
+ if (!constant_index) {
+ FreeTemp(cu, reg_ptr);
+ }
+ StoreValueWide(cu, rl_dest, rl_result);
+ } else {
+ LoadBaseDisp(cu, reg_ptr, data_offset, rl_result.low_reg, size, INVALID_SREG);
+ if (!constant_index) {
+ FreeTemp(cu, reg_ptr);
+ }
+ StoreValue(cu, rl_dest, rl_result);
+ }
+ } else {
+ // Offset base, then use indexed load
+ int reg_ptr = AllocTemp(cu);
+ OpRegRegImm(cu, kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
+ FreeTemp(cu, rl_array.low_reg);
+ rl_result = EvalLoc(cu, rl_dest, reg_class, true);
+
+ if (needs_range_check) {
+ // TODO: change kCondCS to a more meaningful name, is the sense of
+ // carry-set/clear flipped?
+ GenRegRegCheck(cu, kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+ FreeTemp(cu, reg_len);
+ }
+ LoadBaseIndexed(cu, reg_ptr, rl_index.low_reg, rl_result.low_reg, scale, size);
+ FreeTemp(cu, reg_ptr);
+ StoreValue(cu, rl_dest, rl_result);
+ }
+}
+
+/*
+ * Generate array store
+ *
+ */
+void ArmCodegen::GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_src, int scale)
+{
+ RegisterClass reg_class = oat_reg_class_by_size(size);
+ int len_offset = mirror::Array::LengthOffset().Int32Value();
+ int data_offset;
+ bool constant_index = rl_index.is_const;
+
+ if (rl_src.wide) {
+ data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
+ } else {
+ data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
+ }
+
+ // If index is constant, just fold it into the data offset.
+ if (constant_index) {
+ data_offset += ConstantValue(cu, rl_index) << scale;
+ }
+
+ rl_array = LoadValue(cu, rl_array, kCoreReg);
+ if (!constant_index) {
+ rl_index = LoadValue(cu, rl_index, kCoreReg);
+ }
+
+ int reg_ptr;
+ if (constant_index) {
+ reg_ptr = rl_array.low_reg;
+ } else if (IsTemp(cu, rl_array.low_reg)) {
+ Clobber(cu, rl_array.low_reg);
+ reg_ptr = rl_array.low_reg;
+ } else {
+ reg_ptr = AllocTemp(cu);
+ }
+
+ /* null object? */
+ GenNullCheck(cu, rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+
+ bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
+ int reg_len = INVALID_REG;
+ if (needs_range_check) {
+ reg_len = AllocTemp(cu);
+    // NOTE: max live temps (4) here.
+ /* Get len */
+ LoadWordDisp(cu, rl_array.low_reg, len_offset, reg_len);
+ }
+ /* at this point, reg_ptr points to array, 2 live temps */
+ if (rl_src.wide || rl_src.fp || constant_index) {
+ if (rl_src.wide) {
+ rl_src = LoadValueWide(cu, rl_src, reg_class);
+ } else {
+ rl_src = LoadValue(cu, rl_src, reg_class);
+ }
+ if (!constant_index) {
+ OpRegRegRegShift(cu, kOpAdd, reg_ptr, rl_array.low_reg, rl_index.low_reg,
+ EncodeShift(kArmLsl, scale));
+ }
+ if (needs_range_check) {
+ if (constant_index) {
+ GenImmedCheck(cu, kCondLs, reg_len, ConstantValue(cu, rl_index), kThrowConstantArrayBounds);
+ } else {
+ GenRegRegCheck(cu, kCondLs, reg_len, rl_index.low_reg, kThrowArrayBounds);
+ }
+ FreeTemp(cu, reg_len);
+ }
+
+ if (rl_src.wide) {
+ StoreBaseDispWide(cu, reg_ptr, data_offset, rl_src.low_reg, rl_src.high_reg);
+ } else {
+ StoreBaseDisp(cu, reg_ptr, data_offset, rl_src.low_reg, size);
+ }
+ } else {
+ /* reg_ptr -> array data */
+ OpRegRegImm(cu, kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
+ rl_src = LoadValue(cu, rl_src, reg_class);
+ if (needs_range_check) {
+ GenRegRegCheck(cu, kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+ FreeTemp(cu, reg_len);
+ }
+ StoreBaseIndexed(cu, reg_ptr, rl_index.low_reg, rl_src.low_reg,
+ scale, size);
+ }
+ if (!constant_index) {
+ FreeTemp(cu, reg_ptr);
+ }
+}
+
+/*
+ * Generate array store
+ *
+ */
+void ArmCodegen::GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_src, int scale)
+{
+ int len_offset = mirror::Array::LengthOffset().Int32Value();
+ int data_offset = mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value();
+
+ FlushAllRegs(cu); // Use explicit registers
+ LockCallTemps(cu);
+
+ int r_value = TargetReg(kArg0); // Register holding value
+ int r_array_class = TargetReg(kArg1); // Register holding array's Class
+ int r_array = TargetReg(kArg2); // Register holding array
+ int r_index = TargetReg(kArg3); // Register holding index into array
+
+ LoadValueDirectFixed(cu, rl_array, r_array); // Grab array
+ LoadValueDirectFixed(cu, rl_src, r_value); // Grab value
+ LoadValueDirectFixed(cu, rl_index, r_index); // Grab index
+
+ GenNullCheck(cu, rl_array.s_reg_low, r_array, opt_flags); // NPE?
+
+ // Store of null?
+ LIR* null_value_check = OpCmpImmBranch(cu, kCondEq, r_value, 0, NULL);
+
+ // Get the array's class.
+ LoadWordDisp(cu, r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
+ CallRuntimeHelperRegReg(cu, ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
+ r_array_class, true);
+ // Redo LoadValues in case they didn't survive the call.
+ LoadValueDirectFixed(cu, rl_array, r_array); // Reload array
+ LoadValueDirectFixed(cu, rl_index, r_index); // Reload index
+ LoadValueDirectFixed(cu, rl_src, r_value); // Reload value
+ r_array_class = INVALID_REG;
+
+ // Branch here if value to be stored == null
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+ null_value_check->target = target;
+
+ bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
+ int reg_len = INVALID_REG;
+ if (needs_range_check) {
+ reg_len = TargetReg(kArg1);
+ LoadWordDisp(cu, r_array, len_offset, reg_len); // Get len
+ }
+ /* r_ptr -> array data */
+ int r_ptr = AllocTemp(cu);
+ OpRegRegImm(cu, kOpAdd, r_ptr, r_array, data_offset);
+ if (needs_range_check) {
+ GenRegRegCheck(cu, kCondCs, r_index, reg_len, kThrowArrayBounds);
+ }
+ StoreBaseIndexed(cu, r_ptr, r_index, r_value, scale, kWord);
+ FreeTemp(cu, r_ptr);
+ FreeTemp(cu, r_index);
+ if (!IsConstantNullRef(cu, rl_src)) {
+ MarkGCCard(cu, r_value, r_array);
+ }
+}
+
+void ArmCodegen::GenShiftImmOpLong(CompilationUnit* cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift)
+{
+ rl_src = LoadValueWide(cu, rl_src, kCoreReg);
+ // Per spec, we only care about low 6 bits of shift amount.
+ int shift_amount = ConstantValue(cu, rl_shift) & 0x3f;
+ if (shift_amount == 0) {
+ StoreValueWide(cu, rl_dest, rl_src);
+ return;
+ }
+ if (BadOverlap(cu, rl_src, rl_dest)) {
+ GenShiftOpLong(cu, opcode, rl_dest, rl_src, rl_shift);
+ return;
+ }
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
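+  // For shift amounts 1..31 the wide result combines two word shifts, e.g. for SHL:
+  // result.hi = (src.hi << n) | (src.lo >> (32 - n)) and result.lo = src.lo << n; a shift
+  // of exactly 32 degenerates to a register copy, and larger amounts to a single word
+  // shift, with the other half filled with zero or the sign.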
+ switch(opcode) {
+ case Instruction::SHL_LONG:
+ case Instruction::SHL_LONG_2ADDR:
+ if (shift_amount == 1) {
+ OpRegRegReg(cu, kOpAdd, rl_result.low_reg, rl_src.low_reg, rl_src.low_reg);
+ OpRegRegReg(cu, kOpAdc, rl_result.high_reg, rl_src.high_reg, rl_src.high_reg);
+ } else if (shift_amount == 32) {
+ OpRegCopy(cu, rl_result.high_reg, rl_src.low_reg);
+ LoadConstant(cu, rl_result.low_reg, 0);
+ } else if (shift_amount > 31) {
+ OpRegRegImm(cu, kOpLsl, rl_result.high_reg, rl_src.low_reg, shift_amount - 32);
+ LoadConstant(cu, rl_result.low_reg, 0);
+ } else {
+ OpRegRegImm(cu, kOpLsl, rl_result.high_reg, rl_src.high_reg, shift_amount);
+ OpRegRegRegShift(cu, kOpOr, rl_result.high_reg, rl_result.high_reg, rl_src.low_reg,
+ EncodeShift(kArmLsr, 32 - shift_amount));
+ OpRegRegImm(cu, kOpLsl, rl_result.low_reg, rl_src.low_reg, shift_amount);
+ }
+ break;
+ case Instruction::SHR_LONG:
+ case Instruction::SHR_LONG_2ADDR:
+ if (shift_amount == 32) {
+ OpRegCopy(cu, rl_result.low_reg, rl_src.high_reg);
+ OpRegRegImm(cu, kOpAsr, rl_result.high_reg, rl_src.high_reg, 31);
+ } else if (shift_amount > 31) {
+ OpRegRegImm(cu, kOpAsr, rl_result.low_reg, rl_src.high_reg, shift_amount - 32);
+ OpRegRegImm(cu, kOpAsr, rl_result.high_reg, rl_src.high_reg, 31);
+ } else {
+ int t_reg = AllocTemp(cu);
+ OpRegRegImm(cu, kOpLsr, t_reg, rl_src.low_reg, shift_amount);
+ OpRegRegRegShift(cu, kOpOr, rl_result.low_reg, t_reg, rl_src.high_reg,
+ EncodeShift(kArmLsl, 32 - shift_amount));
+ FreeTemp(cu, t_reg);
+ OpRegRegImm(cu, kOpAsr, rl_result.high_reg, rl_src.high_reg, shift_amount);
+ }
+ break;
+ case Instruction::USHR_LONG:
+ case Instruction::USHR_LONG_2ADDR:
+ if (shift_amount == 32) {
+ OpRegCopy(cu, rl_result.low_reg, rl_src.high_reg);
+ LoadConstant(cu, rl_result.high_reg, 0);
+ } else if (shift_amount > 31) {
+ OpRegRegImm(cu, kOpLsr, rl_result.low_reg, rl_src.high_reg, shift_amount - 32);
+ LoadConstant(cu, rl_result.high_reg, 0);
+ } else {
+ int t_reg = AllocTemp(cu);
+ OpRegRegImm(cu, kOpLsr, t_reg, rl_src.low_reg, shift_amount);
+ OpRegRegRegShift(cu, kOpOr, rl_result.low_reg, t_reg, rl_src.high_reg,
+ EncodeShift(kArmLsl, 32 - shift_amount));
+ FreeTemp(cu, t_reg);
+ OpRegRegImm(cu, kOpLsr, rl_result.high_reg, rl_src.high_reg, shift_amount);
+ }
+ break;
+ default:
+ LOG(FATAL) << "Unexpected case";
+ }
+ StoreValueWide(cu, rl_dest, rl_result);
+}
+
+void ArmCodegen::GenArithImmOpLong(CompilationUnit* cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
+{
+ if ((opcode == Instruction::SUB_LONG_2ADDR) || (opcode == Instruction::SUB_LONG)) {
+ if (!rl_src2.is_const) {
+ // Don't bother with special handling for subtract from immediate.
+ GenArithOpLong(cu, opcode, rl_dest, rl_src1, rl_src2);
+ return;
+ }
+ } else {
+ // Normalize
+ if (!rl_src2.is_const) {
+ DCHECK(rl_src1.is_const);
+ RegLocation rl_temp = rl_src1;
+ rl_src1 = rl_src2;
+ rl_src2 = rl_temp;
+ }
+ }
+ if (BadOverlap(cu, rl_src1, rl_dest)) {
+ GenArithOpLong(cu, opcode, rl_dest, rl_src1, rl_src2);
+ return;
+ }
+ DCHECK(rl_src2.is_const);
+ int64_t val = ConstantValueWide(cu, rl_src2);
+ uint32_t val_lo = Low32Bits(val);
+ uint32_t val_hi = High32Bits(val);
+ int32_t mod_imm_lo = ModifiedImmediate(val_lo);
+ int32_t mod_imm_hi = ModifiedImmediate(val_hi);
+
+ // Only a subset of add/sub immediate instructions set carry - so bail if we don't fit
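+  // (For example, 0x12345678 in either half has no Thumb2 modified-immediate encoding;
+  // ModifiedImmediate() then returns a negative value and we take the generic
+  // register-register path instead.)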
+ switch(opcode) {
+ case Instruction::ADD_LONG:
+ case Instruction::ADD_LONG_2ADDR:
+ case Instruction::SUB_LONG:
+ case Instruction::SUB_LONG_2ADDR:
+ if ((mod_imm_lo < 0) || (mod_imm_hi < 0)) {
+ GenArithOpLong(cu, opcode, rl_dest, rl_src1, rl_src2);
+ return;
+ }
+ break;
+ default:
+ break;
+ }
+ rl_src1 = LoadValueWide(cu, rl_src1, kCoreReg);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ // NOTE: once we've done the EvalLoc on dest, we can no longer bail.
+ switch (opcode) {
+ case Instruction::ADD_LONG:
+ case Instruction::ADD_LONG_2ADDR:
+ NewLIR3(cu, kThumb2AddRRI8, rl_result.low_reg, rl_src1.low_reg, mod_imm_lo);
+ NewLIR3(cu, kThumb2AdcRRI8, rl_result.high_reg, rl_src1.high_reg, mod_imm_hi);
+ break;
+ case Instruction::OR_LONG:
+ case Instruction::OR_LONG_2ADDR:
+ if ((val_lo != 0) || (rl_result.low_reg != rl_src1.low_reg)) {
+ OpRegRegImm(cu, kOpOr, rl_result.low_reg, rl_src1.low_reg, val_lo);
+ }
+ if ((val_hi != 0) || (rl_result.high_reg != rl_src1.high_reg)) {
+ OpRegRegImm(cu, kOpOr, rl_result.high_reg, rl_src1.high_reg, val_hi);
+ }
+ break;
+ case Instruction::XOR_LONG:
+ case Instruction::XOR_LONG_2ADDR:
+ OpRegRegImm(cu, kOpXor, rl_result.low_reg, rl_src1.low_reg, val_lo);
+ OpRegRegImm(cu, kOpXor, rl_result.high_reg, rl_src1.high_reg, val_hi);
+ break;
+ case Instruction::AND_LONG:
+ case Instruction::AND_LONG_2ADDR:
+ if ((val_lo != 0xffffffff) || (rl_result.low_reg != rl_src1.low_reg)) {
+ OpRegRegImm(cu, kOpAnd, rl_result.low_reg, rl_src1.low_reg, val_lo);
+ }
+ if ((val_hi != 0xffffffff) || (rl_result.high_reg != rl_src1.high_reg)) {
+ OpRegRegImm(cu, kOpAnd, rl_result.high_reg, rl_src1.high_reg, val_hi);
+ }
+ break;
+ case Instruction::SUB_LONG_2ADDR:
+ case Instruction::SUB_LONG:
+ NewLIR3(cu, kThumb2SubRRI8, rl_result.low_reg, rl_src1.low_reg, mod_imm_lo);
+ NewLIR3(cu, kThumb2SbcRRI8, rl_result.high_reg, rl_src1.high_reg, mod_imm_hi);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected opcode " << opcode;
+ }
+ StoreValueWide(cu, rl_dest, rl_result);
+}
+
+} // namespace art
diff --git a/src/compiler/dex/quick/arm/target_arm.cc b/src/compiler/dex/quick/arm/target_arm.cc
new file mode 100644
index 0000000..f03e07c
--- /dev/null
+++ b/src/compiler/dex/quick/arm/target_arm.cc
@@ -0,0 +1,765 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <string>
+
+#include "arm_lir.h"
+#include "codegen_arm.h"
+#include "compiler/dex/quick/codegen_util.h"
+#include "compiler/dex/compiler_internals.h"
+#include "compiler/dex/quick/ralloc_util.h"
+
+namespace art {
+
+static int core_regs[] = {r0, r1, r2, r3, rARM_SUSPEND, r5, r6, r7, r8, rARM_SELF, r10,
+ r11, r12, rARM_SP, rARM_LR, rARM_PC};
+static int ReservedRegs[] = {rARM_SUSPEND, rARM_SELF, rARM_SP, rARM_LR, rARM_PC};
+static int FpRegs[] = {fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
+ fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15,
+ fr16, fr17, fr18, fr19, fr20, fr21, fr22, fr23,
+ fr24, fr25, fr26, fr27, fr28, fr29, fr30, fr31};
+static int core_temps[] = {r0, r1, r2, r3, r12};
+static int fp_temps[] = {fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
+ fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15};
+
+RegLocation ArmCodegen::LocCReturn()
+{
+ RegLocation res = ARM_LOC_C_RETURN;
+ return res;
+}
+
+RegLocation ArmCodegen::LocCReturnWide()
+{
+ RegLocation res = ARM_LOC_C_RETURN_WIDE;
+ return res;
+}
+
+RegLocation ArmCodegen::LocCReturnFloat()
+{
+ RegLocation res = ARM_LOC_C_RETURN_FLOAT;
+ return res;
+}
+
+RegLocation ArmCodegen::LocCReturnDouble()
+{
+ RegLocation res = ARM_LOC_C_RETURN_DOUBLE;
+ return res;
+}
+
+// Return a target-dependent special register.
+int ArmCodegen::TargetReg(SpecialTargetRegister reg) {
+ int res = INVALID_REG;
+ switch (reg) {
+ case kSelf: res = rARM_SELF; break;
+ case kSuspend: res = rARM_SUSPEND; break;
+ case kLr: res = rARM_LR; break;
+ case kPc: res = rARM_PC; break;
+ case kSp: res = rARM_SP; break;
+ case kArg0: res = rARM_ARG0; break;
+ case kArg1: res = rARM_ARG1; break;
+ case kArg2: res = rARM_ARG2; break;
+ case kArg3: res = rARM_ARG3; break;
+ case kFArg0: res = rARM_FARG0; break;
+ case kFArg1: res = rARM_FARG1; break;
+ case kFArg2: res = rARM_FARG2; break;
+ case kFArg3: res = rARM_FARG3; break;
+ case kRet0: res = rARM_RET0; break;
+ case kRet1: res = rARM_RET1; break;
+ case kInvokeTgt: res = rARM_INVOKE_TGT; break;
+ case kCount: res = rARM_COUNT; break;
+ }
+ return res;
+}
+
+
+// Create a double from a pair of singles.
+int ArmCodegen::S2d(int low_reg, int high_reg)
+{
+ return ARM_S2D(low_reg, high_reg);
+}
+
+// Return mask to strip off fp reg flags and bias.
+uint32_t ArmCodegen::FpRegMask()
+{
+ return ARM_FP_REG_MASK;
+}
+
+// True if both regs single, both core or both double.
+bool ArmCodegen::SameRegType(int reg1, int reg2)
+{
+ return (ARM_REGTYPE(reg1) == ARM_REGTYPE(reg2));
+}
+
+/*
+ * Decode the register id.
+ */
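+/*
+ * Core registers occupy bits 0-15 of the mask and single-precision FP registers start at
+ * bit kArmFPReg0 (16); the two-bit seed for a double register marks both of the adjacent
+ * single-precision positions it aliases.
+ */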
+uint64_t ArmCodegen::GetRegMaskCommon(CompilationUnit* cu, int reg)
+{
+ uint64_t seed;
+ int shift;
+ int reg_id;
+
+
+ reg_id = reg & 0x1f;
+ /* Each double register is equal to a pair of single-precision FP registers */
+ seed = ARM_DOUBLEREG(reg) ? 3 : 1;
+ /* FP register starts at bit position 16 */
+ shift = ARM_FPREG(reg) ? kArmFPReg0 : 0;
+ /* Expand the double register id into single offset */
+ shift += reg_id;
+ return (seed << shift);
+}
+
+uint64_t ArmCodegen::GetPCUseDefEncoding()
+{
+ return ENCODE_ARM_REG_PC;
+}
+
+void ArmCodegen::SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir)
+{
+ DCHECK_EQ(cu->instruction_set, kThumb2);
+
+ // Thumb2 specific setup
+ uint64_t flags = ArmCodegen::EncodingMap[lir->opcode].flags;
+ int opcode = lir->opcode;
+
+ if (flags & REG_DEF_SP) {
+ lir->def_mask |= ENCODE_ARM_REG_SP;
+ }
+
+ if (flags & REG_USE_SP) {
+ lir->use_mask |= ENCODE_ARM_REG_SP;
+ }
+
+ if (flags & REG_DEF_LIST0) {
+ lir->def_mask |= ENCODE_ARM_REG_LIST(lir->operands[0]);
+ }
+
+ if (flags & REG_DEF_LIST1) {
+ lir->def_mask |= ENCODE_ARM_REG_LIST(lir->operands[1]);
+ }
+
+ if (flags & REG_DEF_FPCS_LIST0) {
+ lir->def_mask |= ENCODE_ARM_REG_FPCS_LIST(lir->operands[0]);
+ }
+
+ if (flags & REG_DEF_FPCS_LIST2) {
+ for (int i = 0; i < lir->operands[2]; i++) {
+ SetupRegMask(cu, &lir->def_mask, lir->operands[1] + i);
+ }
+ }
+
+ if (flags & REG_USE_PC) {
+ lir->use_mask |= ENCODE_ARM_REG_PC;
+ }
+
+ /* Conservatively treat the IT block */
+ if (flags & IS_IT) {
+ lir->def_mask = ENCODE_ALL;
+ }
+
+ if (flags & REG_USE_LIST0) {
+ lir->use_mask |= ENCODE_ARM_REG_LIST(lir->operands[0]);
+ }
+
+ if (flags & REG_USE_LIST1) {
+ lir->use_mask |= ENCODE_ARM_REG_LIST(lir->operands[1]);
+ }
+
+ if (flags & REG_USE_FPCS_LIST0) {
+ lir->use_mask |= ENCODE_ARM_REG_FPCS_LIST(lir->operands[0]);
+ }
+
+ if (flags & REG_USE_FPCS_LIST2) {
+ for (int i = 0; i < lir->operands[2]; i++) {
+ SetupRegMask(cu, &lir->use_mask, lir->operands[1] + i);
+ }
+ }
+ /* Fixup for kThumbPush/lr and kThumbPop/pc */
+ if (opcode == kThumbPush || opcode == kThumbPop) {
+ uint64_t r8Mask = GetRegMaskCommon(cu, r8);
+ if ((opcode == kThumbPush) && (lir->use_mask & r8Mask)) {
+ lir->use_mask &= ~r8Mask;
+ lir->use_mask |= ENCODE_ARM_REG_LR;
+ } else if ((opcode == kThumbPop) && (lir->def_mask & r8Mask)) {
+ lir->def_mask &= ~r8Mask;
+ lir->def_mask |= ENCODE_ARM_REG_PC;
+ }
+ }
+ if (flags & REG_DEF_LR) {
+ lir->def_mask |= ENCODE_ARM_REG_LR;
+ }
+}
+
+ArmConditionCode ArmCodegen::ArmConditionEncoding(ConditionCode ccode)
+{
+ ArmConditionCode res;
+ switch (ccode) {
+ case kCondEq: res = kArmCondEq; break;
+ case kCondNe: res = kArmCondNe; break;
+ case kCondCs: res = kArmCondCs; break;
+ case kCondCc: res = kArmCondCc; break;
+ case kCondMi: res = kArmCondMi; break;
+ case kCondPl: res = kArmCondPl; break;
+ case kCondVs: res = kArmCondVs; break;
+ case kCondVc: res = kArmCondVc; break;
+ case kCondHi: res = kArmCondHi; break;
+ case kCondLs: res = kArmCondLs; break;
+ case kCondGe: res = kArmCondGe; break;
+ case kCondLt: res = kArmCondLt; break;
+ case kCondGt: res = kArmCondGt; break;
+ case kCondLe: res = kArmCondLe; break;
+ case kCondAl: res = kArmCondAl; break;
+ case kCondNv: res = kArmCondNv; break;
+ default:
+ LOG(FATAL) << "Bad condition code " << ccode;
+ res = static_cast<ArmConditionCode>(0); // Quiet gcc
+ }
+ return res;
+}
+
+static const char* core_reg_names[16] = {
+ "r0",
+ "r1",
+ "r2",
+ "r3",
+ "r4",
+ "r5",
+ "r6",
+ "r7",
+ "r8",
+ "rSELF",
+ "r10",
+ "r11",
+ "r12",
+ "sp",
+ "lr",
+ "pc",
+};
+
+
+static const char* shift_names[4] = {
+ "lsl",
+ "lsr",
+ "asr",
+ "ror"};
+
+/* Decode and print an ARM register list */
+static char* DecodeRegList(int opcode, int vector, char* buf)
+{
+ int i;
+ bool printed = false;
+ buf[0] = 0;
+ for (i = 0; i < 16; i++, vector >>= 1) {
+ if (vector & 0x1) {
+ int reg_id = i;
+ if (opcode == kThumbPush && i == 8) {
+ reg_id = r14lr;
+ } else if (opcode == kThumbPop && i == 8) {
+ reg_id = r15pc;
+ }
+ if (printed) {
+ sprintf(buf + strlen(buf), ", r%d", reg_id);
+ } else {
+ printed = true;
+ sprintf(buf, "r%d", reg_id);
+ }
+ }
+ }
+ return buf;
+}
+
+static char* DecodeFPCSRegList(int count, int base, char* buf)
+{
+ sprintf(buf, "s%d", base);
+ for (int i = 1; i < count; i++) {
+ sprintf(buf + strlen(buf), ", s%d",base + i);
+ }
+ return buf;
+}
+
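+/*
+ * Expand a 12-bit Thumb2 modified-immediate encoding to its 32-bit value for disassembly.
+ * Mode 0 is the byte itself and modes 1-3 replicate it, e.g. 0x1AB expands to 0x00AB00AB
+ * and 0x2AB to 0xAB00AB00; higher modes take an 8-bit pattern with an implied leading one
+ * and rotate it right by the amount held in the upper bits.
+ */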
+static int ExpandImmediate(int value)
+{
+ int mode = (value & 0xf00) >> 8;
+ uint32_t bits = value & 0xff;
+ switch (mode) {
+ case 0:
+ return bits;
+ case 1:
+ return (bits << 16) | bits;
+ case 2:
+ return (bits << 24) | (bits << 8);
+ case 3:
+ return (bits << 24) | (bits << 16) | (bits << 8) | bits;
+ default:
+ break;
+ }
+ bits = (bits | 0x80) << 24;
+ return bits >> (((value & 0xf80) >> 7) - 8);
+}
+
+const char* cc_names[] = {"eq","ne","cs","cc","mi","pl","vs","vc",
+ "hi","ls","ge","lt","gt","le","al","nv"};
+/*
+ * Interpret a format string and build a string no longer than size
+ * See format key in Assemble.c.
+ */
+std::string ArmCodegen::BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr)
+{
+ std::string buf;
+ int i;
+ const char* fmt_end = &fmt[strlen(fmt)];
+ char tbuf[256];
+ const char* name;
+ char nc;
+ while (fmt < fmt_end) {
+ int operand;
+ if (*fmt == '!') {
+ fmt++;
+ DCHECK_LT(fmt, fmt_end);
+ nc = *fmt++;
+ if (nc=='!') {
+ strcpy(tbuf, "!");
+ } else {
+ DCHECK_LT(fmt, fmt_end);
+ DCHECK_LT(static_cast<unsigned>(nc-'0'), 4U);
+ operand = lir->operands[nc-'0'];
+ switch (*fmt++) {
+ case 'H':
+ if (operand != 0) {
+ sprintf(tbuf, ", %s %d",shift_names[operand & 0x3], operand >> 2);
+ } else {
+ strcpy(tbuf,"");
+ }
+ break;
+ case 'B':
+ switch (operand) {
+ case kSY:
+ name = "sy";
+ break;
+ case kST:
+ name = "st";
+ break;
+ case kISH:
+ name = "ish";
+ break;
+ case kISHST:
+ name = "ishst";
+ break;
+ case kNSH:
+ name = "nsh";
+ break;
+ case kNSHST:
+ name = "shst";
+ break;
+ default:
+ name = "DecodeError2";
+ break;
+ }
+ strcpy(tbuf, name);
+ break;
+ case 'b':
+ strcpy(tbuf,"0000");
+ for (i=3; i>= 0; i--) {
+ tbuf[i] += operand & 1;
+ operand >>= 1;
+ }
+ break;
+ case 'n':
+ operand = ~ExpandImmediate(operand);
+ sprintf(tbuf,"%d [%#x]", operand, operand);
+ break;
+ case 'm':
+ operand = ExpandImmediate(operand);
+ sprintf(tbuf,"%d [%#x]", operand, operand);
+ break;
+ case 's':
+ sprintf(tbuf,"s%d",operand & ARM_FP_REG_MASK);
+ break;
+ case 'S':
+ sprintf(tbuf,"d%d",(operand & ARM_FP_REG_MASK) >> 1);
+ break;
+ case 'h':
+ sprintf(tbuf,"%04x", operand);
+ break;
+ case 'M':
+ case 'd':
+ sprintf(tbuf,"%d", operand);
+ break;
+ case 'C':
+ DCHECK_LT(operand, static_cast<int>(
+ sizeof(core_reg_names)/sizeof(core_reg_names[0])));
+ sprintf(tbuf,"%s",core_reg_names[operand]);
+ break;
+ case 'E':
+ sprintf(tbuf,"%d", operand*4);
+ break;
+ case 'F':
+ sprintf(tbuf,"%d", operand*2);
+ break;
+ case 'c':
+ strcpy(tbuf, cc_names[operand]);
+ break;
+ case 't':
+ sprintf(tbuf,"0x%08x (L%p)",
+ reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4 +
+ (operand << 1),
+ lir->target);
+ break;
+ case 'u': {
+ int offset_1 = lir->operands[0];
+ int offset_2 = NEXT_LIR(lir)->operands[0];
+ uintptr_t target =
+ (((reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4) &
+ ~3) + (offset_1 << 21 >> 9) + (offset_2 << 1)) &
+ 0xfffffffc;
+ sprintf(tbuf, "%p", reinterpret_cast<void *>(target));
+ break;
+ }
+
+ /* Nothing to print for BLX_2 */
+ case 'v':
+ strcpy(tbuf, "see above");
+ break;
+ case 'R':
+ DecodeRegList(lir->opcode, operand, tbuf);
+ break;
+ case 'P':
+ DecodeFPCSRegList(operand, 16, tbuf);
+ break;
+ case 'Q':
+ DecodeFPCSRegList(operand, 0, tbuf);
+ break;
+ default:
+ strcpy(tbuf,"DecodeError1");
+ break;
+ }
+ buf += tbuf;
+ }
+ } else {
+ buf += *fmt++;
+ }
+ }
+ return buf;
+}
+
+void ArmCodegen::DumpResourceMask(LIR* arm_lir, uint64_t mask, const char* prefix)
+{
+ char buf[256];
+ buf[0] = 0;
+
+ if (mask == ENCODE_ALL) {
+ strcpy(buf, "all");
+ } else {
+ char num[8];
+ int i;
+
+ for (i = 0; i < kArmRegEnd; i++) {
+ if (mask & (1ULL << i)) {
+ sprintf(num, "%d ", i);
+ strcat(buf, num);
+ }
+ }
+
+ if (mask & ENCODE_CCODE) {
+ strcat(buf, "cc ");
+ }
+ if (mask & ENCODE_FP_STATUS) {
+ strcat(buf, "fpcc ");
+ }
+
+ /* Memory bits */
+ if (arm_lir && (mask & ENCODE_DALVIK_REG)) {
+ sprintf(buf + strlen(buf), "dr%d%s", arm_lir->alias_info & 0xffff,
+ (arm_lir->alias_info & 0x80000000) ? "(+1)" : "");
+ }
+ if (mask & ENCODE_LITERAL) {
+ strcat(buf, "lit ");
+ }
+
+ if (mask & ENCODE_HEAP_REF) {
+ strcat(buf, "heap ");
+ }
+ if (mask & ENCODE_MUST_NOT_ALIAS) {
+ strcat(buf, "noalias ");
+ }
+ }
+ if (buf[0]) {
+ LOG(INFO) << prefix << ": " << buf;
+ }
+}
+
+bool ArmCodegen::IsUnconditionalBranch(LIR* lir)
+{
+ return ((lir->opcode == kThumbBUncond) || (lir->opcode == kThumb2BUncond));
+}
+
+bool InitArmCodegen(CompilationUnit* cu)
+{
+ cu->cg.reset(new ArmCodegen());
+ for (int i = 0; i < kArmLast; i++) {
+ if (ArmCodegen::EncodingMap[i].opcode != i) {
+ LOG(FATAL) << "Encoding order for " << ArmCodegen::EncodingMap[i].name
+ << " is wrong: expecting " << i << ", seeing "
+ << static_cast<int>(ArmCodegen::EncodingMap[i].opcode);
+ }
+ }
+ return true;
+}
+
+/*
+ * Alloc a pair of core registers, or a double. Low reg in low byte,
+ * high reg in next byte.
+ */
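+// For example, a pair of r1 (1) and r2 (2) is returned as 0x0201.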
+int ArmCodegen::AllocTypedTempPair(CompilationUnit* cu, bool fp_hint, int reg_class)
+{
+ int high_reg;
+ int low_reg;
+ int res = 0;
+
+ if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
+ low_reg = AllocTempDouble(cu);
+ high_reg = low_reg + 1;
+ } else {
+ low_reg = AllocTemp(cu);
+ high_reg = AllocTemp(cu);
+ }
+ res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
+ return res;
+}
+
+int ArmCodegen::AllocTypedTemp(CompilationUnit* cu, bool fp_hint, int reg_class)
+{
+ if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg))
+ return AllocTempFloat(cu);
+ return AllocTemp(cu);
+}
+
+void ArmCodegen::CompilerInitializeRegAlloc(CompilationUnit* cu)
+{
+ int num_regs = sizeof(core_regs)/sizeof(*core_regs);
+ int num_reserved = sizeof(ReservedRegs)/sizeof(*ReservedRegs);
+ int num_temps = sizeof(core_temps)/sizeof(*core_temps);
+ int num_fp_regs = sizeof(FpRegs)/sizeof(*FpRegs);
+ int num_fp_temps = sizeof(fp_temps)/sizeof(*fp_temps);
+ RegisterPool *pool =
+ static_cast<RegisterPool*>(NewMem(cu, sizeof(*pool), true, kAllocRegAlloc));
+ cu->reg_pool = pool;
+ pool->num_core_regs = num_regs;
+ pool->core_regs = reinterpret_cast<RegisterInfo*>
+ (NewMem(cu, num_regs * sizeof(*cu->reg_pool->core_regs), true, kAllocRegAlloc));
+ pool->num_fp_regs = num_fp_regs;
+ pool->FPRegs = static_cast<RegisterInfo*>
+ (NewMem(cu, num_fp_regs * sizeof(*cu->reg_pool->FPRegs), true, kAllocRegAlloc));
+ CompilerInitPool(pool->core_regs, core_regs, pool->num_core_regs);
+ CompilerInitPool(pool->FPRegs, FpRegs, pool->num_fp_regs);
+ // Keep special registers from being allocated
+ for (int i = 0; i < num_reserved; i++) {
+ if (NO_SUSPEND && (ReservedRegs[i] == rARM_SUSPEND)) {
+      // To measure the cost of suspend checks, don't reserve the suspend register.
+ continue;
+ }
+ MarkInUse(cu, ReservedRegs[i]);
+ }
+ // Mark temp regs - all others not in use can be used for promotion
+ for (int i = 0; i < num_temps; i++) {
+ MarkTemp(cu, core_temps[i]);
+ }
+ for (int i = 0; i < num_fp_temps; i++) {
+ MarkTemp(cu, fp_temps[i]);
+ }
+
+ // Start allocation at r2 in an attempt to avoid clobbering return values
+ pool->next_core_reg = r2;
+}
+
+void ArmCodegen::FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep,
+ RegLocation rl_free)
+{
+ if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) &&
+ (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) {
+ // No overlap, free both
+ FreeTemp(cu, rl_free.low_reg);
+ FreeTemp(cu, rl_free.high_reg);
+ }
+}
+/*
+ * TUNING: is leaf? Can't just use "has_invoke" to determine as some
+ * instructions might call out to C/assembly helper functions. Until
+ * machinery is in place, always spill lr.
+ */
+
+void ArmCodegen::AdjustSpillMask(CompilationUnit* cu)
+{
+ cu->core_spill_mask |= (1 << rARM_LR);
+ cu->num_core_spills++;
+}
+
+/*
+ * Mark a callee-save fp register as promoted. Note that
+ * vpush/vpop uses contiguous register lists so we must
+ * include any holes in the mask. Associate holes with
+ * Dalvik register INVALID_VREG (0xFFFFU).
+ */
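+/*
+ * Example (a sketch): if the first promoted single lands at index 4 of the
+ * callee-save range, indices 0-3 are padded with INVALID_VREG and the
+ * resulting fp_spill_mask covers five contiguous registers starting at
+ * ARM_FP_CALLEE_SAVE_BASE.
+ */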
+void ArmCodegen::MarkPreservedSingle(CompilationUnit* cu, int v_reg, int reg)
+{
+ DCHECK_GE(reg, ARM_FP_REG_MASK + ARM_FP_CALLEE_SAVE_BASE);
+ reg = (reg & ARM_FP_REG_MASK) - ARM_FP_CALLEE_SAVE_BASE;
+ // Ensure fp_vmap_table is large enough
+ int table_size = cu->fp_vmap_table.size();
+ for (int i = table_size; i < (reg + 1); i++) {
+ cu->fp_vmap_table.push_back(INVALID_VREG);
+ }
+ // Add the current mapping
+ cu->fp_vmap_table[reg] = v_reg;
+ // Size of fp_vmap_table is high-water mark, use to set mask
+ cu->num_fp_spills = cu->fp_vmap_table.size();
+ cu->fp_spill_mask = ((1 << cu->num_fp_spills) - 1) << ARM_FP_CALLEE_SAVE_BASE;
+}
+
+void ArmCodegen::FlushRegWide(CompilationUnit* cu, int reg1, int reg2)
+{
+ RegisterInfo* info1 = GetRegInfo(cu, reg1);
+ RegisterInfo* info2 = GetRegInfo(cu, reg2);
+ DCHECK(info1 && info2 && info1->pair && info2->pair &&
+ (info1->partner == info2->reg) &&
+ (info2->partner == info1->reg));
+ if ((info1->live && info1->dirty) || (info2->live && info2->dirty)) {
+ if (!(info1->is_temp && info2->is_temp)) {
+ /* Should not happen. If it does, there's a problem in eval_loc */
+ LOG(FATAL) << "Long half-temp, half-promoted";
+ }
+
+ info1->dirty = false;
+ info2->dirty = false;
+ if (SRegToVReg(cu, info2->s_reg) <
+ SRegToVReg(cu, info1->s_reg))
+ info1 = info2;
+ int v_reg = SRegToVReg(cu, info1->s_reg);
+ StoreBaseDispWide(cu, rARM_SP, VRegOffset(cu, v_reg), info1->reg, info1->partner);
+ }
+}
+
+void ArmCodegen::FlushReg(CompilationUnit* cu, int reg)
+{
+ RegisterInfo* info = GetRegInfo(cu, reg);
+ if (info->live && info->dirty) {
+ info->dirty = false;
+ int v_reg = SRegToVReg(cu, info->s_reg);
+ StoreBaseDisp(cu, rARM_SP, VRegOffset(cu, v_reg), reg, kWord);
+ }
+}
+
+/* Give access to the target-dependent FP register encoding to common code */
+bool ArmCodegen::IsFpReg(int reg) {
+ return ARM_FPREG(reg);
+}
+
+/* Clobber all regs that might be used by an external C call */
+void ArmCodegen::ClobberCalleeSave(CompilationUnit *cu)
+{
+ Clobber(cu, r0);
+ Clobber(cu, r1);
+ Clobber(cu, r2);
+ Clobber(cu, r3);
+ Clobber(cu, r12);
+ Clobber(cu, r14lr);
+ Clobber(cu, fr0);
+ Clobber(cu, fr1);
+ Clobber(cu, fr2);
+ Clobber(cu, fr3);
+ Clobber(cu, fr4);
+ Clobber(cu, fr5);
+ Clobber(cu, fr6);
+ Clobber(cu, fr7);
+ Clobber(cu, fr8);
+ Clobber(cu, fr9);
+ Clobber(cu, fr10);
+ Clobber(cu, fr11);
+ Clobber(cu, fr12);
+ Clobber(cu, fr13);
+ Clobber(cu, fr14);
+ Clobber(cu, fr15);
+}
+
+RegLocation ArmCodegen::GetReturnWideAlt(CompilationUnit* cu)
+{
+ RegLocation res = LocCReturnWide();
+ res.low_reg = r2;
+ res.high_reg = r3;
+ Clobber(cu, r2);
+ Clobber(cu, r3);
+ MarkInUse(cu, r2);
+ MarkInUse(cu, r3);
+ MarkPair(cu, res.low_reg, res.high_reg);
+ return res;
+}
+
+RegLocation ArmCodegen::GetReturnAlt(CompilationUnit* cu)
+{
+ RegLocation res = LocCReturn();
+ res.low_reg = r1;
+ Clobber(cu, r1);
+ MarkInUse(cu, r1);
+ return res;
+}
+
+RegisterInfo* ArmCodegen::GetRegInfo(CompilationUnit* cu, int reg)
+{
+ return ARM_FPREG(reg) ? &cu->reg_pool->FPRegs[reg & ARM_FP_REG_MASK]
+ : &cu->reg_pool->core_regs[reg];
+}
+
+/* To be used when explicitly managing register use */
+void ArmCodegen::LockCallTemps(CompilationUnit* cu)
+{
+ LockTemp(cu, r0);
+ LockTemp(cu, r1);
+ LockTemp(cu, r2);
+ LockTemp(cu, r3);
+}
+
+/* To be used when explicitly managing register use */
+void ArmCodegen::FreeCallTemps(CompilationUnit* cu)
+{
+ FreeTemp(cu, r0);
+ FreeTemp(cu, r1);
+ FreeTemp(cu, r2);
+ FreeTemp(cu, r3);
+}
+
+int ArmCodegen::LoadHelper(CompilationUnit* cu, int offset)
+{
+ LoadWordDisp(cu, rARM_SELF, offset, rARM_LR);
+ return rARM_LR;
+}
+
+uint64_t ArmCodegen::GetTargetInstFlags(int opcode)
+{
+ return ArmCodegen::EncodingMap[opcode].flags;
+}
+
+const char* ArmCodegen::GetTargetInstName(int opcode)
+{
+ return ArmCodegen::EncodingMap[opcode].name;
+}
+
+const char* ArmCodegen::GetTargetInstFmt(int opcode)
+{
+ return ArmCodegen::EncodingMap[opcode].fmt;
+}
+
+} // namespace art
diff --git a/src/compiler/dex/quick/arm/utility_arm.cc b/src/compiler/dex/quick/arm/utility_arm.cc
new file mode 100644
index 0000000..ba543ac
--- /dev/null
+++ b/src/compiler/dex/quick/arm/utility_arm.cc
@@ -0,0 +1,1091 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "arm_lir.h"
+#include "codegen_arm.h"
+#include "compiler/dex/quick/codegen_util.h"
+#include "compiler/dex/quick/ralloc_util.h"
+
+namespace art {
+
+/* This file contains codegen for the Thumb ISA. */
+
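+/*
+ * Determine whether value (raw IEEE-754 single-precision bits) can be
+ * encoded as a Thumb2 VFP floating-point immediate.  If not, return -1;
+ * if so, return the 8-bit encoded form.  For example, +2.0f (0x40000000)
+ * encodes to 0, which LoadFPConstantValue below exploits to build +0.0.
+ */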
+static int EncodeImmSingle(int value)
+{
+ int res;
+ int bit_a = (value & 0x80000000) >> 31;
+ int not_bit_b = (value & 0x40000000) >> 30;
+ int bit_b = (value & 0x20000000) >> 29;
+ int b_smear = (value & 0x3e000000) >> 25;
+ int slice = (value & 0x01f80000) >> 19;
+ int zeroes = (value & 0x0007ffff);
+ if (zeroes != 0)
+ return -1;
+ if (bit_b) {
+ if ((not_bit_b != 0) || (b_smear != 0x1f))
+ return -1;
+ } else {
+ if ((not_bit_b != 1) || (b_smear != 0x0))
+ return -1;
+ }
+ res = (bit_a << 7) | (bit_b << 6) | slice;
+ return res;
+}
+
+/*
+ * Determine whether value can be encoded as a Thumb2 floating point
+ * immediate. If not, return -1. If so return encoded 8-bit value.
+ */
+static int EncodeImmDouble(int64_t value)
+{
+ int res;
+ int bit_a = (value & 0x8000000000000000ll) >> 63;
+ int not_bit_b = (value & 0x4000000000000000ll) >> 62;
+ int bit_b = (value & 0x2000000000000000ll) >> 61;
+ int b_smear = (value & 0x3fc0000000000000ll) >> 54;
+ int slice = (value & 0x003f000000000000ll) >> 48;
+ uint64_t zeroes = (value & 0x0000ffffffffffffll);
+ if (zeroes != 0)
+ return -1;
+ if (bit_b) {
+ if ((not_bit_b != 0) || (b_smear != 0xff))
+ return -1;
+ } else {
+ if ((not_bit_b != 1) || (b_smear != 0x0))
+ return -1;
+ }
+ res = (bit_a << 7) | (bit_b << 6) | slice;
+ return res;
+}
+
+static LIR* LoadFPConstantValue(CompilationUnit* cu, int r_dest, int value)
+{
+ DCHECK(ARM_SINGLEREG(r_dest));
+ if (value == 0) {
+    // TODO: we need better info about the target CPU.  A vector exclusive-or
+    // would probably be better here if we could rely on its existence.
+ // Load an immediate +2.0 (which encodes to 0)
+ NewLIR2(cu, kThumb2Vmovs_IMM8, r_dest, 0);
+ // +0.0 = +2.0 - +2.0
+ return NewLIR3(cu, kThumb2Vsubs, r_dest, r_dest, r_dest);
+ } else {
+ int encoded_imm = EncodeImmSingle(value);
+ if (encoded_imm >= 0) {
+ return NewLIR2(cu, kThumb2Vmovs_IMM8, r_dest, encoded_imm);
+ }
+ }
+ LIR* data_target = ScanLiteralPool(cu->literal_list, value, 0);
+ if (data_target == NULL) {
+ data_target = AddWordData(cu, &cu->literal_list, value);
+ }
+ LIR* load_pc_rel = RawLIR(cu, cu->current_dalvik_offset, kThumb2Vldrs,
+ r_dest, r15pc, 0, 0, 0, data_target);
+ SetMemRefType(cu, load_pc_rel, true, kLiteral);
+ load_pc_rel->alias_info = reinterpret_cast<uintptr_t>(data_target);
+ AppendLIR(cu, load_pc_rel);
+ return load_pc_rel;
+}
+
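+/*
+ * Count leading zero bits with a five-step binary search, e.g.
+ * LeadingZeros(0x00010000) == 15 and LeadingZeros(0) == 32.
+ */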
+static int LeadingZeros(uint32_t val)
+{
+ uint32_t alt;
+ int n;
+ int count;
+
+ count = 16;
+ n = 32;
+ do {
+ alt = val >> count;
+ if (alt != 0) {
+ n = n - count;
+ val = alt;
+ }
+ count >>= 1;
+ } while (count);
+ return n - val;
+}
+
+/*
+ * Determine whether value can be encoded as a Thumb2 modified
+ * immediate. If not, return -1. If so, return i:imm3:a:bcdefgh form.
+ */
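+/*
+ * Worked examples: 0x000000AB encodes as 0x0AB, 0x00AB00AB as 0x1AB and
+ * 0xAB00AB00 as 0x2AB; a single run of bits such as 0x000003F8 is handled
+ * by the rotation path below.
+ */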
+int ArmCodegen::ModifiedImmediate(uint32_t value)
+{
+ int z_leading;
+ int z_trailing;
+ uint32_t b0 = value & 0xff;
+
+ /* Note: case of value==0 must use 0:000:0:0000000 encoding */
+ if (value <= 0xFF)
+ return b0; // 0:000:a:bcdefgh
+ if (value == ((b0 << 16) | b0))
+ return (0x1 << 8) | b0; /* 0:001:a:bcdefgh */
+ if (value == ((b0 << 24) | (b0 << 16) | (b0 << 8) | b0))
+ return (0x3 << 8) | b0; /* 0:011:a:bcdefgh */
+ b0 = (value >> 8) & 0xff;
+ if (value == ((b0 << 24) | (b0 << 8)))
+ return (0x2 << 8) | b0; /* 0:010:a:bcdefgh */
+ /* Can we do it with rotation? */
+ z_leading = LeadingZeros(value);
+ z_trailing = 32 - LeadingZeros(~value & (value - 1));
+ /* A run of eight or fewer active bits? */
+ if ((z_leading + z_trailing) < 24)
+ return -1; /* No - bail */
+ /* left-justify the constant, discarding msb (known to be 1) */
+ value <<= z_leading + 1;
+ /* Create bcdefgh */
+ value >>= 25;
+ /* Put it all together */
+ return value | ((0x8 + z_leading) << 7); /* [01000..11111]:bcdefgh */
+}
+
+bool ArmCodegen::InexpensiveConstantInt(int32_t value)
+{
+ return (ModifiedImmediate(value) >= 0) || (ModifiedImmediate(~value) >= 0);
+}
+
+bool ArmCodegen::InexpensiveConstantFloat(int32_t value)
+{
+ return EncodeImmSingle(value) >= 0;
+}
+
+bool ArmCodegen::InexpensiveConstantLong(int64_t value)
+{
+ return InexpensiveConstantInt(High32Bits(value)) && InexpensiveConstantInt(Low32Bits(value));
+}
+
+bool ArmCodegen::InexpensiveConstantDouble(int64_t value)
+{
+ return EncodeImmDouble(value) >= 0;
+}
+
+/*
+ * Load an immediate using a shortcut if possible; otherwise
+ * grab from the per-translation literal pool.
+ *
+ * No additional register clobbering operation is performed.  Use this version when
+ * 1) r_dest is freshly returned from AllocTemp or
+ * 2) The codegen is under fixed register usage
+ */
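+/*
+ * For instance: 0..255 into a low register uses kThumbMovImm; 0x00AB00AB
+ * fits a modified immediate (kThumb2MovImmShift); 0xFFFFFF00 is built with
+ * kThumb2MvnImm12 from its complement; a value that fits in 16 bits uses a
+ * single kThumb2MovImm16; anything else needs the movw/movt pair.
+ */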
+LIR* ArmCodegen::LoadConstantNoClobber(CompilationUnit* cu, int r_dest, int value)
+{
+ LIR* res;
+ int mod_imm;
+
+ if (ARM_FPREG(r_dest)) {
+ return LoadFPConstantValue(cu, r_dest, value);
+ }
+
+ /* See if the value can be constructed cheaply */
+ if (ARM_LOWREG(r_dest) && (value >= 0) && (value <= 255)) {
+ return NewLIR2(cu, kThumbMovImm, r_dest, value);
+ }
+ /* Check Modified immediate special cases */
+ mod_imm = ModifiedImmediate(value);
+ if (mod_imm >= 0) {
+ res = NewLIR2(cu, kThumb2MovImmShift, r_dest, mod_imm);
+ return res;
+ }
+ mod_imm = ModifiedImmediate(~value);
+ if (mod_imm >= 0) {
+ res = NewLIR2(cu, kThumb2MvnImm12, r_dest, mod_imm);
+ return res;
+ }
+ /* 16-bit immediate? */
+ if ((value & 0xffff) == value) {
+ res = NewLIR2(cu, kThumb2MovImm16, r_dest, value);
+ return res;
+ }
+ /* Do a low/high pair */
+ res = NewLIR2(cu, kThumb2MovImm16, r_dest, Low16Bits(value));
+ NewLIR2(cu, kThumb2MovImm16H, r_dest, High16Bits(value));
+ return res;
+}
+
+LIR* ArmCodegen::OpUnconditionalBranch(CompilationUnit* cu, LIR* target)
+{
+ LIR* res = NewLIR1(cu, kThumbBUncond, 0 /* offset to be patched during assembly*/);
+ res->target = target;
+ return res;
+}
+
+LIR* ArmCodegen::OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target)
+{
+ LIR* branch = NewLIR2(cu, kThumb2BCond, 0 /* offset to be patched */,
+ ArmConditionEncoding(cc));
+ branch->target = target;
+ return branch;
+}
+
+LIR* ArmCodegen::OpReg(CompilationUnit* cu, OpKind op, int r_dest_src)
+{
+ ArmOpcode opcode = kThumbBkpt;
+ switch (op) {
+ case kOpBlx:
+ opcode = kThumbBlxR;
+ break;
+ default:
+ LOG(FATAL) << "Bad opcode " << op;
+ }
+ return NewLIR1(cu, opcode, r_dest_src);
+}
+
+LIR* ArmCodegen::OpRegRegShift(CompilationUnit* cu, OpKind op, int r_dest_src1, int r_src2,
+ int shift)
+{
+ bool thumb_form = ((shift == 0) && ARM_LOWREG(r_dest_src1) && ARM_LOWREG(r_src2));
+ ArmOpcode opcode = kThumbBkpt;
+ switch (op) {
+ case kOpAdc:
+ opcode = (thumb_form) ? kThumbAdcRR : kThumb2AdcRRR;
+ break;
+ case kOpAnd:
+ opcode = (thumb_form) ? kThumbAndRR : kThumb2AndRRR;
+ break;
+ case kOpBic:
+ opcode = (thumb_form) ? kThumbBicRR : kThumb2BicRRR;
+ break;
+ case kOpCmn:
+ DCHECK_EQ(shift, 0);
+ opcode = (thumb_form) ? kThumbCmnRR : kThumb2CmnRR;
+ break;
+ case kOpCmp:
+ if (thumb_form)
+ opcode = kThumbCmpRR;
+ else if ((shift == 0) && !ARM_LOWREG(r_dest_src1) && !ARM_LOWREG(r_src2))
+ opcode = kThumbCmpHH;
+ else if ((shift == 0) && ARM_LOWREG(r_dest_src1))
+ opcode = kThumbCmpLH;
+ else if (shift == 0)
+ opcode = kThumbCmpHL;
+ else
+ opcode = kThumb2CmpRR;
+ break;
+ case kOpXor:
+ opcode = (thumb_form) ? kThumbEorRR : kThumb2EorRRR;
+ break;
+ case kOpMov:
+ DCHECK_EQ(shift, 0);
+ if (ARM_LOWREG(r_dest_src1) && ARM_LOWREG(r_src2))
+ opcode = kThumbMovRR;
+ else if (!ARM_LOWREG(r_dest_src1) && !ARM_LOWREG(r_src2))
+ opcode = kThumbMovRR_H2H;
+ else if (ARM_LOWREG(r_dest_src1))
+ opcode = kThumbMovRR_H2L;
+ else
+ opcode = kThumbMovRR_L2H;
+ break;
+ case kOpMul:
+ DCHECK_EQ(shift, 0);
+ opcode = (thumb_form) ? kThumbMul : kThumb2MulRRR;
+ break;
+ case kOpMvn:
+ opcode = (thumb_form) ? kThumbMvn : kThumb2MnvRR;
+ break;
+ case kOpNeg:
+ DCHECK_EQ(shift, 0);
+ opcode = (thumb_form) ? kThumbNeg : kThumb2NegRR;
+ break;
+ case kOpOr:
+ opcode = (thumb_form) ? kThumbOrr : kThumb2OrrRRR;
+ break;
+ case kOpSbc:
+ opcode = (thumb_form) ? kThumbSbc : kThumb2SbcRRR;
+ break;
+ case kOpTst:
+ opcode = (thumb_form) ? kThumbTst : kThumb2TstRR;
+ break;
+ case kOpLsl:
+ DCHECK_EQ(shift, 0);
+ opcode = (thumb_form) ? kThumbLslRR : kThumb2LslRRR;
+ break;
+ case kOpLsr:
+ DCHECK_EQ(shift, 0);
+ opcode = (thumb_form) ? kThumbLsrRR : kThumb2LsrRRR;
+ break;
+ case kOpAsr:
+ DCHECK_EQ(shift, 0);
+ opcode = (thumb_form) ? kThumbAsrRR : kThumb2AsrRRR;
+ break;
+ case kOpRor:
+ DCHECK_EQ(shift, 0);
+ opcode = (thumb_form) ? kThumbRorRR : kThumb2RorRRR;
+ break;
+ case kOpAdd:
+ opcode = (thumb_form) ? kThumbAddRRR : kThumb2AddRRR;
+ break;
+ case kOpSub:
+ opcode = (thumb_form) ? kThumbSubRRR : kThumb2SubRRR;
+ break;
+ case kOp2Byte:
+ DCHECK_EQ(shift, 0);
+ return NewLIR4(cu, kThumb2Sbfx, r_dest_src1, r_src2, 0, 8);
+ case kOp2Short:
+ DCHECK_EQ(shift, 0);
+ return NewLIR4(cu, kThumb2Sbfx, r_dest_src1, r_src2, 0, 16);
+ case kOp2Char:
+ DCHECK_EQ(shift, 0);
+ return NewLIR4(cu, kThumb2Ubfx, r_dest_src1, r_src2, 0, 16);
+ default:
+ LOG(FATAL) << "Bad opcode: " << op;
+ break;
+ }
+ DCHECK_GE(static_cast<int>(opcode), 0);
+ if (EncodingMap[opcode].flags & IS_BINARY_OP)
+ return NewLIR2(cu, opcode, r_dest_src1, r_src2);
+ else if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
+ if (EncodingMap[opcode].field_loc[2].kind == kFmtShift)
+ return NewLIR3(cu, opcode, r_dest_src1, r_src2, shift);
+ else
+ return NewLIR3(cu, opcode, r_dest_src1, r_dest_src1, r_src2);
+ } else if (EncodingMap[opcode].flags & IS_QUAD_OP)
+ return NewLIR4(cu, opcode, r_dest_src1, r_dest_src1, r_src2, shift);
+ else {
+ LOG(FATAL) << "Unexpected encoding operand count";
+ return NULL;
+ }
+}
+
+LIR* ArmCodegen::OpRegReg(CompilationUnit* cu, OpKind op, int r_dest_src1, int r_src2)
+{
+ return OpRegRegShift(cu, op, r_dest_src1, r_src2, 0);
+}
+
+LIR* ArmCodegen::OpRegRegRegShift(CompilationUnit* cu, OpKind op, int r_dest, int r_src1,
+ int r_src2, int shift)
+{
+ ArmOpcode opcode = kThumbBkpt;
+ bool thumb_form = (shift == 0) && ARM_LOWREG(r_dest) && ARM_LOWREG(r_src1) &&
+ ARM_LOWREG(r_src2);
+ switch (op) {
+ case kOpAdd:
+ opcode = (thumb_form) ? kThumbAddRRR : kThumb2AddRRR;
+ break;
+ case kOpSub:
+ opcode = (thumb_form) ? kThumbSubRRR : kThumb2SubRRR;
+ break;
+ case kOpRsub:
+ opcode = kThumb2RsubRRR;
+ break;
+ case kOpAdc:
+ opcode = kThumb2AdcRRR;
+ break;
+ case kOpAnd:
+ opcode = kThumb2AndRRR;
+ break;
+ case kOpBic:
+ opcode = kThumb2BicRRR;
+ break;
+ case kOpXor:
+ opcode = kThumb2EorRRR;
+ break;
+ case kOpMul:
+ DCHECK_EQ(shift, 0);
+ opcode = kThumb2MulRRR;
+ break;
+ case kOpOr:
+ opcode = kThumb2OrrRRR;
+ break;
+ case kOpSbc:
+ opcode = kThumb2SbcRRR;
+ break;
+ case kOpLsl:
+ DCHECK_EQ(shift, 0);
+ opcode = kThumb2LslRRR;
+ break;
+ case kOpLsr:
+ DCHECK_EQ(shift, 0);
+ opcode = kThumb2LsrRRR;
+ break;
+ case kOpAsr:
+ DCHECK_EQ(shift, 0);
+ opcode = kThumb2AsrRRR;
+ break;
+ case kOpRor:
+ DCHECK_EQ(shift, 0);
+ opcode = kThumb2RorRRR;
+ break;
+ default:
+ LOG(FATAL) << "Bad opcode: " << op;
+ break;
+ }
+ DCHECK_GE(static_cast<int>(opcode), 0);
+ if (EncodingMap[opcode].flags & IS_QUAD_OP)
+ return NewLIR4(cu, opcode, r_dest, r_src1, r_src2, shift);
+ else {
+ DCHECK(EncodingMap[opcode].flags & IS_TERTIARY_OP);
+ return NewLIR3(cu, opcode, r_dest, r_src1, r_src2);
+ }
+}
+
+LIR* ArmCodegen::OpRegRegReg(CompilationUnit* cu, OpKind op, int r_dest, int r_src1, int r_src2)
+{
+ return OpRegRegRegShift(cu, op, r_dest, r_src1, r_src2, 0);
+}
+
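+/*
+ * Three-operand ALU op with an immediate.  Roughly: prefer the short Thumb
+ * encodings, then the Thumb2 12-bit and modified-immediate forms (also
+ * trying the negated value for add/sub); if nothing fits, load the constant
+ * into a scratch temp and emit the register-register form (alt_opcode).
+ */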
+LIR* ArmCodegen::OpRegRegImm(CompilationUnit* cu, OpKind op, int r_dest, int r_src1, int value)
+{
+ LIR* res;
+ bool neg = (value < 0);
+ int abs_value = (neg) ? -value : value;
+ ArmOpcode opcode = kThumbBkpt;
+ ArmOpcode alt_opcode = kThumbBkpt;
+ bool all_low_regs = (ARM_LOWREG(r_dest) && ARM_LOWREG(r_src1));
+ int mod_imm = ModifiedImmediate(value);
+ int mod_imm_neg = ModifiedImmediate(-value);
+
+ switch (op) {
+ case kOpLsl:
+ if (all_low_regs)
+ return NewLIR3(cu, kThumbLslRRI5, r_dest, r_src1, value);
+ else
+ return NewLIR3(cu, kThumb2LslRRI5, r_dest, r_src1, value);
+ case kOpLsr:
+ if (all_low_regs)
+ return NewLIR3(cu, kThumbLsrRRI5, r_dest, r_src1, value);
+ else
+ return NewLIR3(cu, kThumb2LsrRRI5, r_dest, r_src1, value);
+ case kOpAsr:
+ if (all_low_regs)
+ return NewLIR3(cu, kThumbAsrRRI5, r_dest, r_src1, value);
+ else
+ return NewLIR3(cu, kThumb2AsrRRI5, r_dest, r_src1, value);
+ case kOpRor:
+ return NewLIR3(cu, kThumb2RorRRI5, r_dest, r_src1, value);
+ case kOpAdd:
+ if (ARM_LOWREG(r_dest) && (r_src1 == r13sp) &&
+ (value <= 1020) && ((value & 0x3)==0)) {
+ return NewLIR3(cu, kThumbAddSpRel, r_dest, r_src1, value >> 2);
+ } else if (ARM_LOWREG(r_dest) && (r_src1 == r15pc) &&
+ (value <= 1020) && ((value & 0x3)==0)) {
+ return NewLIR3(cu, kThumbAddPcRel, r_dest, r_src1, value >> 2);
+ }
+ // Note: intentional fallthrough
+ case kOpSub:
+ if (all_low_regs && ((abs_value & 0x7) == abs_value)) {
+ if (op == kOpAdd)
+ opcode = (neg) ? kThumbSubRRI3 : kThumbAddRRI3;
+ else
+ opcode = (neg) ? kThumbAddRRI3 : kThumbSubRRI3;
+ return NewLIR3(cu, opcode, r_dest, r_src1, abs_value);
+ } else if ((abs_value & 0xff) == abs_value) {
+ if (op == kOpAdd)
+ opcode = (neg) ? kThumb2SubRRI12 : kThumb2AddRRI12;
+ else
+ opcode = (neg) ? kThumb2AddRRI12 : kThumb2SubRRI12;
+ return NewLIR3(cu, opcode, r_dest, r_src1, abs_value);
+ }
+ if (mod_imm_neg >= 0) {
+ op = (op == kOpAdd) ? kOpSub : kOpAdd;
+ mod_imm = mod_imm_neg;
+ }
+ if (op == kOpSub) {
+ opcode = kThumb2SubRRI8;
+ alt_opcode = kThumb2SubRRR;
+ } else {
+ opcode = kThumb2AddRRI8;
+ alt_opcode = kThumb2AddRRR;
+ }
+ break;
+ case kOpRsub:
+ opcode = kThumb2RsubRRI8;
+ alt_opcode = kThumb2RsubRRR;
+ break;
+ case kOpAdc:
+ opcode = kThumb2AdcRRI8;
+ alt_opcode = kThumb2AdcRRR;
+ break;
+ case kOpSbc:
+ opcode = kThumb2SbcRRI8;
+ alt_opcode = kThumb2SbcRRR;
+ break;
+ case kOpOr:
+ opcode = kThumb2OrrRRI8;
+ alt_opcode = kThumb2OrrRRR;
+ break;
+ case kOpAnd:
+ opcode = kThumb2AndRRI8;
+ alt_opcode = kThumb2AndRRR;
+ break;
+ case kOpXor:
+ opcode = kThumb2EorRRI8;
+ alt_opcode = kThumb2EorRRR;
+ break;
+ case kOpMul:
+ //TUNING: power of 2, shift & add
+ mod_imm = -1;
+ alt_opcode = kThumb2MulRRR;
+ break;
+ case kOpCmp: {
+ int mod_imm = ModifiedImmediate(value);
+ LIR* res;
+ if (mod_imm >= 0) {
+ res = NewLIR2(cu, kThumb2CmpRI12, r_src1, mod_imm);
+ } else {
+ int r_tmp = AllocTemp(cu);
+ res = LoadConstant(cu, r_tmp, value);
+ OpRegReg(cu, kOpCmp, r_src1, r_tmp);
+ FreeTemp(cu, r_tmp);
+ }
+ return res;
+ }
+ default:
+ LOG(FATAL) << "Bad opcode: " << op;
+ }
+
+ if (mod_imm >= 0) {
+ return NewLIR3(cu, opcode, r_dest, r_src1, mod_imm);
+ } else {
+ int r_scratch = AllocTemp(cu);
+ LoadConstant(cu, r_scratch, value);
+ if (EncodingMap[alt_opcode].flags & IS_QUAD_OP)
+ res = NewLIR4(cu, alt_opcode, r_dest, r_src1, r_scratch, 0);
+ else
+ res = NewLIR3(cu, alt_opcode, r_dest, r_src1, r_scratch);
+ FreeTemp(cu, r_scratch);
+ return res;
+ }
+}
+
+/* Handle Thumb-only variants here - otherwise punt to OpRegRegImm */
+LIR* ArmCodegen::OpRegImm(CompilationUnit* cu, OpKind op, int r_dest_src1, int value)
+{
+ bool neg = (value < 0);
+ int abs_value = (neg) ? -value : value;
+ bool short_form = (((abs_value & 0xff) == abs_value) && ARM_LOWREG(r_dest_src1));
+ ArmOpcode opcode = kThumbBkpt;
+ switch (op) {
+ case kOpAdd:
+      if (!neg && (r_dest_src1 == r13sp) && (value <= 508)) { /* sp */
+ DCHECK_EQ((value & 0x3), 0);
+ return NewLIR1(cu, kThumbAddSpI7, value >> 2);
+ } else if (short_form) {
+ opcode = (neg) ? kThumbSubRI8 : kThumbAddRI8;
+ }
+ break;
+ case kOpSub:
+ if (!neg && (r_dest_src1 == r13sp) && (value <= 508)) { /* sp */
+ DCHECK_EQ((value & 0x3), 0);
+ return NewLIR1(cu, kThumbSubSpI7, value >> 2);
+ } else if (short_form) {
+ opcode = (neg) ? kThumbAddRI8 : kThumbSubRI8;
+ }
+ break;
+ case kOpCmp:
+      if (ARM_LOWREG(r_dest_src1) && short_form)
+        opcode = kThumbCmpRI8;
+ else if (ARM_LOWREG(r_dest_src1))
+ opcode = kThumbCmpRR;
+ else {
+ short_form = false;
+ opcode = kThumbCmpHL;
+ }
+ break;
+ default:
+ /* Punt to OpRegRegImm - if bad case catch it there */
+ short_form = false;
+ break;
+ }
+ if (short_form)
+ return NewLIR2(cu, opcode, r_dest_src1, abs_value);
+ else {
+ return OpRegRegImm(cu, op, r_dest_src1, r_dest_src1, value);
+ }
+}
+
+LIR* ArmCodegen::LoadConstantWide(CompilationUnit* cu, int r_dest_lo, int r_dest_hi, int64_t value)
+{
+ LIR* res = NULL;
+ int32_t val_lo = Low32Bits(value);
+ int32_t val_hi = High32Bits(value);
+ int target_reg = S2d(r_dest_lo, r_dest_hi);
+ if (ARM_FPREG(r_dest_lo)) {
+ if ((val_lo == 0) && (val_hi == 0)) {
+      // TODO: we need better info about the target CPU.  A vector exclusive-or
+      // would probably be better here if we could rely on its existence.
+ // Load an immediate +2.0 (which encodes to 0)
+ NewLIR2(cu, kThumb2Vmovd_IMM8, target_reg, 0);
+ // +0.0 = +2.0 - +2.0
+ res = NewLIR3(cu, kThumb2Vsubd, target_reg, target_reg, target_reg);
+ } else {
+ int encoded_imm = EncodeImmDouble(value);
+ if (encoded_imm >= 0) {
+ res = NewLIR2(cu, kThumb2Vmovd_IMM8, target_reg, encoded_imm);
+ }
+ }
+ } else {
+ if ((InexpensiveConstantInt(val_lo) && (InexpensiveConstantInt(val_hi)))) {
+ res = LoadConstantNoClobber(cu, r_dest_lo, val_lo);
+ LoadConstantNoClobber(cu, r_dest_hi, val_hi);
+ }
+ }
+ if (res == NULL) {
+ // No short form - load from the literal pool.
+ LIR* data_target = ScanLiteralPoolWide(cu->literal_list, val_lo, val_hi);
+ if (data_target == NULL) {
+ data_target = AddWideData(cu, &cu->literal_list, val_lo, val_hi);
+ }
+ if (ARM_FPREG(r_dest_lo)) {
+ res = RawLIR(cu, cu->current_dalvik_offset, kThumb2Vldrd,
+ target_reg, r15pc, 0, 0, 0, data_target);
+ } else {
+ res = RawLIR(cu, cu->current_dalvik_offset, kThumb2LdrdPcRel8,
+ r_dest_lo, r_dest_hi, r15pc, 0, 0, data_target);
+ }
+ SetMemRefType(cu, res, true, kLiteral);
+ res->alias_info = reinterpret_cast<uintptr_t>(data_target);
+ AppendLIR(cu, res);
+ }
+ return res;
+}
+
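+/*
+ * Pack a shift kind and amount into a single operand, (amount << 2) | kind,
+ * as consumed by the kFmtShift operand fields; EncodeShift(kArmLsl, scale)
+ * is used below to scale an index register.
+ */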
+int ArmCodegen::EncodeShift(int code, int amount) {
+ return ((amount & 0x1f) << 2) | code;
+}
+
+LIR* ArmCodegen::LoadBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_dest,
+ int scale, OpSize size)
+{
+ bool all_low_regs = ARM_LOWREG(rBase) && ARM_LOWREG(r_index) && ARM_LOWREG(r_dest);
+ LIR* load;
+ ArmOpcode opcode = kThumbBkpt;
+ bool thumb_form = (all_low_regs && (scale == 0));
+ int reg_ptr;
+
+ if (ARM_FPREG(r_dest)) {
+ if (ARM_SINGLEREG(r_dest)) {
+ DCHECK((size == kWord) || (size == kSingle));
+ opcode = kThumb2Vldrs;
+ size = kSingle;
+ } else {
+ DCHECK(ARM_DOUBLEREG(r_dest));
+ DCHECK((size == kLong) || (size == kDouble));
+ DCHECK_EQ((r_dest & 0x1), 0);
+ opcode = kThumb2Vldrd;
+ size = kDouble;
+ }
+ } else {
+ if (size == kSingle)
+ size = kWord;
+ }
+
+ switch (size) {
+ case kDouble: // fall-through
+ case kSingle:
+ reg_ptr = AllocTemp(cu);
+ if (scale) {
+ NewLIR4(cu, kThumb2AddRRR, reg_ptr, rBase, r_index,
+ EncodeShift(kArmLsl, scale));
+ } else {
+ OpRegRegReg(cu, kOpAdd, reg_ptr, rBase, r_index);
+ }
+ load = NewLIR3(cu, opcode, r_dest, reg_ptr, 0);
+ FreeTemp(cu, reg_ptr);
+ return load;
+ case kWord:
+ opcode = (thumb_form) ? kThumbLdrRRR : kThumb2LdrRRR;
+ break;
+ case kUnsignedHalf:
+ opcode = (thumb_form) ? kThumbLdrhRRR : kThumb2LdrhRRR;
+ break;
+ case kSignedHalf:
+ opcode = (thumb_form) ? kThumbLdrshRRR : kThumb2LdrshRRR;
+ break;
+ case kUnsignedByte:
+ opcode = (thumb_form) ? kThumbLdrbRRR : kThumb2LdrbRRR;
+ break;
+ case kSignedByte:
+ opcode = (thumb_form) ? kThumbLdrsbRRR : kThumb2LdrsbRRR;
+ break;
+ default:
+ LOG(FATAL) << "Bad size: " << size;
+ }
+ if (thumb_form)
+ load = NewLIR3(cu, opcode, r_dest, rBase, r_index);
+ else
+ load = NewLIR4(cu, opcode, r_dest, rBase, r_index, scale);
+
+ return load;
+}
+
+LIR* ArmCodegen::StoreBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_src,
+ int scale, OpSize size)
+{
+ bool all_low_regs = ARM_LOWREG(rBase) && ARM_LOWREG(r_index) && ARM_LOWREG(r_src);
+ LIR* store = NULL;
+ ArmOpcode opcode = kThumbBkpt;
+ bool thumb_form = (all_low_regs && (scale == 0));
+ int reg_ptr;
+
+ if (ARM_FPREG(r_src)) {
+ if (ARM_SINGLEREG(r_src)) {
+ DCHECK((size == kWord) || (size == kSingle));
+ opcode = kThumb2Vstrs;
+ size = kSingle;
+ } else {
+ DCHECK(ARM_DOUBLEREG(r_src));
+ DCHECK((size == kLong) || (size == kDouble));
+ DCHECK_EQ((r_src & 0x1), 0);
+ opcode = kThumb2Vstrd;
+ size = kDouble;
+ }
+ } else {
+ if (size == kSingle)
+ size = kWord;
+ }
+
+ switch (size) {
+ case kDouble: // fall-through
+ case kSingle:
+ reg_ptr = AllocTemp(cu);
+ if (scale) {
+ NewLIR4(cu, kThumb2AddRRR, reg_ptr, rBase, r_index,
+ EncodeShift(kArmLsl, scale));
+ } else {
+ OpRegRegReg(cu, kOpAdd, reg_ptr, rBase, r_index);
+ }
+ store = NewLIR3(cu, opcode, r_src, reg_ptr, 0);
+ FreeTemp(cu, reg_ptr);
+ return store;
+ case kWord:
+ opcode = (thumb_form) ? kThumbStrRRR : kThumb2StrRRR;
+ break;
+ case kUnsignedHalf:
+ case kSignedHalf:
+ opcode = (thumb_form) ? kThumbStrhRRR : kThumb2StrhRRR;
+ break;
+ case kUnsignedByte:
+ case kSignedByte:
+ opcode = (thumb_form) ? kThumbStrbRRR : kThumb2StrbRRR;
+ break;
+ default:
+ LOG(FATAL) << "Bad size: " << size;
+ }
+ if (thumb_form)
+ store = NewLIR3(cu, opcode, r_src, rBase, r_index);
+ else
+ store = NewLIR4(cu, opcode, r_src, rBase, r_index, scale);
+
+ return store;
+}
+
+/*
+ * Load value from base + displacement. Optionally perform null check
+ * on base (which must have an associated s_reg and MIR). If not
+ * performing null check, incoming MIR can be null.
+ */
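+/*
+ * For example, a kWord load of a low register from the frame (r13sp) with a
+ * displacement of at most 1020 uses the short kThumbLdrSpRel form (the
+ * displacement is scaled down by 4); a displacement that fits no short form
+ * is materialized in a temp and routed through LoadBaseIndexed.
+ */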
+LIR* ArmCodegen::LoadBaseDispBody(CompilationUnit* cu, int rBase, int displacement, int r_dest,
+ int r_dest_hi, OpSize size, int s_reg)
+{
+ Codegen* cg = cu->cg.get();
+ LIR* load = NULL;
+ ArmOpcode opcode = kThumbBkpt;
+ bool short_form = false;
+ bool thumb2Form = (displacement < 4092 && displacement >= 0);
+ bool all_low_regs = (ARM_LOWREG(rBase) && ARM_LOWREG(r_dest));
+ int encoded_disp = displacement;
+ bool is64bit = false;
+ bool already_generated = false;
+ switch (size) {
+ case kDouble:
+ case kLong:
+ is64bit = true;
+ if (ARM_FPREG(r_dest)) {
+ if (ARM_SINGLEREG(r_dest)) {
+ DCHECK(ARM_FPREG(r_dest_hi));
+ r_dest = cg->S2d(r_dest, r_dest_hi);
+ }
+ opcode = kThumb2Vldrd;
+ if (displacement <= 1020) {
+ short_form = true;
+ encoded_disp >>= 2;
+ }
+ break;
+ } else {
+ if (displacement <= 1020) {
+ load = NewLIR4(cu, kThumb2LdrdI8, r_dest, r_dest_hi, rBase, displacement >> 2);
+ } else {
+ load = LoadBaseDispBody(cu, rBase, displacement, r_dest,
+ -1, kWord, s_reg);
+ LoadBaseDispBody(cu, rBase, displacement + 4, r_dest_hi,
+ -1, kWord, INVALID_SREG);
+ }
+ already_generated = true;
+      }
+      break;
+ case kSingle:
+ case kWord:
+ if (ARM_FPREG(r_dest)) {
+ opcode = kThumb2Vldrs;
+ if (displacement <= 1020) {
+ short_form = true;
+ encoded_disp >>= 2;
+ }
+ break;
+ }
+ if (ARM_LOWREG(r_dest) && (rBase == r15pc) &&
+ (displacement <= 1020) && (displacement >= 0)) {
+ short_form = true;
+ encoded_disp >>= 2;
+ opcode = kThumbLdrPcRel;
+ } else if (ARM_LOWREG(r_dest) && (rBase == r13sp) &&
+ (displacement <= 1020) && (displacement >= 0)) {
+ short_form = true;
+ encoded_disp >>= 2;
+ opcode = kThumbLdrSpRel;
+ } else if (all_low_regs && displacement < 128 && displacement >= 0) {
+ DCHECK_EQ((displacement & 0x3), 0);
+ short_form = true;
+ encoded_disp >>= 2;
+ opcode = kThumbLdrRRI5;
+ } else if (thumb2Form) {
+ short_form = true;
+ opcode = kThumb2LdrRRI12;
+ }
+ break;
+ case kUnsignedHalf:
+ if (all_low_regs && displacement < 64 && displacement >= 0) {
+ DCHECK_EQ((displacement & 0x1), 0);
+ short_form = true;
+ encoded_disp >>= 1;
+ opcode = kThumbLdrhRRI5;
+ } else if (displacement < 4092 && displacement >= 0) {
+ short_form = true;
+ opcode = kThumb2LdrhRRI12;
+ }
+ break;
+ case kSignedHalf:
+ if (thumb2Form) {
+ short_form = true;
+ opcode = kThumb2LdrshRRI12;
+ }
+ break;
+ case kUnsignedByte:
+ if (all_low_regs && displacement < 32 && displacement >= 0) {
+ short_form = true;
+ opcode = kThumbLdrbRRI5;
+ } else if (thumb2Form) {
+ short_form = true;
+ opcode = kThumb2LdrbRRI12;
+ }
+ break;
+ case kSignedByte:
+ if (thumb2Form) {
+ short_form = true;
+ opcode = kThumb2LdrsbRRI12;
+ }
+ break;
+ default:
+ LOG(FATAL) << "Bad size: " << size;
+ }
+
+ if (!already_generated) {
+ if (short_form) {
+ load = NewLIR3(cu, opcode, r_dest, rBase, encoded_disp);
+ } else {
+ int reg_offset = AllocTemp(cu);
+ cg->LoadConstant(cu, reg_offset, encoded_disp);
+ load = cg->LoadBaseIndexed(cu, rBase, reg_offset, r_dest, 0, size);
+ FreeTemp(cu, reg_offset);
+ }
+ }
+
+ // TODO: in future may need to differentiate Dalvik accesses w/ spills
+ if (rBase == rARM_SP) {
+ AnnotateDalvikRegAccess(cu, load, displacement >> 2, true /* is_load */, is64bit);
+ }
+ return load;
+}
+
+LIR* ArmCodegen::LoadBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_dest,
+ OpSize size, int s_reg)
+{
+ return LoadBaseDispBody(cu, rBase, displacement, r_dest, -1, size, s_reg);
+}
+
+LIR* ArmCodegen::LoadBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_dest_lo,
+ int r_dest_hi, int s_reg)
+{
+ return LoadBaseDispBody(cu, rBase, displacement, r_dest_lo, r_dest_hi, kLong, s_reg);
+}
+
+
+LIR* ArmCodegen::StoreBaseDispBody(CompilationUnit* cu, int rBase, int displacement,
+ int r_src, int r_src_hi, OpSize size) {
+ Codegen* cg = cu->cg.get();
+ LIR* store = NULL;
+ ArmOpcode opcode = kThumbBkpt;
+ bool short_form = false;
+ bool thumb2Form = (displacement < 4092 && displacement >= 0);
+ bool all_low_regs = (ARM_LOWREG(rBase) && ARM_LOWREG(r_src));
+ int encoded_disp = displacement;
+ bool is64bit = false;
+ bool already_generated = false;
+ switch (size) {
+ case kLong:
+ case kDouble:
+ is64bit = true;
+ if (!ARM_FPREG(r_src)) {
+ if (displacement <= 1020) {
+ store = NewLIR4(cu, kThumb2StrdI8, r_src, r_src_hi, rBase, displacement >> 2);
+ } else {
+ store = StoreBaseDispBody(cu, rBase, displacement, r_src, -1, kWord);
+ StoreBaseDispBody(cu, rBase, displacement + 4, r_src_hi, -1, kWord);
+ }
+ already_generated = true;
+ } else {
+ if (ARM_SINGLEREG(r_src)) {
+ DCHECK(ARM_FPREG(r_src_hi));
+ r_src = cg->S2d(r_src, r_src_hi);
+ }
+ opcode = kThumb2Vstrd;
+ if (displacement <= 1020) {
+ short_form = true;
+ encoded_disp >>= 2;
+ }
+ }
+ break;
+ case kSingle:
+ case kWord:
+ if (ARM_FPREG(r_src)) {
+ DCHECK(ARM_SINGLEREG(r_src));
+ opcode = kThumb2Vstrs;
+ if (displacement <= 1020) {
+ short_form = true;
+ encoded_disp >>= 2;
+ }
+ break;
+ }
+ if (all_low_regs && displacement < 128 && displacement >= 0) {
+ DCHECK_EQ((displacement & 0x3), 0);
+ short_form = true;
+ encoded_disp >>= 2;
+ opcode = kThumbStrRRI5;
+ } else if (thumb2Form) {
+ short_form = true;
+ opcode = kThumb2StrRRI12;
+ }
+ break;
+ case kUnsignedHalf:
+ case kSignedHalf:
+ if (all_low_regs && displacement < 64 && displacement >= 0) {
+ DCHECK_EQ((displacement & 0x1), 0);
+ short_form = true;
+ encoded_disp >>= 1;
+ opcode = kThumbStrhRRI5;
+ } else if (thumb2Form) {
+ short_form = true;
+ opcode = kThumb2StrhRRI12;
+ }
+ break;
+ case kUnsignedByte:
+ case kSignedByte:
+ if (all_low_regs && displacement < 32 && displacement >= 0) {
+ short_form = true;
+ opcode = kThumbStrbRRI5;
+ } else if (thumb2Form) {
+ short_form = true;
+ opcode = kThumb2StrbRRI12;
+ }
+ break;
+ default:
+ LOG(FATAL) << "Bad size: " << size;
+ }
+ if (!already_generated) {
+ if (short_form) {
+ store = NewLIR3(cu, opcode, r_src, rBase, encoded_disp);
+ } else {
+ int r_scratch = AllocTemp(cu);
+ cg->LoadConstant(cu, r_scratch, encoded_disp);
+ store = cg->StoreBaseIndexed(cu, rBase, r_scratch, r_src, 0, size);
+ FreeTemp(cu, r_scratch);
+ }
+ }
+
+ // TODO: In future, may need to differentiate Dalvik & spill accesses
+ if (rBase == rARM_SP) {
+ AnnotateDalvikRegAccess(cu, store, displacement >> 2, false /* is_load */, is64bit);
+ }
+ return store;
+}
+
+LIR* ArmCodegen::StoreBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_src,
+ OpSize size)
+{
+ return StoreBaseDispBody(cu, rBase, displacement, r_src, -1, size);
+}
+
+LIR* ArmCodegen::StoreBaseDispWide(CompilationUnit* cu, int rBase, int displacement,
+ int r_src_lo, int r_src_hi)
+{
+ return StoreBaseDispBody(cu, rBase, displacement, r_src_lo, r_src_hi, kLong);
+}
+
+LIR* ArmCodegen::OpFpRegCopy(CompilationUnit* cu, int r_dest, int r_src)
+{
+ int opcode;
+ DCHECK_EQ(ARM_DOUBLEREG(r_dest), ARM_DOUBLEREG(r_src));
+ if (ARM_DOUBLEREG(r_dest)) {
+ opcode = kThumb2Vmovd;
+ } else {
+ if (ARM_SINGLEREG(r_dest)) {
+ opcode = ARM_SINGLEREG(r_src) ? kThumb2Vmovs : kThumb2Fmsr;
+ } else {
+ DCHECK(ARM_SINGLEREG(r_src));
+ opcode = kThumb2Fmrs;
+ }
+ }
+ LIR* res = RawLIR(cu, cu->current_dalvik_offset, opcode, r_dest, r_src);
+ if (!(cu->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
+ res->flags.is_nop = true;
+ }
+ return res;
+}
+
+LIR* ArmCodegen::OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset)
+{
+ LOG(FATAL) << "Unexpected use of OpThreadMem for Arm";
+ return NULL;
+}
+
+LIR* ArmCodegen::OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp)
+{
+ LOG(FATAL) << "Unexpected use of OpMem for Arm";
+ return NULL;
+}
+
+LIR* ArmCodegen::StoreBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
+ int displacement, int r_src, int r_src_hi, OpSize size,
+ int s_reg)
+{
+ LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for Arm";
+ return NULL;
+}
+
+LIR* ArmCodegen::OpRegMem(CompilationUnit *cu, OpKind op, int r_dest, int rBase, int offset)
+{
+ LOG(FATAL) << "Unexpected use of OpRegMem for Arm";
+ return NULL;
+}
+
+LIR* ArmCodegen::LoadBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
+ int displacement, int r_dest, int r_dest_hi, OpSize size,
+ int s_reg)
+{
+ LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for Arm";
+ return NULL;
+}
+
+} // namespace art
diff --git a/src/compiler/dex/quick/codegen.h b/src/compiler/dex/quick/codegen.h
new file mode 100644
index 0000000..63c8460
--- /dev/null
+++ b/src/compiler/dex/quick/codegen.h
@@ -0,0 +1,406 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_QUICK_CODEGEN_H_
+#define ART_SRC_COMPILER_DEX_QUICK_CODEGEN_H_
+
+#include "compiler/dex/compiler_ir.h"
+
+namespace art {
+
+// Set to 1 to measure cost of suspend check.
+#define NO_SUSPEND 0
+
+#define IS_BINARY_OP (1ULL << kIsBinaryOp)
+#define IS_BRANCH (1ULL << kIsBranch)
+#define IS_IT (1ULL << kIsIT)
+#define IS_LOAD (1ULL << kMemLoad)
+#define IS_QUAD_OP (1ULL << kIsQuadOp)
+#define IS_QUIN_OP (1ULL << kIsQuinOp)
+#define IS_SEXTUPLE_OP (1ULL << kIsSextupleOp)
+#define IS_STORE (1ULL << kMemStore)
+#define IS_TERTIARY_OP (1ULL << kIsTertiaryOp)
+#define IS_UNARY_OP (1ULL << kIsUnaryOp)
+#define NEEDS_FIXUP (1ULL << kPCRelFixup)
+#define NO_OPERAND (1ULL << kNoOperand)
+#define REG_DEF0 (1ULL << kRegDef0)
+#define REG_DEF1 (1ULL << kRegDef1)
+#define REG_DEFA (1ULL << kRegDefA)
+#define REG_DEFD (1ULL << kRegDefD)
+#define REG_DEF_FPCS_LIST0 (1ULL << kRegDefFPCSList0)
+#define REG_DEF_FPCS_LIST2 (1ULL << kRegDefFPCSList2)
+#define REG_DEF_LIST0 (1ULL << kRegDefList0)
+#define REG_DEF_LIST1 (1ULL << kRegDefList1)
+#define REG_DEF_LR (1ULL << kRegDefLR)
+#define REG_DEF_SP (1ULL << kRegDefSP)
+#define REG_USE0 (1ULL << kRegUse0)
+#define REG_USE1 (1ULL << kRegUse1)
+#define REG_USE2 (1ULL << kRegUse2)
+#define REG_USE3 (1ULL << kRegUse3)
+#define REG_USE4 (1ULL << kRegUse4)
+#define REG_USEA (1ULL << kRegUseA)
+#define REG_USEC (1ULL << kRegUseC)
+#define REG_USED (1ULL << kRegUseD)
+#define REG_USE_FPCS_LIST0 (1ULL << kRegUseFPCSList0)
+#define REG_USE_FPCS_LIST2 (1ULL << kRegUseFPCSList2)
+#define REG_USE_LIST0 (1ULL << kRegUseList0)
+#define REG_USE_LIST1 (1ULL << kRegUseList1)
+#define REG_USE_LR (1ULL << kRegUseLR)
+#define REG_USE_PC (1ULL << kRegUsePC)
+#define REG_USE_SP (1ULL << kRegUseSP)
+#define SETS_CCODES (1ULL << kSetsCCodes)
+#define USES_CCODES (1ULL << kUsesCCodes)
+
+// Common combo register usage patterns.
+#define REG_DEF01 (REG_DEF0 | REG_DEF1)
+#define REG_DEF01_USE2 (REG_DEF0 | REG_DEF1 | REG_USE2)
+#define REG_DEF0_USE01 (REG_DEF0 | REG_USE01)
+#define REG_DEF0_USE0 (REG_DEF0 | REG_USE0)
+#define REG_DEF0_USE12 (REG_DEF0 | REG_USE12)
+#define REG_DEF0_USE1 (REG_DEF0 | REG_USE1)
+#define REG_DEF0_USE2 (REG_DEF0 | REG_USE2)
+#define REG_DEFAD_USEAD (REG_DEFAD_USEA | REG_USED)
+#define REG_DEFAD_USEA (REG_DEFA_USEA | REG_DEFD)
+#define REG_DEFA_USEA (REG_DEFA | REG_USEA)
+#define REG_USE012 (REG_USE01 | REG_USE2)
+#define REG_USE014 (REG_USE01 | REG_USE4)
+#define REG_USE01 (REG_USE0 | REG_USE1)
+#define REG_USE02 (REG_USE0 | REG_USE2)
+#define REG_USE12 (REG_USE1 | REG_USE2)
+#define REG_USE23 (REG_USE2 | REG_USE3)
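+// For illustration only: a typical three-register ALU entry in a target's
+// EncodingMap would combine flags along the lines of
+// IS_TERTIARY_OP | REG_DEF0_USE12 | SETS_CCODES.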
+
+typedef int (*NextCallInsn)(CompilationUnit*, CallInfo*, int, uint32_t dex_idx,
+ uint32_t method_idx, uintptr_t direct_code,
+ uintptr_t direct_method, InvokeType type);
+
+// Target-specific initialization.
+bool InitArmCodegen(CompilationUnit* cu);
+bool InitMipsCodegen(CompilationUnit* cu);
+bool InitX86Codegen(CompilationUnit* cu);
+
+class Codegen {
+
+ public:
+
+ virtual ~Codegen(){};
+
+ // Shared by all targets - implemented in gen_common.cc.
+ void HandleSuspendLaunchPads(CompilationUnit *cu);
+ void HandleIntrinsicLaunchPads(CompilationUnit *cu);
+ void HandleThrowLaunchPads(CompilationUnit *cu);
+ void GenBarrier(CompilationUnit* cu);
+ LIR* GenCheck(CompilationUnit* cu, ConditionCode c_code, ThrowKind kind);
+ LIR* GenImmedCheck(CompilationUnit* cu, ConditionCode c_code, int reg, int imm_val,
+ ThrowKind kind);
+ LIR* GenNullCheck(CompilationUnit* cu, int s_reg, int m_reg, int opt_flags);
+ LIR* GenRegRegCheck(CompilationUnit* cu, ConditionCode c_code, int reg1, int reg2,
+ ThrowKind kind);
+ void GenCompareAndBranch(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_src1,
+ RegLocation rl_src2, LIR* taken, LIR* fall_through);
+ void GenCompareZeroAndBranch(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_src,
+ LIR* taken, LIR* fall_through);
+ void GenIntToLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+ void GenIntNarrowing(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src);
+ void GenNewArray(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest,
+ RegLocation rl_src);
+ void GenFilledNewArray(CompilationUnit* cu, CallInfo* info);
+ void GenSput(CompilationUnit* cu, uint32_t field_idx, RegLocation rl_src,
+ bool is_long_or_double, bool is_object);
+ void GenSget(CompilationUnit* cu, uint32_t field_idx, RegLocation rl_dest,
+ bool is_long_or_double, bool is_object);
+ void GenShowTarget(CompilationUnit* cu);
+ void GenIGet(CompilationUnit* cu, uint32_t field_idx, int opt_flags, OpSize size,
+ RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double, bool is_object);
+ void GenIPut(CompilationUnit* cu, uint32_t field_idx, int opt_flags, OpSize size,
+ RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double, bool is_object);
+ void GenConstClass(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest);
+ void GenConstString(CompilationUnit* cu, uint32_t string_idx, RegLocation rl_dest);
+ void GenNewInstance(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest);
+ void GenThrow(CompilationUnit* cu, RegLocation rl_src);
+ void GenInstanceof(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest,
+ RegLocation rl_src);
+ void GenCheckCast(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_src);
+ void GenLong3Addr(CompilationUnit* cu, OpKind first_op, OpKind second_op, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2);
+ void GenShiftOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_shift);
+ void GenArithOpInt(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2);
+ void GenArithOpIntLit(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src, int lit);
+ void GenArithOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2);
+ void GenConversionCall(CompilationUnit* cu, int func_offset, RegLocation rl_dest,
+ RegLocation rl_src);
+ void GenSuspendTest(CompilationUnit* cu, int opt_flags);
+ void GenSuspendTestAndBranch(CompilationUnit* cu, int opt_flags, LIR* target);
+
+ // Shared by all targets - implemented in gen_invoke.cc.
+ int CallHelperSetup(CompilationUnit* cu, int helper_offset);
+ LIR* CallHelper(CompilationUnit* cu, int r_tgt, int helper_offset, bool safepoint_pc);
+ void CallRuntimeHelperImm(CompilationUnit* cu, int helper_offset, int arg0, bool safepoint_pc);
+ void CallRuntimeHelperReg(CompilationUnit* cu, int helper_offset, int arg0, bool safepoint_pc);
+ void CallRuntimeHelperRegLocation(CompilationUnit* cu, int helper_offset, RegLocation arg0,
+ bool safepoint_pc);
+ void CallRuntimeHelperImmImm(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
+ bool safepoint_pc);
+ void CallRuntimeHelperImmRegLocation(CompilationUnit* cu, int helper_offset, int arg0,
+ RegLocation arg1, bool safepoint_pc);
+ void CallRuntimeHelperRegLocationImm(CompilationUnit* cu, int helper_offset, RegLocation arg0,
+ int arg1, bool safepoint_pc);
+ void CallRuntimeHelperImmReg(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
+ bool safepoint_pc);
+ void CallRuntimeHelperRegImm(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
+ bool safepoint_pc);
+ void CallRuntimeHelperImmMethod(CompilationUnit* cu, int helper_offset, int arg0,
+ bool safepoint_pc);
+ void CallRuntimeHelperRegLocationRegLocation(CompilationUnit* cu, int helper_offset,
+ RegLocation arg0, RegLocation arg1,
+ bool safepoint_pc);
+ void CallRuntimeHelperRegReg(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
+ bool safepoint_pc);
+ void CallRuntimeHelperRegRegImm(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
+ int arg2, bool safepoint_pc);
+ void CallRuntimeHelperImmMethodRegLocation(CompilationUnit* cu, int helper_offset, int arg0,
+ RegLocation arg2, bool safepoint_pc);
+ void CallRuntimeHelperImmMethodImm(CompilationUnit* cu, int helper_offset, int arg0, int arg2,
+ bool safepoint_pc);
+ void CallRuntimeHelperImmRegLocationRegLocation(CompilationUnit* cu, int helper_offset,
+ int arg0, RegLocation arg1, RegLocation arg2,
+ bool safepoint_pc);
+ void GenInvoke(CompilationUnit* cu, CallInfo* info);
+ void FlushIns(CompilationUnit* cu, RegLocation* ArgLocs, RegLocation rl_method);
+ int GenDalvikArgsNoRange(CompilationUnit* cu, CallInfo* info, int call_state, LIR** pcrLabel,
+ NextCallInsn next_call_insn, uint32_t dex_idx, uint32_t method_idx,
+ uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
+ bool skip_this);
+ int GenDalvikArgsRange(CompilationUnit* cu, CallInfo* info, int call_state, LIR** pcrLabel,
+ NextCallInsn next_call_insn, uint32_t dex_idx, uint32_t method_idx,
+ uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
+ bool skip_this);
+ RegLocation InlineTarget(CompilationUnit* cu, CallInfo* info);
+ RegLocation InlineTargetWide(CompilationUnit* cu, CallInfo* info);
+ CallInfo* NewMemCallInfo(CompilationUnit* cu, BasicBlock* bb, MIR* mir, InvokeType type,
+ bool is_range);
+ bool GenInlinedCharAt(CompilationUnit* cu, CallInfo* info);
+ bool GenInlinedStringIsEmptyOrLength(CompilationUnit* cu, CallInfo* info, bool is_empty);
+ bool GenInlinedAbsInt(CompilationUnit *cu, CallInfo* info);
+ bool GenInlinedAbsLong(CompilationUnit *cu, CallInfo* info);
+ bool GenInlinedFloatCvt(CompilationUnit *cu, CallInfo* info);
+ bool GenInlinedDoubleCvt(CompilationUnit *cu, CallInfo* info);
+ bool GenInlinedIndexOf(CompilationUnit* cu, CallInfo* info, bool zero_based);
+ bool GenInlinedStringCompareTo(CompilationUnit* cu, CallInfo* info);
+ bool GenInlinedCurrentThread(CompilationUnit* cu, CallInfo* info);
+ bool GenInlinedUnsafeGet(CompilationUnit* cu, CallInfo* info, bool is_long, bool is_volatile);
+ bool GenInlinedUnsafePut(CompilationUnit* cu, CallInfo* info, bool is_long, bool is_object,
+ bool is_volatile, bool is_ordered);
+ bool GenIntrinsic(CompilationUnit* cu, CallInfo* info);
+
+ // Shared by all targets - implemented in gen_loadstore.cc.
+ RegLocation LoadCurrMethod(CompilationUnit *cu);
+ void LoadCurrMethodDirect(CompilationUnit *cu, int r_tgt);
+ LIR* LoadConstant(CompilationUnit* cu, int r_dest, int value);
+ LIR* LoadWordDisp(CompilationUnit* cu, int rBase, int displacement, int r_dest);
+ RegLocation LoadValue(CompilationUnit* cu, RegLocation rl_src, RegisterClass op_kind);
+ RegLocation LoadValueWide(CompilationUnit* cu, RegLocation rl_src, RegisterClass op_kind);
+ void LoadValueDirect(CompilationUnit* cu, RegLocation rl_src, int r_dest);
+ void LoadValueDirectFixed(CompilationUnit* cu, RegLocation rl_src, int r_dest);
+ void LoadValueDirectWide(CompilationUnit* cu, RegLocation rl_src, int reg_lo, int reg_hi);
+ void LoadValueDirectWideFixed(CompilationUnit* cu, RegLocation rl_src, int reg_lo, int reg_hi);
+ LIR* StoreWordDisp(CompilationUnit* cu, int rBase, int displacement, int r_src);
+ void StoreValue(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+ void StoreValueWide(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+
+ // Required for target - codegen helpers.
+ virtual bool SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
+ RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
+ virtual int LoadHelper(CompilationUnit* cu, int offset) = 0;
+ virtual LIR* LoadBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_dest,
+ OpSize size, int s_reg) = 0;
+ virtual LIR* LoadBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_dest_lo,
+ int r_dest_hi, int s_reg) = 0;
+ virtual LIR* LoadBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_dest, int scale,
+ OpSize size) = 0;
+ virtual LIR* LoadBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
+ int displacement, int r_dest, int r_dest_hi, OpSize size,
+ int s_reg) = 0;
+ virtual LIR* LoadConstantNoClobber(CompilationUnit* cu, int r_dest, int value) = 0;
+ virtual LIR* LoadConstantWide(CompilationUnit* cu, int r_dest_lo, int r_dest_hi,
+ int64_t value) = 0;
+ virtual LIR* StoreBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_src,
+ OpSize size) = 0;
+ virtual LIR* StoreBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_src_lo,
+ int r_src_hi) = 0;
+ virtual LIR* StoreBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_src, int scale,
+ OpSize size) = 0;
+ virtual LIR* StoreBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
+ int displacement, int r_src, int r_src_hi, OpSize size,
+ int s_reg) = 0;
+ virtual void MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg) = 0;
+
+ // Required for target - register utilities.
+ virtual bool IsFpReg(int reg) = 0;
+ virtual bool SameRegType(int reg1, int reg2) = 0;
+ virtual int AllocTypedTemp(CompilationUnit* cu, bool fp_hint, int reg_class) = 0;
+ virtual int AllocTypedTempPair(CompilationUnit* cu, bool fp_hint, int reg_class) = 0;
+ virtual int S2d(int low_reg, int high_reg) = 0;
+ virtual int TargetReg(SpecialTargetRegister reg) = 0;
+ virtual RegisterInfo* GetRegInfo(CompilationUnit* cu, int reg) = 0;
+ virtual RegLocation GetReturnAlt(CompilationUnit* cu) = 0;
+ virtual RegLocation GetReturnWideAlt(CompilationUnit* cu) = 0;
+ virtual RegLocation LocCReturn() = 0;
+ virtual RegLocation LocCReturnDouble() = 0;
+ virtual RegLocation LocCReturnFloat() = 0;
+ virtual RegLocation LocCReturnWide() = 0;
+ virtual uint32_t FpRegMask() = 0;
+ virtual uint64_t GetRegMaskCommon(CompilationUnit* cu, int reg) = 0;
+ virtual void AdjustSpillMask(CompilationUnit* cu) = 0;
+ virtual void ClobberCalleeSave(CompilationUnit *cu) = 0;
+ virtual void FlushReg(CompilationUnit* cu, int reg) = 0;
+ virtual void FlushRegWide(CompilationUnit* cu, int reg1, int reg2) = 0;
+ virtual void FreeCallTemps(CompilationUnit* cu) = 0;
+ virtual void FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep, RegLocation rl_free) = 0;
+ virtual void LockCallTemps(CompilationUnit* cu) = 0;
+ virtual void MarkPreservedSingle(CompilationUnit* cu, int v_reg, int reg) = 0;
+ virtual void CompilerInitializeRegAlloc(CompilationUnit* cu) = 0;
+
+ // Required for target - miscellaneous.
+ virtual AssemblerStatus AssembleInstructions(CompilationUnit* cu, uintptr_t start_addr) = 0;
+ virtual void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix) = 0;
+ virtual void SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir) = 0;
+ virtual const char* GetTargetInstFmt(int opcode) = 0;
+ virtual const char* GetTargetInstName(int opcode) = 0;
+ virtual std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) = 0;
+ virtual uint64_t GetPCUseDefEncoding() = 0;
+ virtual uint64_t GetTargetInstFlags(int opcode) = 0;
+ virtual int GetInsnSize(LIR* lir) = 0;
+ virtual bool IsUnconditionalBranch(LIR* lir) = 0;
+
+ // Required for target - Dalvik-level generators.
+ virtual void GenArithImmOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2) = 0;
+ virtual void GenMulLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) = 0;
+ virtual void GenAddLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) = 0;
+ virtual void GenAndLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) = 0;
+ virtual void GenArithOpDouble(CompilationUnit* cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) = 0;
+ virtual void GenArithOpFloat(CompilationUnit *cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2) = 0;
+ virtual void GenCmpFP(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2) = 0;
+ virtual void GenConversion(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src) = 0;
+ virtual bool GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier) = 0;
+ virtual bool GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min) = 0;
+ virtual bool GenInlinedSqrt(CompilationUnit* cu, CallInfo* info) = 0;
+ virtual void GenNegLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src) = 0;
+ virtual void GenOrLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) = 0;
+ virtual void GenSubLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) = 0;
+ virtual void GenXorLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) = 0;
+ virtual LIR* GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code, int reg1, int base,
+ int offset, ThrowKind kind) = 0;
+ virtual RegLocation GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int reg_hi,
+ bool is_div) = 0;
+ virtual RegLocation GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int lit,
+ bool is_div) = 0;
+ virtual void GenCmpLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) = 0;
+ virtual void GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi) = 0;
+ virtual void GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs,
+ RegLocation rl_method) = 0;
+ virtual void GenExitSequence(CompilationUnit* cu) = 0;
+ virtual void GenFillArrayData(CompilationUnit* cu, uint32_t table_offset,
+ RegLocation rl_src) = 0;
+ virtual void GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir, bool gt_bias,
+ bool is_double) = 0;
+ virtual void GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir) = 0;
+ virtual void GenSelect(CompilationUnit* cu, BasicBlock* bb, MIR* mir) = 0;
+ virtual void GenMemBarrier(CompilationUnit* cu, MemBarrierKind barrier_kind) = 0;
+ virtual void GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src) = 0;
+ virtual void GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src) = 0;
+ virtual void GenMoveException(CompilationUnit* cu, RegLocation rl_dest) = 0;
+ virtual void GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src,
+ RegLocation rl_result, int lit, int first_bit,
+ int second_bit) = 0;
+ virtual void GenNegDouble(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src) = 0;
+ virtual void GenNegFloat(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src) = 0;
+ virtual void GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset,
+ RegLocation rl_src) = 0;
+ virtual void GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset,
+ RegLocation rl_src) = 0;
+ virtual void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+ SpecialCaseHandler special_case) = 0;
+ virtual void GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_src, int scale) = 0;
+ virtual void GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_dest, int scale) = 0;
+ virtual void GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_src, int scale) = 0;
+ virtual void GenShiftImmOpLong(CompilationUnit* cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_shift) = 0;
+
+ // Required for target - single operation generators.
+ virtual LIR* OpUnconditionalBranch(CompilationUnit* cu, LIR* target) = 0;
+ virtual LIR* OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1, int src2,
+ LIR* target) = 0;
+ virtual LIR* OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg, int check_value,
+ LIR* target) = 0;
+ virtual LIR* OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target) = 0;
+ virtual LIR* OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg,
+ LIR* target) = 0;
+ virtual LIR* OpFpRegCopy(CompilationUnit* cu, int r_dest, int r_src) = 0;
+ virtual LIR* OpIT(CompilationUnit* cu, ConditionCode cond, const char* guide) = 0;
+ virtual LIR* OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp) = 0;
+ virtual LIR* OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target) = 0;
+ virtual LIR* OpReg(CompilationUnit* cu, OpKind op, int r_dest_src) = 0;
+ virtual LIR* OpRegCopy(CompilationUnit* cu, int r_dest, int r_src) = 0;
+ virtual LIR* OpRegCopyNoInsert(CompilationUnit* cu, int r_dest, int r_src) = 0;
+ virtual LIR* OpRegImm(CompilationUnit* cu, OpKind op, int r_dest_src1, int value) = 0;
+ virtual LIR* OpRegMem(CompilationUnit* cu, OpKind op, int r_dest, int rBase, int offset) = 0;
+ virtual LIR* OpRegReg(CompilationUnit* cu, OpKind op, int r_dest_src1, int r_src2) = 0;
+ virtual LIR* OpRegRegImm(CompilationUnit* cu, OpKind op, int r_dest, int r_src1, int value) = 0;
+ virtual LIR* OpRegRegReg(CompilationUnit* cu, OpKind op, int r_dest, int r_src1,
+ int r_src2) = 0;
+ virtual LIR* OpTestSuspend(CompilationUnit* cu, LIR* target) = 0;
+ virtual LIR* OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset) = 0;
+ virtual LIR* OpVldm(CompilationUnit* cu, int rBase, int count) = 0;
+ virtual LIR* OpVstm(CompilationUnit* cu, int rBase, int count) = 0;
+ virtual void OpLea(CompilationUnit* cu, int rBase, int reg1, int reg2, int scale,
+ int offset) = 0;
+ virtual void OpRegCopyWide(CompilationUnit* cu, int dest_lo, int dest_hi, int src_lo,
+ int src_hi) = 0;
+ virtual void OpTlsCmp(CompilationUnit* cu, int offset, int val) = 0;
+ virtual bool InexpensiveConstantInt(int32_t value) = 0;
+ virtual bool InexpensiveConstantFloat(int32_t value) = 0;
+ virtual bool InexpensiveConstantLong(int64_t value) = 0;
+ virtual bool InexpensiveConstantDouble(int64_t value) = 0;
+
+ // Temp workaround
+ void Workaround7250540(CompilationUnit* cu, RegLocation rl_dest, int value);
+ }; // Class Codegen
+
+} // namespace art
+
+#endif // ART_SRC_COMPILER_DEX_QUICK_CODEGEN_H_
diff --git a/src/compiler/dex/quick/codegen_util.cc b/src/compiler/dex/quick/codegen_util.cc
new file mode 100644
index 0000000..06221f8
--- /dev/null
+++ b/src/compiler/dex/quick/codegen_util.cc
@@ -0,0 +1,1143 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compiler/dex/compiler_internals.h"
+#include "gc_map.h"
+#include "verifier/dex_gc_map.h"
+#include "verifier/method_verifier.h"
+#include "ralloc_util.h"
+#include "codegen_util.h"
+
+namespace art {
+
+bool IsInexpensiveConstant(CompilationUnit* cu, RegLocation rl_src)
+{
+ bool res = false;
+ if (rl_src.is_const) {
+ if (rl_src.wide) {
+ if (rl_src.fp) {
+ res = cu->cg->InexpensiveConstantDouble(ConstantValueWide(cu, rl_src));
+ } else {
+ res = cu->cg->InexpensiveConstantLong(ConstantValueWide(cu, rl_src));
+ }
+ } else {
+ if (rl_src.fp) {
+ res = cu->cg->InexpensiveConstantFloat(ConstantValue(cu, rl_src));
+ } else {
+ res = cu->cg->InexpensiveConstantInt(ConstantValue(cu, rl_src));
+ }
+ }
+ }
+ return res;
+}
+
+void MarkSafepointPC(CompilationUnit* cu, LIR* inst)
+{
+ inst->def_mask = ENCODE_ALL;
+ LIR* safepoint_pc = NewLIR0(cu, kPseudoSafepointPC);
+ DCHECK_EQ(safepoint_pc->def_mask, ENCODE_ALL);
+}
+
+bool FastInstance(CompilationUnit* cu, uint32_t field_idx,
+ int& field_offset, bool& is_volatile, bool is_put)
+{
+ OatCompilationUnit m_unit(cu->class_loader, cu->class_linker,
+ *cu->dex_file, cu->code_item,
+ cu->class_def_idx, cu->method_idx,
+ cu->access_flags);
+ return cu->compiler->ComputeInstanceFieldInfo(field_idx, &m_unit,
+ field_offset, is_volatile, is_put);
+}
+
+/* Convert an instruction to a NOP */
+void NopLIR(LIR* lir)
+{
+ lir->flags.is_nop = true;
+}
+
+void SetMemRefType(CompilationUnit* cu, LIR* lir, bool is_load, int mem_type)
+{
+ uint64_t *mask_ptr;
+ uint64_t mask = ENCODE_MEM;
+ Codegen* cg = cu->cg.get();
+ DCHECK(cg->GetTargetInstFlags(lir->opcode) & (IS_LOAD | IS_STORE));
+ if (is_load) {
+ mask_ptr = &lir->use_mask;
+ } else {
+ mask_ptr = &lir->def_mask;
+ }
+ /* Clear out the memref flags */
+ *mask_ptr &= ~mask;
+ /* ..and then add back the one we need */
+ switch (mem_type) {
+ case kLiteral:
+ DCHECK(is_load);
+ *mask_ptr |= ENCODE_LITERAL;
+ break;
+ case kDalvikReg:
+ *mask_ptr |= ENCODE_DALVIK_REG;
+ break;
+ case kHeapRef:
+ *mask_ptr |= ENCODE_HEAP_REF;
+ break;
+ case kMustNotAlias:
+ /* Currently only loads can be marked as kMustNotAlias */
+ DCHECK(!(cg->GetTargetInstFlags(lir->opcode) & IS_STORE));
+ *mask_ptr |= ENCODE_MUST_NOT_ALIAS;
+ break;
+ default:
+ LOG(FATAL) << "Oat: invalid memref kind - " << mem_type;
+ }
+}
+
+/*
+ * Mark load/store instructions that access Dalvik registers through the stack.
+ */
+void AnnotateDalvikRegAccess(CompilationUnit* cu, LIR* lir, int reg_id, bool is_load, bool is64bit)
+{
+ SetMemRefType(cu, lir, is_load, kDalvikReg);
+
+ /*
+ * Store the Dalvik register id in alias_info. Mark the MSB if it is a 64-bit
+ * access.
+ */
+ lir->alias_info = ENCODE_ALIAS_INFO(reg_id, is64bit);
+}
+
+/*
+ * Mark the corresponding bit(s).
+ */
+void SetupRegMask(CompilationUnit* cu, uint64_t* mask, int reg)
+{
+ Codegen* cg = cu->cg.get();
+ *mask |= cg->GetRegMaskCommon(cu, reg);
+}
+
+/*
+ * Set up the proper fields in the resource mask
+ */
+void SetupResourceMasks(CompilationUnit* cu, LIR* lir)
+{
+ int opcode = lir->opcode;
+ Codegen* cg = cu->cg.get();
+
+ if (opcode <= 0) {
+ lir->use_mask = lir->def_mask = 0;
+ return;
+ }
+
+ uint64_t flags = cg->GetTargetInstFlags(opcode);
+
+ if (flags & NEEDS_FIXUP) {
+ lir->flags.pcRelFixup = true;
+ }
+
+ /* Get the starting size of the instruction's template */
+ lir->flags.size = cg->GetInsnSize(lir);
+
+ /* Set up the mask for resources that are updated */
+ if (flags & (IS_LOAD | IS_STORE)) {
+ /* Default to heap - will catch specialized classes later */
+ SetMemRefType(cu, lir, flags & IS_LOAD, kHeapRef);
+ }
+
+ /*
+ * Conservatively assume the branch here will call out to a function that in
+ * turn will trash everything.
+ */
+ if (flags & IS_BRANCH) {
+ lir->def_mask = lir->use_mask = ENCODE_ALL;
+ return;
+ }
+
+ if (flags & REG_DEF0) {
+ SetupRegMask(cu, &lir->def_mask, lir->operands[0]);
+ }
+
+ if (flags & REG_DEF1) {
+ SetupRegMask(cu, &lir->def_mask, lir->operands[1]);
+ }
+
+ if (flags & SETS_CCODES) {
+ lir->def_mask |= ENCODE_CCODE;
+ }
+
+ if (flags & (REG_USE0 | REG_USE1 | REG_USE2 | REG_USE3)) {
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ if (flags & (1 << (kRegUse0 + i))) {
+ SetupRegMask(cu, &lir->use_mask, lir->operands[i]);
+ }
+ }
+ }
+
+ if (flags & USES_CCODES) {
+ lir->use_mask |= ENCODE_CCODE;
+ }
+
+ // Handle target-specific actions
+ cg->SetupTargetResourceMasks(cu, lir);
+}
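+
+/*
+ * Illustrative walk-through (hypothetical instruction): if GetTargetInstFlags
+ * reports (IS_LOAD | REG_DEF0 | REG_USE1 | SETS_CCODES), the code above leaves
+ *   use_mask = bits for operands[1]'s register | ENCODE_HEAP_REF
+ *   def_mask = bits for operands[0]'s register | ENCODE_CCODE
+ * whereas anything flagged IS_BRANCH short-circuits to
+ * use_mask = def_mask = ENCODE_ALL before the per-operand handling runs.
+ */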
+
+/*
+ * Debugging macros
+ */
+#define DUMP_RESOURCE_MASK(X)
+
+/* Pretty-print a LIR instruction */
+void DumpLIRInsn(CompilationUnit* cu, LIR* lir, unsigned char* base_addr)
+{
+ int offset = lir->offset;
+ int dest = lir->operands[0];
+ const bool dump_nop = (cu->enable_debug & (1 << kDebugShowNops));
+ Codegen* cg = cu->cg.get();
+
+ /* Handle pseudo-ops individually, and all regular insns as a group */
+ switch (lir->opcode) {
+ case kPseudoMethodEntry:
+ LOG(INFO) << "-------- method entry "
+ << PrettyMethod(cu->method_idx, *cu->dex_file);
+ break;
+ case kPseudoMethodExit:
+ LOG(INFO) << "-------- Method_Exit";
+ break;
+ case kPseudoBarrier:
+ LOG(INFO) << "-------- BARRIER";
+ break;
+ case kPseudoEntryBlock:
+ LOG(INFO) << "-------- entry offset: 0x" << std::hex << dest;
+ break;
+ case kPseudoDalvikByteCodeBoundary:
+ if (lir->operands[0] == 0) {
+ lir->operands[0] = reinterpret_cast<uintptr_t>("No instruction string");
+ }
+ LOG(INFO) << "-------- dalvik offset: 0x" << std::hex
+ << lir->dalvik_offset << " @ " << reinterpret_cast<char*>(lir->operands[0]);
+ break;
+ case kPseudoExitBlock:
+ LOG(INFO) << "-------- exit offset: 0x" << std::hex << dest;
+ break;
+ case kPseudoPseudoAlign4:
+ LOG(INFO) << reinterpret_cast<uintptr_t>(base_addr) + offset << " (0x" << std::hex
+ << offset << "): .align4";
+ break;
+ case kPseudoEHBlockLabel:
+ LOG(INFO) << "Exception_Handling:";
+ break;
+ case kPseudoTargetLabel:
+ case kPseudoNormalBlockLabel:
+ LOG(INFO) << "L" << reinterpret_cast<void*>(lir) << ":";
+ break;
+ case kPseudoThrowTarget:
+ LOG(INFO) << "LT" << reinterpret_cast<void*>(lir) << ":";
+ break;
+ case kPseudoIntrinsicRetry:
+ LOG(INFO) << "IR" << reinterpret_cast<void*>(lir) << ":";
+ break;
+ case kPseudoSuspendTarget:
+ LOG(INFO) << "LS" << reinterpret_cast<void*>(lir) << ":";
+ break;
+ case kPseudoSafepointPC:
+ LOG(INFO) << "LsafepointPC_0x" << std::hex << lir->offset << "_" << lir->dalvik_offset << ":";
+ break;
+ case kPseudoExportedPC:
+ LOG(INFO) << "LexportedPC_0x" << std::hex << lir->offset << "_" << lir->dalvik_offset << ":";
+ break;
+ case kPseudoCaseLabel:
+ LOG(INFO) << "LC" << reinterpret_cast<void*>(lir) << ": Case target 0x"
+ << std::hex << lir->operands[0] << "|" << std::dec <<
+ lir->operands[0];
+ break;
+ default:
+ if (lir->flags.is_nop && !dump_nop) {
+ break;
+ } else {
+ std::string op_name(cg->BuildInsnString(cg->GetTargetInstName(lir->opcode),
+ lir, base_addr));
+ std::string op_operands(cg->BuildInsnString(cg->GetTargetInstFmt(lir->opcode),
+ lir, base_addr));
+ LOG(INFO) << StringPrintf("%05x: %-9s%s%s",
+ reinterpret_cast<unsigned int>(base_addr + offset),
+ op_name.c_str(), op_operands.c_str(),
+ lir->flags.is_nop ? "(nop)" : "");
+ }
+ break;
+ }
+
+ if (lir->use_mask && (!lir->flags.is_nop || dump_nop)) {
+ DUMP_RESOURCE_MASK(DumpResourceMask(lir, lir->use_mask, "use"));
+ }
+ if (lir->def_mask && (!lir->flags.is_nop || dump_nop)) {
+ DUMP_RESOURCE_MASK(DumpResourceMask(lir, lir->def_mask, "def"));
+ }
+}
+
+void DumpPromotionMap(CompilationUnit *cu)
+{
+ Codegen* cg = cu->cg.get();
+ int num_regs = cu->num_dalvik_registers + cu->num_compiler_temps + 1;
+ for (int i = 0; i < num_regs; i++) {
+ PromotionMap v_reg_map = cu->promotion_map[i];
+ std::string buf;
+ if (v_reg_map.fp_location == kLocPhysReg) {
+ StringAppendF(&buf, " : s%d", v_reg_map.FpReg & cg->FpRegMask());
+ }
+
+ std::string buf3;
+ if (i < cu->num_dalvik_registers) {
+ StringAppendF(&buf3, "%02d", i);
+ } else if (i == cu->method_sreg) {
+ buf3 = "Method*";
+ } else {
+ StringAppendF(&buf3, "ct%d", i - cu->num_dalvik_registers);
+ }
+
+ LOG(INFO) << StringPrintf("V[%s] -> %s%d%s", buf3.c_str(),
+ v_reg_map.core_location == kLocPhysReg ?
+ "r" : "SP+", v_reg_map.core_location == kLocPhysReg ?
+ v_reg_map.core_reg : SRegOffset(cu, i),
+ buf.c_str());
+ }
+}
+
+/* Dump a mapping table */
+static void DumpMappingTable(const char* table_name, const std::string& descriptor,
+ const std::string& name, const std::string& signature,
+ const std::vector<uint32_t>& v) {
+ if (v.size() > 0) {
+ std::string line(StringPrintf("\n %s %s%s_%s_table[%zu] = {", table_name,
+ descriptor.c_str(), name.c_str(), signature.c_str(), v.size()));
+ std::replace(line.begin(), line.end(), ';', '_');
+ LOG(INFO) << line;
+ for (uint32_t i = 0; i < v.size(); i+=2) {
+ line = StringPrintf(" {0x%05x, 0x%04x},", v[i], v[i+1]);
+ LOG(INFO) << line;
+ }
+ LOG(INFO) <<" };\n\n";
+ }
+}
+
+/* Dump instructions and constant pool contents */
+void CodegenDump(CompilationUnit* cu)
+{
+ LOG(INFO) << "Dumping LIR insns for "
+ << PrettyMethod(cu->method_idx, *cu->dex_file);
+ LIR* lir_insn;
+ int insns_size = cu->insns_size;
+
+ LOG(INFO) << "Regs (excluding ins) : " << cu->num_regs;
+ LOG(INFO) << "Ins : " << cu->num_ins;
+ LOG(INFO) << "Outs : " << cu->num_outs;
+ LOG(INFO) << "CoreSpills : " << cu->num_core_spills;
+ LOG(INFO) << "FPSpills : " << cu->num_fp_spills;
+ LOG(INFO) << "CompilerTemps : " << cu->num_compiler_temps;
+ LOG(INFO) << "Frame size : " << cu->frame_size;
+ LOG(INFO) << "code size is " << cu->total_size <<
+ " bytes, Dalvik size is " << insns_size * 2;
+ LOG(INFO) << "expansion factor: "
+ << static_cast<float>(cu->total_size) / static_cast<float>(insns_size * 2);
+ DumpPromotionMap(cu);
+ for (lir_insn = cu->first_lir_insn; lir_insn != NULL; lir_insn = lir_insn->next) {
+ DumpLIRInsn(cu, lir_insn, 0);
+ }
+ for (lir_insn = cu->literal_list; lir_insn != NULL; lir_insn = lir_insn->next) {
+ LOG(INFO) << StringPrintf("%x (%04x): .word (%#x)", lir_insn->offset, lir_insn->offset,
+ lir_insn->operands[0]);
+ }
+
+ const DexFile::MethodId& method_id =
+ cu->dex_file->GetMethodId(cu->method_idx);
+ std::string signature(cu->dex_file->GetMethodSignature(method_id));
+ std::string name(cu->dex_file->GetMethodName(method_id));
+ std::string descriptor(cu->dex_file->GetMethodDeclaringClassDescriptor(method_id));
+
+ // Dump mapping tables
+ DumpMappingTable("PC2Dex_MappingTable", descriptor, name, signature, cu->pc2dexMappingTable);
+ DumpMappingTable("Dex2PC_MappingTable", descriptor, name, signature, cu->dex2pcMappingTable);
+}
+
+
+LIR* RawLIR(CompilationUnit* cu, int dalvik_offset, int opcode, int op0,
+ int op1, int op2, int op3, int op4, LIR* target)
+{
+ LIR* insn = static_cast<LIR*>(NewMem(cu, sizeof(LIR), true, kAllocLIR));
+ insn->dalvik_offset = dalvik_offset;
+ insn->opcode = opcode;
+ insn->operands[0] = op0;
+ insn->operands[1] = op1;
+ insn->operands[2] = op2;
+ insn->operands[3] = op3;
+ insn->operands[4] = op4;
+ insn->target = target;
+ SetupResourceMasks(cu, insn);
+ if ((opcode == kPseudoTargetLabel) || (opcode == kPseudoSafepointPC) ||
+ (opcode == kPseudoExportedPC)) {
+ // Always make labels scheduling barriers
+ insn->use_mask = insn->def_mask = ENCODE_ALL;
+ }
+ return insn;
+}
+
+/*
+ * The following are building blocks to construct low-level IRs with 0 - 5
+ * operands.
+ */
+LIR* NewLIR0(CompilationUnit* cu, int opcode)
+{
+ Codegen* cg = cu->cg.get();
+ DCHECK(is_pseudo_opcode(opcode) || (cg->GetTargetInstFlags(opcode) & NO_OPERAND))
+ << cg->GetTargetInstName(opcode) << " " << opcode << " "
+ << PrettyMethod(cu->method_idx, *cu->dex_file) << " "
+ << cu->current_dalvik_offset;
+ LIR* insn = RawLIR(cu, cu->current_dalvik_offset, opcode);
+ AppendLIR(cu, insn);
+ return insn;
+}
+
+LIR* NewLIR1(CompilationUnit* cu, int opcode,
+ int dest)
+{
+ Codegen* cg = cu->cg.get();
+ DCHECK(is_pseudo_opcode(opcode) || (cg->GetTargetInstFlags(opcode) & IS_UNARY_OP))
+ << cg->GetTargetInstName(opcode) << " " << opcode << " "
+ << PrettyMethod(cu->method_idx, *cu->dex_file) << " "
+ << cu->current_dalvik_offset;
+ LIR* insn = RawLIR(cu, cu->current_dalvik_offset, opcode, dest);
+ AppendLIR(cu, insn);
+ return insn;
+}
+
+LIR* NewLIR2(CompilationUnit* cu, int opcode,
+ int dest, int src1)
+{
+ Codegen* cg = cu->cg.get();
+ DCHECK(is_pseudo_opcode(opcode) || (cg->GetTargetInstFlags(opcode) & IS_BINARY_OP))
+ << cg->GetTargetInstName(opcode) << " " << opcode << " "
+ << PrettyMethod(cu->method_idx, *cu->dex_file) << " "
+ << cu->current_dalvik_offset;
+ LIR* insn = RawLIR(cu, cu->current_dalvik_offset, opcode, dest, src1);
+ AppendLIR(cu, insn);
+ return insn;
+}
+
+LIR* NewLIR3(CompilationUnit* cu, int opcode,
+ int dest, int src1, int src2)
+{
+ Codegen* cg = cu->cg.get();
+ DCHECK(is_pseudo_opcode(opcode) || (cg->GetTargetInstFlags(opcode) & IS_TERTIARY_OP))
+ << cg->GetTargetInstName(opcode) << " " << opcode << " "
+ << PrettyMethod(cu->method_idx, *cu->dex_file) << " "
+ << cu->current_dalvik_offset;
+ LIR* insn = RawLIR(cu, cu->current_dalvik_offset, opcode, dest, src1, src2);
+ AppendLIR(cu, insn);
+ return insn;
+}
+
+LIR* NewLIR4(CompilationUnit* cu, int opcode,
+ int dest, int src1, int src2, int info)
+{
+ Codegen* cg = cu->cg.get();
+ DCHECK(is_pseudo_opcode(opcode) || (cg->GetTargetInstFlags(opcode) & IS_QUAD_OP))
+ << cg->GetTargetInstName(opcode) << " " << opcode << " "
+ << PrettyMethod(cu->method_idx, *cu->dex_file) << " "
+ << cu->current_dalvik_offset;
+ LIR* insn = RawLIR(cu, cu->current_dalvik_offset, opcode, dest, src1, src2, info);
+ AppendLIR(cu, insn);
+ return insn;
+}
+
+LIR* NewLIR5(CompilationUnit* cu, int opcode,
+ int dest, int src1, int src2, int info1, int info2)
+{
+ Codegen* cg = cu->cg.get();
+ DCHECK(is_pseudo_opcode(opcode) || (cg->GetTargetInstFlags(opcode) & IS_QUIN_OP))
+ << cg->GetTargetInstName(opcode) << " " << opcode << " "
+ << PrettyMethod(cu->method_idx, *cu->dex_file) << " "
+ << cu->current_dalvik_offset;
+ LIR* insn = RawLIR(cu, cu->current_dalvik_offset, opcode, dest, src1, src2, info1, info2);
+ AppendLIR(cu, insn);
+ return insn;
+}
+
+/*
+ * Search the existing constants in the literal pool for an exact or close match
+ * within a specified delta (greater than or equal to 0).
+ */
+LIR* ScanLiteralPool(LIR* data_target, int value, unsigned int delta)
+{
+ while (data_target) {
+ if ((static_cast<unsigned>(value - data_target->operands[0])) <= delta)
+ return data_target;
+ data_target = data_target->next;
+ }
+ return NULL;
+}
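+
+/*
+ * Illustrative example: with an existing literal 0x1000 in the pool,
+ *   ScanLiteralPool(pool, 0x1004, 8)   // matches: 0x1004 - 0x1000 = 4, within delta 8
+ *   ScanLiteralPool(pool, 0x0ffc, 8)   // no match: the unsigned difference wraps
+ * so only values at or above an existing literal (within delta) count as "close".
+ */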
+
+/* Search the existing constants in the literal pool for an exact wide match */
+LIR* ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi)
+{
+ bool lo_match = false;
+ LIR* lo_target = NULL;
+ while (data_target) {
+ if (lo_match && (data_target->operands[0] == val_hi)) {
+ // Record high word in case we need to expand this later.
+ lo_target->operands[1] = val_hi;
+ return lo_target;
+ }
+ lo_match = false;
+ if (data_target->operands[0] == val_lo) {
+ lo_match = true;
+ lo_target = data_target;
+ }
+ data_target = data_target->next;
+ }
+ return NULL;
+}
+
+/*
+ * The following are building blocks to insert constants into the pool or
+ * instruction streams.
+ */
+
+/* Add a 32-bit constant to the constant pool */
+LIR* AddWordData(CompilationUnit* cu, LIR* *constant_list_p, int value)
+{
+ /* Add the constant to the literal pool */
+ if (constant_list_p) {
+ LIR* new_value = static_cast<LIR*>(NewMem(cu, sizeof(LIR), true, kAllocData));
+ new_value->operands[0] = value;
+ new_value->next = *constant_list_p;
+ *constant_list_p = new_value;
+ return new_value;
+ }
+ return NULL;
+}
+
+/* Add a 64-bit constant to the constant pool or mixed with code */
+LIR* AddWideData(CompilationUnit* cu, LIR* *constant_list_p,
+ int val_lo, int val_hi)
+{
+ AddWordData(cu, constant_list_p, val_hi);
+ return AddWordData(cu, constant_list_p, val_lo);
+}
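+
+/*
+ * Note on ordering: AddWordData prepends to the list, so the calls above push
+ * val_hi first and val_lo second, leaving val_lo at the list head. Offsets are
+ * later handed out front-to-back (see AssignLiteralOffsetCommon) and the pool
+ * is emitted in the same order, so the low word lands at the lower offset with
+ * the high word immediately after it.
+ */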
+
+static void PushWord(std::vector<uint8_t>& buf, int data) {
+ buf.push_back(data & 0xff);
+ buf.push_back((data >> 8) & 0xff);
+ buf.push_back((data >> 16) & 0xff);
+ buf.push_back((data >> 24) & 0xff);
+}
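+
+/*
+ * Illustrative use (value is arbitrary):
+ *   PushWord(buf, 0x12345678);   // appends 0x78, 0x56, 0x34, 0x12
+ * i.e. 32-bit values are serialized little-endian regardless of the host.
+ */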
+
+static void AlignBuffer(std::vector<uint8_t>& buf, size_t offset) {
+ while (buf.size() < offset) {
+ buf.push_back(0);
+ }
+}
+
+/* Write the literal pool to the output stream */
+static void InstallLiteralPools(CompilationUnit* cu)
+{
+ AlignBuffer(cu->code_buffer, cu->data_offset);
+ LIR* data_lir = cu->literal_list;
+ while (data_lir != NULL) {
+ PushWord(cu->code_buffer, data_lir->operands[0]);
+ data_lir = NEXT_LIR(data_lir);
+ }
+ // Push code and method literals, record offsets for the compiler to patch.
+ data_lir = cu->code_literal_list;
+ while (data_lir != NULL) {
+ uint32_t target = data_lir->operands[0];
+ cu->compiler->AddCodePatch(cu->dex_file,
+ cu->method_idx,
+ cu->invoke_type,
+ target,
+ static_cast<InvokeType>(data_lir->operands[1]),
+ cu->code_buffer.size());
+ const DexFile::MethodId& id = cu->dex_file->GetMethodId(target);
+ // unique based on target to ensure code deduplication works
+ uint32_t unique_patch_value = reinterpret_cast<uint32_t>(&id);
+ PushWord(cu->code_buffer, unique_patch_value);
+ data_lir = NEXT_LIR(data_lir);
+ }
+ data_lir = cu->method_literal_list;
+ while (data_lir != NULL) {
+ uint32_t target = data_lir->operands[0];
+ cu->compiler->AddMethodPatch(cu->dex_file,
+ cu->method_idx,
+ cu->invoke_type,
+ target,
+ static_cast<InvokeType>(data_lir->operands[1]),
+ cu->code_buffer.size());
+ const DexFile::MethodId& id = cu->dex_file->GetMethodId(target);
+ // unique based on target to ensure code deduplication works
+ uint32_t unique_patch_value = reinterpret_cast<uint32_t>(&id);
+ PushWord(cu->code_buffer, unique_patch_value);
+ data_lir = NEXT_LIR(data_lir);
+ }
+}
+
+/* Write the switch tables to the output stream */
+static void InstallSwitchTables(CompilationUnit* cu)
+{
+ GrowableListIterator iterator;
+ GrowableListIteratorInit(&cu->switch_tables, &iterator);
+ while (true) {
+ SwitchTable* tab_rec = reinterpret_cast<SwitchTable*>(GrowableListIteratorNext(&iterator));
+ if (tab_rec == NULL) break;
+ AlignBuffer(cu->code_buffer, tab_rec->offset);
+ /*
+ * For Arm, our reference point is the address of the bx
+ * instruction that does the launch, so we have to subtract
+ * the auto pc-advance. For other targets the reference point
+ * is a label, so we can use the offset as-is.
+ */
+ int bx_offset = INVALID_OFFSET;
+ switch (cu->instruction_set) {
+ case kThumb2:
+ bx_offset = tab_rec->anchor->offset + 4;
+ break;
+ case kX86:
+ bx_offset = 0;
+ break;
+ case kMips:
+ bx_offset = tab_rec->anchor->offset;
+ break;
+ default: LOG(FATAL) << "Unexpected instruction set: " << cu->instruction_set;
+ }
+ if (cu->verbose) {
+ LOG(INFO) << "Switch table for offset 0x" << std::hex << bx_offset;
+ }
+ if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
+ const int* keys = reinterpret_cast<const int*>(&(tab_rec->table[2]));
+ for (int elems = 0; elems < tab_rec->table[1]; elems++) {
+ int disp = tab_rec->targets[elems]->offset - bx_offset;
+ if (cu->verbose) {
+ LOG(INFO) << " Case[" << elems << "] key: 0x"
+ << std::hex << keys[elems] << ", disp: 0x"
+ << std::hex << disp;
+ }
+ PushWord(cu->code_buffer, keys[elems]);
+ PushWord(cu->code_buffer,
+ tab_rec->targets[elems]->offset - bx_offset);
+ }
+ } else {
+ DCHECK_EQ(static_cast<int>(tab_rec->table[0]),
+ static_cast<int>(Instruction::kPackedSwitchSignature));
+ for (int elems = 0; elems < tab_rec->table[1]; elems++) {
+ int disp = tab_rec->targets[elems]->offset - bx_offset;
+ if (cu->verbose) {
+ LOG(INFO) << " Case[" << elems << "] disp: 0x"
+ << std::hex << disp;
+ }
+ PushWord(cu->code_buffer, tab_rec->targets[elems]->offset - bx_offset);
+ }
+ }
+ }
+}
+
+/* Write the fill array data to the output stream */
+static void InstallFillArrayData(CompilationUnit* cu)
+{
+ GrowableListIterator iterator;
+ GrowableListIteratorInit(&cu->fill_array_data, &iterator);
+ while (true) {
+ FillArrayData *tab_rec =
+ reinterpret_cast<FillArrayData*>(GrowableListIteratorNext(&iterator));
+ if (tab_rec == NULL) break;
+ AlignBuffer(cu->code_buffer, tab_rec->offset);
+ for (int i = 0; i < (tab_rec->size + 1) / 2; i++) {
+ cu->code_buffer.push_back(tab_rec->table[i] & 0xFF);
+ cu->code_buffer.push_back((tab_rec->table[i] >> 8) & 0xFF);
+ }
+ }
+}
+
+static int AssignLiteralOffsetCommon(LIR* lir, int offset)
+{
+ for (;lir != NULL; lir = lir->next) {
+ lir->offset = offset;
+ offset += 4;
+ }
+ return offset;
+}
+
+// Make sure we have a code address for every declared catch entry
+static bool VerifyCatchEntries(CompilationUnit* cu)
+{
+ bool success = true;
+ for (std::set<uint32_t>::const_iterator it = cu->catches.begin(); it != cu->catches.end(); ++it) {
+ uint32_t dex_pc = *it;
+ bool found = false;
+ for (size_t i = 0; i < cu->dex2pcMappingTable.size(); i += 2) {
+ if (dex_pc == cu->dex2pcMappingTable[i+1]) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ LOG(INFO) << "Missing native PC for catch entry @ 0x" << std::hex << dex_pc;
+ success = false;
+ }
+ }
+ // Now, try in the other direction
+ for (size_t i = 0; i < cu->dex2pcMappingTable.size(); i += 2) {
+ uint32_t dex_pc = cu->dex2pcMappingTable[i+1];
+ if (cu->catches.find(dex_pc) == cu->catches.end()) {
+ LOG(INFO) << "Unexpected catch entry @ dex pc 0x" << std::hex << dex_pc;
+ success = false;
+ }
+ }
+ if (!success) {
+ LOG(INFO) << "Bad dex2pcMapping table in " << PrettyMethod(cu->method_idx, *cu->dex_file);
+ LOG(INFO) << "Entries @ decode: " << cu->catches.size() << ", Entries in table: "
+ << cu->dex2pcMappingTable.size()/2;
+ }
+ return success;
+}
+
+static void CreateMappingTables(CompilationUnit* cu)
+{
+ for (LIR* tgt_lir = cu->first_lir_insn; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
+ if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
+ cu->pc2dexMappingTable.push_back(tgt_lir->offset);
+ cu->pc2dexMappingTable.push_back(tgt_lir->dalvik_offset);
+ }
+ if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) {
+ cu->dex2pcMappingTable.push_back(tgt_lir->offset);
+ cu->dex2pcMappingTable.push_back(tgt_lir->dalvik_offset);
+ }
+ }
+ DCHECK(VerifyCatchEntries(cu));
+ cu->combined_mapping_table.push_back(cu->pc2dexMappingTable.size() +
+ cu->dex2pcMappingTable.size());
+ cu->combined_mapping_table.push_back(cu->pc2dexMappingTable.size());
+ cu->combined_mapping_table.insert(cu->combined_mapping_table.end(),
+ cu->pc2dexMappingTable.begin(),
+ cu->pc2dexMappingTable.end());
+ cu->combined_mapping_table.insert(cu->combined_mapping_table.end(),
+ cu->dex2pcMappingTable.begin(),
+ cu->dex2pcMappingTable.end());
+}
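+
+/*
+ * Resulting layout of combined_mapping_table, for illustration:
+ *   [0]             total element count (pc2dex + dex2pc, two per pair)
+ *   [1]             element count of the pc2dex portion
+ *   [2] ...         pc2dex pairs: {native offset, dalvik offset}
+ *   [2 + [1]] ...   dex2pc pairs: {native offset, dalvik offset}
+ * so a consumer can recover the dex2pc portion as elements
+ * [2 + table[1]] through [2 + table[0] - 1].
+ */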
+
+class NativePcToReferenceMapBuilder {
+ public:
+ NativePcToReferenceMapBuilder(std::vector<uint8_t>* table,
+ size_t entries, uint32_t max_native_offset,
+ size_t references_width) : entries_(entries),
+ references_width_(references_width), in_use_(entries),
+ table_(table) {
+ // Compute width in bytes needed to hold max_native_offset.
+ native_offset_width_ = 0;
+ while (max_native_offset != 0) {
+ native_offset_width_++;
+ max_native_offset >>= 8;
+ }
+ // Resize table and set up header.
+ table->resize((EntryWidth() * entries) + sizeof(uint32_t));
+ CHECK_LT(native_offset_width_, 1U << 3);
+ (*table)[0] = native_offset_width_ & 7;
+ CHECK_LT(references_width_, 1U << 13);
+ (*table)[0] |= (references_width_ << 3) & 0xFF;
+ (*table)[1] = (references_width_ >> 5) & 0xFF;
+ CHECK_LT(entries, 1U << 16);
+ (*table)[2] = entries & 0xFF;
+ (*table)[3] = (entries >> 8) & 0xFF;
+ }
+
+ void AddEntry(uint32_t native_offset, const uint8_t* references) {
+ size_t table_index = TableIndex(native_offset);
+ while (in_use_[table_index]) {
+ table_index = (table_index + 1) % entries_;
+ }
+ in_use_[table_index] = true;
+ SetNativeOffset(table_index, native_offset);
+ DCHECK_EQ(native_offset, GetNativeOffset(table_index));
+ SetReferences(table_index, references);
+ }
+
+ private:
+ size_t TableIndex(uint32_t native_offset) {
+ return NativePcOffsetToReferenceMap::Hash(native_offset) % entries_;
+ }
+
+ uint32_t GetNativeOffset(size_t table_index) {
+ uint32_t native_offset = 0;
+ size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
+ for (size_t i = 0; i < native_offset_width_; i++) {
+ native_offset |= (*table_)[table_offset + i] << (i * 8);
+ }
+ return native_offset;
+ }
+
+ void SetNativeOffset(size_t table_index, uint32_t native_offset) {
+ size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
+ for (size_t i = 0; i < native_offset_width_; i++) {
+ (*table_)[table_offset + i] = (native_offset >> (i * 8)) & 0xFF;
+ }
+ }
+
+ void SetReferences(size_t table_index, const uint8_t* references) {
+ size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
+ memcpy(&(*table_)[table_offset + native_offset_width_], references, references_width_);
+ }
+
+ size_t EntryWidth() const {
+ return native_offset_width_ + references_width_;
+ }
+
+ // Number of entries in the table.
+ const size_t entries_;
+ // Number of bytes used to encode the reference bitmap.
+ const size_t references_width_;
+ // Number of bytes used to encode a native offset.
+ size_t native_offset_width_;
+ // Entries that are in use.
+ std::vector<bool> in_use_;
+ // The table we're building.
+ std::vector<uint8_t>* const table_;
+};
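+
+/*
+ * Worked example of the header encoding above: with native_offset_width_ = 2,
+ * references_width_ = 5 and entries = 300, the four header bytes are
+ *   table[0] = 2 | ((5 << 3) & 0xFF)  = 0x2A
+ *   table[1] = (5 >> 5) & 0xFF        = 0x00
+ *   table[2] = 300 & 0xFF             = 0x2C
+ *   table[3] = (300 >> 8) & 0xFF      = 0x01
+ * and each entry then occupies EntryWidth() = 2 + 5 = 7 bytes after the header.
+ */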
+
+static void CreateNativeGcMap(CompilationUnit* cu) {
+ const std::vector<uint32_t>& mapping_table = cu->pc2dexMappingTable;
+ uint32_t max_native_offset = 0;
+ for (size_t i = 0; i < mapping_table.size(); i += 2) {
+ uint32_t native_offset = mapping_table[i + 0];
+ if (native_offset > max_native_offset) {
+ max_native_offset = native_offset;
+ }
+ }
+ Compiler::MethodReference method_ref(cu->dex_file, cu->method_idx);
+ const std::vector<uint8_t>* gc_map_raw = verifier::MethodVerifier::GetDexGcMap(method_ref);
+ verifier::DexPcToReferenceMap dex_gc_map(&(*gc_map_raw)[4], gc_map_raw->size() - 4);
+ // Compute native offset to references size.
+ NativePcToReferenceMapBuilder native_gc_map_builder(&cu->native_gc_map,
+ mapping_table.size() / 2, max_native_offset,
+ dex_gc_map.RegWidth());
+
+ for (size_t i = 0; i < mapping_table.size(); i += 2) {
+ uint32_t native_offset = mapping_table[i + 0];
+ uint32_t dex_pc = mapping_table[i + 1];
+ const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false);
+ CHECK(references != NULL) << "Missing ref for dex pc 0x" << std::hex << dex_pc;
+ native_gc_map_builder.AddEntry(native_offset, references);
+ }
+}
+
+/* Determine the offset of each literal field */
+static int AssignLiteralOffset(CompilationUnit* cu, int offset)
+{
+ offset = AssignLiteralOffsetCommon(cu->literal_list, offset);
+ offset = AssignLiteralOffsetCommon(cu->code_literal_list, offset);
+ offset = AssignLiteralOffsetCommon(cu->method_literal_list, offset);
+ return offset;
+}
+
+static int AssignSwitchTablesOffset(CompilationUnit* cu, int offset)
+{
+ GrowableListIterator iterator;
+ GrowableListIteratorInit(&cu->switch_tables, &iterator);
+ while (true) {
+ SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(GrowableListIteratorNext(&iterator));
+ if (tab_rec == NULL) break;
+ tab_rec->offset = offset;
+ if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
+ offset += tab_rec->table[1] * (sizeof(int) * 2);
+ } else {
+ DCHECK_EQ(static_cast<int>(tab_rec->table[0]),
+ static_cast<int>(Instruction::kPackedSwitchSignature));
+ offset += tab_rec->table[1] * sizeof(int);
+ }
+ }
+ return offset;
+}
+
+static int AssignFillArrayDataOffset(CompilationUnit* cu, int offset)
+{
+ GrowableListIterator iterator;
+ GrowableListIteratorInit(&cu->fill_array_data, &iterator);
+ while (true) {
+ FillArrayData *tab_rec =
+ reinterpret_cast<FillArrayData*>(GrowableListIteratorNext(&iterator));
+ if (tab_rec == NULL) break;
+ tab_rec->offset = offset;
+ offset += tab_rec->size;
+ // word align
+ offset = (offset + 3) & ~3;
+ }
+ return offset;
+}
+
+// LIR offset assignment.
+static int AssignInsnOffsets(CompilationUnit* cu)
+{
+ LIR* lir;
+ int offset = 0;
+
+ for (lir = cu->first_lir_insn; lir != NULL; lir = NEXT_LIR(lir)) {
+ lir->offset = offset;
+ if (lir->opcode >= 0) {
+ if (!lir->flags.is_nop) {
+ offset += lir->flags.size;
+ }
+ } else if (lir->opcode == kPseudoPseudoAlign4) {
+ if (offset & 0x2) {
+ offset += 2;
+ lir->operands[0] = 1;
+ } else {
+ lir->operands[0] = 0;
+ }
+ }
+ /* Pseudo opcodes don't consume space */
+ }
+
+ return offset;
+}
+
+/*
+ * Walk the compilation unit and assign offsets to instructions
+ * and literals and compute the total size of the compiled unit.
+ */
+static void AssignOffsets(CompilationUnit* cu)
+{
+ int offset = AssignInsnOffsets(cu);
+
+ /* Const values have to be word aligned */
+ offset = (offset + 3) & ~3;
+
+ /* Set up offsets for literals */
+ cu->data_offset = offset;
+
+ offset = AssignLiteralOffset(cu, offset);
+
+ offset = AssignSwitchTablesOffset(cu, offset);
+
+ offset = AssignFillArrayDataOffset(cu, offset);
+
+ cu->total_size = offset;
+}
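+
+/*
+ * Resulting image layout, for illustration: the instruction stream starts at
+ * offset 0, data_offset is its end rounded up to a word boundary, and the
+ * literal pool, switch tables and fill-array-data payloads follow in that
+ * order, with total_size marking the end of the whole compiled unit.
+ */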
+
+/*
+ * Go over each instruction in the list and calculate the offset from the top
+ * before sending them off to the assembler. If out-of-range branch distance is
+ * seen rearrange the instructions a bit to correct it.
+ */
+void AssembleLIR(CompilationUnit* cu)
+{
+ Codegen* cg = cu->cg.get();
+ AssignOffsets(cu);
+ /*
+ * Assemble here. Note that we generate code with optimistic assumptions
+ * and if found not to work, we'll have to redo the sequence and retry.
+ */
+
+ while (true) {
+ AssemblerStatus res = cg->AssembleInstructions(cu, 0);
+ if (res == kSuccess) {
+ break;
+ } else {
+ cu->assembler_retries++;
+ if (cu->assembler_retries > MAX_ASSEMBLER_RETRIES) {
+ CodegenDump(cu);
+ LOG(FATAL) << "Assembler error - too many retries";
+ }
+ // Redo offsets and try again
+ AssignOffsets(cu);
+ cu->code_buffer.clear();
+ }
+ }
+
+ // Install literals
+ InstallLiteralPools(cu);
+
+ // Install switch tables
+ InstallSwitchTables(cu);
+
+ // Install fill array data
+ InstallFillArrayData(cu);
+
+ // Create the mapping table and native offset to reference map.
+ CreateMappingTables(cu);
+
+ CreateNativeGcMap(cu);
+}
+
+/*
+ * Insert a kPseudoCaseLabel at the beginning of the Dalvik
+ * offset vaddr. This label will be used to fix up the case
+ * branch table during the assembly phase. Be sure to set
+ * all resource flags on this to prevent code motion across
+ * target boundaries. KeyVal is just there for debugging.
+ */
+static LIR* InsertCaseLabel(CompilationUnit* cu, int vaddr, int keyVal)
+{
+ SafeMap<unsigned int, LIR*>::iterator it;
+ it = cu->boundary_map.find(vaddr);
+ if (it == cu->boundary_map.end()) {
+ LOG(FATAL) << "Error: didn't find vaddr 0x" << std::hex << vaddr;
+ }
+ LIR* new_label = static_cast<LIR*>(NewMem(cu, sizeof(LIR), true, kAllocLIR));
+ new_label->dalvik_offset = vaddr;
+ new_label->opcode = kPseudoCaseLabel;
+ new_label->operands[0] = keyVal;
+ InsertLIRAfter(it->second, new_label);
+ return new_label;
+}
+
+static void MarkPackedCaseLabels(CompilationUnit* cu, SwitchTable *tab_rec)
+{
+ const uint16_t* table = tab_rec->table;
+ int base_vaddr = tab_rec->vaddr;
+ const int *targets = reinterpret_cast<const int*>(&table[4]);
+ int entries = table[1];
+ int low_key = s4FromSwitchData(&table[2]);
+ for (int i = 0; i < entries; i++) {
+ tab_rec->targets[i] = InsertCaseLabel(cu, base_vaddr + targets[i], i + low_key);
+ }
+}
+
+static void MarkSparseCaseLabels(CompilationUnit* cu, SwitchTable *tab_rec)
+{
+ const uint16_t* table = tab_rec->table;
+ int base_vaddr = tab_rec->vaddr;
+ int entries = table[1];
+ const int* keys = reinterpret_cast<const int*>(&table[2]);
+ const int* targets = &keys[entries];
+ for (int i = 0; i < entries; i++) {
+ tab_rec->targets[i] = InsertCaseLabel(cu, base_vaddr + targets[i], keys[i]);
+ }
+}
+
+void ProcessSwitchTables(CompilationUnit* cu)
+{
+ GrowableListIterator iterator;
+ GrowableListIteratorInit(&cu->switch_tables, &iterator);
+ while (true) {
+ SwitchTable *tab_rec =
+ reinterpret_cast<SwitchTable*>(GrowableListIteratorNext(&iterator));
+ if (tab_rec == NULL) break;
+ if (tab_rec->table[0] == Instruction::kPackedSwitchSignature) {
+ MarkPackedCaseLabels(cu, tab_rec);
+ } else if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
+ MarkSparseCaseLabels(cu, tab_rec);
+ } else {
+ LOG(FATAL) << "Invalid switch table";
+ }
+ }
+}
+
+void DumpSparseSwitchTable(const uint16_t* table)
+ /*
+ * Sparse switch data format:
+ * ushort ident = 0x0200 magic value
+ * ushort size number of entries in the table; > 0
+ * int keys[size] keys, sorted low-to-high; 32-bit aligned
+ * int targets[size] branch targets, relative to switch opcode
+ *
+ * Total size is (2+size*4) 16-bit code units.
+ */
+{
+ uint16_t ident = table[0];
+ int entries = table[1];
+ const int* keys = reinterpret_cast<const int*>(&table[2]);
+ const int* targets = &keys[entries];
+ LOG(INFO) << "Sparse switch table - ident:0x" << std::hex << ident
+ << ", entries: " << std::dec << entries;
+ for (int i = 0; i < entries; i++) {
+ LOG(INFO) << " Key[" << keys[i] << "] -> 0x" << std::hex << targets[i];
+ }
+}
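+
+/*
+ * Illustrative payload (little-endian target assumed): the uint16_t units
+ *   { 0x0200, 2,  10, 0, 20, 0,  0x30, 0, 0x40, 0 }
+ * describe a 2-entry sparse switch with keys {10, 20} (each 32-bit value is
+ * stored as two 16-bit halves, low half first) and relative targets
+ * {0x30, 0x40}, so the dump above prints Key[10] -> 0x30 and Key[20] -> 0x40.
+ */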
+
+void DumpPackedSwitchTable(const uint16_t* table)
+ /*
+ * Packed switch data format:
+ * ushort ident = 0x0100 magic value
+ * ushort size number of entries in the table
+ * int first_key first (and lowest) switch case value
+ * int targets[size] branch targets, relative to switch opcode
+ *
+ * Total size is (4+size*2) 16-bit code units.
+ */
+{
+ uint16_t ident = table[0];
+ const int* targets = reinterpret_cast<const int*>(&table[4]);
+ int entries = table[1];
+ int low_key = s4FromSwitchData(&table[2]);
+ LOG(INFO) << "Packed switch table - ident:0x" << std::hex << ident
+ << ", entries: " << std::dec << entries << ", low_key: " << low_key;
+ for (int i = 0; i < entries; i++) {
+ LOG(INFO) << " Key[" << (i + low_key) << "] -> 0x" << std::hex
+ << targets[i];
+ }
+}
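+
+/*
+ * Illustrative payload (little-endian target assumed): the uint16_t units
+ *   { 0x0100, 2,  100, 0,  0x10, 0, 0x20, 0 }
+ * describe a 2-entry packed switch with low_key = 100 and relative targets
+ * {0x10, 0x20}, so the dump above prints Key[100] -> 0x10 and Key[101] -> 0x20.
+ */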
+
+/*
+ * Set up special LIR to mark a Dalvik byte-code instruction start and
+ * record it in the boundary_map. NOTE: in cases such as kMirOpCheck in
+ * which we split a single Dalvik instruction, only the first MIR op
+ * associated with a Dalvik PC should be entered into the map.
+ */
+LIR* MarkBoundary(CompilationUnit* cu, int offset, const char* inst_str)
+{
+ LIR* res = NewLIR1(cu, kPseudoDalvikByteCodeBoundary, reinterpret_cast<uintptr_t>(inst_str));
+ if (cu->boundary_map.find(offset) == cu->boundary_map.end()) {
+ cu->boundary_map.Put(offset, res);
+ }
+ return res;
+}
+
+bool EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src2)
+{
+ bool is_taken;
+ switch (opcode) {
+ case Instruction::IF_EQ: is_taken = (src1 == src2); break;
+ case Instruction::IF_NE: is_taken = (src1 != src2); break;
+ case Instruction::IF_LT: is_taken = (src1 < src2); break;
+ case Instruction::IF_GE: is_taken = (src1 >= src2); break;
+ case Instruction::IF_GT: is_taken = (src1 > src2); break;
+ case Instruction::IF_LE: is_taken = (src1 <= src2); break;
+ case Instruction::IF_EQZ: is_taken = (src1 == 0); break;
+ case Instruction::IF_NEZ: is_taken = (src1 != 0); break;
+ case Instruction::IF_LTZ: is_taken = (src1 < 0); break;
+ case Instruction::IF_GEZ: is_taken = (src1 >= 0); break;
+ case Instruction::IF_GTZ: is_taken = (src1 > 0); break;
+ case Instruction::IF_LEZ: is_taken = (src1 <= 0); break;
+ default:
+ LOG(FATAL) << "Unexpected opcode " << opcode;
+ is_taken = false;
+ }
+ return is_taken;
+}
+
+// Convert relation of src1/src2 to src2/src1
+ConditionCode FlipComparisonOrder(ConditionCode before) {
+ ConditionCode res;
+ switch (before) {
+ case kCondEq: res = kCondEq; break;
+ case kCondNe: res = kCondNe; break;
+ case kCondLt: res = kCondGt; break;
+ case kCondGt: res = kCondLt; break;
+ case kCondLe: res = kCondGe; break;
+ case kCondGe: res = kCondLe; break;
+ default:
+ res = static_cast<ConditionCode>(0);
+ LOG(FATAL) << "Unexpected ccode " << before;
+ }
+ return res;
+}
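+
+/*
+ * Illustrative use: GenCompareAndBranch (gen_common.cc) swaps operands so that
+ * a constant, if present, ends up as src2. A test like "if (5 < x)" therefore
+ * becomes "if (x > 5)", which is why kCondLt maps to kCondGt here while the
+ * symmetric kCondEq and kCondNe are left unchanged.
+ */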
+
+} // namespace art
diff --git a/src/compiler/dex/quick/codegen_util.h b/src/compiler/dex/quick/codegen_util.h
new file mode 100644
index 0000000..7fd26f3
--- /dev/null
+++ b/src/compiler/dex/quick/codegen_util.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_QUICK_CODEGEN_UTIL_H_
+#define ART_SRC_COMPILER_DEX_QUICK_CODEGEN_UTIL_H_
+
+#include <stdint.h>
+
+#include "compiler/dex/compiler_enums.h"
+#include "compiler/dex/compiler_ir.h"
+
+namespace art {
+
+class CompilationUnit;
+struct LIR;
+
+void MarkSafepointPC(CompilationUnit* cu, LIR* inst);
+bool FastInstance(CompilationUnit* cu, uint32_t field_idx,
+ int& field_offset, bool& is_volatile, bool is_put);
+void SetupResourceMasks(CompilationUnit* cu, LIR* lir);
+inline int32_t s4FromSwitchData(const void* switch_data) {
+ return *reinterpret_cast<const int32_t*>(switch_data);
+}
+inline RegisterClass oat_reg_class_by_size(OpSize size) {
+ return (size == kUnsignedHalf || size == kSignedHalf || size == kUnsignedByte ||
+ size == kSignedByte) ? kCoreReg : kAnyReg;
+}
+void AssembleLIR(CompilationUnit* cu);
+void SetMemRefType(CompilationUnit* cu, LIR* lir, bool is_load, int mem_type);
+void AnnotateDalvikRegAccess(CompilationUnit* cu, LIR* lir, int reg_id, bool is_load, bool is64bit);
+uint64_t GetRegMaskCommon(CompilationUnit* cu, int reg);
+void SetupRegMask(CompilationUnit* cu, uint64_t* mask, int reg);
+void DumpLIRInsn(CompilationUnit* cu, LIR* arg, unsigned char* base_addr);
+void DumpPromotionMap(CompilationUnit *cu);
+void CodegenDump(CompilationUnit* cu);
+LIR* RawLIR(CompilationUnit* cu, int dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
+ int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = NULL);
+LIR* NewLIR0(CompilationUnit* cu, int opcode);
+LIR* NewLIR1(CompilationUnit* cu, int opcode, int dest);
+LIR* NewLIR2(CompilationUnit* cu, int opcode, int dest, int src1);
+LIR* NewLIR3(CompilationUnit* cu, int opcode, int dest, int src1, int src2);
+LIR* NewLIR4(CompilationUnit* cu, int opcode, int dest, int src1, int src2, int info);
+LIR* NewLIR5(CompilationUnit* cu, int opcode, int dest, int src1, int src2, int info1, int info2);
+LIR* ScanLiteralPool(LIR* data_target, int value, unsigned int delta);
+LIR* ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi);
+LIR* AddWordData(CompilationUnit* cu, LIR* *constant_list_p, int value);
+LIR* AddWideData(CompilationUnit* cu, LIR* *constant_list_p, int val_lo, int val_hi);
+void ProcessSwitchTables(CompilationUnit* cu);
+void DumpSparseSwitchTable(const uint16_t* table);
+void DumpPackedSwitchTable(const uint16_t* table);
+LIR* MarkBoundary(CompilationUnit* cu, int offset, const char* inst_str);
+void NopLIR(LIR* lir);
+bool EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src2);
+bool IsInexpensiveConstant(CompilationUnit* cu, RegLocation rl_src);
+ConditionCode FlipComparisonOrder(ConditionCode before);
+
+} // namespace art
+
+#endif // ART_SRC_COMPILER_DEX_QUICK_CODEGEN_UTIL_H_
diff --git a/src/compiler/dex/quick/gen_common.cc b/src/compiler/dex/quick/gen_common.cc
new file mode 100644
index 0000000..bc022fc
--- /dev/null
+++ b/src/compiler/dex/quick/gen_common.cc
@@ -0,0 +1,1734 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_util.h"
+#include "compiler/dex/compiler_ir.h"
+#include "oat/runtime/oat_support_entrypoints.h"
+#include "ralloc_util.h"
+
+namespace art {
+
+/*
+ * This source file contains "gen" codegen routines that should
+ * be applicable to most targets. Only mid-level support utilities
+ * and "op" calls may be used here.
+ */
+
+/*
+ * Generate a kPseudoBarrier marker to indicate the boundary of special
+ * blocks.
+ */
+void Codegen::GenBarrier(CompilationUnit* cu)
+{
+ LIR* barrier = NewLIR0(cu, kPseudoBarrier);
+ /* Mark all resources as being clobbered */
+ barrier->def_mask = -1;
+}
+
+// FIXME: need to do some work to split out targets with
+// condition codes and those without
+LIR* Codegen::GenCheck(CompilationUnit* cu, ConditionCode c_code, ThrowKind kind)
+{
+ DCHECK_NE(cu->instruction_set, kMips);
+ LIR* tgt = RawLIR(cu, 0, kPseudoThrowTarget, kind,
+ cu->current_dalvik_offset);
+ LIR* branch = OpCondBranch(cu, c_code, tgt);
+ // Remember branch target - will process later
+ InsertGrowableList(cu, &cu->throw_launchpads, reinterpret_cast<uintptr_t>(tgt));
+ return branch;
+}
+
+LIR* Codegen::GenImmedCheck(CompilationUnit* cu, ConditionCode c_code, int reg, int imm_val,
+ ThrowKind kind)
+{
+ LIR* tgt = RawLIR(cu, 0, kPseudoThrowTarget, kind,
+ cu->current_dalvik_offset, reg, imm_val);
+ LIR* branch;
+ if (c_code == kCondAl) {
+ branch = OpUnconditionalBranch(cu, tgt);
+ } else {
+ branch = OpCmpImmBranch(cu, c_code, reg, imm_val, tgt);
+ }
+ // Remember branch target - will process later
+ InsertGrowableList(cu, &cu->throw_launchpads, reinterpret_cast<uintptr_t>(tgt));
+ return branch;
+}
+
+/* Perform null-check on a register. */
+LIR* Codegen::GenNullCheck(CompilationUnit* cu, int s_reg, int m_reg, int opt_flags)
+{
+ if (!(cu->disable_opt & (1 << kNullCheckElimination)) &&
+ opt_flags & MIR_IGNORE_NULL_CHECK) {
+ return NULL;
+ }
+ return GenImmedCheck(cu, kCondEq, m_reg, 0, kThrowNullPointer);
+}
+
+/* Perform check on two registers */
+LIR* Codegen::GenRegRegCheck(CompilationUnit* cu, ConditionCode c_code, int reg1, int reg2,
+ ThrowKind kind)
+{
+ LIR* tgt = RawLIR(cu, 0, kPseudoThrowTarget, kind,
+ cu->current_dalvik_offset, reg1, reg2);
+ LIR* branch = OpCmpBranch(cu, c_code, reg1, reg2, tgt);
+ // Remember branch target - will process later
+ InsertGrowableList(cu, &cu->throw_launchpads, reinterpret_cast<uintptr_t>(tgt));
+ return branch;
+}
+
+void Codegen::GenCompareAndBranch(CompilationUnit* cu, Instruction::Code opcode,
+ RegLocation rl_src1, RegLocation rl_src2, LIR* taken,
+ LIR* fall_through)
+{
+ ConditionCode cond;
+ switch (opcode) {
+ case Instruction::IF_EQ:
+ cond = kCondEq;
+ break;
+ case Instruction::IF_NE:
+ cond = kCondNe;
+ break;
+ case Instruction::IF_LT:
+ cond = kCondLt;
+ break;
+ case Instruction::IF_GE:
+ cond = kCondGe;
+ break;
+ case Instruction::IF_GT:
+ cond = kCondGt;
+ break;
+ case Instruction::IF_LE:
+ cond = kCondLe;
+ break;
+ default:
+ cond = static_cast<ConditionCode>(0);
+ LOG(FATAL) << "Unexpected opcode " << opcode;
+ }
+
+ // Normalize such that if either operand is constant, src2 will be constant
+ if (rl_src1.is_const) {
+ RegLocation rl_temp = rl_src1;
+ rl_src1 = rl_src2;
+ rl_src2 = rl_temp;
+ cond = FlipComparisonOrder(cond);
+ }
+
+ rl_src1 = LoadValue(cu, rl_src1, kCoreReg);
+ // Is this really an immediate comparison?
+ if (rl_src2.is_const) {
+ // If it's already live in a register or not easily materialized, just keep going
+ RegLocation rl_temp = UpdateLoc(cu, rl_src2);
+ if ((rl_temp.location == kLocDalvikFrame) &&
+ InexpensiveConstantInt(ConstantValue(cu, rl_src2))) {
+ // OK - convert this to a compare immediate and branch
+ OpCmpImmBranch(cu, cond, rl_src1.low_reg, ConstantValue(cu, rl_src2), taken);
+ OpUnconditionalBranch(cu, fall_through);
+ return;
+ }
+ }
+ rl_src2 = LoadValue(cu, rl_src2, kCoreReg);
+ OpCmpBranch(cu, cond, rl_src1.low_reg, rl_src2.low_reg, taken);
+ OpUnconditionalBranch(cu, fall_through);
+}
+
+void Codegen::GenCompareZeroAndBranch(CompilationUnit* cu, Instruction::Code opcode,
+ RegLocation rl_src, LIR* taken, LIR* fall_through)
+{
+ ConditionCode cond;
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+ switch (opcode) {
+ case Instruction::IF_EQZ:
+ cond = kCondEq;
+ break;
+ case Instruction::IF_NEZ:
+ cond = kCondNe;
+ break;
+ case Instruction::IF_LTZ:
+ cond = kCondLt;
+ break;
+ case Instruction::IF_GEZ:
+ cond = kCondGe;
+ break;
+ case Instruction::IF_GTZ:
+ cond = kCondGt;
+ break;
+ case Instruction::IF_LEZ:
+ cond = kCondLe;
+ break;
+ default:
+ cond = static_cast<ConditionCode>(0);
+ LOG(FATAL) << "Unexpected opcode " << opcode;
+ }
+ OpCmpImmBranch(cu, cond, rl_src.low_reg, 0, taken);
+ OpUnconditionalBranch(cu, fall_through);
+}
+
+void Codegen::GenIntToLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+{
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ if (rl_src.location == kLocPhysReg) {
+ OpRegCopy(cu, rl_result.low_reg, rl_src.low_reg);
+ } else {
+ LoadValueDirect(cu, rl_src, rl_result.low_reg);
+ }
+ OpRegRegImm(cu, kOpAsr, rl_result.high_reg, rl_result.low_reg, 31);
+ StoreValueWide(cu, rl_dest, rl_result);
+}
+
+void Codegen::GenIntNarrowing(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src)
+{
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ OpKind op = kOpInvalid;
+ switch (opcode) {
+ case Instruction::INT_TO_BYTE:
+ op = kOp2Byte;
+ break;
+ case Instruction::INT_TO_SHORT:
+ op = kOp2Short;
+ break;
+ case Instruction::INT_TO_CHAR:
+ op = kOp2Char;
+ break;
+ default:
+ LOG(ERROR) << "Bad int conversion type";
+ }
+ OpRegReg(cu, op, rl_result.low_reg, rl_src.low_reg);
+ StoreValue(cu, rl_dest, rl_result);
+}
+
+/*
+ * Let helper function take care of everything. Will call
+ * Array::AllocFromCode(type_idx, method, count);
+ * Note: AllocFromCode will handle checks for errNegativeArraySize.
+ */
+void Codegen::GenNewArray(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest,
+ RegLocation rl_src)
+{
+ FlushAllRegs(cu); /* Everything to home location */
+ int func_offset;
+ if (cu->compiler->CanAccessTypeWithoutChecks(cu->method_idx,
+ *cu->dex_file,
+ type_idx)) {
+ func_offset = ENTRYPOINT_OFFSET(pAllocArrayFromCode);
+ } else {
+ func_offset = ENTRYPOINT_OFFSET(pAllocArrayFromCodeWithAccessCheck);
+ }
+ CallRuntimeHelperImmMethodRegLocation(cu, func_offset, type_idx, rl_src, true);
+ RegLocation rl_result = GetReturn(cu, false);
+ StoreValue(cu, rl_dest, rl_result);
+}
+
+/*
+ * Similar to GenNewArray, but with post-allocation initialization.
+ * Verifier guarantees we're dealing with an array class. Current
+ * code throws runtime exception "bad Filled array req" for 'D' and 'J'.
+ * Current code also throws internal unimp if not 'L', '[' or 'I'.
+ */
+void Codegen::GenFilledNewArray(CompilationUnit* cu, CallInfo* info)
+{
+ int elems = info->num_arg_words;
+ int type_idx = info->index;
+ FlushAllRegs(cu); /* Everything to home location */
+ int func_offset;
+ if (cu->compiler->CanAccessTypeWithoutChecks(cu->method_idx,
+ *cu->dex_file,
+ type_idx)) {
+ func_offset = ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCode);
+ } else {
+ func_offset = ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCodeWithAccessCheck);
+ }
+ CallRuntimeHelperImmMethodImm(cu, func_offset, type_idx, elems, true);
+ FreeTemp(cu, TargetReg(kArg2));
+ FreeTemp(cu, TargetReg(kArg1));
+ /*
+ * NOTE: the implicit target for Instruction::FILLED_NEW_ARRAY is the
+ * return region. Because AllocFromCode placed the new array
+ * in kRet0, we'll just lock it into place. When debugger support is
+ * added, it may be necessary to additionally copy all return
+ * values to a home location in thread-local storage
+ */
+ LockTemp(cu, TargetReg(kRet0));
+
+ // TODO: use the correct component size, currently all supported types
+ // share array alignment with ints (see comment at head of function)
+ size_t component_size = sizeof(int32_t);
+
+ // Having a range of 0 is legal
+ if (info->is_range && (elems > 0)) {
+ /*
+ * Bit of ugliness here. We're going to generate a mem copy loop
+ * on the register range, but it is possible that some regs
+ * in the range have been promoted. This is unlikely, but
+ * before generating the copy, we'll just force a flush
+ * of any regs in the source range that have been promoted to
+ * home location.
+ */
+ for (int i = 0; i < elems; i++) {
+ RegLocation loc = UpdateLoc(cu, info->args[i]);
+ if (loc.location == kLocPhysReg) {
+ StoreBaseDisp(cu, TargetReg(kSp), SRegOffset(cu, loc.s_reg_low),
+ loc.low_reg, kWord);
+ }
+ }
+ /*
+ * TUNING note: generated code here could be much improved, but
+ * this is an uncommon operation and isn't especially performance
+ * critical.
+ */
+ int r_src = AllocTemp(cu);
+ int r_dst = AllocTemp(cu);
+ int r_idx = AllocTemp(cu);
+ int r_val = INVALID_REG;
+ switch (cu->instruction_set) {
+ case kThumb2:
+ r_val = TargetReg(kLr);
+ break;
+ case kX86:
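+ // x86 releases kRet0 here so it can be reused as a temp during the copy;
+ // it is recomputed from r_dst after the loop ("Restore the target pointer" below).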
+ FreeTemp(cu, TargetReg(kRet0));
+ r_val = AllocTemp(cu);
+ break;
+ case kMips:
+ r_val = AllocTemp(cu);
+ break;
+ default: LOG(FATAL) << "Unexpected instruction set: " << cu->instruction_set;
+ }
+ // Set up source pointer
+ RegLocation rl_first = info->args[0];
+ OpRegRegImm(cu, kOpAdd, r_src, TargetReg(kSp),
+ SRegOffset(cu, rl_first.s_reg_low));
+ // Set up the target pointer
+ OpRegRegImm(cu, kOpAdd, r_dst, TargetReg(kRet0),
+ mirror::Array::DataOffset(component_size).Int32Value());
+ // Set up the loop counter (known to be > 0)
+ LoadConstant(cu, r_idx, elems - 1);
+ // Generate the copy loop. Going backwards for convenience
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+ // Copy next element
+ LoadBaseIndexed(cu, r_src, r_idx, r_val, 2, kWord);
+ StoreBaseIndexed(cu, r_dst, r_idx, r_val, 2, kWord);
+ FreeTemp(cu, r_val);
+ OpDecAndBranch(cu, kCondGe, r_idx, target);
+ if (cu->instruction_set == kX86) {
+ // Restore the target pointer
+ OpRegRegImm(cu, kOpAdd, TargetReg(kRet0), r_dst,
+ -mirror::Array::DataOffset(component_size).Int32Value());
+ }
+ } else if (!info->is_range) {
+ // TUNING: interleave
+ for (int i = 0; i < elems; i++) {
+ RegLocation rl_arg = LoadValue(cu, info->args[i], kCoreReg);
+ StoreBaseDisp(cu, TargetReg(kRet0),
+ mirror::Array::DataOffset(component_size).Int32Value() +
+ i * 4, rl_arg.low_reg, kWord);
+ // If the LoadValue caused a temp to be allocated, free it
+ if (IsTemp(cu, rl_arg.low_reg)) {
+ FreeTemp(cu, rl_arg.low_reg);
+ }
+ }
+ }
+ if (info->result.location != kLocInvalid) {
+ StoreValue(cu, info->result, GetReturn(cu, false /* not fp */));
+ }
+}
+
+void Codegen::GenSput(CompilationUnit* cu, uint32_t field_idx, RegLocation rl_src,
+ bool is_long_or_double, bool is_object)
+{
+ int field_offset;
+ int ssb_index;
+ bool is_volatile;
+ bool is_referrers_class;
+
+ OatCompilationUnit m_unit(cu->class_loader, cu->class_linker, *cu->dex_file, cu->code_item,
+ cu->class_def_idx, cu->method_idx, cu->access_flags);
+
+ bool fast_path =
+ cu->compiler->ComputeStaticFieldInfo(field_idx, &m_unit,
+ field_offset, ssb_index,
+ is_referrers_class, is_volatile,
+ true);
+ if (fast_path && !SLOW_FIELD_PATH) {
+ DCHECK_GE(field_offset, 0);
+ int rBase;
+ if (is_referrers_class) {
+ // Fast path, static storage base is this method's class
+ RegLocation rl_method = LoadCurrMethod(cu);
+ rBase = AllocTemp(cu);
+ LoadWordDisp(cu, rl_method.low_reg,
+ mirror::AbstractMethod::DeclaringClassOffset().Int32Value(), rBase);
+ if (IsTemp(cu, rl_method.low_reg)) {
+ FreeTemp(cu, rl_method.low_reg);
+ }
+ } else {
+ // Medium path, static storage base in a different class which
+ // requires checks that the other class is initialized.
+ DCHECK_GE(ssb_index, 0);
+ // May do runtime call so everything to home locations.
+ FlushAllRegs(cu);
+ // Using fixed register to sync with possible call to runtime
+ // support.
+ int r_method = TargetReg(kArg1);
+ LockTemp(cu, r_method);
+ LoadCurrMethodDirect(cu, r_method);
+ rBase = TargetReg(kArg0);
+ LockTemp(cu, rBase);
+ LoadWordDisp(cu, r_method,
+ mirror::AbstractMethod::DexCacheInitializedStaticStorageOffset().Int32Value(),
+ rBase);
+ LoadWordDisp(cu, rBase,
+ mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
+ sizeof(int32_t*) * ssb_index, rBase);
+ // rBase now points at appropriate static storage base (Class*)
+ // or NULL if not initialized. Check for NULL and call helper if NULL.
+ // TUNING: fast path should fall through
+ LIR* branch_over = OpCmpImmBranch(cu, kCondNe, rBase, 0, NULL);
+ LoadConstant(cu, TargetReg(kArg0), ssb_index);
+ CallRuntimeHelperImm(cu, ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true);
+ if (cu->instruction_set == kMips) {
+ // For Arm, kRet0 = kArg0 = rBase; for Mips, we need to copy
+ OpRegCopy(cu, rBase, TargetReg(kRet0));
+ }
+ LIR* skip_target = NewLIR0(cu, kPseudoTargetLabel);
+ branch_over->target = skip_target;
+ FreeTemp(cu, r_method);
+ }
+ // rBase now holds static storage base
+ if (is_long_or_double) {
+ rl_src = LoadValueWide(cu, rl_src, kAnyReg);
+ } else {
+ rl_src = LoadValue(cu, rl_src, kAnyReg);
+ }
+ if (is_volatile) {
+ GenMemBarrier(cu, kStoreStore);
+ }
+ if (is_long_or_double) {
+ StoreBaseDispWide(cu, rBase, field_offset, rl_src.low_reg,
+ rl_src.high_reg);
+ } else {
+ StoreWordDisp(cu, rBase, field_offset, rl_src.low_reg);
+ }
+ if (is_volatile) {
+ GenMemBarrier(cu, kStoreLoad);
+ }
+ if (is_object && !IsConstantNullRef(cu, rl_src)) {
+ MarkGCCard(cu, rl_src.low_reg, rBase);
+ }
+ FreeTemp(cu, rBase);
+ } else {
+ FlushAllRegs(cu); // Everything to home locations
+ int setter_offset = is_long_or_double ? ENTRYPOINT_OFFSET(pSet64Static) :
+ (is_object ? ENTRYPOINT_OFFSET(pSetObjStatic)
+ : ENTRYPOINT_OFFSET(pSet32Static));
+ CallRuntimeHelperImmRegLocation(cu, setter_offset, field_idx, rl_src, true);
+ }
+}
+
+void Codegen::GenSget(CompilationUnit* cu, uint32_t field_idx, RegLocation rl_dest,
+ bool is_long_or_double, bool is_object)
+{
+ int field_offset;
+ int ssb_index;
+ bool is_volatile;
+ bool is_referrers_class;
+
+ OatCompilationUnit m_unit(cu->class_loader, cu->class_linker,
+ *cu->dex_file, cu->code_item,
+ cu->class_def_idx, cu->method_idx,
+ cu->access_flags);
+
+ bool fast_path =
+ cu->compiler->ComputeStaticFieldInfo(field_idx, &m_unit,
+ field_offset, ssb_index,
+ is_referrers_class, is_volatile,
+ false);
+ if (fast_path && !SLOW_FIELD_PATH) {
+ DCHECK_GE(field_offset, 0);
+ int rBase;
+ if (is_referrers_class) {
+ // Fast path, static storage base is this method's class
+ RegLocation rl_method = LoadCurrMethod(cu);
+ rBase = AllocTemp(cu);
+ LoadWordDisp(cu, rl_method.low_reg,
+ mirror::AbstractMethod::DeclaringClassOffset().Int32Value(), rBase);
+ } else {
+ // Medium path, static storage base in a different class which
+ // requires checks that the other class is initialized
+ DCHECK_GE(ssb_index, 0);
+ // May do runtime call so everything to home locations.
+ FlushAllRegs(cu);
+ // Using fixed register to sync with possible call to runtime
+ // support
+ int r_method = TargetReg(kArg1);
+ LockTemp(cu, r_method);
+ LoadCurrMethodDirect(cu, r_method);
+ rBase = TargetReg(kArg0);
+ LockTemp(cu, rBase);
+ LoadWordDisp(cu, r_method,
+ mirror::AbstractMethod::DexCacheInitializedStaticStorageOffset().Int32Value(),
+ rBase);
+ LoadWordDisp(cu, rBase,
+ mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
+ sizeof(int32_t*) * ssb_index, rBase);
+ // rBase now points at appropriate static storage base (Class*)
+ // or NULL if not initialized. Check for NULL and call helper if NULL.
+ // TUNING: fast path should fall through
+ LIR* branch_over = OpCmpImmBranch(cu, kCondNe, rBase, 0, NULL);
+ CallRuntimeHelperImm(cu, ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true);
+ if (cu->instruction_set == kMips) {
+ // For Arm, kRet0 = kArg0 = rBase; for Mips, we need to copy
+ OpRegCopy(cu, rBase, TargetReg(kRet0));
+ }
+ LIR* skip_target = NewLIR0(cu, kPseudoTargetLabel);
+ branch_over->target = skip_target;
+ FreeTemp(cu, r_method);
+ }
+ // rBase now holds static storage base
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kAnyReg, true);
+ if (is_volatile) {
+ GenMemBarrier(cu, kLoadLoad);
+ }
+ if (is_long_or_double) {
+ LoadBaseDispWide(cu, rBase, field_offset, rl_result.low_reg,
+ rl_result.high_reg, INVALID_SREG);
+ } else {
+ LoadWordDisp(cu, rBase, field_offset, rl_result.low_reg);
+ }
+ FreeTemp(cu, rBase);
+ if (is_long_or_double) {
+ StoreValueWide(cu, rl_dest, rl_result);
+ } else {
+ StoreValue(cu, rl_dest, rl_result);
+ }
+ } else {
+ FlushAllRegs(cu); // Everything to home locations
+ int getterOffset = is_long_or_double ? ENTRYPOINT_OFFSET(pGet64Static) :
+ (is_object ? ENTRYPOINT_OFFSET(pGetObjStatic)
+ : ENTRYPOINT_OFFSET(pGet32Static));
+ CallRuntimeHelperImm(cu, getterOffset, field_idx, true);
+ if (is_long_or_double) {
+ RegLocation rl_result = GetReturnWide(cu, rl_dest.fp);
+ StoreValueWide(cu, rl_dest, rl_result);
+ } else {
+ RegLocation rl_result = GetReturn(cu, rl_dest.fp);
+ StoreValue(cu, rl_dest, rl_result);
+ }
+ }
+}
+
+
+// Debugging routine - if the invoke target is null, load the DebugMe entrypoint as the target
+void Codegen::GenShowTarget(CompilationUnit* cu)
+{
+ DCHECK_NE(cu->instruction_set, kX86) << "unimplemented GenShowTarget";
+ LIR* branch_over = OpCmpImmBranch(cu, kCondNe, TargetReg(kInvokeTgt), 0, NULL);
+ LoadWordDisp(cu, TargetReg(kSelf), ENTRYPOINT_OFFSET(pDebugMe), TargetReg(kInvokeTgt));
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+ branch_over->target = target;
+}
+
+void Codegen::HandleSuspendLaunchPads(CompilationUnit *cu)
+{
+ LIR** suspend_label = reinterpret_cast<LIR**>(cu->suspend_launchpads.elem_list);
+ int num_elems = cu->suspend_launchpads.num_used;
+ int helper_offset = ENTRYPOINT_OFFSET(pTestSuspendFromCode);
+ for (int i = 0; i < num_elems; i++) {
+ ResetRegPool(cu);
+ ResetDefTracking(cu);
+ LIR* lab = suspend_label[i];
+ LIR* resume_lab = reinterpret_cast<LIR*>(lab->operands[0]);
+ cu->current_dalvik_offset = lab->operands[1];
+ AppendLIR(cu, lab);
+ int r_tgt = CallHelperSetup(cu, helper_offset);
+ CallHelper(cu, r_tgt, helper_offset, true /* MarkSafepointPC */);
+ OpUnconditionalBranch(cu, resume_lab);
+ }
+}
+
+void Codegen::HandleIntrinsicLaunchPads(CompilationUnit *cu)
+{
+ LIR** intrinsic_label = reinterpret_cast<LIR**>(cu->intrinsic_launchpads.elem_list);
+ int num_elems = cu->intrinsic_launchpads.num_used;
+ for (int i = 0; i < num_elems; i++) {
+ ResetRegPool(cu);
+ ResetDefTracking(cu);
+ LIR* lab = intrinsic_label[i];
+ CallInfo* info = reinterpret_cast<CallInfo*>(lab->operands[0]);
+ cu->current_dalvik_offset = info->offset;
+ AppendLIR(cu, lab);
+ // NOTE: GenInvoke handles MarkSafepointPC
+ GenInvoke(cu, info);
+ LIR* resume_lab = reinterpret_cast<LIR*>(lab->operands[2]);
+ if (resume_lab != NULL) {
+ OpUnconditionalBranch(cu, resume_lab);
+ }
+ }
+}
+
+void Codegen::HandleThrowLaunchPads(CompilationUnit *cu)
+{
+ LIR** throw_label = reinterpret_cast<LIR**>(cu->throw_launchpads.elem_list);
+ int num_elems = cu->throw_launchpads.num_used;
+ for (int i = 0; i < num_elems; i++) {
+ ResetRegPool(cu);
+ ResetDefTracking(cu);
+ LIR* lab = throw_label[i];
+ cu->current_dalvik_offset = lab->operands[1];
+ AppendLIR(cu, lab);
+ int func_offset = 0;
+ int v1 = lab->operands[2];
+ int v2 = lab->operands[3];
+ bool target_x86 = (cu->instruction_set == kX86);
+ switch (lab->operands[0]) {
+ case kThrowNullPointer:
+ func_offset = ENTRYPOINT_OFFSET(pThrowNullPointerFromCode);
+ break;
+ case kThrowConstantArrayBounds: // v1 is length reg (for Arm/Mips), v2 constant index
+ // For Arm/Mips, v1 holds the length; on x86, v1 holds the array pointer and the
+ // length is reloaded from it. v2 is the constant index to report.
+ if (target_x86) {
+ OpRegMem(cu, kOpMov, TargetReg(kArg1), v1, mirror::Array::LengthOffset().Int32Value());
+ } else {
+ OpRegCopy(cu, TargetReg(kArg1), v1);
+ }
+ // Make sure the following LoadConstant doesn't mess with kArg1.
+ LockTemp(cu, TargetReg(kArg1));
+ LoadConstant(cu, TargetReg(kArg0), v2);
+ func_offset = ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode);
+ break;
+ case kThrowArrayBounds:
+ // Move v1 (array index) to kArg0 and v2 (array length) to kArg1
+ if (v2 != TargetReg(kArg0)) {
+ OpRegCopy(cu, TargetReg(kArg0), v1);
+ if (target_x86) {
+ // x86 leaves the array pointer in v2, so load the array length that the handler expects
+ OpRegMem(cu, kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
+ } else {
+ OpRegCopy(cu, TargetReg(kArg1), v2);
+ }
+ } else {
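+ // v2 currently occupies kArg0, so fill kArg1 first (swapping through kArg2
+ // when v1 sits in kArg1) so the copy into kArg0 doesn't clobber it.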
+ if (v1 == TargetReg(kArg1)) {
+ // Swap v1 and v2, using kArg2 as a temp
+ OpRegCopy(cu, TargetReg(kArg2), v1);
+ if (target_x86) {
+ // x86 leaves the array pointer in v2; load the array length that the handler expects
+ OpRegMem(cu, kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
+ } else {
+ OpRegCopy(cu, TargetReg(kArg1), v2);
+ }
+ OpRegCopy(cu, TargetReg(kArg0), TargetReg(kArg2));
+ } else {
+ if (target_x86) {
+ // x86 leaves the array pointer in v2; load the array length that the handler expects
+ OpRegMem(cu, kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
+ } else {
+ OpRegCopy(cu, TargetReg(kArg1), v2);
+ }
+ OpRegCopy(cu, TargetReg(kArg0), v1);
+ }
+ }
+ func_offset = ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode);
+ break;
+ case kThrowDivZero:
+ func_offset = ENTRYPOINT_OFFSET(pThrowDivZeroFromCode);
+ break;
+ case kThrowNoSuchMethod:
+ OpRegCopy(cu, TargetReg(kArg0), v1);
+ func_offset =
+ ENTRYPOINT_OFFSET(pThrowNoSuchMethodFromCode);
+ break;
+ case kThrowStackOverflow:
+ func_offset = ENTRYPOINT_OFFSET(pThrowStackOverflowFromCode);
+ // Restore stack alignment
+ if (target_x86) {
+ OpRegImm(cu, kOpAdd, TargetReg(kSp), cu->frame_size);
+ } else {
+ OpRegImm(cu, kOpAdd, TargetReg(kSp), (cu->num_core_spills + cu->num_fp_spills) * 4);
+ }
+ break;
+ default:
+ LOG(FATAL) << "Unexpected throw kind: " << lab->operands[0];
+ }
+ ClobberCalleeSave(cu);
+ int r_tgt = CallHelperSetup(cu, func_offset);
+ CallHelper(cu, r_tgt, func_offset, true /* MarkSafepointPC */);
+ }
+}
+
+void Codegen::GenIGet(CompilationUnit* cu, uint32_t field_idx, int opt_flags, OpSize size,
+ RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double,
+ bool is_object)
+{
+ int field_offset;
+ bool is_volatile;
+
+ bool fast_path = FastInstance(cu, field_idx, field_offset, is_volatile, false);
+
+ if (fast_path && !SLOW_FIELD_PATH) {
+ RegLocation rl_result;
+ RegisterClass reg_class = oat_reg_class_by_size(size);
+ DCHECK_GE(field_offset, 0);
+ rl_obj = LoadValue(cu, rl_obj, kCoreReg);
+ if (is_long_or_double) {
+ DCHECK(rl_dest.wide);
+ GenNullCheck(cu, rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
+ if (cu->instruction_set == kX86) {
+ rl_result = EvalLoc(cu, rl_dest, reg_class, true);
+ GenNullCheck(cu, rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
+ LoadBaseDispWide(cu, rl_obj.low_reg, field_offset, rl_result.low_reg,
+ rl_result.high_reg, rl_obj.s_reg_low);
+ if (is_volatile) {
+ GenMemBarrier(cu, kLoadLoad);
+ }
+ } else {
+ int reg_ptr = AllocTemp(cu);
+ OpRegRegImm(cu, kOpAdd, reg_ptr, rl_obj.low_reg, field_offset);
+ rl_result = EvalLoc(cu, rl_dest, reg_class, true);
+ LoadBaseDispWide(cu, reg_ptr, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
+ if (is_volatile) {
+ GenMemBarrier(cu, kLoadLoad);
+ }
+ FreeTemp(cu, reg_ptr);
+ }
+ StoreValueWide(cu, rl_dest, rl_result);
+ } else {
+ rl_result = EvalLoc(cu, rl_dest, reg_class, true);
+ GenNullCheck(cu, rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
+ LoadBaseDisp(cu, rl_obj.low_reg, field_offset, rl_result.low_reg,
+ kWord, rl_obj.s_reg_low);
+ if (is_volatile) {
+ GenMemBarrier(cu, kLoadLoad);
+ }
+ StoreValue(cu, rl_dest, rl_result);
+ }
+ } else {
+ int getterOffset = is_long_or_double ? ENTRYPOINT_OFFSET(pGet64Instance) :
+ (is_object ? ENTRYPOINT_OFFSET(pGetObjInstance)
+ : ENTRYPOINT_OFFSET(pGet32Instance));
+ CallRuntimeHelperImmRegLocation(cu, getterOffset, field_idx, rl_obj, true);
+ if (is_long_or_double) {
+ RegLocation rl_result = GetReturnWide(cu, rl_dest.fp);
+ StoreValueWide(cu, rl_dest, rl_result);
+ } else {
+ RegLocation rl_result = GetReturn(cu, rl_dest.fp);
+ StoreValue(cu, rl_dest, rl_result);
+ }
+ }
+}
+
+void Codegen::GenIPut(CompilationUnit* cu, uint32_t field_idx, int opt_flags, OpSize size,
+ RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double,
+ bool is_object)
+{
+ int field_offset;
+ bool is_volatile;
+
+ bool fast_path = FastInstance(cu, field_idx, field_offset, is_volatile,
+ true);
+ if (fast_path && !SLOW_FIELD_PATH) {
+ RegisterClass reg_class = oat_reg_class_by_size(size);
+ DCHECK_GE(field_offset, 0);
+ rl_obj = LoadValue(cu, rl_obj, kCoreReg);
+ if (is_long_or_double) {
+ int reg_ptr;
+ rl_src = LoadValueWide(cu, rl_src, kAnyReg);
+ GenNullCheck(cu, rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
+ reg_ptr = AllocTemp(cu);
+ OpRegRegImm(cu, kOpAdd, reg_ptr, rl_obj.low_reg, field_offset);
+ if (is_volatile) {
+ GenMemBarrier(cu, kStoreStore);
+ }
+ StoreBaseDispWide(cu, reg_ptr, 0, rl_src.low_reg, rl_src.high_reg);
+ if (is_volatile) {
+ GenMemBarrier(cu, kLoadLoad);
+ }
+ FreeTemp(cu, reg_ptr);
+ } else {
+ rl_src = LoadValue(cu, rl_src, reg_class);
+ GenNullCheck(cu, rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
+ if (is_volatile) {
+ GenMemBarrier(cu, kStoreStore);
+ }
+ StoreBaseDisp(cu, rl_obj.low_reg, field_offset, rl_src.low_reg, kWord);
+ if (is_volatile) {
+ GenMemBarrier(cu, kLoadLoad);
+ }
+ if (is_object && !IsConstantNullRef(cu, rl_src)) {
+ MarkGCCard(cu, rl_src.low_reg, rl_obj.low_reg);
+ }
+ }
+ } else {
+ int setter_offset = is_long_or_double ? ENTRYPOINT_OFFSET(pSet64Instance) :
+ (is_object ? ENTRYPOINT_OFFSET(pSetObjInstance)
+ : ENTRYPOINT_OFFSET(pSet32Instance));
+ CallRuntimeHelperImmRegLocationRegLocation(cu, setter_offset, field_idx, rl_obj, rl_src, true);
+ }
+}
+
+void Codegen::GenConstClass(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest)
+{
+ RegLocation rl_method = LoadCurrMethod(cu);
+ int res_reg = AllocTemp(cu);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ if (!cu->compiler->CanAccessTypeWithoutChecks(cu->method_idx,
+ *cu->dex_file,
+ type_idx)) {
+ // Call out to helper which resolves type and verifies access.
+ // Resolved type returned in kRet0.
+ CallRuntimeHelperImmReg(cu, ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
+ type_idx, rl_method.low_reg, true);
+ RegLocation rl_result = GetReturn(cu, false);
+ StoreValue(cu, rl_dest, rl_result);
+ } else {
+ // We don't need access checks; load type from dex cache
+ int32_t dex_cache_offset =
+ mirror::AbstractMethod::DexCacheResolvedTypesOffset().Int32Value();
+ LoadWordDisp(cu, rl_method.low_reg, dex_cache_offset, res_reg);
+ int32_t offset_of_type =
+ mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
+ * type_idx);
+ LoadWordDisp(cu, res_reg, offset_of_type, rl_result.low_reg);
+ if (!cu->compiler->CanAssumeTypeIsPresentInDexCache(*cu->dex_file,
+ type_idx) || SLOW_TYPE_PATH) {
+ // Slow path, at runtime test if type is null and if so initialize
+ FlushAllRegs(cu);
+ LIR* branch1 = OpCmpImmBranch(cu, kCondEq, rl_result.low_reg, 0, NULL);
+ // Resolved, store and hop over following code
+ StoreValue(cu, rl_dest, rl_result);
+ /*
+ * Because we have stores of the target value on two paths,
+ * clobber temp tracking for the destination using the ssa name
+ */
+ ClobberSReg(cu, rl_dest.s_reg_low);
+ LIR* branch2 = OpUnconditionalBranch(cu, NULL);
+ // TUNING: move slow path to end & remove unconditional branch
+ LIR* target1 = NewLIR0(cu, kPseudoTargetLabel);
+ // Call out to helper, which will return resolved type in kArg0
+ CallRuntimeHelperImmReg(cu, ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx,
+ rl_method.low_reg, true);
+ RegLocation rl_result = GetReturn(cu, false);
+ StoreValue(cu, rl_dest, rl_result);
+ /*
+ * Because we have stores of the target value on two paths,
+ * clobber temp tracking for the destination using the ssa name
+ */
+ ClobberSReg(cu, rl_dest.s_reg_low);
+ // Rejoin code paths
+ LIR* target2 = NewLIR0(cu, kPseudoTargetLabel);
+ branch1->target = target1;
+ branch2->target = target2;
+ } else {
+ // Fast path, we're done - just store result
+ StoreValue(cu, rl_dest, rl_result);
+ }
+ }
+}
+
+void Codegen::GenConstString(CompilationUnit* cu, uint32_t string_idx, RegLocation rl_dest)
+{
+ /* NOTE: Most strings should be available at compile time */
+ int32_t offset_of_string = mirror::Array::DataOffset(sizeof(mirror::String*)).Int32Value() +
+ (sizeof(mirror::String*) * string_idx);
+ if (!cu->compiler->CanAssumeStringIsPresentInDexCache(
+ *cu->dex_file, string_idx) || SLOW_STRING_PATH) {
+ // slow path, resolve string if not in dex cache
+ FlushAllRegs(cu);
+ LockCallTemps(cu); // Using explicit registers
+ LoadCurrMethodDirect(cu, TargetReg(kArg2));
+ LoadWordDisp(cu, TargetReg(kArg2),
+ mirror::AbstractMethod::DexCacheStringsOffset().Int32Value(), TargetReg(kArg0));
+ // Might call out to helper, which will return resolved string in kRet0
+ int r_tgt = CallHelperSetup(cu, ENTRYPOINT_OFFSET(pResolveStringFromCode));
+ LoadWordDisp(cu, TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
+ LoadConstant(cu, TargetReg(kArg1), string_idx);
+ if (cu->instruction_set == kThumb2) {
+ OpRegImm(cu, kOpCmp, TargetReg(kRet0), 0); // Is resolved?
+ GenBarrier(cu);
+ // When EXERCISE_SLOWEST_STRING_PATH is set, skip the IT so the helper is always taken (testing only)
+ if (!EXERCISE_SLOWEST_STRING_PATH) {
+ OpIT(cu, kCondEq, "T");
+ }
+ OpRegCopy(cu, TargetReg(kArg0), TargetReg(kArg2)); // .eq
+ LIR* call_inst = OpReg(cu, kOpBlx, r_tgt); // .eq, helper(Method*, string_idx)
+ MarkSafepointPC(cu, call_inst);
+ FreeTemp(cu, r_tgt);
+ } else if (cu->instruction_set == kMips) {
+ LIR* branch = OpCmpImmBranch(cu, kCondNe, TargetReg(kRet0), 0, NULL);
+ OpRegCopy(cu, TargetReg(kArg0), TargetReg(kArg2)); // .eq
+ LIR* call_inst = OpReg(cu, kOpBlx, r_tgt);
+ MarkSafepointPC(cu, call_inst);
+ FreeTemp(cu, r_tgt);
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+ branch->target = target;
+ } else {
+ DCHECK_EQ(cu->instruction_set, kX86);
+ CallRuntimeHelperRegReg(cu, ENTRYPOINT_OFFSET(pResolveStringFromCode),
+ TargetReg(kArg2), TargetReg(kArg1), true);
+ }
+ GenBarrier(cu);
+ StoreValue(cu, rl_dest, GetReturn(cu, false));
+ } else {
+ RegLocation rl_method = LoadCurrMethod(cu);
+ int res_reg = AllocTemp(cu);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ LoadWordDisp(cu, rl_method.low_reg,
+ mirror::AbstractMethod::DexCacheStringsOffset().Int32Value(), res_reg);
+ LoadWordDisp(cu, res_reg, offset_of_string, rl_result.low_reg);
+ StoreValue(cu, rl_dest, rl_result);
+ }
+}
+
+/*
+ * Let helper function take care of everything. Will
+ * call Class::NewInstanceFromCode(type_idx, method);
+ */
+void Codegen::GenNewInstance(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest)
+{
+ FlushAllRegs(cu); /* Everything to home location */
+ // alloc will always check for resolution, do we also need to verify
+ // access because the verifier was unable to?
+ int func_offset;
+ if (cu->compiler->CanAccessInstantiableTypeWithoutChecks(
+ cu->method_idx, *cu->dex_file, type_idx)) {
+ func_offset = ENTRYPOINT_OFFSET(pAllocObjectFromCode);
+ } else {
+ func_offset = ENTRYPOINT_OFFSET(pAllocObjectFromCodeWithAccessCheck);
+ }
+ CallRuntimeHelperImmMethod(cu, func_offset, type_idx, true);
+ RegLocation rl_result = GetReturn(cu, false);
+ StoreValue(cu, rl_dest, rl_result);
+}
+
+void Codegen::GenThrow(CompilationUnit* cu, RegLocation rl_src)
+{
+ FlushAllRegs(cu);
+ CallRuntimeHelperRegLocation(cu, ENTRYPOINT_OFFSET(pDeliverException), rl_src, true);
+}
+
+void Codegen::GenInstanceof(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_dest,
+ RegLocation rl_src)
+{
+ FlushAllRegs(cu);
+ // May generate a call - use explicit registers
+ LockCallTemps(cu);
+ LoadCurrMethodDirect(cu, TargetReg(kArg1)); // kArg1 <= current Method*
+ int class_reg = TargetReg(kArg2); // kArg2 will hold the Class*
+ if (!cu->compiler->CanAccessTypeWithoutChecks(cu->method_idx,
+ *cu->dex_file,
+ type_idx)) {
+ // Check we have access to type_idx and if not throw IllegalAccessError,
+ // returns Class* in kArg0
+ CallRuntimeHelperImm(cu, ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
+ type_idx, true);
+ OpRegCopy(cu, class_reg, TargetReg(kRet0)); // Align usage with fast path
+ LoadValueDirectFixed(cu, rl_src, TargetReg(kArg0)); // kArg0 <= ref
+ } else {
+ // Load dex cache entry into class_reg (kArg2)
+ LoadValueDirectFixed(cu, rl_src, TargetReg(kArg0)); // kArg0 <= ref
+ LoadWordDisp(cu, TargetReg(kArg1),
+ mirror::AbstractMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg);
+ int32_t offset_of_type =
+ mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
+ * type_idx);
+ LoadWordDisp(cu, class_reg, offset_of_type, class_reg);
+ if (!cu->compiler->CanAssumeTypeIsPresentInDexCache(
+ *cu->dex_file, type_idx)) {
+ // Need to test presence of type in dex cache at runtime
+ LIR* hop_branch = OpCmpImmBranch(cu, kCondNe, class_reg, 0, NULL);
+ // Not resolved
+ // Call out to helper, which will return resolved type in kRet0
+ CallRuntimeHelperImm(cu, ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, true);
+ OpRegCopy(cu, TargetReg(kArg2), TargetReg(kRet0)); // Align usage with fast path
+ LoadValueDirectFixed(cu, rl_src, TargetReg(kArg0)); /* reload Ref */
+ // Rejoin code paths
+ LIR* hop_target = NewLIR0(cu, kPseudoTargetLabel);
+ hop_branch->target = hop_target;
+ }
+ }
+ /* kArg0 is ref, kArg2 is class. If ref==null, use directly as bool result */
+ RegLocation rl_result = GetReturn(cu, false);
+ if (cu->instruction_set == kMips) {
+ LoadConstant(cu, rl_result.low_reg, 0); // store false result for if branch is taken
+ }
+ LIR* branch1 = OpCmpImmBranch(cu, kCondEq, TargetReg(kArg0), 0, NULL);
+ /* load object->klass_ */
+ DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
+ LoadWordDisp(cu, TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
+ /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
+ LIR* call_inst;
+ LIR* branchover = NULL;
+ if (cu->instruction_set == kThumb2) {
+ /* Uses conditional nullification */
+ int r_tgt = LoadHelper(cu, ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+ OpRegReg(cu, kOpCmp, TargetReg(kArg1), TargetReg(kArg2)); // Same?
+ OpIT(cu, kCondEq, "EE"); // if-convert the test
+ LoadConstant(cu, TargetReg(kArg0), 1); // .eq case - load true
+ OpRegCopy(cu, TargetReg(kArg0), TargetReg(kArg2)); // .ne case - arg0 <= class
+ call_inst = OpReg(cu, kOpBlx, r_tgt); // .ne case: helper(class, ref->class)
+ FreeTemp(cu, r_tgt);
+ } else {
+ /* Uses branchovers */
+ LoadConstant(cu, rl_result.low_reg, 1); // assume true
+ branchover = OpCmpBranch(cu, kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
+ if (cu->instruction_set != kX86) {
+ int r_tgt = LoadHelper(cu, ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+ OpRegCopy(cu, TargetReg(kArg0), TargetReg(kArg2)); // .ne case - arg0 <= class
+ call_inst = OpReg(cu, kOpBlx, r_tgt); // .ne case: helper(class, ref->class)
+ FreeTemp(cu, r_tgt);
+ } else {
+ OpRegCopy(cu, TargetReg(kArg0), TargetReg(kArg2));
+ call_inst = OpThreadMem(cu, kOpBlx, ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+ }
+ }
+ MarkSafepointPC(cu, call_inst);
+ ClobberCalleeSave(cu);
+ /* branch targets here */
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+ StoreValue(cu, rl_dest, rl_result);
+ branch1->target = target;
+ if (cu->instruction_set != kThumb2) {
+ branchover->target = target;
+ }
+}
+
+void Codegen::GenCheckCast(CompilationUnit* cu, uint32_t type_idx, RegLocation rl_src)
+{
+ FlushAllRegs(cu);
+ // May generate a call - use explicit registers
+ LockCallTemps(cu);
+ LoadCurrMethodDirect(cu, TargetReg(kArg1)); // kArg1 <= current Method*
+ int class_reg = TargetReg(kArg2); // kArg2 will hold the Class*
+ if (!cu->compiler->CanAccessTypeWithoutChecks(cu->method_idx,
+ *cu->dex_file,
+ type_idx)) {
+ // Check we have access to type_idx and if not throw IllegalAccessError,
+ // returns Class* in kRet0
+ // InitializeTypeAndVerifyAccess(idx, method)
+ CallRuntimeHelperImmReg(cu, ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
+ type_idx, TargetReg(kArg1), true);
+ OpRegCopy(cu, class_reg, TargetReg(kRet0)); // Align usage with fast path
+ } else {
+ // Load dex cache entry into class_reg (kArg2)
+ LoadWordDisp(cu, TargetReg(kArg1),
+ mirror::AbstractMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg);
+ int32_t offset_of_type =
+ mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
+ (sizeof(mirror::Class*) * type_idx);
+ LoadWordDisp(cu, class_reg, offset_of_type, class_reg);
+ if (!cu->compiler->CanAssumeTypeIsPresentInDexCache(
+ *cu->dex_file, type_idx)) {
+ // Need to test presence of type in dex cache at runtime
+ LIR* hop_branch = OpCmpImmBranch(cu, kCondNe, class_reg, 0, NULL);
+ // Not resolved
+ // Call out to helper, which will return resolved type in kArg0
+ // InitializeTypeFromCode(idx, method)
+ CallRuntimeHelperImmReg(cu, ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, TargetReg(kArg1),
+ true);
+ OpRegCopy(cu, class_reg, TargetReg(kRet0)); // Align usage with fast path
+ // Rejoin code paths
+ LIR* hop_target = NewLIR0(cu, kPseudoTargetLabel);
+ hop_branch->target = hop_target;
+ }
+ }
+ // At this point, class_reg (kArg2) has class
+ LoadValueDirectFixed(cu, rl_src, TargetReg(kArg0)); // kArg0 <= ref
+ /* Null is OK - continue */
+ LIR* branch1 = OpCmpImmBranch(cu, kCondEq, TargetReg(kArg0), 0, NULL);
+ /* load object->klass_ */
+ DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
+ LoadWordDisp(cu, TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
+ /* kArg1 now contains object->klass_ */
+ LIR* branch2;
+ if (cu->instruction_set == kThumb2) {
+ int r_tgt = LoadHelper(cu, ENTRYPOINT_OFFSET(pCheckCastFromCode));
+ OpRegReg(cu, kOpCmp, TargetReg(kArg1), class_reg);
+ branch2 = OpCondBranch(cu, kCondEq, NULL); /* If eq, trivial yes */
+ OpRegCopy(cu, TargetReg(kArg0), TargetReg(kArg1));
+ OpRegCopy(cu, TargetReg(kArg1), TargetReg(kArg2));
+ ClobberCalleeSave(cu);
+ LIR* call_inst = OpReg(cu, kOpBlx, r_tgt);
+ MarkSafepointPC(cu, call_inst);
+ FreeTemp(cu, r_tgt);
+ } else {
+ branch2 = OpCmpBranch(cu, kCondEq, TargetReg(kArg1), class_reg, NULL);
+ CallRuntimeHelperRegReg(cu, ENTRYPOINT_OFFSET(pCheckCastFromCode), TargetReg(kArg1),
+ TargetReg(kArg2), true);
+ }
+ /* branch target here */
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+ branch1->target = target;
+ branch2->target = target;
+}
+
+void Codegen::GenLong3Addr(CompilationUnit* cu, OpKind first_op, OpKind second_op,
+ RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
+{
+ RegLocation rl_result;
+ if (cu->instruction_set == kThumb2) {
+ /*
+ * NOTE: This is the one place in the code in which we might have
+ * as many as six live temporary registers. There are 5 in the normal
+ * set for Arm. Until we have spill capabilities, temporarily add
+ * lr to the temp set. It is safe to do this locally, but note that
+ * lr is used explicitly elsewhere in the code generator and cannot
+ * normally be used as a general temp register.
+ */
+ MarkTemp(cu, TargetReg(kLr)); // Add lr to the temp pool
+ FreeTemp(cu, TargetReg(kLr)); // and make it available
+ }
+ rl_src1 = LoadValueWide(cu, rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(cu, rl_src2, kCoreReg);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ // The longs may overlap - use intermediate temp if so
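+ // (The low-half op is emitted first; without the temp it could clobber a source
+ // high half that the second op still needs.)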
+ if ((rl_result.low_reg == rl_src1.high_reg) || (rl_result.low_reg == rl_src2.high_reg)) {
+ int t_reg = AllocTemp(cu);
+ OpRegRegReg(cu, first_op, t_reg, rl_src1.low_reg, rl_src2.low_reg);
+ OpRegRegReg(cu, second_op, rl_result.high_reg, rl_src1.high_reg, rl_src2.high_reg);
+ OpRegCopy(cu, rl_result.low_reg, t_reg);
+ FreeTemp(cu, t_reg);
+ } else {
+ OpRegRegReg(cu, first_op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
+ OpRegRegReg(cu, second_op, rl_result.high_reg, rl_src1.high_reg,
+ rl_src2.high_reg);
+ }
+ /*
+ * NOTE: If rl_dest refers to a frame variable in a large frame, the
+ * following StoreValueWide might need to allocate a temp register.
+ * To further work around the lack of a spill capability, explicitly
+ * free any temps from rl_src1 & rl_src2 that aren't still live in rl_result.
+ * Remove when spill is functional.
+ */
+ FreeRegLocTemps(cu, rl_result, rl_src1);
+ FreeRegLocTemps(cu, rl_result, rl_src2);
+ StoreValueWide(cu, rl_dest, rl_result);
+ if (cu->instruction_set == kThumb2) {
+ Clobber(cu, TargetReg(kLr));
+ UnmarkTemp(cu, TargetReg(kLr)); // Remove lr from the temp pool
+ }
+}
+
+
+void Codegen::GenShiftOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_shift)
+{
+ int func_offset = -1; // Make gcc happy
+
+ switch (opcode) {
+ case Instruction::SHL_LONG:
+ case Instruction::SHL_LONG_2ADDR:
+ func_offset = ENTRYPOINT_OFFSET(pShlLong);
+ break;
+ case Instruction::SHR_LONG:
+ case Instruction::SHR_LONG_2ADDR:
+ func_offset = ENTRYPOINT_OFFSET(pShrLong);
+ break;
+ case Instruction::USHR_LONG:
+ case Instruction::USHR_LONG_2ADDR:
+ func_offset = ENTRYPOINT_OFFSET(pUshrLong);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected case";
+ }
+ FlushAllRegs(cu); /* Send everything to home location */
+ CallRuntimeHelperRegLocationRegLocation(cu, func_offset, rl_src1, rl_shift, false);
+ RegLocation rl_result = GetReturnWide(cu, false);
+ StoreValueWide(cu, rl_dest, rl_result);
+}
+
+
+void Codegen::GenArithOpInt(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
+{
+ OpKind op = kOpBkpt;
+ bool is_div_rem = false;
+ bool check_zero = false;
+ bool unary = false;
+ RegLocation rl_result;
+ bool shift_op = false;
+ switch (opcode) {
+ case Instruction::NEG_INT:
+ op = kOpNeg;
+ unary = true;
+ break;
+ case Instruction::NOT_INT:
+ op = kOpMvn;
+ unary = true;
+ break;
+ case Instruction::ADD_INT:
+ case Instruction::ADD_INT_2ADDR:
+ op = kOpAdd;
+ break;
+ case Instruction::SUB_INT:
+ case Instruction::SUB_INT_2ADDR:
+ op = kOpSub;
+ break;
+ case Instruction::MUL_INT:
+ case Instruction::MUL_INT_2ADDR:
+ op = kOpMul;
+ break;
+ case Instruction::DIV_INT:
+ case Instruction::DIV_INT_2ADDR:
+ check_zero = true;
+ op = kOpDiv;
+ is_div_rem = true;
+ break;
+ /* NOTE: returns in kArg1 */
+ case Instruction::REM_INT:
+ case Instruction::REM_INT_2ADDR:
+ check_zero = true;
+ op = kOpRem;
+ is_div_rem = true;
+ break;
+ case Instruction::AND_INT:
+ case Instruction::AND_INT_2ADDR:
+ op = kOpAnd;
+ break;
+ case Instruction::OR_INT:
+ case Instruction::OR_INT_2ADDR:
+ op = kOpOr;
+ break;
+ case Instruction::XOR_INT:
+ case Instruction::XOR_INT_2ADDR:
+ op = kOpXor;
+ break;
+ case Instruction::SHL_INT:
+ case Instruction::SHL_INT_2ADDR:
+ shift_op = true;
+ op = kOpLsl;
+ break;
+ case Instruction::SHR_INT:
+ case Instruction::SHR_INT_2ADDR:
+ shift_op = true;
+ op = kOpAsr;
+ break;
+ case Instruction::USHR_INT:
+ case Instruction::USHR_INT_2ADDR:
+ shift_op = true;
+ op = kOpLsr;
+ break;
+ default:
+ LOG(FATAL) << "Invalid word arith op: " << opcode;
+ }
+ if (!is_div_rem) {
+ if (unary) {
+ rl_src1 = LoadValue(cu, rl_src1, kCoreReg);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ OpRegReg(cu, op, rl_result.low_reg, rl_src1.low_reg);
+ } else {
+ if (shift_op) {
+ int t_reg = INVALID_REG;
+ if (cu->instruction_set == kX86) {
+ // X86 doesn't require masking and must use ECX
+ t_reg = TargetReg(kCount); // rCX
+ LoadValueDirectFixed(cu, rl_src2, t_reg);
+ } else {
+ rl_src2 = LoadValue(cu, rl_src2, kCoreReg);
+ t_reg = AllocTemp(cu);
+ OpRegRegImm(cu, kOpAnd, t_reg, rl_src2.low_reg, 31);
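+ // Dalvik shift ops use only the low five bits of the count, so mask it explicitly here.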
+ }
+ rl_src1 = LoadValue(cu, rl_src1, kCoreReg);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ OpRegRegReg(cu, op, rl_result.low_reg, rl_src1.low_reg, t_reg);
+ FreeTemp(cu, t_reg);
+ } else {
+ rl_src1 = LoadValue(cu, rl_src1, kCoreReg);
+ rl_src2 = LoadValue(cu, rl_src2, kCoreReg);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ OpRegRegReg(cu, op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
+ }
+ }
+ StoreValue(cu, rl_dest, rl_result);
+ } else {
+ if (cu->instruction_set == kMips) {
+ rl_src1 = LoadValue(cu, rl_src1, kCoreReg);
+ rl_src2 = LoadValue(cu, rl_src2, kCoreReg);
+ if (check_zero) {
+ GenImmedCheck(cu, kCondEq, rl_src2.low_reg, 0, kThrowDivZero);
+ }
+ rl_result = GenDivRem(cu, rl_dest, rl_src1.low_reg, rl_src2.low_reg, op == kOpDiv);
+ } else {
+ int func_offset = ENTRYPOINT_OFFSET(pIdivmod);
+ FlushAllRegs(cu); /* Send everything to home location */
+ LoadValueDirectFixed(cu, rl_src2, TargetReg(kArg1));
+ int r_tgt = CallHelperSetup(cu, func_offset);
+ LoadValueDirectFixed(cu, rl_src1, TargetReg(kArg0));
+ if (check_zero) {
+ GenImmedCheck(cu, kCondEq, TargetReg(kArg1), 0, kThrowDivZero);
+ }
+ // NOTE: callout here is not a safepoint
+ CallHelper(cu, r_tgt, func_offset, false /* not a safepoint */);
+ if (op == kOpDiv)
+ rl_result = GetReturn(cu, false);
+ else
+ rl_result = GetReturnAlt(cu);
+ }
+ StoreValue(cu, rl_dest, rl_result);
+ }
+}
+
+/*
+ * The following are the first-level codegen routines that analyze the format
+ * of each bytecode then either dispatch special purpose codegen routines
+ * or produce corresponding Thumb instructions directly.
+ */
+
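+// Returns true if 'x' has no more than one bit set (zero or a power of two).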
+static bool IsPowerOfTwo(int x)
+{
+ return (x & (x - 1)) == 0;
+}
+
+// Returns true if no more than two bits are set in 'x'.
+static bool IsPopCountLE2(unsigned int x)
+{
+ x &= x - 1;
+ return (x & (x - 1)) == 0;
+}
+
+// Returns the index of the lowest set bit in 'x'.
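+// Assumes x != 0 (guaranteed by callers); the loop would not terminate otherwise.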
+static int LowestSetBit(unsigned int x) {
+ int bit_posn = 0;
+ while ((x & 0xf) == 0) {
+ bit_posn += 4;
+ x >>= 4;
+ }
+ while ((x & 1) == 0) {
+ bit_posn++;
+ x >>= 1;
+ }
+ return bit_posn;
+}
+
+// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
+// and store the result in 'rl_dest'.
+static bool HandleEasyDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
+ RegLocation rl_src, RegLocation rl_dest, int lit)
+{
+ if ((lit < 2) || ((cu->instruction_set != kThumb2) && !IsPowerOfTwo(lit))) {
+ return false;
+ }
+ Codegen* cg = cu->cg.get();
+ // No divide instruction for Arm, so check for more special cases
+ if ((cu->instruction_set == kThumb2) && !IsPowerOfTwo(lit)) {
+ return cg->SmallLiteralDivide(cu, dalvik_opcode, rl_src, rl_dest, lit);
+ }
+ int k = LowestSetBit(lit);
+ if (k >= 30) {
+ // Avoid special cases.
+ return false;
+ }
+ bool div = (dalvik_opcode == Instruction::DIV_INT_LIT8 ||
+ dalvik_opcode == Instruction::DIV_INT_LIT16);
+ rl_src = cg->LoadValue(cu, rl_src, kCoreReg);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ if (div) {
+ int t_reg = AllocTemp(cu);
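+ // Round-toward-zero divide by 2^k: add a bias of (2^k - 1) to negative
+ // dividends before the arithmetic shift, e.g. -5 / 8 -> (-5 + 7) >> 3 == 0.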
+ if (lit == 2) {
+ // Division by 2 is by far the most common division by constant.
+ cg->OpRegRegImm(cu, kOpLsr, t_reg, rl_src.low_reg, 32 - k);
+ cg->OpRegRegReg(cu, kOpAdd, t_reg, t_reg, rl_src.low_reg);
+ cg->OpRegRegImm(cu, kOpAsr, rl_result.low_reg, t_reg, k);
+ } else {
+ cg->OpRegRegImm(cu, kOpAsr, t_reg, rl_src.low_reg, 31);
+ cg->OpRegRegImm(cu, kOpLsr, t_reg, t_reg, 32 - k);
+ cg->OpRegRegReg(cu, kOpAdd, t_reg, t_reg, rl_src.low_reg);
+ cg->OpRegRegImm(cu, kOpAsr, rl_result.low_reg, t_reg, k);
+ }
+ } else {
+ int t_reg1 = AllocTemp(cu);
+ int t_reg2 = AllocTemp(cu);
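+ // Remainder modulo 2^k via masking: rem = ((src + bias) & (2^k - 1)) - bias,
+ // with the same sign bias as above, e.g. -5 % 8 -> ((-5 + 7) & 7) - 7 == -5.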
+ if (lit == 2) {
+ cg->OpRegRegImm(cu, kOpLsr, t_reg1, rl_src.low_reg, 32 - k);
+ cg->OpRegRegReg(cu, kOpAdd, t_reg2, t_reg1, rl_src.low_reg);
+ cg->OpRegRegImm(cu, kOpAnd, t_reg2, t_reg2, lit - 1);
+ cg->OpRegRegReg(cu, kOpSub, rl_result.low_reg, t_reg2, t_reg1);
+ } else {
+ cg->OpRegRegImm(cu, kOpAsr, t_reg1, rl_src.low_reg, 31);
+ cg->OpRegRegImm(cu, kOpLsr, t_reg1, t_reg1, 32 - k);
+ cg->OpRegRegReg(cu, kOpAdd, t_reg2, t_reg1, rl_src.low_reg);
+ cg->OpRegRegImm(cu, kOpAnd, t_reg2, t_reg2, lit - 1);
+ cg->OpRegRegReg(cu, kOpSub, rl_result.low_reg, t_reg2, t_reg1);
+ }
+ }
+ cg->StoreValue(cu, rl_dest, rl_result);
+ return true;
+}
+
+// Returns true if it added instructions to 'cu' to multiply 'rl_src' by 'lit'
+// and store the result in 'rl_dest'.
+static bool HandleEasyMultiply(CompilationUnit* cu, RegLocation rl_src,
+ RegLocation rl_dest, int lit)
+{
+ // Can we simplify this multiplication?
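+ // Three cheap forms: a single shift (power of two), shift-add-shift when at most
+ // two bits are set (e.g. 10*x = (x << 3) + (x << 1)), or shift-and-subtract when
+ // lit + 1 is a power of two (e.g. 7*x = (x << 3) - x).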
+ bool power_of_two = false;
+ bool pop_count_le2 = false;
+ bool power_of_two_minus_one = false;
+ if (lit < 2) {
+ // Avoid special cases.
+ return false;
+ } else if (IsPowerOfTwo(lit)) {
+ power_of_two = true;
+ } else if (IsPopCountLE2(lit)) {
+ pop_count_le2 = true;
+ } else if (IsPowerOfTwo(lit + 1)) {
+ power_of_two_minus_one = true;
+ } else {
+ return false;
+ }
+ Codegen* cg = cu->cg.get();
+ rl_src = cg->LoadValue(cu, rl_src, kCoreReg);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ if (power_of_two) {
+ // Shift.
+ cg->OpRegRegImm(cu, kOpLsl, rl_result.low_reg, rl_src.low_reg, LowestSetBit(lit));
+ } else if (pop_count_le2) {
+ // Shift and add and shift.
+ int first_bit = LowestSetBit(lit);
+ int second_bit = LowestSetBit(lit ^ (1 << first_bit));
+ cg->GenMultiplyByTwoBitMultiplier(cu, rl_src, rl_result, lit, first_bit, second_bit);
+ } else {
+ // Reverse subtract: (src << (shift + 1)) - src.
+ DCHECK(power_of_two_minus_one);
+ // TUNING: rsb dst, src, src lsl#LowestSetBit(lit + 1)
+ int t_reg = AllocTemp(cu);
+ cg->OpRegRegImm(cu, kOpLsl, t_reg, rl_src.low_reg, LowestSetBit(lit + 1));
+ cg->OpRegRegReg(cu, kOpSub, rl_result.low_reg, t_reg, rl_src.low_reg);
+ }
+ cg->StoreValue(cu, rl_dest, rl_result);
+ return true;
+}
+
+void Codegen::GenArithOpIntLit(CompilationUnit* cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src, int lit)
+{
+ RegLocation rl_result;
+ OpKind op = static_cast<OpKind>(0); /* Make gcc happy */
+ bool shift_op = false;
+ bool is_div = false;
+
+ switch (opcode) {
+ case Instruction::RSUB_INT_LIT8:
+ case Instruction::RSUB_INT: {
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ if (cu->instruction_set == kThumb2) {
+ OpRegRegImm(cu, kOpRsub, rl_result.low_reg, rl_src.low_reg, lit);
+ } else {
+ OpRegReg(cu, kOpNeg, rl_result.low_reg, rl_src.low_reg);
+ OpRegImm(cu, kOpAdd, rl_result.low_reg, lit);
+ }
+ StoreValue(cu, rl_dest, rl_result);
+ return;
+ }
+
+ case Instruction::SUB_INT:
+ case Instruction::SUB_INT_2ADDR:
+ lit = -lit;
+ // Intended fallthrough
+ case Instruction::ADD_INT:
+ case Instruction::ADD_INT_2ADDR:
+ case Instruction::ADD_INT_LIT8:
+ case Instruction::ADD_INT_LIT16:
+ op = kOpAdd;
+ break;
+ case Instruction::MUL_INT:
+ case Instruction::MUL_INT_2ADDR:
+ case Instruction::MUL_INT_LIT8:
+ case Instruction::MUL_INT_LIT16: {
+ if (HandleEasyMultiply(cu, rl_src, rl_dest, lit)) {
+ return;
+ }
+ op = kOpMul;
+ break;
+ }
+ case Instruction::AND_INT:
+ case Instruction::AND_INT_2ADDR:
+ case Instruction::AND_INT_LIT8:
+ case Instruction::AND_INT_LIT16:
+ op = kOpAnd;
+ break;
+ case Instruction::OR_INT:
+ case Instruction::OR_INT_2ADDR:
+ case Instruction::OR_INT_LIT8:
+ case Instruction::OR_INT_LIT16:
+ op = kOpOr;
+ break;
+ case Instruction::XOR_INT:
+ case Instruction::XOR_INT_2ADDR:
+ case Instruction::XOR_INT_LIT8:
+ case Instruction::XOR_INT_LIT16:
+ op = kOpXor;
+ break;
+ case Instruction::SHL_INT_LIT8:
+ case Instruction::SHL_INT:
+ case Instruction::SHL_INT_2ADDR:
+ lit &= 31;
+ shift_op = true;
+ op = kOpLsl;
+ break;
+ case Instruction::SHR_INT_LIT8:
+ case Instruction::SHR_INT:
+ case Instruction::SHR_INT_2ADDR:
+ lit &= 31;
+ shift_op = true;
+ op = kOpAsr;
+ break;
+ case Instruction::USHR_INT_LIT8:
+ case Instruction::USHR_INT:
+ case Instruction::USHR_INT_2ADDR:
+ lit &= 31;
+ shift_op = true;
+ op = kOpLsr;
+ break;
+
+ case Instruction::DIV_INT:
+ case Instruction::DIV_INT_2ADDR:
+ case Instruction::DIV_INT_LIT8:
+ case Instruction::DIV_INT_LIT16:
+ case Instruction::REM_INT:
+ case Instruction::REM_INT_2ADDR:
+ case Instruction::REM_INT_LIT8:
+ case Instruction::REM_INT_LIT16: {
+ if (lit == 0) {
+ GenImmedCheck(cu, kCondAl, 0, 0, kThrowDivZero);
+ return;
+ }
+ if (HandleEasyDivide(cu, opcode, rl_src, rl_dest, lit)) {
+ return;
+ }
+ if ((opcode == Instruction::DIV_INT_LIT8) ||
+ (opcode == Instruction::DIV_INT) ||
+ (opcode == Instruction::DIV_INT_2ADDR) ||
+ (opcode == Instruction::DIV_INT_LIT16)) {
+ is_div = true;
+ } else {
+ is_div = false;
+ }
+ if (cu->instruction_set == kMips) {
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+ rl_result = GenDivRemLit(cu, rl_dest, rl_src.low_reg, lit, is_div);
+ } else {
+ FlushAllRegs(cu); /* Everything to home location */
+ LoadValueDirectFixed(cu, rl_src, TargetReg(kArg0));
+ Clobber(cu, TargetReg(kArg0));
+ int func_offset = ENTRYPOINT_OFFSET(pIdivmod);
+ CallRuntimeHelperRegImm(cu, func_offset, TargetReg(kArg0), lit, false);
+ if (is_div)
+ rl_result = GetReturn(cu, false);
+ else
+ rl_result = GetReturnAlt(cu);
+ }
+ StoreValue(cu, rl_dest, rl_result);
+ return;
+ }
+ default:
+ LOG(FATAL) << "Unexpected opcode " << opcode;
+ }
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ // Avoid shifts by literal 0 - no support in Thumb. Change to copy
+ if (shift_op && (lit == 0)) {
+ OpRegCopy(cu, rl_result.low_reg, rl_src.low_reg);
+ } else {
+ OpRegRegImm(cu, op, rl_result.low_reg, rl_src.low_reg, lit);
+ }
+ StoreValue(cu, rl_dest, rl_result);
+}
+
+void Codegen::GenArithOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
+{
+ RegLocation rl_result;
+ OpKind first_op = kOpBkpt;
+ OpKind second_op = kOpBkpt;
+ bool call_out = false;
+ bool check_zero = false;
+ int func_offset;
+ int ret_reg = TargetReg(kRet0);
+
+ switch (opcode) {
+ case Instruction::NOT_LONG:
+ rl_src2 = LoadValueWide(cu, rl_src2, kCoreReg);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ // Check for destructive overlap
+ if (rl_result.low_reg == rl_src2.high_reg) {
+ int t_reg = AllocTemp(cu);
+ OpRegCopy(cu, t_reg, rl_src2.high_reg);
+ OpRegReg(cu, kOpMvn, rl_result.low_reg, rl_src2.low_reg);
+ OpRegReg(cu, kOpMvn, rl_result.high_reg, t_reg);
+ FreeTemp(cu, t_reg);
+ } else {
+ OpRegReg(cu, kOpMvn, rl_result.low_reg, rl_src2.low_reg);
+ OpRegReg(cu, kOpMvn, rl_result.high_reg, rl_src2.high_reg);
+ }
+ StoreValueWide(cu, rl_dest, rl_result);
+ return;
+ case Instruction::ADD_LONG:
+ case Instruction::ADD_LONG_2ADDR:
+ if (cu->instruction_set != kThumb2) {
+ GenAddLong(cu, rl_dest, rl_src1, rl_src2);
+ return;
+ }
+ first_op = kOpAdd;
+ second_op = kOpAdc;
+ break;
+ case Instruction::SUB_LONG:
+ case Instruction::SUB_LONG_2ADDR:
+ if (cu->instruction_set != kThumb2) {
+ GenSubLong(cu, rl_dest, rl_src1, rl_src2);
+ return;
+ }
+ first_op = kOpSub;
+ second_op = kOpSbc;
+ break;
+ case Instruction::MUL_LONG:
+ case Instruction::MUL_LONG_2ADDR:
+ if (cu->instruction_set == kThumb2) {
+ GenMulLong(cu, rl_dest, rl_src1, rl_src2);
+ return;
+ } else {
+ call_out = true;
+ ret_reg = TargetReg(kRet0);
+ func_offset = ENTRYPOINT_OFFSET(pLmul);
+ }
+ break;
+ case Instruction::DIV_LONG:
+ case Instruction::DIV_LONG_2ADDR:
+ call_out = true;
+ check_zero = true;
+ ret_reg = TargetReg(kRet0);
+ func_offset = ENTRYPOINT_OFFSET(pLdiv);
+ break;
+ case Instruction::REM_LONG:
+ case Instruction::REM_LONG_2ADDR:
+ call_out = true;
+ check_zero = true;
+ func_offset = ENTRYPOINT_OFFSET(pLdivmod);
+ /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
+ ret_reg = (cu->instruction_set == kThumb2) ? TargetReg(kArg2) : TargetReg(kRet0);
+ break;
+ case Instruction::AND_LONG_2ADDR:
+ case Instruction::AND_LONG:
+ if (cu->instruction_set == kX86) {
+ return GenAndLong(cu, rl_dest, rl_src1, rl_src2);
+ }
+ first_op = kOpAnd;
+ second_op = kOpAnd;
+ break;
+ case Instruction::OR_LONG:
+ case Instruction::OR_LONG_2ADDR:
+ if (cu->instruction_set == kX86) {
+ GenOrLong(cu, rl_dest, rl_src1, rl_src2);
+ return;
+ }
+ first_op = kOpOr;
+ second_op = kOpOr;
+ break;
+ case Instruction::XOR_LONG:
+ case Instruction::XOR_LONG_2ADDR:
+ if (cu->instruction_set == kX86) {
+ GenXorLong(cu, rl_dest, rl_src1, rl_src2);
+ return;
+ }
+ first_op = kOpXor;
+ second_op = kOpXor;
+ break;
+ case Instruction::NEG_LONG: {
+ GenNegLong(cu, rl_dest, rl_src2);
+ return;
+ }
+ default:
+ LOG(FATAL) << "Invalid long arith op";
+ }
+ if (!call_out) {
+ GenLong3Addr(cu, first_op, second_op, rl_dest, rl_src1, rl_src2);
+ } else {
+ FlushAllRegs(cu); /* Send everything to home location */
+ if (check_zero) {
+ LoadValueDirectWideFixed(cu, rl_src2, TargetReg(kArg2), TargetReg(kArg3));
+ int r_tgt = CallHelperSetup(cu, func_offset);
+ GenDivZeroCheck(cu, TargetReg(kArg2), TargetReg(kArg3));
+ LoadValueDirectWideFixed(cu, rl_src1, TargetReg(kArg0), TargetReg(kArg1));
+ // NOTE: callout here is not a safepoint
+ CallHelper(cu, r_tgt, func_offset, false /* not safepoint */);
+ } else {
+ CallRuntimeHelperRegLocationRegLocation(cu, func_offset,
+ rl_src1, rl_src2, false);
+ }
+ // Adjust return regs to handle the case of rem returning kArg2/kArg3
+ if (ret_reg == TargetReg(kRet0))
+ rl_result = GetReturnWide(cu, false);
+ else
+ rl_result = GetReturnWideAlt(cu);
+ StoreValueWide(cu, rl_dest, rl_result);
+ }
+}
+
+void Codegen::GenConversionCall(CompilationUnit* cu, int func_offset,
+ RegLocation rl_dest, RegLocation rl_src)
+{
+ /*
+ * Don't optimize the register usage since it calls out to support
+ * functions
+ */
+ FlushAllRegs(cu); /* Send everything to home location */
+ if (rl_src.wide) {
+ LoadValueDirectWideFixed(cu, rl_src, rl_src.fp ? TargetReg(kFArg0) : TargetReg(kArg0),
+ rl_src.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
+ } else {
+ LoadValueDirectFixed(cu, rl_src, rl_src.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
+ }
+ CallRuntimeHelperRegLocation(cu, func_offset, rl_src, false);
+ if (rl_dest.wide) {
+ RegLocation rl_result;
+ rl_result = GetReturnWide(cu, rl_dest.fp);
+ StoreValueWide(cu, rl_dest, rl_result);
+ } else {
+ RegLocation rl_result;
+ rl_result = GetReturn(cu, rl_dest.fp);
+ StoreValue(cu, rl_dest, rl_result);
+ }
+}
+
+/* Emit a suspend check unless suspend tests are disabled or elided for this op */
+void Codegen::GenSuspendTest(CompilationUnit* cu, int opt_flags)
+{
+ if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
+ return;
+ }
+ FlushAllRegs(cu);
+ LIR* branch = OpTestSuspend(cu, NULL);
+ LIR* ret_lab = NewLIR0(cu, kPseudoTargetLabel);
+ LIR* target = RawLIR(cu, cu->current_dalvik_offset, kPseudoSuspendTarget,
+ reinterpret_cast<uintptr_t>(ret_lab), cu->current_dalvik_offset);
+ branch->target = target;
+ InsertGrowableList(cu, &cu->suspend_launchpads, reinterpret_cast<uintptr_t>(target));
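+ // The launchpad is materialized later by HandleSuspendLaunchPads(), which
+ // calls pTestSuspendFromCode and then branches back to ret_lab.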
+}
+
+/* Emit a suspend check and branch unless suspend tests are disabled or elided for this op */
+void Codegen::GenSuspendTestAndBranch(CompilationUnit* cu, int opt_flags, LIR* target)
+{
+ if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
+ OpUnconditionalBranch(cu, target);
+ return;
+ }
+ OpTestSuspend(cu, target);
+ LIR* launch_pad =
+ RawLIR(cu, cu->current_dalvik_offset, kPseudoSuspendTarget,
+ reinterpret_cast<uintptr_t>(target), cu->current_dalvik_offset);
+ FlushAllRegs(cu);
+ OpUnconditionalBranch(cu, launch_pad);
+ InsertGrowableList(cu, &cu->suspend_launchpads, reinterpret_cast<uintptr_t>(launch_pad));
+}
+
+} // namespace art
diff --git a/src/compiler/dex/quick/gen_invoke.cc b/src/compiler/dex/quick/gen_invoke.cc
new file mode 100644
index 0000000..9a1fa5c
--- /dev/null
+++ b/src/compiler/dex/quick/gen_invoke.cc
@@ -0,0 +1,1477 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_util.h"
+#include "compiler/dex/compiler_ir.h"
+#include "oat/runtime/oat_support_entrypoints.h"
+#include "ralloc_util.h"
+#include "x86/codegen_x86.h"
+
+namespace art {
+
+/*
+ * This source file contains "gen" codegen routines that should
+ * be applicable to most targets. Only mid-level support utilities
+ * and "op" calls may be used here.
+ */
+
+/*
+ * To save scheduling time, helper calls are broken into two parts: generation of
+ * the helper target address, and the actual call to the helper. Because x86
+ * has a memory call operation, part 1 is a NOP for x86. For other targets,
+ * load arguments between the two parts.
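+ * Typical sequence: int r_tgt = CallHelperSetup(cu, off); <load args>;
+ * CallHelper(cu, r_tgt, off, safepoint_pc);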
+ */
+int Codegen::CallHelperSetup(CompilationUnit* cu, int helper_offset)
+{
+ return (cu->instruction_set == kX86) ? 0 : LoadHelper(cu, helper_offset);
+}
+
+/* NOTE: if r_tgt is a temp, it will be freed following use */
+LIR* Codegen::CallHelper(CompilationUnit* cu, int r_tgt, int helper_offset, bool safepoint_pc)
+{
+ LIR* call_inst;
+ if (cu->instruction_set == kX86) {
+ call_inst = OpThreadMem(cu, kOpBlx, helper_offset);
+ } else {
+ call_inst = OpReg(cu, kOpBlx, r_tgt);
+ FreeTemp(cu, r_tgt);
+ }
+ if (safepoint_pc) {
+ MarkSafepointPC(cu, call_inst);
+ }
+ return call_inst;
+}
+
+void Codegen::CallRuntimeHelperImm(CompilationUnit* cu, int helper_offset, int arg0,
+ bool safepoint_pc) {
+ int r_tgt = CallHelperSetup(cu, helper_offset);
+ LoadConstant(cu, TargetReg(kArg0), arg0);
+ ClobberCalleeSave(cu);
+ CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+}
+
+void Codegen::CallRuntimeHelperReg(CompilationUnit* cu, int helper_offset, int arg0,
+ bool safepoint_pc) {
+ int r_tgt = CallHelperSetup(cu, helper_offset);
+ OpRegCopy(cu, TargetReg(kArg0), arg0);
+ ClobberCalleeSave(cu);
+ CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+}
+
+void Codegen::CallRuntimeHelperRegLocation(CompilationUnit* cu, int helper_offset, RegLocation arg0,
+ bool safepoint_pc) {
+ int r_tgt = CallHelperSetup(cu, helper_offset);
+ if (arg0.wide == 0) {
+ LoadValueDirectFixed(cu, arg0, TargetReg(kArg0));
+ } else {
+ LoadValueDirectWideFixed(cu, arg0, TargetReg(kArg0), TargetReg(kArg1));
+ }
+ ClobberCalleeSave(cu);
+ CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+}
+
+void Codegen::CallRuntimeHelperImmImm(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
+ bool safepoint_pc) {
+ int r_tgt = CallHelperSetup(cu, helper_offset);
+ LoadConstant(cu, TargetReg(kArg0), arg0);
+ LoadConstant(cu, TargetReg(kArg1), arg1);
+ ClobberCalleeSave(cu);
+ CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+}
+
+void Codegen::CallRuntimeHelperImmRegLocation(CompilationUnit* cu, int helper_offset, int arg0,
+ RegLocation arg1, bool safepoint_pc) {
+ int r_tgt = CallHelperSetup(cu, helper_offset);
+ if (arg1.wide == 0) {
+ LoadValueDirectFixed(cu, arg1, TargetReg(kArg1));
+ } else {
+ LoadValueDirectWideFixed(cu, arg1, TargetReg(kArg1), TargetReg(kArg2));
+ }
+ LoadConstant(cu, TargetReg(kArg0), arg0);
+ ClobberCalleeSave(cu);
+ CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+}
+
+void Codegen::CallRuntimeHelperRegLocationImm(CompilationUnit* cu, int helper_offset,
+ RegLocation arg0, int arg1, bool safepoint_pc) {
+ int r_tgt = CallHelperSetup(cu, helper_offset);
+ LoadValueDirectFixed(cu, arg0, TargetReg(kArg0));
+ LoadConstant(cu, TargetReg(kArg1), arg1);
+ ClobberCalleeSave(cu);
+ CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+}
+
+void Codegen::CallRuntimeHelperImmReg(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
+ bool safepoint_pc) {
+ int r_tgt = CallHelperSetup(cu, helper_offset);
+ OpRegCopy(cu, TargetReg(kArg1), arg1);
+ LoadConstant(cu, TargetReg(kArg0), arg0);
+ ClobberCalleeSave(cu);
+ CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+}
+
+void Codegen::CallRuntimeHelperRegImm(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
+ bool safepoint_pc) {
+ int r_tgt = CallHelperSetup(cu, helper_offset);
+ OpRegCopy(cu, TargetReg(kArg0), arg0);
+ LoadConstant(cu, TargetReg(kArg1), arg1);
+ ClobberCalleeSave(cu);
+ CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+}
+
+void Codegen::CallRuntimeHelperImmMethod(CompilationUnit* cu, int helper_offset, int arg0,
+ bool safepoint_pc) {
+ int r_tgt = CallHelperSetup(cu, helper_offset);
+ LoadCurrMethodDirect(cu, TargetReg(kArg1));
+ LoadConstant(cu, TargetReg(kArg0), arg0);
+ ClobberCalleeSave(cu);
+ CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+}
+
+void Codegen::CallRuntimeHelperRegLocationRegLocation(CompilationUnit* cu, int helper_offset,
+ RegLocation arg0, RegLocation arg1,
+ bool safepoint_pc) {
+ int r_tgt = CallHelperSetup(cu, helper_offset);
+ if (arg0.wide == 0) {
+ LoadValueDirectFixed(cu, arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
+ if (arg1.wide == 0) {
+ if (cu->instruction_set == kMips) {
+ LoadValueDirectFixed(cu, arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1));
+ } else {
+ LoadValueDirectFixed(cu, arg1, TargetReg(kArg1));
+ }
+ } else {
+ if (cu->instruction_set == kMips) {
+ LoadValueDirectWideFixed(cu, arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1),
+ arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg2));
+ } else {
+ LoadValueDirectWideFixed(cu, arg1, TargetReg(kArg1), TargetReg(kArg2));
+ }
+ }
+ } else {
+ LoadValueDirectWideFixed(cu, arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0),
+ arg0.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
+ if (arg1.wide == 0) {
+ LoadValueDirectFixed(cu, arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2));
+ } else {
+ LoadValueDirectWideFixed(cu, arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2),
+ arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg3));
+ }
+ }
+ ClobberCalleeSave(cu);
+ CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+}
+
+void Codegen::CallRuntimeHelperRegReg(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
+ bool safepoint_pc) {
+ int r_tgt = CallHelperSetup(cu, helper_offset);
+ DCHECK_NE(TargetReg(kArg0), arg1); // check copy into arg0 won't clobber arg1
+ OpRegCopy(cu, TargetReg(kArg0), arg0);
+ OpRegCopy(cu, TargetReg(kArg1), arg1);
+ ClobberCalleeSave(cu);
+ CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+}
+
+void Codegen::CallRuntimeHelperRegRegImm(CompilationUnit* cu, int helper_offset, int arg0, int arg1,
+ int arg2, bool safepoint_pc) {
+ int r_tgt = CallHelperSetup(cu, helper_offset);
+ DCHECK_NE(TargetReg(kArg0), arg1); // check copy into arg0 won't clobber arg1
+ OpRegCopy(cu, TargetReg(kArg0), arg0);
+ OpRegCopy(cu, TargetReg(kArg1), arg1);
+ LoadConstant(cu, TargetReg(kArg2), arg2);
+ ClobberCalleeSave(cu);
+ CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+}
+
+void Codegen::CallRuntimeHelperImmMethodRegLocation(CompilationUnit* cu, int helper_offset,
+ int arg0, RegLocation arg2, bool safepoint_pc) {
+ int r_tgt = CallHelperSetup(cu, helper_offset);
+ LoadValueDirectFixed(cu, arg2, TargetReg(kArg2));
+ LoadCurrMethodDirect(cu, TargetReg(kArg1));
+ LoadConstant(cu, TargetReg(kArg0), arg0);
+ ClobberCalleeSave(cu);
+ CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+}
+
+void Codegen::CallRuntimeHelperImmMethodImm(CompilationUnit* cu, int helper_offset, int arg0,
+ int arg2, bool safepoint_pc) {
+ int r_tgt = CallHelperSetup(cu, helper_offset);
+ LoadCurrMethodDirect(cu, TargetReg(kArg1));
+ LoadConstant(cu, TargetReg(kArg2), arg2);
+ LoadConstant(cu, TargetReg(kArg0), arg0);
+ ClobberCalleeSave(cu);
+ CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+}
+
+void Codegen::CallRuntimeHelperImmRegLocationRegLocation(CompilationUnit* cu, int helper_offset,
+ int arg0, RegLocation arg1,
+ RegLocation arg2, bool safepoint_pc) {
+ int r_tgt = CallHelperSetup(cu, helper_offset);
+ LoadValueDirectFixed(cu, arg1, TargetReg(kArg1));
+ if (arg2.wide == 0) {
+ LoadValueDirectFixed(cu, arg2, TargetReg(kArg2));
+ } else {
+ LoadValueDirectWideFixed(cu, arg2, TargetReg(kArg2), TargetReg(kArg3));
+ }
+ LoadConstant(cu, TargetReg(kArg0), arg0);
+ ClobberCalleeSave(cu);
+ CallHelper(cu, r_tgt, helper_offset, safepoint_pc);
+}
+
+/*
+ * If there are any ins passed in registers that have not been promoted
+ * to a callee-save register, flush them to the frame. Perform initial
+ * assignment of promoted arguments.
+ *
+ * ArgLocs is an array of location records describing the incoming arguments
+ * with one location record per word of argument.
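+ * For example, with the first three argument words passed in kArg1-kArg3, a
+ * method taking (Object this, long j, int i) has four in words: "this" and the
+ * halves of j arrive in registers, while i arrives in the frame and is only
+ * loaded here if its Dalvik vreg was promoted.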
+ */
+void Codegen::FlushIns(CompilationUnit* cu, RegLocation* ArgLocs, RegLocation rl_method)
+{
+ /*
+ * Dummy up a RegLocation for the incoming Method*
+ * It will attempt to keep kArg0 live (or copy it to home location
+ * if promoted).
+ */
+ RegLocation rl_src = rl_method;
+ rl_src.location = kLocPhysReg;
+ rl_src.low_reg = TargetReg(kArg0);
+ rl_src.home = false;
+ MarkLive(cu, rl_src.low_reg, rl_src.s_reg_low);
+ StoreValue(cu, rl_method, rl_src);
+ // If Method* has been promoted, explicitly flush
+ if (rl_method.location == kLocPhysReg) {
+ StoreWordDisp(cu, TargetReg(kSp), 0, TargetReg(kArg0));
+ }
+
+ if (cu->num_ins == 0)
+ return;
+ const int num_arg_regs = 3;
+ static SpecialTargetRegister arg_regs[] = {kArg1, kArg2, kArg3};
+ int start_vreg = cu->num_dalvik_registers - cu->num_ins;
+ /*
+ * Copy incoming arguments to their proper home locations.
+ * NOTE: an older version of dx had an issue in which
+ * it would reuse static method argument registers.
+ * This could result in the same Dalvik virtual register
+ * being promoted to both core and fp regs. To account for this,
+ * we only copy to the corresponding promoted physical register
+ * if it matches the type of the SSA name for the incoming
+ * argument. It is also possible that long and double arguments
+ * end up half-promoted. In those cases, we must flush the promoted
+ * half to memory as well.
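+ * For example, if only the low half of an incoming long was promoted, the
+ * register-resident half is still flushed so the in-memory copy of the pair
+ * stays consistent.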
+ */
+ for (int i = 0; i < cu->num_ins; i++) {
+ PromotionMap* v_map = &cu->promotion_map[start_vreg + i];
+ if (i < num_arg_regs) {
+ // If arriving in register
+ bool need_flush = true;
+ RegLocation* t_loc = &ArgLocs[i];
+ if ((v_map->core_location == kLocPhysReg) && !t_loc->fp) {
+ OpRegCopy(cu, v_map->core_reg, TargetReg(arg_regs[i]));
+ need_flush = false;
+ } else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) {
+ OpRegCopy(cu, v_map->FpReg, TargetReg(arg_regs[i]));
+ need_flush = false;
+ } else {
+ need_flush = true;
+ }
+
+ // For wide args, force flush if only half is promoted
+ if (t_loc->wide) {
+ PromotionMap* p_map = v_map + (t_loc->high_word ? -1 : +1);
+ need_flush |= (p_map->core_location != v_map->core_location) ||
+ (p_map->fp_location != v_map->fp_location);
+ }
+ if (need_flush) {
+ StoreBaseDisp(cu, TargetReg(kSp), SRegOffset(cu, start_vreg + i),
+ TargetReg(arg_regs[i]), kWord);
+ }
+ } else {
+ // If arriving in frame & promoted
+ if (v_map->core_location == kLocPhysReg) {
+ LoadWordDisp(cu, TargetReg(kSp), SRegOffset(cu, start_vreg + i),
+ v_map->core_reg);
+ }
+ if (v_map->fp_location == kLocPhysReg) {
+ LoadWordDisp(cu, TargetReg(kSp), SRegOffset(cu, start_vreg + i),
+ v_map->FpReg);
+ }
+ }
+ }
+}
+
+/*
+ * Bit of a hack here - in the absence of a real scheduling pass,
+ * emit the next instruction in static & direct invoke sequences.
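+ * Each call emits only the instruction(s) for the given state and returns
+ * state + 1; a return value of -1 signals that the sequence is complete. The
+ * caller interleaves these steps with argument loading (see LoadArgRegs).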
+ */
+static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
+ int state, uint32_t dex_idx, uint32_t unused,
+ uintptr_t direct_code, uintptr_t direct_method,
+ InvokeType type)
+{
+ Codegen* cg = cu->cg.get();
+ if (cu->instruction_set != kThumb2) {
+ // Disable sharpening
+ direct_code = 0;
+ direct_method = 0;
+ }
+ if (direct_code != 0 && direct_method != 0) {
+ switch (state) {
+ case 0: // Get the current Method* [sets kArg0]
+ if (direct_code != static_cast<unsigned int>(-1)) {
+ cg->LoadConstant(cu, cg->TargetReg(kInvokeTgt), direct_code);
+ } else {
+ LIR* data_target = ScanLiteralPool(cu->code_literal_list, dex_idx, 0);
+ if (data_target == NULL) {
+ data_target = AddWordData(cu, &cu->code_literal_list, dex_idx);
+ data_target->operands[1] = type;
+ }
+ LIR* load_pc_rel = cg->OpPcRelLoad(cu, cg->TargetReg(kInvokeTgt), data_target);
+ AppendLIR(cu, load_pc_rel);
+ DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
+ }
+ if (direct_method != static_cast<unsigned int>(-1)) {
+ cg->LoadConstant(cu, cg->TargetReg(kArg0), direct_method);
+ } else {
+ LIR* data_target = ScanLiteralPool(cu->method_literal_list, dex_idx, 0);
+ if (data_target == NULL) {
+ data_target = AddWordData(cu, &cu->method_literal_list, dex_idx);
+ data_target->operands[1] = type;
+ }
+ LIR* load_pc_rel = cg->OpPcRelLoad(cu, cg->TargetReg(kArg0), data_target);
+ AppendLIR(cu, load_pc_rel);
+ DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
+ }
+ break;
+ default:
+ return -1;
+ }
+ } else {
+ switch (state) {
+ case 0: // Get the current Method* [sets kArg0]
+ // TUNING: we can save a reg copy if Method* has been promoted.
+ cg->LoadCurrMethodDirect(cu, cg->TargetReg(kArg0));
+ break;
+ case 1: // Get method->dex_cache_resolved_methods_
+ cg->LoadWordDisp(cu, cg->TargetReg(kArg0),
+ mirror::AbstractMethod::DexCacheResolvedMethodsOffset().Int32Value(), cg->TargetReg(kArg0));
+ // Set up direct code if known.
+ if (direct_code != 0) {
+ if (direct_code != static_cast<unsigned int>(-1)) {
+ cg->LoadConstant(cu, cg->TargetReg(kInvokeTgt), direct_code);
+ } else {
+ LIR* data_target = ScanLiteralPool(cu->code_literal_list, dex_idx, 0);
+ if (data_target == NULL) {
+ data_target = AddWordData(cu, &cu->code_literal_list, dex_idx);
+ data_target->operands[1] = type;
+ }
+ LIR* load_pc_rel = cg->OpPcRelLoad(cu, cg->TargetReg(kInvokeTgt), data_target);
+ AppendLIR(cu, load_pc_rel);
+ DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
+ }
+ }
+ break;
+ case 2: // Grab target method*
+ cg->LoadWordDisp(cu, cg->TargetReg(kArg0),
+ mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() + dex_idx * 4,
+ cg->TargetReg(kArg0));
+ break;
+ case 3: // Grab the code from the method*
+ if (cu->instruction_set != kX86) {
+ if (direct_code == 0) {
+ cg->LoadWordDisp(cu, cg->TargetReg(kArg0),
+ mirror::AbstractMethod::GetCodeOffset().Int32Value(),
+ cg->TargetReg(kInvokeTgt));
+ }
+ break;
+ }
+ // Intentional fallthrough for x86
+ default:
+ return -1;
+ }
+ }
+ return state + 1;
+}
+
+/*
+ * Bit of a hack here - in the absence of a real scheduling pass,
+ * emit the next instruction in a virtual invoke sequence.
+ * We can use kLr as a temp prior to target address loading
+ * Note also that we'll load the first argument ("this") into
+ * kArg1 here rather than the standard LoadArgRegs.
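+ * The fast-path sequence is effectively:
+ * kArg0 = this->klass_->vtable_[method_idx]; kInvokeTgt = kArg0->code_ (code
+ * load skipped on x86).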
+ */
+static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
+ int state, uint32_t dex_idx, uint32_t method_idx,
+ uintptr_t unused, uintptr_t unused2, InvokeType unused3)
+{
+ Codegen* cg = cu->cg.get();
+ /*
+ * This is the fast path in which the target virtual method is
+ * fully resolved at compile time.
+ */
+ switch (state) {
+ case 0: { // Get "this" [set kArg1]
+ RegLocation rl_arg = info->args[0];
+ cg->LoadValueDirectFixed(cu, rl_arg, cg->TargetReg(kArg1));
+ break;
+ }
+ case 1: // Is "this" null? [use kArg1]
+ cg->GenNullCheck(cu, info->args[0].s_reg_low, cg->TargetReg(kArg1), info->opt_flags);
+ // get this->klass_ [use kArg1, set kInvokeTgt]
+ cg->LoadWordDisp(cu, cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
+ cg->TargetReg(kInvokeTgt));
+ break;
+ case 2: // Get this->klass_->vtable [use kInvokeTgt, set kInvokeTgt]
+ cg->LoadWordDisp(cu, cg->TargetReg(kInvokeTgt), mirror::Class::VTableOffset().Int32Value(),
+ cg->TargetReg(kInvokeTgt));
+ break;
+ case 3: // Get target method [use kInvokeTgt, set kArg0]
+ cg->LoadWordDisp(cu, cg->TargetReg(kInvokeTgt), (method_idx * 4) +
+ mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(),
+ cg->TargetReg(kArg0));
+ break;
+ case 4: // Get the compiled code address [uses kArg0, sets kInvokeTgt]
+ if (cu->instruction_set != kX86) {
+ cg->LoadWordDisp(cu, cg->TargetReg(kArg0),
+ mirror::AbstractMethod::GetCodeOffset().Int32Value(),
+ cg->TargetReg(kInvokeTgt));
+ break;
+ }
+ // Intentional fallthrough for X86
+ default:
+ return -1;
+ }
+ return state + 1;
+}
+
+/*
+ * All invoke-interface calls bounce off of art_quick_invoke_interface_trampoline,
+ * which will locate the target and continue on via a tail call.
+ */
+static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
+ uint32_t dex_idx, uint32_t unused, uintptr_t unused2,
+ uintptr_t direct_method, InvokeType unused4)
+{
+ Codegen* cg = cu->cg.get();
+ if (cu->instruction_set != kThumb2) {
+ // Disable sharpening
+ direct_method = 0;
+ }
+ int trampoline = (cu->instruction_set == kX86) ? 0
+ : ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline);
+
+ if (direct_method != 0) {
+ switch (state) {
+ case 0: // Load the trampoline target [sets kInvokeTgt].
+ if (cu->instruction_set != kX86) {
+ cg->LoadWordDisp(cu, cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt));
+ }
+ // Get the interface Method* [sets kArg0]
+ if (direct_method != static_cast<unsigned int>(-1)) {
+ cg->LoadConstant(cu, cg->TargetReg(kArg0), direct_method);
+ } else {
+ LIR* data_target = ScanLiteralPool(cu->method_literal_list, dex_idx, 0);
+ if (data_target == NULL) {
+ data_target = AddWordData(cu, &cu->method_literal_list, dex_idx);
+ data_target->operands[1] = kInterface;
+ }
+ LIR* load_pc_rel = cg->OpPcRelLoad(cu, cg->TargetReg(kArg0), data_target);
+ AppendLIR(cu, load_pc_rel);
+ DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
+ }
+ break;
+ default:
+ return -1;
+ }
+ } else {
+ switch (state) {
+ case 0:
+ // Get the current Method* [sets kArg0] - TUNING: remove copy of method if it is promoted.
+ cg->LoadCurrMethodDirect(cu, cg->TargetReg(kArg0));
+ // Load the trampoline target [sets kInvokeTgt].
+ if (cu->instruction_set != kX86) {
+ cg->LoadWordDisp(cu, cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt));
+ }
+ break;
+ case 1: // Get method->dex_cache_resolved_methods_ [set/use kArg0]
+ cg->LoadWordDisp(cu, cg->TargetReg(kArg0),
+ mirror::AbstractMethod::DexCacheResolvedMethodsOffset().Int32Value(),
+ cg->TargetReg(kArg0));
+ break;
+ case 2: // Grab target method* [set/use kArg0]
+ cg->LoadWordDisp(cu, cg->TargetReg(kArg0),
+ mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() + dex_idx * 4,
+ cg->TargetReg(kArg0));
+ break;
+ default:
+ return -1;
+ }
+ }
+ return state + 1;
+}
+
+static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, int trampoline,
+ int state, uint32_t dex_idx, uint32_t method_idx)
+{
+ Codegen* cg = cu->cg.get();
+ /*
+ * This handles the case in which the base method is not fully
+ * resolved at compile time; we bail to a runtime helper.
+ */
+ if (state == 0) {
+ if (cu->instruction_set != kX86) {
+ // Load trampoline target
+ cg->LoadWordDisp(cu, cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt));
+ }
+ // Load kArg0 with method index
+ cg->LoadConstant(cu, cg->TargetReg(kArg0), dex_idx);
+ return 1;
+ }
+ return -1;
+}
+
+static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
+ int state, uint32_t dex_idx, uint32_t method_idx,
+ uintptr_t unused, uintptr_t unused2,
+ InvokeType unused3)
+{
+ int trampoline = ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
+ return NextInvokeInsnSP(cu, info, trampoline, state, dex_idx, 0);
+}
+
+static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
+ uint32_t dex_idx, uint32_t method_idx, uintptr_t unused,
+ uintptr_t unused2, InvokeType unused3)
+{
+ int trampoline = ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
+ return NextInvokeInsnSP(cu, info, trampoline, state, dex_idx, 0);
+}
+
+static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
+ uint32_t dex_idx, uint32_t method_idx, uintptr_t unused,
+ uintptr_t unused2, InvokeType unused3)
+{
+ int trampoline = ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
+ return NextInvokeInsnSP(cu, info, trampoline, state, dex_idx, 0);
+}
+
+static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
+ uint32_t dex_idx, uint32_t method_idx, uintptr_t unused,
+ uintptr_t unused2, InvokeType unused3)
+{
+ int trampoline = ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
+ return NextInvokeInsnSP(cu, info, trampoline, state, dex_idx, 0);
+}
+
+static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
+ CallInfo* info, int state,
+ uint32_t dex_idx, uint32_t unused,
+ uintptr_t unused2, uintptr_t unused3,
+ InvokeType unused4)
+{
+ int trampoline = ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
+ return NextInvokeInsnSP(cu, info, trampoline, state, dex_idx, 0);
+}
+
+static int LoadArgRegs(CompilationUnit* cu, CallInfo* info, int call_state,
+ NextCallInsn next_call_insn, uint32_t dex_idx,
+ uint32_t method_idx, uintptr_t direct_code,
+ uintptr_t direct_method, InvokeType type, bool skip_this)
+{
+ Codegen* cg = cu->cg.get();
+ int last_arg_reg = cg->TargetReg(kArg3);
+ int next_reg = cg->TargetReg(kArg1);
+ int next_arg = 0;
+ if (skip_this) {
+ next_reg++;
+ next_arg++;
+ }
+ for (; (next_reg <= last_arg_reg) && (next_arg < info->num_arg_words); next_reg++) {
+ RegLocation rl_arg = info->args[next_arg++];
+ rl_arg = UpdateRawLoc(cu, rl_arg);
+ if (rl_arg.wide && (next_reg <= cg->TargetReg(kArg2))) {
+ cg->LoadValueDirectWideFixed(cu, rl_arg, next_reg, next_reg + 1);
+ next_reg++;
+ next_arg++;
+ } else {
+ if (rl_arg.wide) {
+ rl_arg.wide = false;
+ rl_arg.is_const = false;
+ }
+ cg->LoadValueDirectFixed(cu, rl_arg, next_reg);
+ }
+ call_state = next_call_insn(cu, info, call_state, dex_idx, method_idx,
+ direct_code, direct_method, type);
+ }
+ return call_state;
+}
+
+/*
+ * Load up to 5 arguments, the first three of which will be in
+ * kArg1 .. kArg3. On entry kArg0 contains the current method pointer,
+ * and as part of the load sequence, it must be replaced with
+ * the target method pointer. Note, this may also be called
+ * for "range" variants if the number of arguments is 5 or fewer.
+ */
+int Codegen::GenDalvikArgsNoRange(CompilationUnit* cu, CallInfo* info,
+ int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
+ uint32_t dex_idx, uint32_t method_idx, uintptr_t direct_code,
+ uintptr_t direct_method, InvokeType type, bool skip_this)
+{
+ RegLocation rl_arg;
+
+ /* If no arguments, just return */
+ if (info->num_arg_words == 0)
+ return call_state;
+
+ call_state = next_call_insn(cu, info, call_state, dex_idx, method_idx,
+ direct_code, direct_method, type);
+
+ DCHECK_LE(info->num_arg_words, 5);
+ if (info->num_arg_words > 3) {
+ int32_t next_use = 3;
+ // Detect special case of wide arg spanning arg3/arg4
+ RegLocation rl_use0 = info->args[0];
+ RegLocation rl_use1 = info->args[1];
+ RegLocation rl_use2 = info->args[2];
+ if (((!rl_use0.wide && !rl_use1.wide) || rl_use0.wide) &&
+ rl_use2.wide) {
+ int reg = -1;
+ // Wide spans, we need the 2nd half of uses[2].
+ rl_arg = UpdateLocWide(cu, rl_use2);
+ if (rl_arg.location == kLocPhysReg) {
+ reg = rl_arg.high_reg;
+ } else {
+ // kArg2 & kArg3 can safely be used here
+ reg = TargetReg(kArg3);
+ LoadWordDisp(cu, TargetReg(kSp), SRegOffset(cu, rl_arg.s_reg_low) + 4, reg);
+ call_state = next_call_insn(cu, info, call_state, dex_idx,
+ method_idx, direct_code, direct_method, type);
+ }
+ StoreBaseDisp(cu, TargetReg(kSp), (next_use + 1) * 4, reg, kWord);
+ StoreBaseDisp(cu, TargetReg(kSp), 16 /* (3+1)*4 */, reg, kWord);
+ call_state = next_call_insn(cu, info, call_state, dex_idx, method_idx,
+ direct_code, direct_method, type);
+ next_use++;
+ }
+ // Loop through the rest
+ while (next_use < info->num_arg_words) {
+ int low_reg;
+ int high_reg = -1;
+ rl_arg = info->args[next_use];
+ rl_arg = UpdateRawLoc(cu, rl_arg);
+ if (rl_arg.location == kLocPhysReg) {
+ low_reg = rl_arg.low_reg;
+ high_reg = rl_arg.high_reg;
+ } else {
+ low_reg = TargetReg(kArg2);
+ if (rl_arg.wide) {
+ high_reg = TargetReg(kArg3);
+ LoadValueDirectWideFixed(cu, rl_arg, low_reg, high_reg);
+ } else {
+ LoadValueDirectFixed(cu, rl_arg, low_reg);
+ }
+ call_state = next_call_insn(cu, info, call_state, dex_idx,
+ method_idx, direct_code, direct_method, type);
+ }
+ int outs_offset = (next_use + 1) * 4;
+ if (rl_arg.wide) {
+ StoreBaseDispWide(cu, TargetReg(kSp), outs_offset, low_reg, high_reg);
+ next_use += 2;
+ } else {
+ StoreWordDisp(cu, TargetReg(kSp), outs_offset, low_reg);
+ next_use++;
+ }
+ call_state = next_call_insn(cu, info, call_state, dex_idx, method_idx,
+ direct_code, direct_method, type);
+ }
+ }
+
+ call_state = LoadArgRegs(cu, info, call_state, next_call_insn,
+ dex_idx, method_idx, direct_code, direct_method,
+ type, skip_this);
+
+ if (pcrLabel) {
+ *pcrLabel = GenNullCheck(cu, info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
+ }
+ return call_state;
+}
+
+/*
+ * May have 0+ arguments (also used for jumbo). Note that
+ * source virtual registers may be in physical registers, so may
+ * need to be flushed to home location before copying. This
+ * applies to arg3 and above (see below).
+ *
+ * Two general strategies:
+ * If < 20 arguments
+ * Pass args 3-18 using vldm/vstm block copy
+ * Pass arg0, arg1 & arg2 in kArg1-kArg3
+ * If 20+ arguments
+ * Pass args arg19+ using memcpy block copy
+ * Pass arg0, arg1 & arg2 in kArg1-kArg3
+ *
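+ * For example, a 10-word range call on Thumb2 puts words 0-2 in kArg1-kArg3
+ * and block-copies words 3-9 from their home frame slots into the outs area
+ * with a vldm/vstm pair.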
+ */
+int Codegen::GenDalvikArgsRange(CompilationUnit* cu, CallInfo* info, int call_state,
+ LIR** pcrLabel, NextCallInsn next_call_insn, uint32_t dex_idx,
+ uint32_t method_idx, uintptr_t direct_code, uintptr_t direct_method,
+ InvokeType type, bool skip_this)
+{
+
+ // If we can treat it as non-range (Jumbo ops will use range form)
+ if (info->num_arg_words <= 5)
+ return GenDalvikArgsNoRange(cu, info, call_state, pcrLabel,
+ next_call_insn, dex_idx, method_idx,
+ direct_code, direct_method, type, skip_this);
+ /*
+ * First load the non-register arguments. Both forms expect all
+ * of the source arguments to be in their home frame location, so
+ * scan the s_reg names and flush any that have been promoted to
+ * frame backing storage.
+ */
+ // Scan the rest of the args - if in phys_reg flush to memory
+ for (int next_arg = 0; next_arg < info->num_arg_words;) {
+ RegLocation loc = info->args[next_arg];
+ if (loc.wide) {
+ loc = UpdateLocWide(cu, loc);
+ if ((next_arg >= 2) && (loc.location == kLocPhysReg)) {
+ StoreBaseDispWide(cu, TargetReg(kSp), SRegOffset(cu, loc.s_reg_low),
+ loc.low_reg, loc.high_reg);
+ }
+ next_arg += 2;
+ } else {
+ loc = UpdateLoc(cu, loc);
+ if ((next_arg >= 3) && (loc.location == kLocPhysReg)) {
+ StoreBaseDisp(cu, TargetReg(kSp), SRegOffset(cu, loc.s_reg_low),
+ loc.low_reg, kWord);
+ }
+ next_arg++;
+ }
+ }
+
+ int start_offset = SRegOffset(cu, info->args[3].s_reg_low);
+ int outs_offset = 4 /* Method* */ + (3 * 4);
+ if (cu->instruction_set != kThumb2) {
+ // Generate memcpy
+ OpRegRegImm(cu, kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset);
+ OpRegRegImm(cu, kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset);
+ CallRuntimeHelperRegRegImm(cu, ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
+ TargetReg(kArg1), (info->num_arg_words - 3) * 4, false);
+ } else {
+ if (info->num_arg_words >= 20) {
+ // Generate memcpy
+ OpRegRegImm(cu, kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset);
+ OpRegRegImm(cu, kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset);
+ CallRuntimeHelperRegRegImm(cu, ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
+ TargetReg(kArg1), (info->num_arg_words - 3) * 4, false);
+ } else {
+ // Use vldm/vstm pair using kArg3 as a temp
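+ // kArg3 first points at the home frame slot of argument word 3 for the vldm,
+ // then is re-pointed at the outs area (just above the Method* slot) for the vstm.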
+ int regs_left = std::min(info->num_arg_words - 3, 16);
+ call_state = next_call_insn(cu, info, call_state, dex_idx, method_idx,
+ direct_code, direct_method, type);
+ OpRegRegImm(cu, kOpAdd, TargetReg(kArg3), TargetReg(kSp), start_offset);
+ LIR* ld = OpVldm(cu, TargetReg(kArg3), regs_left);
+ // TUNING: loosen barrier
+ ld->def_mask = ENCODE_ALL;
+ SetMemRefType(cu, ld, true /* is_load */, kDalvikReg);
+ call_state = next_call_insn(cu, info, call_state, dex_idx, method_idx,
+ direct_code, direct_method, type);
+ OpRegRegImm(cu, kOpAdd, TargetReg(kArg3), TargetReg(kSp), 4 /* Method* */ + (3 * 4));
+ call_state = next_call_insn(cu, info, call_state, dex_idx, method_idx,
+ direct_code, direct_method, type);
+ LIR* st = OpVstm(cu, TargetReg(kArg3), regs_left);
+ SetMemRefType(cu, st, false /* is_load */, kDalvikReg);
+ st->def_mask = ENCODE_ALL;
+ call_state = next_call_insn(cu, info, call_state, dex_idx, method_idx,
+ direct_code, direct_method, type);
+ }
+ }
+
+ call_state = LoadArgRegs(cu, info, call_state, next_call_insn,
+ dex_idx, method_idx, direct_code, direct_method,
+ type, skip_this);
+
+ call_state = next_call_insn(cu, info, call_state, dex_idx, method_idx,
+ direct_code, direct_method, type);
+ if (pcrLabel) {
+ *pcrLabel = GenNullCheck(cu, info->args[0].s_reg_low, TargetReg(kArg1),
+ info->opt_flags);
+ }
+ return call_state;
+}
+
+RegLocation Codegen::InlineTarget(CompilationUnit* cu, CallInfo* info)
+{
+ RegLocation res;
+ if (info->result.location == kLocInvalid) {
+ res = GetReturn(cu, false);
+ } else {
+ res = info->result;
+ }
+ return res;
+}
+
+RegLocation Codegen::InlineTargetWide(CompilationUnit* cu, CallInfo* info)
+{
+ RegLocation res;
+ if (info->result.location == kLocInvalid) {
+ res = GetReturnWide(cu, false);
+ } else {
+ res = info->result;
+ }
+ return res;
+}
+
+bool Codegen::GenInlinedCharAt(CompilationUnit* cu, CallInfo* info)
+{
+ if (cu->instruction_set == kMips) {
+ // TODO - add Mips implementation
+ return false;
+ }
+ // Location of reference to data array
+ int value_offset = mirror::String::ValueOffset().Int32Value();
+ // Location of count
+ int count_offset = mirror::String::CountOffset().Int32Value();
+ // Starting offset within data array
+ int offset_offset = mirror::String::OffsetOffset().Int32Value();
+ // Start of char data within array_
+ int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
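+ // In effect: result = ((uint16_t*)(str->value_ + data_offset))[str->offset_ + idx],
+ // with an optional bounds check of idx against str->count_.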
+
+ RegLocation rl_obj = info->args[0];
+ RegLocation rl_idx = info->args[1];
+ rl_obj = LoadValue(cu, rl_obj, kCoreReg);
+ rl_idx = LoadValue(cu, rl_idx, kCoreReg);
+ int reg_max;
+ GenNullCheck(cu, rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags);
+ bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
+ LIR* launch_pad = NULL;
+ int reg_off = INVALID_REG;
+ int reg_ptr = INVALID_REG;
+ if (cu->instruction_set != kX86) {
+ reg_off = AllocTemp(cu);
+ reg_ptr = AllocTemp(cu);
+ if (range_check) {
+ reg_max = AllocTemp(cu);
+ LoadWordDisp(cu, rl_obj.low_reg, count_offset, reg_max);
+ }
+ LoadWordDisp(cu, rl_obj.low_reg, offset_offset, reg_off);
+ LoadWordDisp(cu, rl_obj.low_reg, value_offset, reg_ptr);
+ if (range_check) {
+ // Set up a launch pad to allow retry in case of bounds violation
+ launch_pad = RawLIR(cu, 0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
+ InsertGrowableList(cu, &cu->intrinsic_launchpads,
+ reinterpret_cast<uintptr_t>(launch_pad));
+ OpRegReg(cu, kOpCmp, rl_idx.low_reg, reg_max);
+ FreeTemp(cu, reg_max);
+ OpCondBranch(cu, kCondCs, launch_pad);
+ }
+ } else {
+ if (range_check) {
+ reg_max = AllocTemp(cu);
+ LoadWordDisp(cu, rl_obj.low_reg, count_offset, reg_max);
+ // Set up a launch pad to allow retry in case of bounds violation
+ launch_pad = RawLIR(cu, 0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
+ InsertGrowableList(cu, &cu->intrinsic_launchpads,
+ reinterpret_cast<uintptr_t>(launch_pad));
+ OpRegReg(cu, kOpCmp, rl_idx.low_reg, reg_max);
+ FreeTemp(cu, reg_max);
+ OpCondBranch(cu, kCondCc, launch_pad);
+ }
+ reg_off = AllocTemp(cu);
+ reg_ptr = AllocTemp(cu);
+ LoadWordDisp(cu, rl_obj.low_reg, offset_offset, reg_off);
+ LoadWordDisp(cu, rl_obj.low_reg, value_offset, reg_ptr);
+ }
+ OpRegImm(cu, kOpAdd, reg_ptr, data_offset);
+ OpRegReg(cu, kOpAdd, reg_off, rl_idx.low_reg);
+ FreeTemp(cu, rl_obj.low_reg);
+ FreeTemp(cu, rl_idx.low_reg);
+ RegLocation rl_dest = InlineTarget(cu, info);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ LoadBaseIndexed(cu, reg_ptr, reg_off, rl_result.low_reg, 1, kUnsignedHalf);
+ FreeTemp(cu, reg_off);
+ FreeTemp(cu, reg_ptr);
+ StoreValue(cu, rl_dest, rl_result);
+ if (range_check) {
+ launch_pad->operands[2] = 0; // no resumption
+ }
+ // Record that we've already inlined & null checked
+ info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
+ return true;
+}
+
+// Generates an inlined String.isEmpty() or String.length().
+bool Codegen::GenInlinedStringIsEmptyOrLength(CompilationUnit* cu, CallInfo* info, bool is_empty)
+{
+ if (cu->instruction_set == kMips) {
+ // TODO - add Mips implementation
+ return false;
+ }
+ // dst = src.length();
+ RegLocation rl_obj = info->args[0];
+ rl_obj = LoadValue(cu, rl_obj, kCoreReg);
+ RegLocation rl_dest = InlineTarget(cu, info);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ GenNullCheck(cu, rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags);
+ LoadWordDisp(cu, rl_obj.low_reg, mirror::String::CountOffset().Int32Value(),
+ rl_result.low_reg);
+ if (is_empty) {
+ // dst = (dst == 0);
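+ // The Thumb2 sequence below relies on the negate setting the carry flag:
+ // for dst == 0 there is no borrow, so adc produces 0 + 0 + 1 = 1; for any
+ // other value the borrow clears the carry and dst + (-dst) + 0 = 0.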
+ if (cu->instruction_set == kThumb2) {
+ int t_reg = AllocTemp(cu);
+ OpRegReg(cu, kOpNeg, t_reg, rl_result.low_reg);
+ OpRegRegReg(cu, kOpAdc, rl_result.low_reg, rl_result.low_reg, t_reg);
+ } else {
+ DCHECK_EQ(cu->instruction_set, kX86);
+ OpRegImm(cu, kOpSub, rl_result.low_reg, 1);
+ OpRegImm(cu, kOpLsr, rl_result.low_reg, 31);
+ }
+ }
+ StoreValue(cu, rl_dest, rl_result);
+ return true;
+}
+
+bool Codegen::GenInlinedAbsInt(CompilationUnit *cu, CallInfo* info)
+{
+ if (cu->instruction_set == kMips) {
+ // TODO - add Mips implementation
+ return false;
+ }
+ RegLocation rl_src = info->args[0];
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+ RegLocation rl_dest = InlineTarget(cu, info);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ int sign_reg = AllocTemp(cu);
+ // abs(x): y = x >> 31 (arithmetic shift), result = (x + y) ^ y.
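+ // e.g. x = -5: y = 0xFFFFFFFF, x + y = -6, and -6 ^ y = 5.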
+ OpRegRegImm(cu, kOpAsr, sign_reg, rl_src.low_reg, 31);
+ OpRegRegReg(cu, kOpAdd, rl_result.low_reg, rl_src.low_reg, sign_reg);
+ OpRegReg(cu, kOpXor, rl_result.low_reg, sign_reg);
+ StoreValue(cu, rl_dest, rl_result);
+ return true;
+}
+
+bool Codegen::GenInlinedAbsLong(CompilationUnit *cu, CallInfo* info)
+{
+ if (cu->instruction_set == kMips) {
+ // TODO - add Mips implementation
+ return false;
+ }
+ if (cu->instruction_set == kThumb2) {
+ RegLocation rl_src = info->args[0];
+ rl_src = LoadValueWide(cu, rl_src, kCoreReg);
+ RegLocation rl_dest = InlineTargetWide(cu, info);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ int sign_reg = AllocTemp(cu);
+ // abs(x): y = x >> 31 (arithmetic shift), result = (x + y) ^ y.
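+ // 64-bit form of the same identity: y replicates the sign of the high word,
+ // the adc folds the low-word carry into the high word, and both halves are
+ // XORed with y.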
+ OpRegRegImm(cu, kOpAsr, sign_reg, rl_src.high_reg, 31);
+ OpRegRegReg(cu, kOpAdd, rl_result.low_reg, rl_src.low_reg, sign_reg);
+ OpRegRegReg(cu, kOpAdc, rl_result.high_reg, rl_src.high_reg, sign_reg);
+ OpRegReg(cu, kOpXor, rl_result.low_reg, sign_reg);
+ OpRegReg(cu, kOpXor, rl_result.high_reg, sign_reg);
+ StoreValueWide(cu, rl_dest, rl_result);
+ return true;
+ } else {
+ DCHECK_EQ(cu->instruction_set, kX86);
+ // Reuse source registers to avoid running out of temps
+ RegLocation rl_src = info->args[0];
+ rl_src = LoadValueWide(cu, rl_src, kCoreReg);
+ RegLocation rl_dest = InlineTargetWide(cu, info);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ OpRegCopyWide(cu, rl_result.low_reg, rl_result.high_reg, rl_src.low_reg, rl_src.high_reg);
+ FreeTemp(cu, rl_src.low_reg);
+ FreeTemp(cu, rl_src.high_reg);
+ int sign_reg = AllocTemp(cu);
+ // abs(x): y = x >> 31 (arithmetic shift), result = (x + y) ^ y.
+ OpRegRegImm(cu, kOpAsr, sign_reg, rl_result.high_reg, 31);
+ OpRegReg(cu, kOpAdd, rl_result.low_reg, sign_reg);
+ OpRegReg(cu, kOpAdc, rl_result.high_reg, sign_reg);
+ OpRegReg(cu, kOpXor, rl_result.low_reg, sign_reg);
+ OpRegReg(cu, kOpXor, rl_result.high_reg, sign_reg);
+ StoreValueWide(cu, rl_dest, rl_result);
+ return true;
+ }
+}
+
+bool Codegen::GenInlinedFloatCvt(CompilationUnit *cu, CallInfo* info)
+{
+ if (cu->instruction_set == kMips) {
+ // TODO - add Mips implementation
+ return false;
+ }
+ RegLocation rl_src = info->args[0];
+ RegLocation rl_dest = InlineTarget(cu, info);
+ StoreValue(cu, rl_dest, rl_src);
+ return true;
+}
+
+bool Codegen::GenInlinedDoubleCvt(CompilationUnit *cu, CallInfo* info)
+{
+ if (cu->instruction_set == kMips) {
+ // TODO - add Mips implementation
+ return false;
+ }
+ RegLocation rl_src = info->args[0];
+ RegLocation rl_dest = InlineTargetWide(cu, info);
+ StoreValueWide(cu, rl_dest, rl_src);
+ return true;
+}
+
+/*
+ * Fast String.indexOf(I) and indexOf(II). Tests for the simple case of char <= 0xFFFF;
+ * otherwise bails to the standard library code.
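+ * The string, the character, and the start index go in kArg0-kArg2; a character
+ * above 0xFFFF branches to the intrinsic launch pad, which re-issues the
+ * original invoke.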
+ */
+bool Codegen::GenInlinedIndexOf(CompilationUnit* cu, CallInfo* info, bool zero_based)
+{
+ if (cu->instruction_set == kMips) {
+ // TODO - add Mips implementation
+ return false;
+ }
+ ClobberCalleeSave(cu);
+ LockCallTemps(cu); // Using fixed registers
+ int reg_ptr = TargetReg(kArg0);
+ int reg_char = TargetReg(kArg1);
+ int reg_start = TargetReg(kArg2);
+
+ RegLocation rl_obj = info->args[0];
+ RegLocation rl_char = info->args[1];
+ RegLocation rl_start = info->args[2];
+ LoadValueDirectFixed(cu, rl_obj, reg_ptr);
+ LoadValueDirectFixed(cu, rl_char, reg_char);
+ if (zero_based) {
+ LoadConstant(cu, reg_start, 0);
+ } else {
+ LoadValueDirectFixed(cu, rl_start, reg_start);
+ }
+ int r_tgt = (cu->instruction_set != kX86) ? LoadHelper(cu, ENTRYPOINT_OFFSET(pIndexOf)) : 0;
+ GenNullCheck(cu, rl_obj.s_reg_low, reg_ptr, info->opt_flags);
+ LIR* launch_pad = RawLIR(cu, 0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
+ InsertGrowableList(cu, &cu->intrinsic_launchpads, reinterpret_cast<uintptr_t>(launch_pad));
+ OpCmpImmBranch(cu, kCondGt, reg_char, 0xFFFF, launch_pad);
+ // NOTE: not a safepoint
+ if (cu->instruction_set != kX86) {
+ OpReg(cu, kOpBlx, r_tgt);
+ } else {
+ OpThreadMem(cu, kOpBlx, ENTRYPOINT_OFFSET(pIndexOf));
+ }
+ LIR* resume_tgt = NewLIR0(cu, kPseudoTargetLabel);
+ launch_pad->operands[2] = reinterpret_cast<uintptr_t>(resume_tgt);
+ // Record that we've already inlined & null checked
+ info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
+ RegLocation rl_return = GetReturn(cu, false);
+ RegLocation rl_dest = InlineTarget(cu, info);
+ StoreValue(cu, rl_dest, rl_return);
+ return true;
+}
+
+/* Fast String.compareTo(Ljava/lang/String;)I. */
+bool Codegen::GenInlinedStringCompareTo(CompilationUnit* cu, CallInfo* info)
+{
+ if (cu->instruction_set == kMips) {
+ // TODO - add Mips implementation
+ return false;
+ }
+ ClobberCalleeSave(cu);
+ LockCallTemps(cu); // Using fixed registers
+ int reg_this = TargetReg(kArg0);
+ int reg_cmp = TargetReg(kArg1);
+
+ RegLocation rl_this = info->args[0];
+ RegLocation rl_cmp = info->args[1];
+ LoadValueDirectFixed(cu, rl_this, reg_this);
+ LoadValueDirectFixed(cu, rl_cmp, reg_cmp);
+ int r_tgt = (cu->instruction_set != kX86) ?
+ LoadHelper(cu, ENTRYPOINT_OFFSET(pStringCompareTo)) : 0;
+ GenNullCheck(cu, rl_this.s_reg_low, reg_this, info->opt_flags);
+ // TUNING: check if rl_cmp.s_reg_low is already null checked
+ LIR* launch_pad = RawLIR(cu, 0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
+ InsertGrowableList(cu, &cu->intrinsic_launchpads, reinterpret_cast<uintptr_t>(launch_pad));
+ OpCmpImmBranch(cu, kCondEq, reg_cmp, 0, launch_pad);
+ // NOTE: not a safepoint
+ if (cu->instruction_set != kX86) {
+ OpReg(cu, kOpBlx, r_tgt);
+ } else {
+ OpThreadMem(cu, kOpBlx, ENTRYPOINT_OFFSET(pStringCompareTo));
+ }
+ launch_pad->operands[2] = 0; // No return possible
+ // Record that we've already inlined & null checked
+ info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
+ RegLocation rl_return = GetReturn(cu, false);
+ RegLocation rl_dest = InlineTarget(cu, info);
+ StoreValue(cu, rl_dest, rl_return);
+ return true;
+}
+
+bool Codegen::GenInlinedCurrentThread(CompilationUnit* cu, CallInfo* info) {
+ RegLocation rl_dest = InlineTarget(cu, info);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ int offset = Thread::PeerOffset().Int32Value();
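+ // Thread::PeerOffset() is the offset of the managed java.lang.Thread peer within
+ // the native Thread: ARM/MIPS read it relative to the thread register (kSelf),
+ // while x86 reads it through a thread-local memory operand.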
+ if (cu->instruction_set == kThumb2 || cu->instruction_set == kMips) {
+ LoadWordDisp(cu, TargetReg(kSelf), offset, rl_result.low_reg);
+ } else {
+ CHECK(cu->instruction_set == kX86);
+ ((X86Codegen*)this)->OpRegThreadMem(cu, kOpMov, rl_result.low_reg, offset);
+ }
+ StoreValue(cu, rl_dest, rl_result);
+ return true;
+}
+
+bool Codegen::GenInlinedUnsafeGet(CompilationUnit* cu, CallInfo* info,
+ bool is_long, bool is_volatile) {
+ if (cu->instruction_set == kMips) {
+ // TODO - add Mips implementation
+ return false;
+ }
+ // Unused - RegLocation rl_src_unsafe = info->args[0];
+ RegLocation rl_src_obj = info->args[1]; // Object
+ RegLocation rl_src_offset = info->args[2]; // long low
+ rl_src_offset.wide = 0; // ignore high half in info->args[3]
+ RegLocation rl_dest = InlineTarget(cu, info); // result reg
+ if (is_volatile) {
+ GenMemBarrier(cu, kLoadLoad);
+ }
+ RegLocation rl_object = LoadValue(cu, rl_src_obj, kCoreReg);
+ RegLocation rl_offset = LoadValue(cu, rl_src_offset, kCoreReg);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ if (is_long) {
+ OpRegReg(cu, kOpAdd, rl_object.low_reg, rl_offset.low_reg);
+ LoadBaseDispWide(cu, rl_object.low_reg, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
+ StoreValueWide(cu, rl_dest, rl_result);
+ } else {
+ LoadBaseIndexed(cu, rl_object.low_reg, rl_offset.low_reg, rl_result.low_reg, 0, kWord);
+ StoreValue(cu, rl_dest, rl_result);
+ }
+ return true;
+}
+
+bool Codegen::GenInlinedUnsafePut(CompilationUnit* cu, CallInfo* info, bool is_long,
+ bool is_object, bool is_volatile, bool is_ordered) {
+ if (cu->instruction_set == kMips) {
+ // TODO - add Mips implementation
+ return false;
+ }
+ // Unused - RegLocation rl_src_unsafe = info->args[0];
+ RegLocation rl_src_obj = info->args[1]; // Object
+ RegLocation rl_src_offset = info->args[2]; // long low
+ rl_src_offset.wide = 0; // ignore high half in info->args[3]
+ RegLocation rl_src_value = info->args[4]; // value to store
+ if (is_volatile || is_ordered) {
+ GenMemBarrier(cu, kStoreStore);
+ }
+ RegLocation rl_object = LoadValue(cu, rl_src_obj, kCoreReg);
+ RegLocation rl_offset = LoadValue(cu, rl_src_offset, kCoreReg);
+ RegLocation rl_value = LoadValue(cu, rl_src_value, kCoreReg);
+ if (is_long) {
+ OpRegReg(cu, kOpAdd, rl_object.low_reg, rl_offset.low_reg);
+ StoreBaseDispWide(cu, rl_object.low_reg, 0, rl_value.low_reg, rl_value.high_reg);
+ } else {
+ StoreBaseIndexed(cu, rl_object.low_reg, rl_offset.low_reg, rl_value.low_reg, 0, kWord);
+ }
+ if (is_volatile) {
+ GenMemBarrier(cu, kStoreLoad);
+ }
+ if (is_object) {
+ MarkGCCard(cu, rl_value.low_reg, rl_object.low_reg);
+ }
+ return true;
+}
+
+bool Codegen::GenIntrinsic(CompilationUnit* cu, CallInfo* info)
+{
+ if (info->opt_flags & MIR_INLINED) {
+ return false;
+ }
+ /*
+ * TODO: move these to a target-specific structured constant array
+ * and use a generic match function. The list of intrinsics may be
+ * slightly different depending on target.
+ * TODO: Fold this into a matching function that runs during
+ * basic block building. This should be part of the action for
+ * small method inlining and recognition of the special object init
+ * method. By doing this during basic block construction, we can also
+ * take advantage of/generate new useful dataflow info.
+ */
+ std::string tgt_method(PrettyMethod(info->index, *cu->dex_file));
+ if (tgt_method.find(" java.lang") != std::string::npos) {
+ if (tgt_method == "long java.lang.Double.doubleToRawLongBits(double)") {
+ return GenInlinedDoubleCvt(cu, info);
+ }
+ if (tgt_method == "double java.lang.Double.longBitsToDouble(long)") {
+ return GenInlinedDoubleCvt(cu, info);
+ }
+ if (tgt_method == "int java.lang.Float.float_to_raw_int_bits(float)") {
+ return GenInlinedFloatCvt(cu, info);
+ }
+ if (tgt_method == "float java.lang.Float.intBitsToFloat(int)") {
+ return GenInlinedFloatCvt(cu, info);
+ }
+ if (tgt_method == "int java.lang.Math.abs(int)" ||
+ tgt_method == "int java.lang.StrictMath.abs(int)") {
+ return GenInlinedAbsInt(cu, info);
+ }
+ if (tgt_method == "long java.lang.Math.abs(long)" ||
+ tgt_method == "long java.lang.StrictMath.abs(long)") {
+ return GenInlinedAbsLong(cu, info);
+ }
+ if (tgt_method == "int java.lang.Math.max(int, int)" ||
+ tgt_method == "int java.lang.StrictMath.max(int, int)") {
+ return GenInlinedMinMaxInt(cu, info, false /* is_min */);
+ }
+ if (tgt_method == "int java.lang.Math.min(int, int)" ||
+ tgt_method == "int java.lang.StrictMath.min(int, int)") {
+ return GenInlinedMinMaxInt(cu, info, true /* is_min */);
+ }
+ if (tgt_method == "double java.lang.Math.sqrt(double)" ||
+ tgt_method == "double java.lang.StrictMath.sqrt(double)") {
+ return GenInlinedSqrt(cu, info);
+ }
+ if (tgt_method == "char java.lang.String.charAt(int)") {
+ return GenInlinedCharAt(cu, info);
+ }
+ if (tgt_method == "int java.lang.String.compareTo(java.lang.String)") {
+ return GenInlinedStringCompareTo(cu, info);
+ }
+ if (tgt_method == "boolean java.lang.String.is_empty()") {
+ return GenInlinedStringIsEmptyOrLength(cu, info, true /* is_empty */);
+ }
+ if (tgt_method == "int java.lang.String.index_of(int, int)") {
+ return GenInlinedIndexOf(cu, info, false /* base 0 */);
+ }
+ if (tgt_method == "int java.lang.String.index_of(int)") {
+ return GenInlinedIndexOf(cu, info, true /* base 0 */);
+ }
+ if (tgt_method == "int java.lang.String.length()") {
+ return GenInlinedStringIsEmptyOrLength(cu, info, false /* is_empty */);
+ }
+ if (tgt_method == "java.lang.Thread java.lang.Thread.currentThread()") {
+ return GenInlinedCurrentThread(cu, info);
+ }
+ } else if (tgt_method.find(" sun.misc.Unsafe") != std::string::npos) {
+ if (tgt_method == "boolean sun.misc.Unsafe.compareAndSwapInt(java.lang.Object, long, int, int)") {
+ return GenInlinedCas32(cu, info, false);
+ }
+ if (tgt_method == "boolean sun.misc.Unsafe.compareAndSwapObject(java.lang.Object, long, java.lang.Object, java.lang.Object)") {
+ return GenInlinedCas32(cu, info, true);
+ }
+ if (tgt_method == "int sun.misc.Unsafe.getInt(java.lang.Object, long)") {
+ return GenInlinedUnsafeGet(cu, info, false /* is_long */, false /* is_volatile */);
+ }
+ if (tgt_method == "int sun.misc.Unsafe.getIntVolatile(java.lang.Object, long)") {
+ return GenInlinedUnsafeGet(cu, info, false /* is_long */, true /* is_volatile */);
+ }
+ if (tgt_method == "void sun.misc.Unsafe.putInt(java.lang.Object, long, int)") {
+ return GenInlinedUnsafePut(cu, info, false /* is_long */, false /* is_object */,
+ false /* is_volatile */, false /* is_ordered */);
+ }
+ if (tgt_method == "void sun.misc.Unsafe.putIntVolatile(java.lang.Object, long, int)") {
+ return GenInlinedUnsafePut(cu, info, false /* is_long */, false /* is_object */,
+ true /* is_volatile */, false /* is_ordered */);
+ }
+ if (tgt_method == "void sun.misc.Unsafe.putOrderedInt(java.lang.Object, long, int)") {
+ return GenInlinedUnsafePut(cu, info, false /* is_long */, false /* is_object */,
+ false /* is_volatile */, true /* is_ordered */);
+ }
+ if (tgt_method == "long sun.misc.Unsafe.getLong(java.lang.Object, long)") {
+ return GenInlinedUnsafeGet(cu, info, true /* is_long */, false /* is_volatile */);
+ }
+ if (tgt_method == "long sun.misc.Unsafe.getLongVolatile(java.lang.Object, long)") {
+ return GenInlinedUnsafeGet(cu, info, true /* is_long */, true /* is_volatile */);
+ }
+ if (tgt_method == "void sun.misc.Unsafe.putLong(java.lang.Object, long, long)") {
+ return GenInlinedUnsafePut(cu, info, true /* is_long */, false /* is_object */,
+ false /* is_volatile */, false /* is_ordered */);
+ }
+ if (tgt_method == "void sun.misc.Unsafe.putLongVolatile(java.lang.Object, long, long)") {
+ return GenInlinedUnsafePut(cu, info, true /* is_long */, false /* is_object */,
+ true /* is_volatile */, false /* is_ordered */);
+ }
+ if (tgt_method == "void sun.misc.Unsafe.putOrderedLong(java.lang.Object, long, long)") {
+ return GenInlinedUnsafePut(cu, info, true /* is_long */, false /* is_object */,
+ false /* is_volatile */, true /* is_ordered */);
+ }
+ if (tgt_method == "java.lang.Object sun.misc.Unsafe.getObject(java.lang.Object, long)") {
+ return GenInlinedUnsafeGet(cu, info, false /* is_long */, false /* is_volatile */);
+ }
+ if (tgt_method == "java.lang.Object sun.misc.Unsafe.getObjectVolatile(java.lang.Object, long)") {
+ return GenInlinedUnsafeGet(cu, info, false /* is_long */, true /* is_volatile */);
+ }
+ if (tgt_method == "void sun.misc.Unsafe.putObject(java.lang.Object, long, java.lang.Object)") {
+ return GenInlinedUnsafePut(cu, info, false /* is_long */, true /* is_object */,
+ false /* is_volatile */, false /* is_ordered */);
+ }
+ if (tgt_method == "void sun.misc.Unsafe.putObjectVolatile(java.lang.Object, long, java.lang.Object)") {
+ return GenInlinedUnsafePut(cu, info, false /* is_long */, true /* is_object */,
+ true /* is_volatile */, false /* is_ordered */);
+ }
+ if (tgt_method == "void sun.misc.Unsafe.putOrderedObject(java.lang.Object, long, java.lang.Object)") {
+ return GenInlinedUnsafePut(cu, info, false /* is_long */, true /* is_object */,
+ false /* is_volatile */, true /* is_ordered */);
+ }
+ }
+ return false;
+}
+
+void Codegen::GenInvoke(CompilationUnit* cu, CallInfo* info)
+{
+ if (GenIntrinsic(cu, info)) {
+ return;
+ }
+ InvokeType original_type = info->type; // avoiding mutation by ComputeInvokeInfo
+ int call_state = 0;
+ LIR* null_ck;
+ LIR** p_null_ck = NULL;
+ NextCallInsn next_call_insn;
+ FlushAllRegs(cu); /* Everything to home location */
+ // Explicit register usage
+ LockCallTemps(cu);
+
+ OatCompilationUnit m_unit(cu->class_loader, cu->class_linker,
+ *cu->dex_file, cu->code_item,
+ cu->class_def_idx, cu->method_idx,
+ cu->access_flags);
+
+ uint32_t dex_method_idx = info->index;
+ int vtable_idx;
+ uintptr_t direct_code;
+ uintptr_t direct_method;
+ bool skip_this;
+ bool fast_path =
+ cu->compiler->ComputeInvokeInfo(dex_method_idx, &m_unit, info->type,
+ vtable_idx, direct_code,
+ direct_method)
+ && !SLOW_INVOKE_PATH;
+ if (info->type == kInterface) {
+ if (fast_path) {
+ p_null_ck = &null_ck;
+ }
+ next_call_insn = fast_path ? NextInterfaceCallInsn
+ : NextInterfaceCallInsnWithAccessCheck;
+ skip_this = false;
+ } else if (info->type == kDirect) {
+ if (fast_path) {
+ p_null_ck = &null_ck;
+ }
+ next_call_insn = fast_path ? NextSDCallInsn : NextDirectCallInsnSP;
+ skip_this = false;
+ } else if (info->type == kStatic) {
+ next_call_insn = fast_path ? NextSDCallInsn : NextStaticCallInsnSP;
+ skip_this = false;
+ } else if (info->type == kSuper) {
+ DCHECK(!fast_path); // Fast path is a direct call.
+ next_call_insn = NextSuperCallInsnSP;
+ skip_this = false;
+ } else {
+ DCHECK_EQ(info->type, kVirtual);
+ next_call_insn = fast_path ? NextVCallInsn : NextVCallInsnSP;
+ skip_this = fast_path;
+ }
+ if (!info->is_range) {
+ call_state = GenDalvikArgsNoRange(cu, info, call_state, p_null_ck,
+ next_call_insn, dex_method_idx,
+ vtable_idx, direct_code, direct_method,
+ original_type, skip_this);
+ } else {
+ call_state = GenDalvikArgsRange(cu, info, call_state, p_null_ck,
+ next_call_insn, dex_method_idx, vtable_idx,
+ direct_code, direct_method, original_type,
+ skip_this);
+ }
+ // Finish up any of the call sequence not interleaved in arg loading
+ while (call_state >= 0) {
+ call_state = next_call_insn(cu, info, call_state, dex_method_idx,
+ vtable_idx, direct_code, direct_method,
+ original_type);
+ }
+ if (cu->enable_debug & (1 << kDebugDisplayMissingTargets)) {
+ GenShowTarget(cu);
+ }
+ LIR* call_inst;
+ if (cu->instruction_set != kX86) {
+ call_inst = OpReg(cu, kOpBlx, TargetReg(kInvokeTgt));
+ } else {
+ if (fast_path && info->type != kInterface) {
+ call_inst = OpMem(cu, kOpBlx, TargetReg(kArg0),
+ mirror::AbstractMethod::GetCodeOffset().Int32Value());
+ } else {
+ int trampoline = 0;
+ switch (info->type) {
+ case kInterface:
+ trampoline = fast_path ? ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline)
+ : ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
+ break;
+ case kDirect:
+ trampoline = ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
+ break;
+ case kStatic:
+ trampoline = ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
+ break;
+ case kSuper:
+ trampoline = ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
+ break;
+ case kVirtual:
+ trampoline = ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected invoke type";
+ }
+ call_inst = OpThreadMem(cu, kOpBlx, trampoline);
+ }
+ }
+ MarkSafepointPC(cu, call_inst);
+
+ ClobberCalleeSave(cu);
+ if (info->result.location != kLocInvalid) {
+ // We have a following MOVE_RESULT - do it now.
+ if (info->result.wide) {
+ RegLocation ret_loc = GetReturnWide(cu, info->result.fp);
+ StoreValueWide(cu, info->result, ret_loc);
+ } else {
+ RegLocation ret_loc = GetReturn(cu, info->result.fp);
+ StoreValue(cu, info->result, ret_loc);
+ }
+ }
+}
+
+/*
+ * Build an array of location records for the incoming arguments.
+ * Note: one location record per word of arguments, with dummy
+ * high-word loc for wide arguments. Also pull up any following
+ * MOVE_RESULT and incorporate it into the invoke.
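+ * For example, a signature of (int, long, int) yields four records: arg0,
+ * the low word of arg1, a dummy high-word record for arg1, and arg2.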
+ */
+CallInfo* Codegen::NewMemCallInfo(CompilationUnit* cu, BasicBlock* bb, MIR* mir, InvokeType type,
+ bool is_range)
+{
+ CallInfo* info = static_cast<CallInfo*>(NewMem(cu, sizeof(CallInfo), true, kAllocMisc));
+ MIR* move_result_mir = FindMoveResult(cu, bb, mir);
+ if (move_result_mir == NULL) {
+ info->result.location = kLocInvalid;
+ } else {
+ info->result = GetRawDest(cu, move_result_mir);
+ move_result_mir->meta.original_opcode = move_result_mir->dalvikInsn.opcode;
+ move_result_mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
+ }
+ info->num_arg_words = mir->ssa_rep->num_uses;
+ info->args = (info->num_arg_words == 0) ? NULL : static_cast<RegLocation*>
+ (NewMem(cu, sizeof(RegLocation) * info->num_arg_words, false, kAllocMisc));
+ for (int i = 0; i < info->num_arg_words; i++) {
+ info->args[i] = GetRawSrc(cu, mir, i);
+ }
+ info->opt_flags = mir->optimization_flags;
+ info->type = type;
+ info->is_range = is_range;
+ info->index = mir->dalvikInsn.vB;
+ info->offset = mir->offset;
+ return info;
+}
+
+} // namespace art
diff --git a/src/compiler/dex/quick/gen_loadstore.cc b/src/compiler/dex/quick/gen_loadstore.cc
new file mode 100644
index 0000000..b945e31
--- /dev/null
+++ b/src/compiler/dex/quick/gen_loadstore.cc
@@ -0,0 +1,314 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_util.h"
+#include "compiler/dex/compiler_ir.h"
+#include "ralloc_util.h"
+
+namespace art {
+
+/* This file contains target-independent codegen and support. */
+
+/*
+ * Load an immediate value into a fixed or temp register. Target
+ * register is clobbered, and marked in_use.
+ */
+LIR* Codegen::LoadConstant(CompilationUnit* cu, int r_dest, int value)
+{
+ if (IsTemp(cu, r_dest)) {
+ Clobber(cu, r_dest);
+ MarkInUse(cu, r_dest);
+ }
+ return LoadConstantNoClobber(cu, r_dest, value);
+}
+
+/*
+ * Temporary workaround for Issue 7250540. If we're loading a constant zero into a
+ * promoted floating point register, also copy a zero into the int/ref identity of
+ * that sreg.
+ */
+void Codegen::Workaround7250540(CompilationUnit* cu, RegLocation rl_dest, int zero_reg)
+{
+ if (rl_dest.fp) {
+ int pmap_index = SRegToPMap(cu, rl_dest.s_reg_low);
+ if (cu->promotion_map[pmap_index].fp_location == kLocPhysReg) {
+ // Now, determine if this vreg is ever used as a reference. If not, we're done.
+ if (!cu->gen_bitcode) {
+ // TUNING: We no longer have this info for QuickGBC - assume the worst
+ bool used_as_reference = false;
+ int base_vreg = SRegToVReg(cu, rl_dest.s_reg_low);
+ for (int i = 0; !used_as_reference && (i < cu->num_ssa_regs); i++) {
+ if (SRegToVReg(cu, cu->reg_location[i].s_reg_low) == base_vreg) {
+ used_as_reference |= cu->reg_location[i].ref;
+ }
+ }
+ if (!used_as_reference) {
+ return;
+ }
+ }
+ int temp_reg = zero_reg;
+ if (temp_reg == INVALID_REG) {
+ temp_reg = AllocTemp(cu);
+ cu->cg->LoadConstant(cu, temp_reg, 0);
+ }
+ if (cu->promotion_map[pmap_index].core_location == kLocPhysReg) {
+ // Promoted - just copy in a zero
+ OpRegCopy(cu, cu->promotion_map[pmap_index].core_reg, temp_reg);
+ } else {
+ // Lives in the frame, need to store.
+ StoreBaseDisp(cu, TargetReg(kSp), SRegOffset(cu, rl_dest.s_reg_low), temp_reg, kWord);
+ }
+ if (zero_reg == INVALID_REG) {
+ FreeTemp(cu, temp_reg);
+ }
+ }
+ }
+}
+
+/* Load a word at base + displacement. Displacement must be a word multiple. */
+LIR* Codegen::LoadWordDisp(CompilationUnit* cu, int rBase, int displacement, int r_dest)
+{
+ return LoadBaseDisp(cu, rBase, displacement, r_dest, kWord,
+ INVALID_SREG);
+}
+
+LIR* Codegen::StoreWordDisp(CompilationUnit* cu, int rBase, int displacement, int r_src)
+{
+ return StoreBaseDisp(cu, rBase, displacement, r_src, kWord);
+}
+
+/*
+ * Load a Dalvik register into a physical register. Take care when
+ * using this routine, as it doesn't perform any bookkeeping regarding
+ * register liveness. That is the responsibility of the caller.
+ */
+void Codegen::LoadValueDirect(CompilationUnit* cu, RegLocation rl_src, int r_dest)
+{
+ rl_src = UpdateLoc(cu, rl_src);
+ if (rl_src.location == kLocPhysReg) {
+ OpRegCopy(cu, r_dest, rl_src.low_reg);
+ } else if (IsInexpensiveConstant(cu, rl_src)) {
+ LoadConstantNoClobber(cu, r_dest, ConstantValue(cu, rl_src));
+ } else {
+ DCHECK((rl_src.location == kLocDalvikFrame) ||
+ (rl_src.location == kLocCompilerTemp));
+ LoadWordDisp(cu, TargetReg(kSp), SRegOffset(cu, rl_src.s_reg_low), r_dest);
+ }
+}
+
+/*
+ * Similar to LoadValueDirect, but clobbers and allocates the target
+ * register. Should be used when loading to a fixed register (for example,
+ * loading arguments to an out-of-line call).
+ */
+void Codegen::LoadValueDirectFixed(CompilationUnit* cu, RegLocation rl_src, int r_dest)
+{
+ Clobber(cu, r_dest);
+ MarkInUse(cu, r_dest);
+ LoadValueDirect(cu, rl_src, r_dest);
+}
+
+/*
+ * Load a Dalvik register pair into a physical register[s]. Take care when
+ * using this routine, as it doesn't perform any bookkeeping regarding
+ * register liveness. That is the responsibility of the caller.
+ */
+void Codegen::LoadValueDirectWide(CompilationUnit* cu, RegLocation rl_src, int reg_lo,
+ int reg_hi)
+{
+ rl_src = UpdateLocWide(cu, rl_src);
+ if (rl_src.location == kLocPhysReg) {
+ OpRegCopyWide(cu, reg_lo, reg_hi, rl_src.low_reg, rl_src.high_reg);
+ } else if (IsInexpensiveConstant(cu, rl_src)) {
+ LoadConstantWide(cu, reg_lo, reg_hi, ConstantValueWide(cu, rl_src));
+ } else {
+ DCHECK((rl_src.location == kLocDalvikFrame) ||
+ (rl_src.location == kLocCompilerTemp));
+ LoadBaseDispWide(cu, TargetReg(kSp), SRegOffset(cu, rl_src.s_reg_low),
+ reg_lo, reg_hi, INVALID_SREG);
+ }
+}
+
+/*
+ * Similar to LoadValueDirectWide, but clobbers and allocates the target
+ * registers. Should be used when loading to fixed registers (for example,
+ * loading arguments to an out-of-line call).
+ */
+void Codegen::LoadValueDirectWideFixed(CompilationUnit* cu, RegLocation rl_src, int reg_lo,
+ int reg_hi)
+{
+ Clobber(cu, reg_lo);
+ Clobber(cu, reg_hi);
+ MarkInUse(cu, reg_lo);
+ MarkInUse(cu, reg_hi);
+ LoadValueDirectWide(cu, rl_src, reg_lo, reg_hi);
+}
+
+RegLocation Codegen::LoadValue(CompilationUnit* cu, RegLocation rl_src, RegisterClass op_kind)
+{
+ rl_src = EvalLoc(cu, rl_src, op_kind, false);
+ if (IsInexpensiveConstant(cu, rl_src) || rl_src.location != kLocPhysReg) {
+ LoadValueDirect(cu, rl_src, rl_src.low_reg);
+ rl_src.location = kLocPhysReg;
+ MarkLive(cu, rl_src.low_reg, rl_src.s_reg_low);
+ }
+ return rl_src;
+}
+
+void Codegen::StoreValue(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+{
+#ifndef NDEBUG
+ /*
+ * Sanity checking - should never try to store to the same
+ * ssa name during the compilation of a single instruction
+ * without an intervening ClobberSReg().
+ */
+ DCHECK((cu->live_sreg == INVALID_SREG) ||
+ (rl_dest.s_reg_low != cu->live_sreg));
+ cu->live_sreg = rl_dest.s_reg_low;
+#endif
+ LIR* def_start;
+ LIR* def_end;
+ DCHECK(!rl_dest.wide);
+ DCHECK(!rl_src.wide);
+ rl_src = UpdateLoc(cu, rl_src);
+ rl_dest = UpdateLoc(cu, rl_dest);
+ if (rl_src.location == kLocPhysReg) {
+ if (IsLive(cu, rl_src.low_reg) ||
+ IsPromoted(cu, rl_src.low_reg) ||
+ (rl_dest.location == kLocPhysReg)) {
+ // Src is live/promoted or Dest has assigned reg.
+ rl_dest = EvalLoc(cu, rl_dest, kAnyReg, false);
+ OpRegCopy(cu, rl_dest.low_reg, rl_src.low_reg);
+ } else {
+ // Just re-assign the registers. Dest gets Src's regs
+ rl_dest.low_reg = rl_src.low_reg;
+ Clobber(cu, rl_src.low_reg);
+ }
+ } else {
+ // Load Src either into promoted Dest or temps allocated for Dest
+ rl_dest = EvalLoc(cu, rl_dest, kAnyReg, false);
+ LoadValueDirect(cu, rl_src, rl_dest.low_reg);
+ }
+
+ // Dest is now live and dirty (until/if we flush it to home location)
+ MarkLive(cu, rl_dest.low_reg, rl_dest.s_reg_low);
+ MarkDirty(cu, rl_dest);
+
+ ResetDefLoc(cu, rl_dest);
+ if (IsDirty(cu, rl_dest.low_reg) &&
+ oat_live_out(cu, rl_dest.s_reg_low)) {
+ def_start = cu->last_lir_insn;
+ StoreBaseDisp(cu, TargetReg(kSp), SRegOffset(cu, rl_dest.s_reg_low),
+ rl_dest.low_reg, kWord);
+ MarkClean(cu, rl_dest);
+ def_end = cu->last_lir_insn;
+ if (!rl_dest.ref) {
+ // Exclude references from store elimination
+ MarkDef(cu, rl_dest, def_start, def_end);
+ }
+ }
+}
+
+RegLocation Codegen::LoadValueWide(CompilationUnit* cu, RegLocation rl_src, RegisterClass op_kind)
+{
+ DCHECK(rl_src.wide);
+ rl_src = EvalLoc(cu, rl_src, op_kind, false);
+ if (IsInexpensiveConstant(cu, rl_src) || rl_src.location != kLocPhysReg) {
+ LoadValueDirectWide(cu, rl_src, rl_src.low_reg, rl_src.high_reg);
+ rl_src.location = kLocPhysReg;
+ MarkLive(cu, rl_src.low_reg, rl_src.s_reg_low);
+ MarkLive(cu, rl_src.high_reg, GetSRegHi(rl_src.s_reg_low));
+ }
+ return rl_src;
+}
+
+void Codegen::StoreValueWide(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+{
+#ifndef NDEBUG
+ /*
+ * Sanity checking - should never try to store to the same
+ * ssa name during the compilation of a single instruction
+ * without an intervening ClobberSReg().
+ */
+ DCHECK((cu->live_sreg == INVALID_SREG) ||
+ (rl_dest.s_reg_low != cu->live_sreg));
+ cu->live_sreg = rl_dest.s_reg_low;
+#endif
+ LIR* def_start;
+ LIR* def_end;
+ DCHECK_EQ(IsFpReg(rl_src.low_reg), IsFpReg(rl_src.high_reg));
+ DCHECK(rl_dest.wide);
+ DCHECK(rl_src.wide);
+ if (rl_src.location == kLocPhysReg) {
+ if (IsLive(cu, rl_src.low_reg) ||
+ IsLive(cu, rl_src.high_reg) ||
+ IsPromoted(cu, rl_src.low_reg) ||
+ IsPromoted(cu, rl_src.high_reg) ||
+ (rl_dest.location == kLocPhysReg)) {
+ // Src is live or promoted or Dest has assigned reg.
+ rl_dest = EvalLoc(cu, rl_dest, kAnyReg, false);
+ OpRegCopyWide(cu, rl_dest.low_reg, rl_dest.high_reg,
+ rl_src.low_reg, rl_src.high_reg);
+ } else {
+ // Just re-assign the registers. Dest gets Src's regs
+ rl_dest.low_reg = rl_src.low_reg;
+ rl_dest.high_reg = rl_src.high_reg;
+ Clobber(cu, rl_src.low_reg);
+ Clobber(cu, rl_src.high_reg);
+ }
+ } else {
+ // Load Src either into promoted Dest or temps allocated for Dest
+ rl_dest = EvalLoc(cu, rl_dest, kAnyReg, false);
+ LoadValueDirectWide(cu, rl_src, rl_dest.low_reg, rl_dest.high_reg);
+ }
+
+ // Dest is now live and dirty (until/if we flush it to home location)
+ MarkLive(cu, rl_dest.low_reg, rl_dest.s_reg_low);
+ MarkLive(cu, rl_dest.high_reg, GetSRegHi(rl_dest.s_reg_low));
+ MarkDirty(cu, rl_dest);
+ MarkPair(cu, rl_dest.low_reg, rl_dest.high_reg);
+
+
+ ResetDefLocWide(cu, rl_dest);
+ if ((IsDirty(cu, rl_dest.low_reg) ||
+ IsDirty(cu, rl_dest.high_reg)) &&
+ (oat_live_out(cu, rl_dest.s_reg_low) ||
+ oat_live_out(cu, GetSRegHi(rl_dest.s_reg_low)))) {
+ def_start = cu->last_lir_insn;
+ DCHECK_EQ((SRegToVReg(cu, rl_dest.s_reg_low)+1),
+ SRegToVReg(cu, GetSRegHi(rl_dest.s_reg_low)));
+ StoreBaseDispWide(cu, TargetReg(kSp), SRegOffset(cu, rl_dest.s_reg_low),
+ rl_dest.low_reg, rl_dest.high_reg);
+ MarkClean(cu, rl_dest);
+ def_end = cu->last_lir_insn;
+ MarkDefWide(cu, rl_dest, def_start, def_end);
+ }
+}
+
+/* Utilities to load the current Method* */
+void Codegen::LoadCurrMethodDirect(CompilationUnit *cu, int r_tgt)
+{
+ LoadValueDirectFixed(cu, cu->method_loc, r_tgt);
+}
+
+RegLocation Codegen::LoadCurrMethod(CompilationUnit *cu)
+{
+ return LoadValue(cu, cu->method_loc, kCoreReg);
+}
+
+} // namespace art
diff --git a/src/compiler/dex/quick/local_optimizations.cc b/src/compiler/dex/quick/local_optimizations.cc
new file mode 100644
index 0000000..3c589373
--- /dev/null
+++ b/src/compiler/dex/quick/local_optimizations.cc
@@ -0,0 +1,504 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compiler/dex/compiler_internals.h"
+
+namespace art {
+
+#define DEBUG_OPT(X)
+
+/* Check RAW, WAR, and WAW dependencies on the register operands */
+#define CHECK_REG_DEP(use, def, check) ((def & check->use_mask) || \
+ ((use | def) & check->def_mask))
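+
+/*
+ * In the macro above, (def & check->use_mask) flags a RAW hazard (check reads
+ * a register this LIR writes), while ((use | def) & check->def_mask) flags WAR
+ * and WAW hazards (check writes a register this LIR reads or writes).
+ */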
+
+/* Scheduler heuristics */
+#define MAX_HOIST_DISTANCE 20
+#define LDLD_DISTANCE 4
+#define LD_LATENCY 2
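+
+/*
+ * Roughly: MAX_HOIST_DISTANCE caps how many instructions a load may be hoisted
+ * over, LDLD_DISTANCE is the extra spacing kept between back-to-back loads, and
+ * LD_LATENCY is the assumed load-use latency used when picking the final
+ * insertion slot in ApplyLoadHoisting() below.
+ */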
+
+static bool IsDalvikRegisterClobbered(LIR* lir1, LIR* lir2)
+{
+ int reg1Lo = DECODE_ALIAS_INFO_REG(lir1->alias_info);
+ int reg1Hi = reg1Lo + DECODE_ALIAS_INFO_WIDE(lir1->alias_info);
+ int reg2Lo = DECODE_ALIAS_INFO_REG(lir2->alias_info);
+ int reg2Hi = reg2Lo + DECODE_ALIAS_INFO_WIDE(lir2->alias_info);
+
+ return (reg1Lo == reg2Lo) || (reg1Lo == reg2Hi) || (reg1Hi == reg2Lo);
+}
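+
+/*
+ * Example: a wide access covering Dalvik vregs v4/v5 (reg1Lo = 4, reg1Hi = 5)
+ * overlaps a narrow access to v5 (reg2Lo = reg2Hi = 5), so
+ * IsDalvikRegisterClobbered() above returns true.
+ */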
+
+/* Convert a more expensive instruction (i.e. a load) into a move */
+static void ConvertMemOpIntoMove(CompilationUnit* cu, LIR* orig_lir, int dest, int src)
+{
+ Codegen* cg = cu->cg.get();
+ /* Insert a move to replace the load */
+ LIR* move_lir;
+ move_lir = cg->OpRegCopyNoInsert(cu, dest, src);
+ /*
+ * Insert the converted instruction after the original since the
+ * optimization is scanning in the top-down order and the new instruction
+ * will need to be re-checked (e.g. the new dest clobbers the src used in
+ * this_lir).
+ */
+ InsertLIRAfter(orig_lir, move_lir);
+}
+
+/*
+ * Perform a pass of top-down walk, from the second-last instruction in the
+ * superblock, to eliminate redundant loads and stores.
+ *
+ * An earlier load can eliminate a later load iff
+ * 1) They are must-aliases
+ * 2) The native register is not clobbered in between
+ * 3) The memory location is not written to in between
+ *
+ * An earlier store can eliminate a later load iff
+ * 1) They are must-aliases
+ * 2) The native register is not clobbered in between
+ * 3) The memory location is not written to in between
+ *
+ * A later store can be eliminated by an earlier store iff
+ * 1) They are must-aliases
+ * 2) The memory location is not written to in between
+ */
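+/*
+ * Illustrative sketch (register/operand syntax is hypothetical):
+ *
+ *   ldr r0, [sp, #16]   ; load of Dalvik vreg v4
+ *   add r2, r2, #1      ; unrelated - touches neither r0 nor v4
+ *   ldr r1, [sp, #16]   ; second load of v4
+ *
+ * The two loads must-alias and nothing in between clobbers r0 or writes v4, so
+ * the pass below rewrites the second load as a register copy (or simply nops it
+ * when r1 == r0).
+ */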
+static void ApplyLoadStoreElimination(CompilationUnit* cu, LIR* head_lir, LIR* tail_lir)
+{
+ Codegen* cg = cu->cg.get();
+ LIR* this_lir;
+
+ if (head_lir == tail_lir) return;
+
+ for (this_lir = PREV_LIR(tail_lir); this_lir != head_lir; this_lir = PREV_LIR(this_lir)) {
+
+ if (is_pseudo_opcode(this_lir->opcode)) continue;
+
+ int sink_distance = 0;
+
+ uint64_t target_flags = cg->GetTargetInstFlags(this_lir->opcode);
+
+ /* Skip non-interesting instructions */
+ if ((this_lir->flags.is_nop == true) ||
+ (target_flags & IS_BRANCH) ||
+ ((target_flags & (REG_DEF0 | REG_DEF1)) == (REG_DEF0 | REG_DEF1)) || // Skip wide loads.
+ ((target_flags & (REG_USE0 | REG_USE1 | REG_USE2)) ==
+ (REG_USE0 | REG_USE1 | REG_USE2)) || // Skip wide stores.
+ !(target_flags & (IS_LOAD | IS_STORE))) {
+ continue;
+ }
+
+ int native_reg_id;
+ if (cu->instruction_set == kX86) {
+ // If x86, location differs depending on whether memory/reg operation.
+ native_reg_id = (cg->GetTargetInstFlags(this_lir->opcode) & IS_STORE) ? this_lir->operands[2]
+ : this_lir->operands[0];
+ } else {
+ native_reg_id = this_lir->operands[0];
+ }
+ bool is_this_lir_load = cg->GetTargetInstFlags(this_lir->opcode) & IS_LOAD;
+ LIR* check_lir;
+ /* Use the mem mask to determine the rough memory location */
+ uint64_t this_mem_mask = (this_lir->use_mask | this_lir->def_mask) & ENCODE_MEM;
+
+ /*
+ * Currently only eliminate redundant ld/st for constant and Dalvik
+ * register accesses.
+ */
+ if (!(this_mem_mask & (ENCODE_LITERAL | ENCODE_DALVIK_REG))) continue;
+
+ uint64_t stop_def_reg_mask = this_lir->def_mask & ~ENCODE_MEM;
+ uint64_t stop_use_reg_mask;
+ if (cu->instruction_set == kX86) {
+ stop_use_reg_mask = (IS_BRANCH | this_lir->use_mask) & ~ENCODE_MEM;
+ } else {
+ /*
+ * Add pc to the resource mask to prevent this instruction
+ * from sinking past branch instructions. Also take out the memory
+ * region bits since stop_mask is used to check data/control
+ * dependencies.
+ */
+ stop_use_reg_mask = (cg->GetPCUseDefEncoding() | this_lir->use_mask) & ~ENCODE_MEM;
+ }
+
+ for (check_lir = NEXT_LIR(this_lir); check_lir != tail_lir; check_lir = NEXT_LIR(check_lir)) {
+
+ /*
+ * Skip already dead instructions (whose dataflow information is
+ * outdated and misleading).
+ */
+ if (check_lir->flags.is_nop || is_pseudo_opcode(check_lir->opcode)) continue;
+
+ uint64_t check_mem_mask = (check_lir->use_mask | check_lir->def_mask) & ENCODE_MEM;
+ uint64_t alias_condition = this_mem_mask & check_mem_mask;
+ bool stop_here = false;
+
+ /*
+ * Potential aliases seen - check the alias relations
+ */
+ uint64_t check_flags = cg->GetTargetInstFlags(check_lir->opcode);
+ // TUNING: Support instructions with multiple register targets.
+ if ((check_flags & (REG_DEF0 | REG_DEF1)) == (REG_DEF0 | REG_DEF1)) {
+ stop_here = true;
+ } else if (check_mem_mask != ENCODE_MEM && alias_condition != 0) {
+ bool is_check_lir_load = check_flags & IS_LOAD;
+ if (alias_condition == ENCODE_LITERAL) {
+ /*
+ * Should only see literal loads in the instruction
+ * stream.
+ */
+ DCHECK(!(check_flags & IS_STORE));
+ /* Same value && same register type */
+ if (check_lir->alias_info == this_lir->alias_info &&
+ cg->SameRegType(check_lir->operands[0], native_reg_id)) {
+ /*
+ * Different destination register - insert
+ * a move
+ */
+ if (check_lir->operands[0] != native_reg_id) {
+ ConvertMemOpIntoMove(cu, check_lir, check_lir->operands[0],
+ native_reg_id);
+ }
+ check_lir->flags.is_nop = true;
+ }
+ } else if (alias_condition == ENCODE_DALVIK_REG) {
+ /* Must alias */
+ if (check_lir->alias_info == this_lir->alias_info) {
+ /* Only optimize compatible registers */
+ bool reg_compatible = cg->SameRegType(check_lir->operands[0], native_reg_id);
+ if ((is_this_lir_load && is_check_lir_load) ||
+ (!is_this_lir_load && is_check_lir_load)) {
+ /* RAR or RAW */
+ if (reg_compatible) {
+ /*
+ * Different destination register -
+ * insert a move
+ */
+ if (check_lir->operands[0] !=
+ native_reg_id) {
+ ConvertMemOpIntoMove(cu, check_lir, check_lir->operands[0],
+ native_reg_id);
+ }
+ check_lir->flags.is_nop = true;
+ } else {
+ /*
+ * Destinations are of different types -
+ * something complicated is going on, so
+ * stop looking now.
+ */
+ stop_here = true;
+ }
+ } else if (is_this_lir_load && !is_check_lir_load) {
+ /* WAR - register value is killed */
+ stop_here = true;
+ } else if (!is_this_lir_load && !is_check_lir_load) {
+ /* WAW - nuke the earlier store */
+ this_lir->flags.is_nop = true;
+ stop_here = true;
+ }
+ /* Partial overlap */
+ } else if (IsDalvikRegisterClobbered(this_lir, check_lir)) {
+ /*
+ * It is actually ok to continue if check_lir
+ * is a read. But it is hard to make a test
+ * case for this so we just stop here to be
+ * conservative.
+ */
+ stop_here = true;
+ }
+ }
+ /* Memory content may be updated. Stop looking now. */
+ if (stop_here) {
+ break;
+ /* The check_lir has been transformed - check the next one */
+ } else if (check_lir->flags.is_nop) {
+ continue;
+ }
+ }
+
+
+ /*
+ * this and check LIRs have no memory dependency. Now check if
+ * their register operands have any RAW, WAR, and WAW
+ * dependencies. If so, stop looking.
+ */
+ if (stop_here == false) {
+ stop_here = CHECK_REG_DEP(stop_use_reg_mask, stop_def_reg_mask, check_lir);
+ }
+
+ if (stop_here == true) {
+ if (cu->instruction_set == kX86) {
+ // Prevent stores from being sunk between ops that generate ccodes and
+ // ops that use them.
+ uint64_t flags = cg->GetTargetInstFlags(check_lir->opcode);
+ if (sink_distance > 0 && (flags & IS_BRANCH) && (flags & USES_CCODES)) {
+ check_lir = PREV_LIR(check_lir);
+ sink_distance--;
+ }
+ }
+ DEBUG_OPT(dump_dependent_insn_pair(this_lir, check_lir, "REG CLOBBERED"));
+ /* Only sink store instructions */
+ if (sink_distance && !is_this_lir_load) {
+ LIR* new_store_lir = static_cast<LIR*>(NewMem(cu, sizeof(LIR), true, kAllocLIR));
+ *new_store_lir = *this_lir;
+ /*
+ * Stop point found - insert *before* the check_lir
+ * since the instruction list is scanned in the
+ * top-down order.
+ */
+ InsertLIRBefore(check_lir, new_store_lir);
+ this_lir->flags.is_nop = true;
+ }
+ break;
+ } else if (!check_lir->flags.is_nop) {
+ sink_distance++;
+ }
+ }
+ }
+}
+
+/*
+ * Perform a pass of bottom-up walk, from the second instruction in the
+ * superblock, to try to hoist loads to earlier slots.
+ */
+void ApplyLoadHoisting(CompilationUnit* cu, LIR* head_lir, LIR* tail_lir)
+{
+ Codegen* cg = cu->cg.get();
+ LIR* this_lir, *check_lir;
+ /*
+ * Store the list of independent instructions that can be hoisted past.
+ * Will decide the best place to insert later.
+ */
+ LIR* prev_inst_list[MAX_HOIST_DISTANCE];
+
+ /* Empty block */
+ if (head_lir == tail_lir) return;
+
+ /* Start from the second instruction */
+ for (this_lir = NEXT_LIR(head_lir); this_lir != tail_lir; this_lir = NEXT_LIR(this_lir)) {
+
+ if (is_pseudo_opcode(this_lir->opcode)) continue;
+
+ uint64_t target_flags = cg->GetTargetInstFlags(this_lir->opcode);
+ /* Skip non-interesting instructions */
+ if ((this_lir->flags.is_nop == true) ||
+ ((target_flags & (REG_DEF0 | REG_DEF1)) == (REG_DEF0 | REG_DEF1)) ||
+ !(target_flags & IS_LOAD)) {
+ continue;
+ }
+
+ uint64_t stop_use_all_mask = this_lir->use_mask;
+
+ if (cu->instruction_set != kX86) {
+ /*
+ * Branches for null/range checks are marked with the true resource
+ * bits, and loads to Dalvik registers, constant pools, and non-alias
+ * locations are safe to be hoisted. So only mark the heap references
+ * conservatively here.
+ */
+ if (stop_use_all_mask & ENCODE_HEAP_REF) {
+ stop_use_all_mask |= cg->GetPCUseDefEncoding();
+ }
+ }
+
+ /* Similar as above, but just check for pure register dependency */
+ uint64_t stop_use_reg_mask = stop_use_all_mask & ~ENCODE_MEM;
+ uint64_t stop_def_reg_mask = this_lir->def_mask & ~ENCODE_MEM;
+
+ int next_slot = 0;
+ bool stop_here = false;
+
+ /* Try to hoist the load to a good spot */
+ for (check_lir = PREV_LIR(this_lir); check_lir != head_lir; check_lir = PREV_LIR(check_lir)) {
+
+ /*
+ * Skip already dead instructions (whose dataflow information is
+ * outdated and misleading).
+ */
+ if (check_lir->flags.is_nop) continue;
+
+ uint64_t check_mem_mask = check_lir->def_mask & ENCODE_MEM;
+ uint64_t alias_condition = stop_use_all_mask & check_mem_mask;
+ stop_here = false;
+
+ /* Potential WAR alias seen - check the exact relation */
+ if (check_mem_mask != ENCODE_MEM && alias_condition != 0) {
+ /* We can fully disambiguate Dalvik references */
+ if (alias_condition == ENCODE_DALVIK_REG) {
+ /* Must alias or partially overlap */
+ if ((check_lir->alias_info == this_lir->alias_info) ||
+ IsDalvikRegisterClobbered(this_lir, check_lir)) {
+ stop_here = true;
+ }
+ /* Conservatively treat all heap refs as may-alias */
+ } else {
+ DCHECK_EQ(alias_condition, ENCODE_HEAP_REF);
+ stop_here = true;
+ }
+ /* Memory content may be updated. Stop looking now. */
+ if (stop_here) {
+ prev_inst_list[next_slot++] = check_lir;
+ break;
+ }
+ }
+
+ if (stop_here == false) {
+ stop_here = CHECK_REG_DEP(stop_use_reg_mask, stop_def_reg_mask,
+ check_lir);
+ }
+
+ /*
+ * Store the dependent or non-pseudo/independent instruction to the
+ * list.
+ */
+ if (stop_here || !is_pseudo_opcode(check_lir->opcode)) {
+ prev_inst_list[next_slot++] = check_lir;
+ if (next_slot == MAX_HOIST_DISTANCE) break;
+ }
+
+ /* Found a new place to put the load - move it here */
+ if (stop_here == true) {
+ DEBUG_OPT(dump_dependent_insn_pair(check_lir, this_lir, "HOIST STOP"));
+ break;
+ }
+ }
+
+ /*
+ * Reached the top - use head_lir as the dependent marker as all labels
+ * are barriers.
+ */
+ if (stop_here == false && next_slot < MAX_HOIST_DISTANCE) {
+ prev_inst_list[next_slot++] = head_lir;
+ }
+
+ /*
+ * At least one independent instruction is found. Scan in the reversed
+ * direction to find a beneficial slot.
+ */
+ if (next_slot >= 2) {
+ int first_slot = next_slot - 2;
+ int slot;
+ LIR* dep_lir = prev_inst_list[next_slot-1];
+ /* If there is ld-ld dependency, wait LDLD_DISTANCE cycles */
+ if (!is_pseudo_opcode(dep_lir->opcode) &&
+ (cg->GetTargetInstFlags(dep_lir->opcode) & IS_LOAD)) {
+ first_slot -= LDLD_DISTANCE;
+ }
+ /*
+ * Make sure we check slot >= 0 since first_slot may be negative
+ * when the loop is first entered.
+ */
+ for (slot = first_slot; slot >= 0; slot--) {
+ LIR* cur_lir = prev_inst_list[slot];
+ LIR* prev_lir = prev_inst_list[slot+1];
+
+ /* Check the highest instruction */
+ if (prev_lir->def_mask == ENCODE_ALL) {
+ /*
+ * If the first instruction is a load, don't hoist anything
+ * above it since it is unlikely to be beneficial.
+ */
+ if (cg->GetTargetInstFlags(cur_lir->opcode) & IS_LOAD) continue;
+ /*
+ * If the remaining number of slots is less than LD_LATENCY,
+ * insert the hoisted load here.
+ */
+ if (slot < LD_LATENCY) break;
+ }
+
+ // Don't look across a barrier label
+ if ((prev_lir->opcode == kPseudoTargetLabel) ||
+ (prev_lir->opcode == kPseudoSafepointPC) ||
+ (prev_lir->opcode == kPseudoBarrier)) {
+ break;
+ }
+
+ /*
+ * Try to find two instructions with load/use dependency until
+ * the remaining instructions are less than LD_LATENCY.
+ */
+ bool prev_is_load = is_pseudo_opcode(prev_lir->opcode) ? false :
+ (cg->GetTargetInstFlags(prev_lir->opcode) & IS_LOAD);
+ if (((cur_lir->use_mask & prev_lir->def_mask) && prev_is_load) || (slot < LD_LATENCY)) {
+ break;
+ }
+ }
+
+ /* Found a slot to hoist to */
+ if (slot >= 0) {
+ LIR* cur_lir = prev_inst_list[slot];
+ LIR* new_load_lir = static_cast<LIR*>(NewMem(cu, sizeof(LIR), true, kAllocLIR));
+ *new_load_lir = *this_lir;
+ /*
+ * Insertion is guaranteed to succeed since check_lir
+ * is never the first LIR on the list
+ */
+ InsertLIRBefore(cur_lir, new_load_lir);
+ this_lir->flags.is_nop = true;
+ }
+ }
+ }
+}
+
+void ApplyLocalOptimizations(CompilationUnit* cu, LIR* head_lir,
+ LIR* tail_lir)
+{
+ if (!(cu->disable_opt & (1 << kLoadStoreElimination))) {
+ ApplyLoadStoreElimination(cu, head_lir, tail_lir);
+ }
+ if (!(cu->disable_opt & (1 << kLoadHoisting))) {
+ ApplyLoadHoisting(cu, head_lir, tail_lir);
+ }
+}
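+
+/*
+ * Typical call site (sketch; the method-compilation driver is not part of this
+ * file): once a basic block's LIR has been emitted, both passes are run over
+ * that block's instruction range:
+ *
+ *   ApplyLocalOptimizations(cu, head_lir, cu->last_lir_insn);
+ */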
+
+/*
+ * Nop any unconditional branches that go to the next instruction.
+ * Note: new redundant branches may be inserted later, and we'll
+ * use a check in final instruction assembly to nop those out.
+ */
+void RemoveRedundantBranches(CompilationUnit* cu)
+{
+ LIR* this_lir;
+ Codegen* cg = cu->cg.get();
+
+ for (this_lir = cu->first_lir_insn; this_lir != cu->last_lir_insn; this_lir = NEXT_LIR(this_lir)) {
+
+ /* Branch to the next instruction */
+ if (cg->IsUnconditionalBranch(this_lir)) {
+ LIR* next_lir = this_lir;
+
+ while (true) {
+ next_lir = NEXT_LIR(next_lir);
+
+ /*
+ * Is the branch target the next instruction?
+ */
+ if (next_lir == this_lir->target) {
+ this_lir->flags.is_nop = true;
+ break;
+ }
+
+ /*
+ * Found real useful stuff between the branch and the target.
+ * Need to explicitly check the last_lir_insn here because it
+ * might be the last real instruction.
+ */
+ if (!is_pseudo_opcode(next_lir->opcode) ||
+ (next_lir == cu->last_lir_insn))
+ break;
+ }
+ }
+ }
+}
+
+} // namespace art
diff --git a/src/compiler/dex/quick/local_optimizations.h b/src/compiler/dex/quick/local_optimizations.h
new file mode 100644
index 0000000..3a376fe
--- /dev/null
+++ b/src/compiler/dex/quick/local_optimizations.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_QUICK_LOCALOPTIMIZATIONS_H_
+#define ART_SRC_COMPILER_DEX_QUICK_LOCALOPTIMIZATIONS_H_
+
+namespace art {
+
+void ApplyLocalOptimizations(CompilationUnit* cu, LIR* head_lir, LIR* tail_lir);
+void RemoveRedundantBranches(CompilationUnit* cu);
+
+} // namespace art
+
+#endif // ART_SRC_COMPILER_DEX_QUICK_LOCALOPTIMIZATIONS_H_
diff --git a/src/compiler/dex/quick/mips/README.mips b/src/compiler/dex/quick/mips/README.mips
new file mode 100644
index 0000000..061c157
--- /dev/null
+++ b/src/compiler/dex/quick/mips/README.mips
@@ -0,0 +1,57 @@
+ Notes on the Mips target (3/4/2012)
+ -----------------------------------
+
+Testing
+
+The initial implementation of Mips support in the compiler is untested on
+actual hardware, and as such should be expected to have many bugs. However,
+the vast majority of code for Mips support is either shared with other
+tested targets, or was taken from the functional Mips JIT compiler. The
+expectation is that when it is first tried out on actual hardware, lots of
+small bugs will be flushed out, but it should not take long to get it
+solidly running. The following areas are considered most likely to have
+problems that need to be addressed:
+
+ o Endianness. Focus was on little-endian support, and if a big-endian
+ target is desired, you should pay particular attention to the
+ code generation for switch tables, fill array data, 64-bit
+ data handling and the register usage conventions.
+
+ o The memory model. Verify that oatGenMemoryBarrier() generates the
+ appropriate flavor of sync.
+
+Register promotion
+
+The resource masks in the LIR structure are 64 bits wide, which is enough
+room to fully describe def/use info for Arm and x86 instructions. However,
+the larger number of MIPS core and float registers renders this too small.
+Currently, the workaround for this limitation is to avoid using floating
+point registers 16-31. These are the callee-save registers, which therefore
+means that no floating point promotion is allowed. Among the solutions are:
+ o Expand the def/use mask (which, unfortunately, is a significant change)
+ o The Arm target uses 52 of the 64 bits, so we could support float
+ registers 16-27 without much effort.
+ o We could likely assign the 4 non-register bits (kDalvikReg, kLiteral,
+ kHeapRef & kMustNotAlias) to positions occupied by MIPS registers that
+ don't need def/use bits because they are never modified by code
+ subject to scheduling: r_K0, r_K1, r_SP, r_ZERO, r_S1 (rSELF).
+
+Branch delay slots
+
+Little to no attempt was made to fill branch delay slots. Branch
+instructions in the encoding map are given a length of 8 bytes to include
+an implicit NOP. It should not be too difficult to provide a slot-filling
+pass following successful assembly, but thought should be given to the
+design. Branches are currently treated as scheduling barriers. One
+simple solution would be to copy the instruction at branch targets to the
+slot and adjust the displacement. However, given that code expansion is
+already a problem it would be preferable to use a more sophisticated
+scheduling solution.
+
+Code expansion
+
+Code expansion for the MIPS target is significantly higher than we see
+for Arm and x86. It might make sense to replace the inline code generation
+for some of the more verbose Dalvik byte codes with subroutine calls to
+shared helper functions.
+
diff --git a/src/compiler/dex/quick/mips/assemble_mips.cc b/src/compiler/dex/quick/mips/assemble_mips.cc
new file mode 100644
index 0000000..c5cd401
--- /dev/null
+++ b/src/compiler/dex/quick/mips/assemble_mips.cc
@@ -0,0 +1,716 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_mips.h"
+#include "compiler/dex/quick/codegen_util.h"
+#include "mips_lir.h"
+
+namespace art {
+
+#define MAX_ASSEMBLER_RETRIES 50
+
+/*
+ * opcode: MipsOpCode enum
+ * skeleton: pre-designated bit-pattern for this opcode
+ * k0: key to applying ds/de
+ * ds: dest start bit position
+ * de: dest end bit position
+ * k1: key to applying s1s/s1e
+ * s1s: src1 start bit position
+ * s1e: src1 end bit position
+ * k2: key to applying s2s/s2e
+ * s2s: src2 start bit position
+ * s2e: src2 end bit position
+ * operands: number of operands (for sanity check purposes)
+ * name: mnemonic name
+ * fmt: for pretty-printing
+ */
+#define ENCODING_MAP(opcode, skeleton, k0, ds, de, k1, s1s, s1e, k2, s2s, s2e, \
+ k3, k3s, k3e, flags, name, fmt, size) \
+ {skeleton, {{k0, ds, de}, {k1, s1s, s1e}, {k2, s2s, s2e}, \
+ {k3, k3s, k3e}}, opcode, flags, name, fmt, size}
+
+/* Instruction dump string format keys: !pf, where "!" is the start
+ * of the key, "p" is which numeric operand to use and "f" is the
+ * print format.
+ *
+ * [p]ositions:
+ * 0 -> operands[0] (dest)
+ * 1 -> operands[1] (src1)
+ * 2 -> operands[2] (src2)
+ * 3 -> operands[3] (extra)
+ *
+ * [f]ormats:
+ * h -> 4-digit hex
+ * d -> decimal
+ * E -> decimal*4
+ * F -> decimal*2
+ * c -> branch condition (beq, bne, etc.)
+ * t -> pc-relative target
+ * T -> pc-region target
+ * u -> 1st half of bl[x] target
+ * v -> 2nd half of bl[x] target
+ * R -> register list
+ * s -> single precision floating point register
+ * S -> double precision floating point register
+ * m -> Thumb2 modified immediate
+ * n -> complemented Thumb2 modified immediate
+ * M -> Thumb2 16-bit zero-extended immediate
+ * b -> 4-digit binary
+ * N -> append a NOP
+ *
+ * [!] escape. To insert "!", use "!!"
+ */
+/* NOTE: must be kept in sync with enum MipsOpcode from LIR.h */
+/*
+ * TUNING: We're currently punting on the branch delay slots. All branch
+ * instructions in this map are given a size of 8, which during assembly
+ * is expanded to include a nop. This scheme should be replaced with
+ * an assembler pass to fill those slots when possible.
+ */
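+/*
+ * Worked example, reading the kMipsAddu entry below: skeleton 0x00000021 is the
+ * R-type addu encoding; the dest field lands in bits 15..11 (rd), src1 in bits
+ * 25..21 (rs) and src2 in bits 20..16 (rt); the flags mark a three-operand ALU
+ * op that defines operand 0 and uses operands 1 and 2; "!0r,!1r,!2r" pretty-
+ * prints the three registers.
+ */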
+const MipsEncodingMap MipsCodegen::EncodingMap[kMipsLast] = {
+ ENCODING_MAP(kMips32BitData, 0x00000000,
+ kFmtBitBlt, 31, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP,
+ "data", "0x!0h(!0d)", 4),
+ ENCODING_MAP(kMipsAddiu, 0x24000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "addiu", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMipsAddu, 0x00000021,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "addu", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMipsAnd, 0x00000024,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "and", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMipsAndi, 0x30000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "andi", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMipsB, 0x10000000,
+ kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | NEEDS_FIXUP,
+ "b", "!0t!0N", 8),
+ ENCODING_MAP(kMipsBal, 0x04110000,
+ kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR |
+ NEEDS_FIXUP, "bal", "!0t!0N", 8),
+ ENCODING_MAP(kMipsBeq, 0x10000000,
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0,
+ kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_USE01 |
+ NEEDS_FIXUP, "beq", "!0r,!1r,!2t!0N", 8),
+ ENCODING_MAP(kMipsBeqz, 0x10000000, /* same as beq above with t = $zero */
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
+ NEEDS_FIXUP, "beqz", "!0r,!1t!0N", 8),
+ ENCODING_MAP(kMipsBgez, 0x04010000,
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
+ NEEDS_FIXUP, "bgez", "!0r,!1t!0N", 8),
+ ENCODING_MAP(kMipsBgtz, 0x1C000000,
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
+ NEEDS_FIXUP, "bgtz", "!0r,!1t!0N", 8),
+ ENCODING_MAP(kMipsBlez, 0x18000000,
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
+ NEEDS_FIXUP, "blez", "!0r,!1t!0N", 8),
+ ENCODING_MAP(kMipsBltz, 0x04000000,
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
+ NEEDS_FIXUP, "bltz", "!0r,!1t!0N", 8),
+ ENCODING_MAP(kMipsBnez, 0x14000000, /* same as bne below with t = $zero */
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
+ NEEDS_FIXUP, "bnez", "!0r,!1t!0N", 8),
+ ENCODING_MAP(kMipsBne, 0x14000000,
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0,
+ kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_USE01 |
+ NEEDS_FIXUP, "bne", "!0r,!1r,!2t!0N", 8),
+ ENCODING_MAP(kMipsDiv, 0x0000001a,
+ kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtBitBlt, 25, 21,
+ kFmtBitBlt, 20, 16, IS_QUAD_OP | REG_DEF01 | REG_USE23,
+ "div", "!2r,!3r", 4),
+#if __mips_isa_rev>=2
+ ENCODING_MAP(kMipsExt, 0x7c000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 10, 6,
+ kFmtBitBlt, 15, 11, IS_QUAD_OP | REG_DEF0 | REG_USE1,
+ "ext", "!0r,!1r,!2d,!3D", 4),
+#endif
+ ENCODING_MAP(kMipsJal, 0x0c000000,
+ kFmtBitBlt, 25, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR,
+ "jal", "!0T(!0E)!0N", 8),
+ ENCODING_MAP(kMipsJalr, 0x00000009,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_DEF0_USE1,
+ "jalr", "!0r,!1r!0N", 8),
+ ENCODING_MAP(kMipsJr, 0x00000008,
+ kFmtBitBlt, 25, 21, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
+ NEEDS_FIXUP, "jr", "!0r!0N", 8),
+ ENCODING_MAP(kMipsLahi, 0x3C000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
+ "lahi/lui", "!0r,0x!1h(!1d)", 4),
+ ENCODING_MAP(kMipsLalo, 0x34000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "lalo/ori", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMipsLui, 0x3C000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
+ "lui", "!0r,0x!1h(!1d)", 4),
+ ENCODING_MAP(kMipsLb, 0x80000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+ "lb", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMipsLbu, 0x90000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+ "lbu", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMipsLh, 0x84000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+ "lh", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMipsLhu, 0x94000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+ "lhu", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMipsLw, 0x8C000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+ "lw", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMipsMfhi, 0x00000010,
+ kFmtBitBlt, 15, 11, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "mfhi", "!0r", 4),
+ ENCODING_MAP(kMipsMflo, 0x00000012,
+ kFmtBitBlt, 15, 11, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "mflo", "!0r", 4),
+ ENCODING_MAP(kMipsMove, 0x00000025, /* or using zero reg */
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "move", "!0r,!1r", 4),
+ ENCODING_MAP(kMipsMovz, 0x0000000a,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "movz", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMipsMul, 0x70000002,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "mul", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMipsNop, 0x00000000,
+ kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, NO_OPERAND,
+ "nop", ";", 4),
+ ENCODING_MAP(kMipsNor, 0x00000027, /* used for "not" too */
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "nor", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMipsOr, 0x00000025,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "or", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMipsOri, 0x34000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "ori", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMipsPref, 0xCC000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE2,
+ "pref", "!0d,!1d(!2r)", 4),
+ ENCODING_MAP(kMipsSb, 0xA0000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+ "sb", "!0r,!1d(!2r)", 4),
+#if __mips_isa_rev>=2
+ ENCODING_MAP(kMipsSeb, 0x7c000420,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "seb", "!0r,!1r", 4),
+ ENCODING_MAP(kMipsSeh, 0x7c000620,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "seh", "!0r,!1r", 4),
+#endif
+ ENCODING_MAP(kMipsSh, 0xA4000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+ "sh", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMipsSll, 0x00000000,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "sll", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMipsSllv, 0x00000004,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "sllv", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMipsSlt, 0x0000002a,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "slt", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMipsSlti, 0x28000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "slti", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMipsSltu, 0x0000002b,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "sltu", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMipsSra, 0x00000003,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "sra", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMipsSrav, 0x00000007,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "srav", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMipsSrl, 0x00000002,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "srl", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMipsSrlv, 0x00000006,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "srlv", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMipsSubu, 0x00000023, /* used for "neg" too */
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "subu", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMipsSw, 0xAC000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+ "sw", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMipsXor, 0x00000026,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "xor", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMipsXori, 0x38000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "xori", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMipsFadds, 0x46000000,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "add.s", "!0s,!1s,!2s", 4),
+ ENCODING_MAP(kMipsFsubs, 0x46000001,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "sub.s", "!0s,!1s,!2s", 4),
+ ENCODING_MAP(kMipsFmuls, 0x46000002,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "mul.s", "!0s,!1s,!2s", 4),
+ ENCODING_MAP(kMipsFdivs, 0x46000003,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "div.s", "!0s,!1s,!2s", 4),
+ ENCODING_MAP(kMipsFaddd, 0x46200000,
+ kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "add.d", "!0S,!1S,!2S", 4),
+ ENCODING_MAP(kMipsFsubd, 0x46200001,
+ kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "sub.d", "!0S,!1S,!2S", 4),
+ ENCODING_MAP(kMipsFmuld, 0x46200002,
+ kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "mul.d", "!0S,!1S,!2S", 4),
+ ENCODING_MAP(kMipsFdivd, 0x46200003,
+ kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "div.d", "!0S,!1S,!2S", 4),
+ ENCODING_MAP(kMipsFcvtsd, 0x46200020,
+ kFmtSfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "cvt.s.d", "!0s,!1S", 4),
+ ENCODING_MAP(kMipsFcvtsw, 0x46800020,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "cvt.s.w", "!0s,!1s", 4),
+ ENCODING_MAP(kMipsFcvtds, 0x46000021,
+ kFmtDfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "cvt.d.s", "!0S,!1s", 4),
+ ENCODING_MAP(kMipsFcvtdw, 0x46800021,
+ kFmtDfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "cvt.d.w", "!0S,!1s", 4),
+ ENCODING_MAP(kMipsFcvtws, 0x46000024,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "cvt.w.s", "!0s,!1s", 4),
+ ENCODING_MAP(kMipsFcvtwd, 0x46200024,
+ kFmtSfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "cvt.w.d", "!0s,!1S", 4),
+ ENCODING_MAP(kMipsFmovs, 0x46000006,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "mov.s", "!0s,!1s", 4),
+ ENCODING_MAP(kMipsFmovd, 0x46200006,
+ kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "mov.d", "!0S,!1S", 4),
+ ENCODING_MAP(kMipsFlwc1, 0xC4000000,
+ kFmtSfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+ "lwc1", "!0s,!1d(!2r)", 4),
+ ENCODING_MAP(kMipsFldc1, 0xD4000000,
+ kFmtDfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+ "ldc1", "!0S,!1d(!2r)", 4),
+ ENCODING_MAP(kMipsFswc1, 0xE4000000,
+ kFmtSfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+ "swc1", "!0s,!1d(!2r)", 4),
+ ENCODING_MAP(kMipsFsdc1, 0xF4000000,
+ kFmtDfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+ "sdc1", "!0S,!1d(!2r)", 4),
+ ENCODING_MAP(kMipsMfc1, 0x44000000,
+ kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "mfc1", "!0r,!1s", 4),
+ ENCODING_MAP(kMipsMtc1, 0x44800000,
+ kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | REG_DEF1,
+ "mtc1", "!0r,!1s", 4),
+ ENCODING_MAP(kMipsDelta, 0x27e00000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, 15, 0,
+ kFmtUnused, -1, -1, IS_QUAD_OP | REG_DEF0 | REG_USE_LR |
+ NEEDS_FIXUP, "addiu", "!0r,ra,0x!1h(!1d)", 4),
+ ENCODING_MAP(kMipsDeltaHi, 0x3C000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_QUAD_OP | REG_DEF0 | NEEDS_FIXUP,
+ "lui", "!0r,0x!1h(!1d)", 4),
+ ENCODING_MAP(kMipsDeltaLo, 0x34000000,
+ kFmtBlt5_2, 16, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_QUAD_OP | REG_DEF0_USE0 | NEEDS_FIXUP,
+ "ori", "!0r,!0r,0x!1h(!1d)", 4),
+ ENCODING_MAP(kMipsCurrPC, 0x04110001,
+ kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, NO_OPERAND | IS_BRANCH | REG_DEF_LR,
+ "addiu", "ra,pc,8", 4),
+ ENCODING_MAP(kMipsSync, 0x0000000f,
+ kFmtBitBlt, 10, 6, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP,
+ "sync", ";", 4),
+ ENCODING_MAP(kMipsUndefined, 0x64000000,
+ kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, NO_OPERAND,
+ "undefined", "", 4),
+};
+
+
+/*
+ * Convert a short-form branch to long form. Hopefully, this won't happen
+ * very often because the PIC sequence is especially unfortunate.
+ *
+ * Orig conditional branch
+ * -----------------------
+ * beq rs,rt,target
+ *
+ * Long conditional branch
+ * -----------------------
+ * bne rs,rt,hop
+ * bal .+8 ; r_RA <- anchor
+ * lui r_AT, ((target-anchor) >> 16)
+ * anchor:
+ * ori r_AT, r_AT, ((target-anchor) & 0xffff)
+ * addu r_AT, r_AT, r_RA
+ * jr r_AT
+ * hop:
+ *
+ * Orig unconditional branch
+ * -------------------------
+ * b target
+ *
+ * Long unconditional branch
+ * -----------------------
+ * bal .+8 ; r_RA <- anchor
+ * lui r_AT, ((target-anchor) >> 16)
+ * anchor:
+ * ori r_AT, r_AT, ((target-anchor) & 0xffff)
+ * addu r_AT, r_AT, r_RA
+ * jr r_AT
+ *
+ *
+ * NOTE: An out-of-range bal isn't supported because it should
+ * never happen with the current PIC model.
+ */
+static void ConvertShortToLongBranch(CompilationUnit* cu, LIR* lir)
+{
+ // For conditional branches we'll need to reverse the sense
+ bool unconditional = false;
+ int opcode = lir->opcode;
+ int dalvik_offset = lir->dalvik_offset;
+ switch (opcode) {
+ case kMipsBal:
+ LOG(FATAL) << "long branch and link unsupported";
+ case kMipsB:
+ unconditional = true;
+ break;
+ case kMipsBeq: opcode = kMipsBne; break;
+ case kMipsBne: opcode = kMipsBeq; break;
+ case kMipsBeqz: opcode = kMipsBnez; break;
+ case kMipsBgez: opcode = kMipsBltz; break;
+ case kMipsBgtz: opcode = kMipsBlez; break;
+ case kMipsBlez: opcode = kMipsBgtz; break;
+ case kMipsBltz: opcode = kMipsBgez; break;
+ case kMipsBnez: opcode = kMipsBeqz; break;
+ default:
+ LOG(FATAL) << "Unexpected branch kind " << opcode;
+ }
+ LIR* hop_target = NULL;
+ if (!unconditional) {
+ hop_target = RawLIR(cu, dalvik_offset, kPseudoTargetLabel);
+ LIR* hop_branch = RawLIR(cu, dalvik_offset, opcode, lir->operands[0],
+ lir->operands[1], 0, 0, 0, hop_target);
+ InsertLIRBefore(lir, hop_branch);
+ }
+ LIR* curr_pc = RawLIR(cu, dalvik_offset, kMipsCurrPC);
+ InsertLIRBefore(lir, curr_pc);
+ LIR* anchor = RawLIR(cu, dalvik_offset, kPseudoTargetLabel);
+ LIR* delta_hi = RawLIR(cu, dalvik_offset, kMipsDeltaHi, r_AT, 0,
+ reinterpret_cast<uintptr_t>(anchor), 0, 0, lir->target);
+ InsertLIRBefore(lir, delta_hi);
+ InsertLIRBefore(lir, anchor);
+ LIR* delta_lo = RawLIR(cu, dalvik_offset, kMipsDeltaLo, r_AT, 0,
+ reinterpret_cast<uintptr_t>(anchor), 0, 0, lir->target);
+ InsertLIRBefore(lir, delta_lo);
+ LIR* addu = RawLIR(cu, dalvik_offset, kMipsAddu, r_AT, r_AT, r_RA);
+ InsertLIRBefore(lir, addu);
+ LIR* jr = RawLIR(cu, dalvik_offset, kMipsJr, r_AT);
+ InsertLIRBefore(lir, jr);
+ if (!unconditional) {
+ InsertLIRBefore(lir, hop_target);
+ }
+ lir->flags.is_nop = true;
+}
+
+/*
+ * Assemble the LIR into binary instruction format. Note that we may
+ * discover that pc-relative displacements may not fit the selected
+ * instruction. In those cases we will try to substitute a new code
+ * sequence or request that the trace be shortened and retried.
+ */
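+/*
+ * Note on the range checks below: branch displacements are signed 16-bit word
+ * offsets, so the longest forward reach is 0x7fff * 4 = 131068 bytes; a target
+ * outside that window is rewritten via ConvertShortToLongBranch() and the whole
+ * method is reassembled (kRetryAll).
+ */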
+AssemblerStatus MipsCodegen::AssembleInstructions(CompilationUnit *cu, uintptr_t start_addr)
+{
+ LIR *lir;
+ AssemblerStatus res = kSuccess; // Assume success
+
+ for (lir = cu->first_lir_insn; lir != NULL; lir = NEXT_LIR(lir)) {
+ if (lir->opcode < 0) {
+ continue;
+ }
+
+
+ if (lir->flags.is_nop) {
+ continue;
+ }
+
+ if (lir->flags.pcRelFixup) {
+ if (lir->opcode == kMipsDelta) {
+ /*
+ * The "Delta" pseudo-ops load the difference between
+ * two pc-relative locations into the target register
+ * found in operands[0]. The delta is determined by
+ * (label2 - label1), where label1 is a standard
+ * kPseudoTargetLabel and is stored in operands[2].
+ * If operands[3] is null, then label2 is a kPseudoTargetLabel
+ * and is found in lir->target. If operands[3] is non-NULL,
+ * then it is a Switch/Data table.
+ */
+ int offset1 = (reinterpret_cast<LIR*>(lir->operands[2]))->offset;
+ SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+ int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
+ int delta = offset2 - offset1;
+ if ((delta & 0xffff) == delta && ((delta & 0x8000) == 0)) {
+ // Fits
+ lir->operands[1] = delta;
+ } else {
+ // Doesn't fit - must expand to kMipsDelta[Hi|Lo] pair
+ LIR *new_delta_hi =
+ RawLIR(cu, lir->dalvik_offset, kMipsDeltaHi,
+ lir->operands[0], 0, lir->operands[2],
+ lir->operands[3], 0, lir->target);
+ InsertLIRBefore(lir, new_delta_hi);
+ LIR *new_delta_lo =
+ RawLIR(cu, lir->dalvik_offset, kMipsDeltaLo,
+ lir->operands[0], 0, lir->operands[2],
+ lir->operands[3], 0, lir->target);
+ InsertLIRBefore(lir, new_delta_lo);
+ LIR *new_addu =
+ RawLIR(cu, lir->dalvik_offset, kMipsAddu,
+ lir->operands[0], lir->operands[0], r_RA);
+ InsertLIRBefore(lir, new_addu);
+ lir->flags.is_nop = true;
+ res = kRetryAll;
+ }
+ } else if (lir->opcode == kMipsDeltaLo) {
+ int offset1 = (reinterpret_cast<LIR*>(lir->operands[2]))->offset;
+ SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+ int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
+ int delta = offset2 - offset1;
+ lir->operands[1] = delta & 0xffff;
+ } else if (lir->opcode == kMipsDeltaHi) {
+ int offset1 = (reinterpret_cast<LIR*>(lir->operands[2]))->offset;
+ SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+ int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
+ int delta = offset2 - offset1;
+ lir->operands[1] = (delta >> 16) & 0xffff;
+ } else if (lir->opcode == kMipsB || lir->opcode == kMipsBal) {
+ LIR *target_lir = lir->target;
+ uintptr_t pc = lir->offset + 4;
+ uintptr_t target = target_lir->offset;
+ int delta = target - pc;
+ if (delta & 0x3) {
+ LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
+ }
+ if (delta > 131068 || delta < -131069) {
+ res = kRetryAll;
+ ConvertShortToLongBranch(cu, lir);
+ } else {
+ lir->operands[0] = delta >> 2;
+ }
+ } else if (lir->opcode >= kMipsBeqz && lir->opcode <= kMipsBnez) {
+ LIR *target_lir = lir->target;
+ uintptr_t pc = lir->offset + 4;
+ uintptr_t target = target_lir->offset;
+ int delta = target - pc;
+ if (delta & 0x3) {
+ LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
+ }
+ if (delta > 131068 || delta < -131069) {
+ res = kRetryAll;
+ ConvertShortToLongBranch(cu, lir);
+ } else {
+ lir->operands[1] = delta >> 2;
+ }
+ } else if (lir->opcode == kMipsBeq || lir->opcode == kMipsBne) {
+ LIR *target_lir = lir->target;
+ uintptr_t pc = lir->offset + 4;
+ uintptr_t target = target_lir->offset;
+ int delta = target - pc;
+ if (delta & 0x3) {
+ LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
+ }
+ if (delta > 131068 || delta < -131069) {
+ res = kRetryAll;
+ ConvertShortToLongBranch(cu, lir);
+ } else {
+ lir->operands[2] = delta >> 2;
+ }
+ } else if (lir->opcode == kMipsJal) {
+ uintptr_t cur_pc = (start_addr + lir->offset + 4) & ~3;
+ uintptr_t target = lir->operands[0];
+ /* ensure PC-region branch can be used */
+ DCHECK_EQ((cur_pc & 0xF0000000), (target & 0xF0000000));
+ if (target & 0x3) {
+ LOG(FATAL) << "Jump target not multiple of 4: " << target;
+ }
+ lir->operands[0] = target >> 2;
+ } else if (lir->opcode == kMipsLahi) { /* ld address hi (via lui) */
+ LIR *target_lir = lir->target;
+ uintptr_t target = start_addr + target_lir->offset;
+ lir->operands[1] = target >> 16;
+ } else if (lir->opcode == kMipsLalo) { /* ld address lo (via ori) */
+ LIR *target_lir = lir->target;
+ uintptr_t target = start_addr + target_lir->offset;
+ lir->operands[2] = lir->operands[2] + target;
+ }
+ }
+
+ /*
+ * If one of the pc-relative instructions expanded we'll have
+ * to make another pass. Don't bother to fully assemble the
+ * instruction.
+ */
+ if (res != kSuccess) {
+ continue;
+ }
+ const MipsEncodingMap *encoder = &EncodingMap[lir->opcode];
+ uint32_t bits = encoder->skeleton;
+ int i;
+ for (i = 0; i < 4; i++) {
+ uint32_t operand;
+ uint32_t value;
+ operand = lir->operands[i];
+ switch (encoder->field_loc[i].kind) {
+ case kFmtUnused:
+ break;
+ case kFmtBitBlt:
+ if (encoder->field_loc[i].start == 0 && encoder->field_loc[i].end == 31) {
+ value = operand;
+ } else {
+ value = (operand << encoder->field_loc[i].start) &
+ ((1 << (encoder->field_loc[i].end + 1)) - 1);
+ }
+ bits |= value;
+ break;
+ case kFmtBlt5_2:
+ value = (operand & 0x1f);
+ bits |= (value << encoder->field_loc[i].start);
+ bits |= (value << encoder->field_loc[i].end);
+ break;
+ case kFmtDfp: {
+ DCHECK(MIPS_DOUBLEREG(operand));
+ DCHECK_EQ((operand & 0x1), 0U);
+ value = ((operand & MIPS_FP_REG_MASK) << encoder->field_loc[i].start) &
+ ((1 << (encoder->field_loc[i].end + 1)) - 1);
+ bits |= value;
+ break;
+ }
+ case kFmtSfp:
+ DCHECK(MIPS_SINGLEREG(operand));
+ value = ((operand & MIPS_FP_REG_MASK) << encoder->field_loc[i].start) &
+ ((1 << (encoder->field_loc[i].end + 1)) - 1);
+ bits |= value;
+ break;
+ default:
+ LOG(FATAL) << "Bad encoder format: " << encoder->field_loc[i].kind;
+ }
+ }
+ // We only support little-endian MIPS.
+ cu->code_buffer.push_back(bits & 0xff);
+ cu->code_buffer.push_back((bits >> 8) & 0xff);
+ cu->code_buffer.push_back((bits >> 16) & 0xff);
+ cu->code_buffer.push_back((bits >> 24) & 0xff);
+ // TUNING: replace with proper delay slot handling
+ if (encoder->size == 8) {
+ const MipsEncodingMap *encoder = &EncodingMap[kMipsNop];
+ uint32_t bits = encoder->skeleton;
+ cu->code_buffer.push_back(bits & 0xff);
+ cu->code_buffer.push_back((bits >> 8) & 0xff);
+ cu->code_buffer.push_back((bits >> 16) & 0xff);
+ cu->code_buffer.push_back((bits >> 24) & 0xff);
+ }
+ }
+ return res;
+}
+
+int MipsCodegen::GetInsnSize(LIR* lir)
+{
+ return EncodingMap[lir->opcode].size;
+}
+
+} // namespace art
diff --git a/src/compiler/dex/quick/mips/call_mips.cc b/src/compiler/dex/quick/mips/call_mips.cc
new file mode 100644
index 0000000..4fbb16b
--- /dev/null
+++ b/src/compiler/dex/quick/mips/call_mips.cc
@@ -0,0 +1,387 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This file contains codegen for the Mips ISA */
+
+#include "codegen_mips.h"
+#include "compiler/dex/quick/codegen_util.h"
+#include "compiler/dex/quick/ralloc_util.h"
+#include "mips_lir.h"
+#include "oat/runtime/oat_support_entrypoints.h"
+
+namespace art {
+
+void MipsCodegen::GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+ SpecialCaseHandler special_case)
+{
+ // TODO
+}
+
+/*
+ * The lack of pc-relative loads on Mips presents somewhat of a challenge
+ * for our PIC switch table strategy. To materialize the current location
+ * we'll do a dummy JAL and reference our tables using r_RA as the
+ * base register. Note that r_RA will be used both as the base to
+ * locate the switch table data and as the reference base for the switch
+ * target offsets stored in the table. We'll use a special pseudo-instruction
+ * to represent the jal and trigger the construction of the
+ * switch table offsets (which will happen after final assembly and all
+ * labels are fixed).
+ *
+ * The test loop will look something like:
+ *
+ * ori rEnd, r_ZERO, #table_size ; size in bytes
+ * jal BaseLabel ; stores "return address" (BaseLabel) in r_RA
+ * nop ; opportunistically fill
+ * BaseLabel:
+ * addiu rBase, r_RA, <table> - <BaseLabel> ; table relative to BaseLabel
+ * addu rEnd, rEnd, rBase ; end of table
+ * lw r_val, [rSP, v_reg_off] ; Test Value
+ * loop:
+ * beq rBase, rEnd, done
+ * lw r_key, 0(rBase)
+ * addu rBase, 8
+ * bne r_val, r_key, loop
+ * lw r_disp, -4(rBase)
+ * addu r_RA, r_disp
+ * jr r_RA
+ * done:
+ *
+ */
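+/*
+ * Layout of the generated table (4-byte entries):
+ *
+ *   rBase -> key[0], disp[0], key[1], disp[1], ..., key[n-1], disp[n-1]
+ *
+ * which is why the test loop advances rBase by 8 and, on a key match, reads the
+ * displacement back at -4(rBase).
+ */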
+void MipsCodegen::GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
+{
+ const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
+ if (cu->verbose) {
+ DumpSparseSwitchTable(table);
+ }
+ // Add the table to the list - we'll process it later
+ SwitchTable *tab_rec =
+ static_cast<SwitchTable*>(NewMem(cu, sizeof(SwitchTable), true, kAllocData));
+ tab_rec->table = table;
+ tab_rec->vaddr = cu->current_dalvik_offset;
+ int elements = table[1];
+ tab_rec->targets =
+ static_cast<LIR**>(NewMem(cu, elements * sizeof(LIR*), true, kAllocLIR));
+ InsertGrowableList(cu, &cu->switch_tables, reinterpret_cast<uintptr_t>(tab_rec));
+
+ // The table is composed of 8-byte key/disp pairs
+ int byte_size = elements * 8;
+
+ int size_hi = byte_size >> 16;
+ int size_lo = byte_size & 0xffff;
+
+ int rEnd = AllocTemp(cu);
+ if (size_hi) {
+ NewLIR2(cu, kMipsLui, rEnd, size_hi);
+ }
+ // Must prevent code motion for the curr pc pair
+ GenBarrier(cu); // Scheduling barrier
+ NewLIR0(cu, kMipsCurrPC); // Really a jal to .+8
+ // Now, fill the branch delay slot
+ if (size_hi) {
+ NewLIR3(cu, kMipsOri, rEnd, rEnd, size_lo);
+ } else {
+ NewLIR3(cu, kMipsOri, rEnd, r_ZERO, size_lo);
+ }
+ GenBarrier(cu); // Scheduling barrier
+
+ // Construct BaseLabel and set up table base register
+ LIR* base_label = NewLIR0(cu, kPseudoTargetLabel);
+ // Remember base label so offsets can be computed later
+ tab_rec->anchor = base_label;
+ int rBase = AllocTemp(cu);
+ NewLIR4(cu, kMipsDelta, rBase, 0, reinterpret_cast<uintptr_t>(base_label),
+ reinterpret_cast<uintptr_t>(tab_rec));
+ OpRegRegReg(cu, kOpAdd, rEnd, rEnd, rBase);
+
+ // Grab switch test value
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+
+ // Test loop
+ int r_key = AllocTemp(cu);
+ LIR* loop_label = NewLIR0(cu, kPseudoTargetLabel);
+ LIR* exit_branch = OpCmpBranch(cu , kCondEq, rBase, rEnd, NULL);
+ LoadWordDisp(cu, rBase, 0, r_key);
+ OpRegImm(cu, kOpAdd, rBase, 8);
+ OpCmpBranch(cu, kCondNe, rl_src.low_reg, r_key, loop_label);
+ int r_disp = AllocTemp(cu);
+ LoadWordDisp(cu, rBase, -4, r_disp);
+ OpRegRegReg(cu, kOpAdd, r_RA, r_RA, r_disp);
+ OpReg(cu, kOpBx, r_RA);
+
+ // Loop exit
+ LIR* exit_label = NewLIR0(cu, kPseudoTargetLabel);
+ exit_branch->target = exit_label;
+}
+
+/*
+ * Code pattern will look something like:
+ *
+ * lw r_val
+ * jal BaseLabel ; stores "return address" (BaseLabel) in r_RA
+ * nop ; opportunistically fill
+ * [subiu r_val, bias] ; Remove bias if low_val != 0
+ * bound check -> done
+ * lw r_disp, [r_RA, r_val]
+ * addu r_RA, r_disp
+ * jr r_RA
+ * done:
+ */
+void MipsCodegen::GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
+{
+ const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
+ if (cu->verbose) {
+ DumpPackedSwitchTable(table);
+ }
+ // Add the table to the list - we'll process it later
+ SwitchTable *tab_rec =
+ static_cast<SwitchTable*>(NewMem(cu, sizeof(SwitchTable), true, kAllocData));
+ tab_rec->table = table;
+ tab_rec->vaddr = cu->current_dalvik_offset;
+ int size = table[1];
+ tab_rec->targets = static_cast<LIR**>(NewMem(cu, size * sizeof(LIR*), true, kAllocLIR));
+ InsertGrowableList(cu, &cu->switch_tables, reinterpret_cast<uintptr_t>(tab_rec));
+
+ // Get the switch value
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+
+ // Prepare the bias. If it won't fit in a 16-bit immediate, materialize it here
+ int low_key = s4FromSwitchData(&table[2]);
+ bool large_bias = false;
+ int r_key;
+ if (low_key == 0) {
+ r_key = rl_src.low_reg;
+ } else if ((low_key & 0xffff) != low_key) {
+ r_key = AllocTemp(cu);
+ LoadConstant(cu, r_key, low_key);
+ large_bias = true;
+ } else {
+ r_key = AllocTemp(cu);
+ }
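+ // Illustrative case: a low_key of 0x12345 fails the 16-bit test above
+ // (0x12345 & 0xffff == 0x2345), so it is materialized into r_key and removed
+ // with a register-register subu in the delay slot below.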
+
+ // Must prevent code motion for the curr pc pair
+ GenBarrier(cu);
+ NewLIR0(cu, kMipsCurrPC); // Really a jal to .+8
+ // Now, fill the branch delay slot with bias strip
+ if (low_key == 0) {
+ NewLIR0(cu, kMipsNop);
+ } else {
+ if (large_bias) {
+ OpRegRegReg(cu, kOpSub, r_key, rl_src.low_reg, r_key);
+ } else {
+ OpRegRegImm(cu, kOpSub, r_key, rl_src.low_reg, low_key);
+ }
+ }
+ GenBarrier(cu); // Scheduling barrier
+
+ // Construct BaseLabel and set up table base register
+ LIR* base_label = NewLIR0(cu, kPseudoTargetLabel);
+ // Remember base label so offsets can be computed later
+ tab_rec->anchor = base_label;
+
+ // Bounds check - an unsigned compare against size-1 catches key < 0 and key >= size,
+ // in which case execution continues with the code following the switch
+ LIR* branch_over = OpCmpImmBranch(cu, kCondHi, r_key, size-1, NULL);
+
+ // Materialize the table base pointer
+ int rBase = AllocTemp(cu);
+ NewLIR4(cu, kMipsDelta, rBase, 0, reinterpret_cast<uintptr_t>(base_label),
+ reinterpret_cast<uintptr_t>(tab_rec));
+
+ // Load the displacement from the switch table
+ int r_disp = AllocTemp(cu);
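+ // The scale of 2 below shifts r_key left by two bits, i.e. each table entry
+ // is a 4-byte displacement.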
+ LoadBaseIndexed(cu, rBase, r_key, r_disp, 2, kWord);
+
+ // Add to r_RA and go
+ OpRegRegReg(cu, kOpAdd, r_RA, r_RA, r_disp);
+ OpReg(cu, kOpBx, r_RA);
+
+ /* branch_over target here */
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+ branch_over->target = target;
+}
+
+/*
+ * Array data table format:
+ * ushort ident = 0x0300 magic value
+ * ushort width width of each element in the table
+ * uint size number of elements in the table
+ * ubyte data[size*width] table of data values (may contain a single-byte
+ * padding at the end)
+ *
+ * Total size is 4+(width * size + 1)/2 16-bit code units.
+ */
+void MipsCodegen::GenFillArrayData(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
+{
+ const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
+ // Add the table to the list - we'll process it later
+ FillArrayData *tab_rec =
+ reinterpret_cast<FillArrayData*>(NewMem(cu, sizeof(FillArrayData), true, kAllocData));
+ tab_rec->table = table;
+ tab_rec->vaddr = cu->current_dalvik_offset;
+ uint16_t width = tab_rec->table[1];
+ uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
+ tab_rec->size = (size * width) + 8;
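+ // Worked example (illustrative): a short[3] payload has width = 2 and size = 3,
+ // i.e. 4 + (2*3 + 1)/2 = 7 code units in the dex stream and a tab_rec->size of
+ // (3 * 2) + 8 = 14 bytes here.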
+
+ InsertGrowableList(cu, &cu->fill_array_data, reinterpret_cast<uintptr_t>(tab_rec));
+
+ // Making a call - use explicit registers
+ FlushAllRegs(cu); /* Everything to home location */
+ LockCallTemps(cu);
+ LoadValueDirectFixed(cu, rl_src, rMIPS_ARG0);
+
+ // Must prevent code motion for the curr pc pair
+ GenBarrier(cu);
+ NewLIR0(cu, kMipsCurrPC); // Really a jal to .+8
+ // Now, fill the branch delay slot with the helper load
+ int r_tgt = LoadHelper(cu, ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode));
+ GenBarrier(cu); // Scheduling barrier
+
+ // Construct BaseLabel and set up table base register
+ LIR* base_label = NewLIR0(cu, kPseudoTargetLabel);
+
+ // Materialize a pointer to the fill data image
+ NewLIR4(cu, kMipsDelta, rMIPS_ARG1, 0, reinterpret_cast<uintptr_t>(base_label),
+ reinterpret_cast<uintptr_t>(tab_rec));
+
+ // And go...
+ ClobberCalleeSave(cu);
+ LIR* call_inst = OpReg(cu, kOpBlx, r_tgt); // ( array*, fill_data* )
+ MarkSafepointPC(cu, call_inst);
+}
+
+/*
+ * TODO: implement fast path to short-circuit thin-lock case
+ */
+void MipsCodegen::GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
+{
+ FlushAllRegs(cu);
+ LoadValueDirectFixed(cu, rl_src, rMIPS_ARG0); // Get obj
+ LockCallTemps(cu); // Prepare for explicit register usage
+ GenNullCheck(cu, rl_src.s_reg_low, rMIPS_ARG0, opt_flags);
+ // Go expensive route - artLockObjectFromCode(self, obj);
+ int r_tgt = LoadHelper(cu, ENTRYPOINT_OFFSET(pLockObjectFromCode));
+ ClobberCalleeSave(cu);
+ LIR* call_inst = OpReg(cu, kOpBlx, r_tgt);
+ MarkSafepointPC(cu, call_inst);
+}
+
+/*
+ * TODO: implement fast path to short-circuit thin-lock case
+ */
+void MipsCodegen::GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
+{
+ FlushAllRegs(cu);
+ LoadValueDirectFixed(cu, rl_src, rMIPS_ARG0); // Get obj
+ LockCallTemps(cu); // Prepare for explicit register usage
+ GenNullCheck(cu, rl_src.s_reg_low, rMIPS_ARG0, opt_flags);
+ // Go expensive route - UnlockObjectFromCode(obj);
+ int r_tgt = LoadHelper(cu, ENTRYPOINT_OFFSET(pUnlockObjectFromCode));
+ ClobberCalleeSave(cu);
+ LIR* call_inst = OpReg(cu, kOpBlx, r_tgt);
+ MarkSafepointPC(cu, call_inst);
+}
+
+void MipsCodegen::GenMoveException(CompilationUnit* cu, RegLocation rl_dest)
+{
+ int ex_offset = Thread::ExceptionOffset().Int32Value();
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ int reset_reg = AllocTemp(cu);
+ LoadWordDisp(cu, rMIPS_SELF, ex_offset, rl_result.low_reg);
+ LoadConstant(cu, reset_reg, 0);
+ StoreWordDisp(cu, rMIPS_SELF, ex_offset, reset_reg);
+ FreeTemp(cu, reset_reg);
+ StoreValue(cu, rl_dest, rl_result);
+}
+
+/*
+ * Mark garbage collection card. Skip if the value we're storing is null.
+ */
+void MipsCodegen::MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg)
+{
+ int reg_card_base = AllocTemp(cu);
+ int reg_card_no = AllocTemp(cu);
+ LIR* branch_over = OpCmpImmBranch(cu, kCondEq, val_reg, 0, NULL);
+ LoadWordDisp(cu, rMIPS_SELF, Thread::CardTableOffset().Int32Value(), reg_card_base);
+ OpRegRegImm(cu, kOpLsr, reg_card_no, tgt_addr_reg, CardTable::kCardShift);
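+ // Note the value stored below is reg_card_base itself, truncated to a byte;
+ // presumably the card table is biased so that this low byte is the dirty mark.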
+ StoreBaseIndexed(cu, reg_card_base, reg_card_no, reg_card_base, 0,
+ kUnsignedByte);
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+ branch_over->target = target;
+ FreeTemp(cu, reg_card_base);
+ FreeTemp(cu, reg_card_no);
+}
+void MipsCodegen::GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs, RegLocation rl_method)
+{
+ int spill_count = cu->num_core_spills + cu->num_fp_spills;
+ /*
+ * On entry, rMIPS_ARG0, rMIPS_ARG1, rMIPS_ARG2 & rMIPS_ARG3 are live. Let the register
+ * allocation mechanism know so it doesn't try to use any of them when
+ * expanding the frame or flushing. This leaves the utility
+ * code with a limited set of scratch temps, which should be enough.
+ */
+ LockTemp(cu, rMIPS_ARG0);
+ LockTemp(cu, rMIPS_ARG1);
+ LockTemp(cu, rMIPS_ARG2);
+ LockTemp(cu, rMIPS_ARG3);
+
+ /*
+ * We can safely skip the stack overflow check if we're
+ * a leaf *and* our frame size < fudge factor.
+ */
+ bool skip_overflow_check = ((cu->attrs & METHOD_IS_LEAF) &&
+ (static_cast<size_t>(cu->frame_size) < Thread::kStackOverflowReservedBytes));
+ NewLIR0(cu, kPseudoMethodEntry);
+ int check_reg = AllocTemp(cu);
+ int new_sp = AllocTemp(cu);
+ if (!skip_overflow_check) {
+ /* Load stack limit */
+ LoadWordDisp(cu, rMIPS_SELF, Thread::StackEndOffset().Int32Value(), check_reg);
+ }
+ /* Spill core callee saves */
+ SpillCoreRegs(cu);
+ /* NOTE: promotion of FP regs currently unsupported, thus no FP spill */
+ DCHECK_EQ(cu->num_fp_spills, 0);
+ if (!skip_overflow_check) {
+ OpRegRegImm(cu, kOpSub, new_sp, rMIPS_SP, cu->frame_size - (spill_count * 4));
+ GenRegRegCheck(cu, kCondCc, new_sp, check_reg, kThrowStackOverflow);
+ OpRegCopy(cu, rMIPS_SP, new_sp); // Establish stack
+ } else {
+ OpRegImm(cu, kOpSub, rMIPS_SP, cu->frame_size - (spill_count * 4));
+ }
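+ // Illustrative numbers (assumed, not taken from this change): with frame_size =
+ // 48 and 4 core spills, SpillCoreRegs above would already have dropped SP by 16,
+ // leaving 48 - 16 = 32 bytes to subtract here.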
+
+ FlushIns(cu, ArgLocs, rl_method);
+
+ FreeTemp(cu, rMIPS_ARG0);
+ FreeTemp(cu, rMIPS_ARG1);
+ FreeTemp(cu, rMIPS_ARG2);
+ FreeTemp(cu, rMIPS_ARG3);
+}
+
+void MipsCodegen::GenExitSequence(CompilationUnit* cu)
+{
+ /*
+ * In the exit path, rMIPS_RET0/rMIPS_RET1 are live - make sure they aren't
+ * allocated by the register utilities as temps.
+ */
+ LockTemp(cu, rMIPS_RET0);
+ LockTemp(cu, rMIPS_RET1);
+
+ NewLIR0(cu, kPseudoMethodExit);
+ UnSpillCoreRegs(cu);
+ OpReg(cu, kOpBx, r_RA);
+}
+
+} // namespace art
diff --git a/src/compiler/dex/quick/mips/codegen_mips.h b/src/compiler/dex/quick/mips/codegen_mips.h
new file mode 100644
index 0000000..f889ece
--- /dev/null
+++ b/src/compiler/dex/quick/mips/codegen_mips.h
@@ -0,0 +1,208 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_DEX_QUICK_CODEGEN_MIPS_CODEGENMIPS_H_
+#define ART_SRC_DEX_QUICK_CODEGEN_MIPS_CODEGENMIPS_H_
+
+#include "compiler/dex/compiler_internals.h"
+#include "mips_lir.h"
+
+namespace art {
+
+class MipsCodegen : public Codegen {
+ public:
+ // Required for target - codegen utilities.
+ virtual bool SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
+ RegLocation rl_src, RegLocation rl_dest, int lit);
+ virtual int LoadHelper(CompilationUnit* cu, int offset);
+ virtual LIR* LoadBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_dest,
+ OpSize size, int s_reg);
+ virtual LIR* LoadBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_dest_lo,
+ int r_dest_hi, int s_reg);
+ virtual LIR* LoadBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_dest, int scale,
+ OpSize size);
+ virtual LIR* LoadBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
+ int displacement, int r_dest, int r_dest_hi, OpSize size,
+ int s_reg);
+ virtual LIR* LoadConstantNoClobber(CompilationUnit* cu, int r_dest, int value);
+ virtual LIR* LoadConstantWide(CompilationUnit* cu, int r_dest_lo, int r_dest_hi, int64_t value);
+ virtual LIR* StoreBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_src,
+ OpSize size);
+ virtual LIR* StoreBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_src_lo,
+ int r_src_hi);
+ virtual LIR* StoreBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_src, int scale,
+ OpSize size);
+ virtual LIR* StoreBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
+ int displacement, int r_src, int r_src_hi, OpSize size,
+ int s_reg);
+ virtual void MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg);
+
+ // Required for target - register utilities.
+ virtual bool IsFpReg(int reg);
+ virtual bool SameRegType(int reg1, int reg2);
+ virtual int AllocTypedTemp(CompilationUnit* cu, bool fp_hint, int reg_class);
+ virtual int AllocTypedTempPair(CompilationUnit* cu, bool fp_hint, int reg_class);
+ virtual int S2d(int low_reg, int high_reg);
+ virtual int TargetReg(SpecialTargetRegister reg);
+ virtual RegisterInfo* GetRegInfo(CompilationUnit* cu, int reg);
+ virtual RegLocation GetReturnAlt(CompilationUnit* cu);
+ virtual RegLocation GetReturnWideAlt(CompilationUnit* cu);
+ virtual RegLocation LocCReturn();
+ virtual RegLocation LocCReturnDouble();
+ virtual RegLocation LocCReturnFloat();
+ virtual RegLocation LocCReturnWide();
+ virtual uint32_t FpRegMask();
+ virtual uint64_t GetRegMaskCommon(CompilationUnit* cu, int reg);
+ virtual void AdjustSpillMask(CompilationUnit* cu);
+ virtual void ClobberCalleeSave(CompilationUnit *cu);
+ virtual void FlushReg(CompilationUnit* cu, int reg);
+ virtual void FlushRegWide(CompilationUnit* cu, int reg1, int reg2);
+ virtual void FreeCallTemps(CompilationUnit* cu);
+ virtual void FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep, RegLocation rl_free);
+ virtual void LockCallTemps(CompilationUnit* cu);
+ virtual void MarkPreservedSingle(CompilationUnit* cu, int v_reg, int reg);
+ virtual void CompilerInitializeRegAlloc(CompilationUnit* cu);
+
+ // Required for target - miscellaneous.
+ virtual AssemblerStatus AssembleInstructions(CompilationUnit* cu, uintptr_t start_addr);
+ virtual void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
+ virtual void SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir);
+ virtual const char* GetTargetInstFmt(int opcode);
+ virtual const char* GetTargetInstName(int opcode);
+ virtual std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
+ virtual uint64_t GetPCUseDefEncoding();
+ virtual uint64_t GetTargetInstFlags(int opcode);
+ virtual int GetInsnSize(LIR* lir);
+ virtual bool IsUnconditionalBranch(LIR* lir);
+
+ // Required for target - Dalvik-level generators.
+ virtual void GenArithImmOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2);
+ virtual void GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_src, int scale);
+ virtual void GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_dest, int scale);
+ virtual void GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_src, int scale);
+ virtual void GenShiftImmOpLong(CompilationUnit* cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_shift);
+ virtual void GenMulLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ virtual void GenAddLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ virtual void GenAndLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ virtual void GenArithOpDouble(CompilationUnit* cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ virtual void GenArithOpFloat(CompilationUnit *cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2);
+ virtual void GenCmpFP(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2);
+ virtual void GenConversion(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src);
+ virtual bool GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier);
+ virtual bool GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min);
+ virtual bool GenInlinedSqrt(CompilationUnit* cu, CallInfo* info);
+ virtual void GenNegLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+ virtual void GenOrLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ virtual void GenSubLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ virtual void GenXorLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ virtual LIR* GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code, int reg1, int base,
+ int offset, ThrowKind kind);
+ virtual RegLocation GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int reg_hi,
+ bool is_div);
+ virtual RegLocation GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int lit,
+ bool is_div);
+ virtual void GenCmpLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ virtual void GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi);
+ virtual void GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs,
+ RegLocation rl_method);
+ virtual void GenExitSequence(CompilationUnit* cu);
+ virtual void GenFillArrayData(CompilationUnit* cu, uint32_t table_offset,
+ RegLocation rl_src);
+ virtual void GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir, bool gt_bias,
+ bool is_double);
+ virtual void GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir);
+ virtual void GenSelect(CompilationUnit* cu, BasicBlock* bb, MIR* mir);
+ virtual void GenMemBarrier(CompilationUnit* cu, MemBarrierKind barrier_kind);
+ virtual void GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src);
+ virtual void GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src);
+ virtual void GenMoveException(CompilationUnit* cu, RegLocation rl_dest);
+ virtual void GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src,
+ RegLocation rl_result, int lit, int first_bit,
+ int second_bit);
+ virtual void GenNegDouble(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+ virtual void GenNegFloat(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+ virtual void GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset,
+ RegLocation rl_src);
+ virtual void GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset,
+ RegLocation rl_src);
+ virtual void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+ SpecialCaseHandler special_case);
+
+ // Required for target - single operation generators.
+ virtual LIR* OpUnconditionalBranch(CompilationUnit* cu, LIR* target);
+ virtual LIR* OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1, int src2,
+ LIR* target);
+ virtual LIR* OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg, int check_value,
+ LIR* target);
+ virtual LIR* OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target);
+ virtual LIR* OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg,
+ LIR* target);
+ virtual LIR* OpFpRegCopy(CompilationUnit* cu, int r_dest, int r_src);
+ virtual LIR* OpIT(CompilationUnit* cu, ConditionCode cond, const char* guide);
+ virtual LIR* OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp);
+ virtual LIR* OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target);
+ virtual LIR* OpReg(CompilationUnit* cu, OpKind op, int r_dest_src);
+ virtual LIR* OpRegCopy(CompilationUnit* cu, int r_dest, int r_src);
+ virtual LIR* OpRegCopyNoInsert(CompilationUnit* cu, int r_dest, int r_src);
+ virtual LIR* OpRegImm(CompilationUnit* cu, OpKind op, int r_dest_src1, int value);
+ virtual LIR* OpRegMem(CompilationUnit* cu, OpKind op, int r_dest, int rBase, int offset);
+ virtual LIR* OpRegReg(CompilationUnit* cu, OpKind op, int r_dest_src1, int r_src2);
+ virtual LIR* OpRegRegImm(CompilationUnit* cu, OpKind op, int r_dest, int r_src1, int value);
+ virtual LIR* OpRegRegReg(CompilationUnit* cu, OpKind op, int r_dest, int r_src1,
+ int r_src2);
+ virtual LIR* OpTestSuspend(CompilationUnit* cu, LIR* target);
+ virtual LIR* OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset);
+ virtual LIR* OpVldm(CompilationUnit* cu, int rBase, int count);
+ virtual LIR* OpVstm(CompilationUnit* cu, int rBase, int count);
+ virtual void OpLea(CompilationUnit* cu, int rBase, int reg1, int reg2, int scale,
+ int offset);
+ virtual void OpRegCopyWide(CompilationUnit* cu, int dest_lo, int dest_hi, int src_lo,
+ int src_hi);
+ virtual void OpTlsCmp(CompilationUnit* cu, int offset, int val);
+
+ LIR* LoadBaseDispBody(CompilationUnit* cu, int rBase, int displacement, int r_dest,
+ int r_dest_hi, OpSize size, int s_reg);
+ LIR* StoreBaseDispBody(CompilationUnit* cu, int rBase, int displacement, int r_src,
+ int r_src_hi, OpSize size);
+ void SpillCoreRegs(CompilationUnit* cu);
+ void UnSpillCoreRegs(CompilationUnit* cu);
+ static const MipsEncodingMap EncodingMap[kMipsLast];
+ bool InexpensiveConstantInt(int32_t value);
+ bool InexpensiveConstantFloat(int32_t value);
+ bool InexpensiveConstantLong(int64_t value);
+ bool InexpensiveConstantDouble(int64_t value);
+};
+
+} // namespace art
+
+#endif // ART_SRC_DEX_QUICK_CODEGEN_MIPS_CODEGENMIPS_H_
diff --git a/src/compiler/dex/quick/mips/fp_mips.cc b/src/compiler/dex/quick/mips/fp_mips.cc
new file mode 100644
index 0000000..96007d8
--- /dev/null
+++ b/src/compiler/dex/quick/mips/fp_mips.cc
@@ -0,0 +1,249 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_mips.h"
+#include "compiler/dex/quick/codegen_util.h"
+#include "compiler/dex/quick/ralloc_util.h"
+#include "mips_lir.h"
+#include "oat/runtime/oat_support_entrypoints.h"
+
+namespace art {
+
+void MipsCodegen::GenArithOpFloat(CompilationUnit *cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
+{
+ int op = kMipsNop;
+ RegLocation rl_result;
+
+ /*
+ * Don't attempt to optimize register usage since these opcodes call out to
+ * the handlers.
+ */
+ switch (opcode) {
+ case Instruction::ADD_FLOAT_2ADDR:
+ case Instruction::ADD_FLOAT:
+ op = kMipsFadds;
+ break;
+ case Instruction::SUB_FLOAT_2ADDR:
+ case Instruction::SUB_FLOAT:
+ op = kMipsFsubs;
+ break;
+ case Instruction::DIV_FLOAT_2ADDR:
+ case Instruction::DIV_FLOAT:
+ op = kMipsFdivs;
+ break;
+ case Instruction::MUL_FLOAT_2ADDR:
+ case Instruction::MUL_FLOAT:
+ op = kMipsFmuls;
+ break;
+ case Instruction::REM_FLOAT_2ADDR:
+ case Instruction::REM_FLOAT:
+ FlushAllRegs(cu); // Send everything to home location
+ CallRuntimeHelperRegLocationRegLocation(cu, ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, false);
+ rl_result = GetReturn(cu, true);
+ StoreValue(cu, rl_dest, rl_result);
+ return;
+ case Instruction::NEG_FLOAT:
+ GenNegFloat(cu, rl_dest, rl_src1);
+ return;
+ default:
+ LOG(FATAL) << "Unexpected opcode: " << opcode;
+ }
+ rl_src1 = LoadValue(cu, rl_src1, kFPReg);
+ rl_src2 = LoadValue(cu, rl_src2, kFPReg);
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ NewLIR3(cu, op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
+ StoreValue(cu, rl_dest, rl_result);
+}
+
+void MipsCodegen::GenArithOpDouble(CompilationUnit *cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
+{
+ int op = kMipsNop;
+ RegLocation rl_result;
+
+ switch (opcode) {
+ case Instruction::ADD_DOUBLE_2ADDR:
+ case Instruction::ADD_DOUBLE:
+ op = kMipsFaddd;
+ break;
+ case Instruction::SUB_DOUBLE_2ADDR:
+ case Instruction::SUB_DOUBLE:
+ op = kMipsFsubd;
+ break;
+ case Instruction::DIV_DOUBLE_2ADDR:
+ case Instruction::DIV_DOUBLE:
+ op = kMipsFdivd;
+ break;
+ case Instruction::MUL_DOUBLE_2ADDR:
+ case Instruction::MUL_DOUBLE:
+ op = kMipsFmuld;
+ break;
+ case Instruction::REM_DOUBLE_2ADDR:
+ case Instruction::REM_DOUBLE:
+ FlushAllRegs(cu); // Send everything to home location
+ CallRuntimeHelperRegLocationRegLocation(cu, ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, false);
+ rl_result = GetReturnWide(cu, true);
+ StoreValueWide(cu, rl_dest, rl_result);
+ return;
+ case Instruction::NEG_DOUBLE:
+ GenNegDouble(cu, rl_dest, rl_src1);
+ return;
+ default:
+ LOG(FATAL) << "Unpexpected opcode: " << opcode;
+ }
+ rl_src1 = LoadValueWide(cu, rl_src1, kFPReg);
+ DCHECK(rl_src1.wide);
+ rl_src2 = LoadValueWide(cu, rl_src2, kFPReg);
+ DCHECK(rl_src2.wide);
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ DCHECK(rl_dest.wide);
+ DCHECK(rl_result.wide);
+ NewLIR3(cu, op, S2d(rl_result.low_reg, rl_result.high_reg), S2d(rl_src1.low_reg, rl_src1.high_reg),
+ S2d(rl_src2.low_reg, rl_src2.high_reg));
+ StoreValueWide(cu, rl_dest, rl_result);
+}
+
+void MipsCodegen::GenConversion(CompilationUnit *cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src)
+{
+ int op = kMipsNop;
+ int src_reg;
+ RegLocation rl_result;
+ switch (opcode) {
+ case Instruction::INT_TO_FLOAT:
+ op = kMipsFcvtsw;
+ break;
+ case Instruction::DOUBLE_TO_FLOAT:
+ op = kMipsFcvtsd;
+ break;
+ case Instruction::FLOAT_TO_DOUBLE:
+ op = kMipsFcvtds;
+ break;
+ case Instruction::INT_TO_DOUBLE:
+ op = kMipsFcvtdw;
+ break;
+ case Instruction::FLOAT_TO_INT:
+ GenConversionCall(cu, ENTRYPOINT_OFFSET(pF2iz), rl_dest, rl_src);
+ return;
+ case Instruction::DOUBLE_TO_INT:
+ GenConversionCall(cu, ENTRYPOINT_OFFSET(pD2iz), rl_dest, rl_src);
+ return;
+ case Instruction::LONG_TO_DOUBLE:
+ GenConversionCall(cu, ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src);
+ return;
+ case Instruction::FLOAT_TO_LONG:
+ GenConversionCall(cu, ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src);
+ return;
+ case Instruction::LONG_TO_FLOAT:
+ GenConversionCall(cu, ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src);
+ return;
+ case Instruction::DOUBLE_TO_LONG:
+ GenConversionCall(cu, ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src);
+ return;
+ default:
+ LOG(FATAL) << "Unexpected opcode: " << opcode;
+ }
+ if (rl_src.wide) {
+ rl_src = LoadValueWide(cu, rl_src, kFPReg);
+ src_reg = S2d(rl_src.low_reg, rl_src.high_reg);
+ } else {
+ rl_src = LoadValue(cu, rl_src, kFPReg);
+ src_reg = rl_src.low_reg;
+ }
+ if (rl_dest.wide) {
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ NewLIR2(cu, op, S2d(rl_result.low_reg, rl_result.high_reg), src_reg);
+ StoreValueWide(cu, rl_dest, rl_result);
+ } else {
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ NewLIR2(cu, op, rl_result.low_reg, src_reg);
+ StoreValue(cu, rl_dest, rl_result);
+ }
+}
+
+void MipsCodegen::GenCmpFP(CompilationUnit *cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
+{
+ bool wide = true;
+ int offset = -1; // Make gcc happy.
+
+ switch (opcode) {
+ case Instruction::CMPL_FLOAT:
+ offset = ENTRYPOINT_OFFSET(pCmplFloat);
+ wide = false;
+ break;
+ case Instruction::CMPG_FLOAT:
+ offset = ENTRYPOINT_OFFSET(pCmpgFloat);
+ wide = false;
+ break;
+ case Instruction::CMPL_DOUBLE:
+ offset = ENTRYPOINT_OFFSET(pCmplDouble);
+ break;
+ case Instruction::CMPG_DOUBLE:
+ offset = ENTRYPOINT_OFFSET(pCmpgDouble);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected opcode: " << opcode;
+ }
+ FlushAllRegs(cu);
+ LockCallTemps(cu);
+ if (wide) {
+ LoadValueDirectWideFixed(cu, rl_src1, rMIPS_FARG0, rMIPS_FARG1);
+ LoadValueDirectWideFixed(cu, rl_src2, rMIPS_FARG2, rMIPS_FARG3);
+ } else {
+ LoadValueDirectFixed(cu, rl_src1, rMIPS_FARG0);
+ LoadValueDirectFixed(cu, rl_src2, rMIPS_FARG2);
+ }
+ int r_tgt = LoadHelper(cu, offset);
+ // NOTE: not a safepoint
+ OpReg(cu, kOpBlx, r_tgt);
+ RegLocation rl_result = GetReturn(cu, false);
+ StoreValue(cu, rl_dest, rl_result);
+}
+
+void MipsCodegen::GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+ bool gt_bias, bool is_double)
+{
+ UNIMPLEMENTED(FATAL) << "Need codegen for fused fp cmp branch";
+}
+
+void MipsCodegen::GenNegFloat(CompilationUnit *cu, RegLocation rl_dest, RegLocation rl_src)
+{
+ RegLocation rl_result;
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
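+ // Adding 0x80000000 toggles only bit 31, i.e. it flips the IEEE-754 sign bit.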
+ OpRegRegImm(cu, kOpAdd, rl_result.low_reg, rl_src.low_reg, 0x80000000);
+ StoreValue(cu, rl_dest, rl_result);
+}
+
+void MipsCodegen::GenNegDouble(CompilationUnit *cu, RegLocation rl_dest, RegLocation rl_src)
+{
+ RegLocation rl_result;
+ rl_src = LoadValueWide(cu, rl_src, kCoreReg);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ OpRegRegImm(cu, kOpAdd, rl_result.high_reg, rl_src.high_reg, 0x80000000);
+ OpRegCopy(cu, rl_result.low_reg, rl_src.low_reg);
+ StoreValueWide(cu, rl_dest, rl_result);
+}
+
+bool MipsCodegen::GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min)
+{
+ // TODO: need Mips implementation
+ return false;
+}
+
+} // namespace art
diff --git a/src/compiler/dex/quick/mips/int_mips.cc b/src/compiler/dex/quick/mips/int_mips.cc
new file mode 100644
index 0000000..d648c44
--- /dev/null
+++ b/src/compiler/dex/quick/mips/int_mips.cc
@@ -0,0 +1,659 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This file contains codegen for the Mips ISA */
+
+#include "codegen_mips.h"
+#include "compiler/dex/quick/codegen_util.h"
+#include "compiler/dex/quick/ralloc_util.h"
+#include "mips_lir.h"
+#include "oat/runtime/oat_support_entrypoints.h"
+
+namespace art {
+
+/*
+ * Compare two 64-bit values
+ * x = y return 0
+ * x < y return -1
+ * x > y return 1
+ *
+ * slt t0, x.hi, y.hi; # (x.hi < y.hi) ? 1:0
+ * slt t1, y.hi, x.hi; # (x.hi > y.hi) ? 1:0
+ * subu res, t1, t0 # res = -1:1:0 for [ < > = ]
+ * bnez res, finish
+ * sltu t0, x.lo, y.lo
+ * sltu t1, y.lo, x.lo
+ * subu res, t1, t0
+ * finish:
+ *
+ */
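+// Illustrative check: for x = 0x0000000100000000 and y = 0x00000000ffffffff the
+// high words already differ (t0 = 0, t1 = 1), so res = 1 (x > y) and the
+// unsigned low-word compare is branched over.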
+void MipsCodegen::GenCmpLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2)
+{
+ rl_src1 = LoadValueWide(cu, rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(cu, rl_src2, kCoreReg);
+ int t0 = AllocTemp(cu);
+ int t1 = AllocTemp(cu);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ NewLIR3(cu, kMipsSlt, t0, rl_src1.high_reg, rl_src2.high_reg);
+ NewLIR3(cu, kMipsSlt, t1, rl_src2.high_reg, rl_src1.high_reg);
+ NewLIR3(cu, kMipsSubu, rl_result.low_reg, t1, t0);
+ LIR* branch = OpCmpImmBranch(cu, kCondNe, rl_result.low_reg, 0, NULL);
+ NewLIR3(cu, kMipsSltu, t0, rl_src1.low_reg, rl_src2.low_reg);
+ NewLIR3(cu, kMipsSltu, t1, rl_src2.low_reg, rl_src1.low_reg);
+ NewLIR3(cu, kMipsSubu, rl_result.low_reg, t1, t0);
+ FreeTemp(cu, t0);
+ FreeTemp(cu, t1);
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+ branch->target = target;
+ StoreValue(cu, rl_dest, rl_result);
+}
+
+LIR* MipsCodegen::OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1, int src2,
+ LIR* target)
+{
+ LIR* branch;
+ MipsOpCode slt_op;
+ MipsOpCode br_op;
+ bool cmp_zero = false;
+ bool swapped = false;
+ switch (cond) {
+ case kCondEq:
+ br_op = kMipsBeq;
+ cmp_zero = true;
+ break;
+ case kCondNe:
+ br_op = kMipsBne;
+ cmp_zero = true;
+ break;
+ case kCondCc:
+ slt_op = kMipsSltu;
+ br_op = kMipsBnez;
+ break;
+ case kCondCs:
+ slt_op = kMipsSltu;
+ br_op = kMipsBeqz;
+ break;
+ case kCondGe:
+ slt_op = kMipsSlt;
+ br_op = kMipsBeqz;
+ break;
+ case kCondGt:
+ slt_op = kMipsSlt;
+ br_op = kMipsBnez;
+ swapped = true;
+ break;
+ case kCondLe:
+ slt_op = kMipsSlt;
+ br_op = kMipsBeqz;
+ swapped = true;
+ break;
+ case kCondLt:
+ slt_op = kMipsSlt;
+ br_op = kMipsBnez;
+ break;
+ case kCondHi: // Gtu
+ slt_op = kMipsSltu;
+ br_op = kMipsBnez;
+ swapped = true;
+ break;
+ default:
+ LOG(FATAL) << "No support for ConditionCode: " << cond;
+ return NULL;
+ }
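+ // Example: kCondGt is synthesized below as "slt t_reg, src2, src1; bnez t_reg",
+ // i.e. the swapped flag turns src1 > src2 into the equivalent src2 < src1.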
+ if (cmp_zero) {
+ branch = NewLIR2(cu, br_op, src1, src2);
+ } else {
+ int t_reg = AllocTemp(cu);
+ if (swapped) {
+ NewLIR3(cu, slt_op, t_reg, src2, src1);
+ } else {
+ NewLIR3(cu, slt_op, t_reg, src1, src2);
+ }
+ branch = NewLIR1(cu, br_op, t_reg);
+ FreeTemp(cu, t_reg);
+ }
+ branch->target = target;
+ return branch;
+}
+
+LIR* MipsCodegen::OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg,
+ int check_value, LIR* target)
+{
+ LIR* branch;
+ if (check_value != 0) {
+ // TUNING: handle s16 & kCondLt/Mi case using slti
+ int t_reg = AllocTemp(cu);
+ LoadConstant(cu, t_reg, check_value);
+ branch = OpCmpBranch(cu, cond, reg, t_reg, target);
+ FreeTemp(cu, t_reg);
+ return branch;
+ }
+ MipsOpCode opc;
+ switch (cond) {
+ case kCondEq: opc = kMipsBeqz; break;
+ case kCondGe: opc = kMipsBgez; break;
+ case kCondGt: opc = kMipsBgtz; break;
+ case kCondLe: opc = kMipsBlez; break;
+ // case kCondMi:
+ case kCondLt: opc = kMipsBltz; break;
+ case kCondNe: opc = kMipsBnez; break;
+ default:
+ // Tuning: use slti when applicable
+ int t_reg = AllocTemp(cu);
+ LoadConstant(cu, t_reg, check_value);
+ branch = OpCmpBranch(cu, cond, reg, t_reg, target);
+ FreeTemp(cu, t_reg);
+ return branch;
+ }
+ branch = NewLIR1(cu, opc, reg);
+ branch->target = target;
+ return branch;
+}
+
+LIR* MipsCodegen::OpRegCopyNoInsert(CompilationUnit *cu, int r_dest, int r_src)
+{
+ if (MIPS_FPREG(r_dest) || MIPS_FPREG(r_src))
+ return OpFpRegCopy(cu, r_dest, r_src);
+ LIR* res = RawLIR(cu, cu->current_dalvik_offset, kMipsMove,
+ r_dest, r_src);
+ if (!(cu->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
+ res->flags.is_nop = true;
+ }
+ return res;
+}
+
+LIR* MipsCodegen::OpRegCopy(CompilationUnit *cu, int r_dest, int r_src)
+{
+ LIR *res = OpRegCopyNoInsert(cu, r_dest, r_src);
+ AppendLIR(cu, res);
+ return res;
+}
+
+void MipsCodegen::OpRegCopyWide(CompilationUnit *cu, int dest_lo, int dest_hi, int src_lo,
+ int src_hi)
+{
+ bool dest_fp = MIPS_FPREG(dest_lo) && MIPS_FPREG(dest_hi);
+ bool src_fp = MIPS_FPREG(src_lo) && MIPS_FPREG(src_hi);
+ assert(MIPS_FPREG(src_lo) == MIPS_FPREG(src_hi));
+ assert(MIPS_FPREG(dest_lo) == MIPS_FPREG(dest_hi));
+ if (dest_fp) {
+ if (src_fp) {
+ OpRegCopy(cu, S2d(dest_lo, dest_hi), S2d(src_lo, src_hi));
+ } else {
+ /* note the operands are swapped for the mtc1 instr */
+ NewLIR2(cu, kMipsMtc1, src_lo, dest_lo);
+ NewLIR2(cu, kMipsMtc1, src_hi, dest_hi);
+ }
+ } else {
+ if (src_fp) {
+ NewLIR2(cu, kMipsMfc1, dest_lo, src_lo);
+ NewLIR2(cu, kMipsMfc1, dest_hi, src_hi);
+ } else {
+ // Handle overlap
+ if (src_hi == dest_lo) {
+ OpRegCopy(cu, dest_hi, src_hi);
+ OpRegCopy(cu, dest_lo, src_lo);
+ } else {
+ OpRegCopy(cu, dest_lo, src_lo);
+ OpRegCopy(cu, dest_hi, src_hi);
+ }
+ }
+ }
+}
+
+void MipsCodegen::GenSelect(CompilationUnit* cu, BasicBlock* bb, MIR* mir)
+{
+ UNIMPLEMENTED(FATAL) << "Need codegen for select";
+}
+
+void MipsCodegen::GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir)
+{
+ UNIMPLEMENTED(FATAL) << "Need codegen for fused long cmp branch";
+}
+
+LIR* MipsCodegen::GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code,
+ int reg1, int base, int offset, ThrowKind kind)
+{
+ LOG(FATAL) << "Unexpected use of GenRegMemCheck for Arm";
+ return NULL;
+}
+
+RegLocation MipsCodegen::GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg1, int reg2,
+ bool is_div)
+{
+ NewLIR4(cu, kMipsDiv, r_HI, r_LO, reg1, reg2);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ if (is_div) {
+ NewLIR2(cu, kMipsMflo, rl_result.low_reg, r_LO);
+ } else {
+ NewLIR2(cu, kMipsMfhi, rl_result.low_reg, r_HI);
+ }
+ return rl_result;
+}
+
+RegLocation MipsCodegen::GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg1, int lit,
+ bool is_div)
+{
+ int t_reg = AllocTemp(cu);
+ NewLIR3(cu, kMipsAddiu, t_reg, r_ZERO, lit);
+ NewLIR4(cu, kMipsDiv, r_HI, r_LO, reg1, t_reg);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ if (is_div) {
+ NewLIR2(cu, kMipsMflo, rl_result.low_reg, r_LO);
+ } else {
+ NewLIR2(cu, kMipsMfhi, rl_result.low_reg, r_HI);
+ }
+ FreeTemp(cu, t_reg);
+ return rl_result;
+}
+
+void MipsCodegen::OpLea(CompilationUnit* cu, int rBase, int reg1, int reg2, int scale, int offset)
+{
+ LOG(FATAL) << "Unexpected use of OpLea for Arm";
+}
+
+void MipsCodegen::OpTlsCmp(CompilationUnit* cu, int offset, int val)
+{
+ LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm";
+}
+
+bool MipsCodegen::GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier) {
+ DCHECK_NE(cu->instruction_set, kThumb2);
+ return false;
+}
+
+bool MipsCodegen::GenInlinedSqrt(CompilationUnit* cu, CallInfo* info) {
+ DCHECK_NE(cu->instruction_set, kThumb2);
+ return false;
+}
+
+LIR* MipsCodegen::OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target) {
+ LOG(FATAL) << "Unexpected use of OpPcRelLoad for Mips";
+ return NULL;
+}
+
+LIR* MipsCodegen::OpVldm(CompilationUnit* cu, int rBase, int count)
+{
+ LOG(FATAL) << "Unexpected use of OpVldm for Mips";
+ return NULL;
+}
+
+LIR* MipsCodegen::OpVstm(CompilationUnit* cu, int rBase, int count)
+{
+ LOG(FATAL) << "Unexpected use of OpVstm for Mips";
+ return NULL;
+}
+
+void MipsCodegen::GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src,
+ RegLocation rl_result, int lit,
+ int first_bit, int second_bit)
+{
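+ // Example (assuming first_bit/second_bit are the positions of the two set bits):
+ // lit = 10 (0b1010) gives first_bit = 1, second_bit = 3, so this emits
+ // t = x << 2; res = x + t (5x); res <<= 1 (10x).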
+ int t_reg = AllocTemp(cu);
+ OpRegRegImm(cu, kOpLsl, t_reg, rl_src.low_reg, second_bit - first_bit);
+ OpRegRegReg(cu, kOpAdd, rl_result.low_reg, rl_src.low_reg, t_reg);
+ FreeTemp(cu, t_reg);
+ if (first_bit != 0) {
+ OpRegRegImm(cu, kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
+ }
+}
+
+void MipsCodegen::GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi)
+{
+ int t_reg = AllocTemp(cu);
+ OpRegRegReg(cu, kOpOr, t_reg, reg_lo, reg_hi);
+ GenImmedCheck(cu, kCondEq, t_reg, 0, kThrowDivZero);
+ FreeTemp(cu, t_reg);
+}
+
+// Test suspend flag, return target of taken suspend branch
+LIR* MipsCodegen::OpTestSuspend(CompilationUnit* cu, LIR* target)
+{
+ OpRegImm(cu, kOpSub, rMIPS_SUSPEND, 1);
+ return OpCmpImmBranch(cu, (target == NULL) ? kCondEq : kCondNe, rMIPS_SUSPEND, 0, target);
+}
+
+// Decrement register and branch on condition
+LIR* MipsCodegen::OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg, LIR* target)
+{
+ OpRegImm(cu, kOpSub, reg, 1);
+ return OpCmpImmBranch(cu, c_code, reg, 0, target);
+}
+
+bool MipsCodegen::SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
+ RegLocation rl_src, RegLocation rl_dest, int lit)
+{
+ LOG(FATAL) << "Unexpected use of smallLiteralDive in Mips";
+ return false;
+}
+
+LIR* MipsCodegen::OpIT(CompilationUnit* cu, ConditionCode cond, const char* guide)
+{
+ LOG(FATAL) << "Unexpected use of OpIT in Mips";
+ return NULL;
+}
+
+void MipsCodegen::GenMulLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2)
+{
+ LOG(FATAL) << "Unexpected use of GenMulLong for Mips";
+}
+
+void MipsCodegen::GenAddLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2)
+{
+ rl_src1 = LoadValueWide(cu, rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(cu, rl_src2, kCoreReg);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ /*
+ * [v1 v0] = [a1 a0] + [a3 a2];
+ * addu v0,a2,a0
+ * addu t1,a3,a1
+ * sltu v1,v0,a2
+ * addu v1,v1,t1
+ */
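+ // Illustrative carry case: with a0 = 0xffffffff and a2 = 1, v0 wraps to 0, so
+ // the sltu yields 1 and the carry is added into the high word of the result.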
+
+ OpRegRegReg(cu, kOpAdd, rl_result.low_reg, rl_src2.low_reg, rl_src1.low_reg);
+ int t_reg = AllocTemp(cu);
+ OpRegRegReg(cu, kOpAdd, t_reg, rl_src2.high_reg, rl_src1.high_reg);
+ NewLIR3(cu, kMipsSltu, rl_result.high_reg, rl_result.low_reg, rl_src2.low_reg);
+ OpRegRegReg(cu, kOpAdd, rl_result.high_reg, rl_result.high_reg, t_reg);
+ FreeTemp(cu, t_reg);
+ StoreValueWide(cu, rl_dest, rl_result);
+}
+
+void MipsCodegen::GenSubLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2)
+{
+ rl_src1 = LoadValueWide(cu, rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(cu, rl_src2, kCoreReg);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ /*
+ * [v1 v0] = [a1 a0] - [a3 a2];
+ * sltu t1,a0,a2
+ * subu v0,a0,a2
+ * subu v1,a1,a3
+ * subu v1,v1,t1
+ */
+
+ int t_reg = AllocTemp(cu);
+ NewLIR3(cu, kMipsSltu, t_reg, rl_src1.low_reg, rl_src2.low_reg);
+ OpRegRegReg(cu, kOpSub, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
+ OpRegRegReg(cu, kOpSub, rl_result.high_reg, rl_src1.high_reg, rl_src2.high_reg);
+ OpRegRegReg(cu, kOpSub, rl_result.high_reg, rl_result.high_reg, t_reg);
+ FreeTemp(cu, t_reg);
+ StoreValueWide(cu, rl_dest, rl_result);
+}
+
+void MipsCodegen::GenNegLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+{
+ rl_src = LoadValueWide(cu, rl_src, kCoreReg);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ /*
+ * [v1 v0] = -[a1 a0]
+ * negu v0,a0
+ * negu v1,a1
+ * sltu t1,r_zero,v0
+ * subu v1,v1,t1
+ */
+
+ OpRegReg(cu, kOpNeg, rl_result.low_reg, rl_src.low_reg);
+ OpRegReg(cu, kOpNeg, rl_result.high_reg, rl_src.high_reg);
+ int t_reg = AllocTemp(cu);
+ NewLIR3(cu, kMipsSltu, t_reg, r_ZERO, rl_result.low_reg);
+ OpRegRegReg(cu, kOpSub, rl_result.high_reg, rl_result.high_reg, t_reg);
+ FreeTemp(cu, t_reg);
+ StoreValueWide(cu, rl_dest, rl_result);
+}
+
+void MipsCodegen::GenAndLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2)
+{
+ LOG(FATAL) << "Unexpected use of GenAndLong for Mips";
+}
+
+void MipsCodegen::GenOrLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2)
+{
+ LOG(FATAL) << "Unexpected use of GenOrLong for Mips";
+}
+
+void MipsCodegen::GenXorLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2)
+{
+ LOG(FATAL) << "Unexpected use of GenXorLong for Mips";
+}
+
+/*
+ * Generate array load
+ */
+void MipsCodegen::GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_dest, int scale)
+{
+ RegisterClass reg_class = oat_reg_class_by_size(size);
+ int len_offset = mirror::Array::LengthOffset().Int32Value();
+ int data_offset;
+ RegLocation rl_result;
+ rl_array = LoadValue(cu, rl_array, kCoreReg);
+ rl_index = LoadValue(cu, rl_index, kCoreReg);
+
+ if (size == kLong || size == kDouble) {
+ data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
+ } else {
+ data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
+ }
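+ // DataOffset depends on the component size; presumably the payload of
+ // long/double arrays is 8-byte aligned past the object header, hence the
+ // separate case.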
+
+ /* null object? */
+ GenNullCheck(cu, rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+
+ int reg_ptr = AllocTemp(cu);
+ bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
+ int reg_len = INVALID_REG;
+ if (needs_range_check) {
+ reg_len = AllocTemp(cu);
+ /* Get len */
+ LoadWordDisp(cu, rl_array.low_reg, len_offset, reg_len);
+ }
+ /* reg_ptr -> array data */
+ OpRegRegImm(cu, kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
+ FreeTemp(cu, rl_array.low_reg);
+ if ((size == kLong) || (size == kDouble)) {
+ if (scale) {
+ int r_new_index = AllocTemp(cu);
+ OpRegRegImm(cu, kOpLsl, r_new_index, rl_index.low_reg, scale);
+ OpRegReg(cu, kOpAdd, reg_ptr, r_new_index);
+ FreeTemp(cu, r_new_index);
+ } else {
+ OpRegReg(cu, kOpAdd, reg_ptr, rl_index.low_reg);
+ }
+ FreeTemp(cu, rl_index.low_reg);
+ rl_result = EvalLoc(cu, rl_dest, reg_class, true);
+
+ if (needs_range_check) {
+ // TODO: change kCondCS to a more meaningful name, is the sense of
+ // carry-set/clear flipped?
+ GenRegRegCheck(cu, kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+ FreeTemp(cu, reg_len);
+ }
+ LoadBaseDispWide(cu, reg_ptr, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
+
+ FreeTemp(cu, reg_ptr);
+ StoreValueWide(cu, rl_dest, rl_result);
+ } else {
+ rl_result = EvalLoc(cu, rl_dest, reg_class, true);
+
+ if (needs_range_check) {
+ // TODO: change kCondCS to a more meaningful name, is the sense of
+ // carry-set/clear flipped?
+ GenRegRegCheck(cu, kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+ FreeTemp(cu, reg_len);
+ }
+ LoadBaseIndexed(cu, reg_ptr, rl_index.low_reg, rl_result.low_reg, scale, size);
+
+ FreeTemp(cu, reg_ptr);
+ StoreValue(cu, rl_dest, rl_result);
+ }
+}
+
+/*
+ * Generate array store
+ *
+ */
+void MipsCodegen::GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_src, int scale)
+{
+ RegisterClass reg_class = oat_reg_class_by_size(size);
+ int len_offset = mirror::Array::LengthOffset().Int32Value();
+ int data_offset;
+
+ if (size == kLong || size == kDouble) {
+ data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
+ } else {
+ data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
+ }
+
+ rl_array = LoadValue(cu, rl_array, kCoreReg);
+ rl_index = LoadValue(cu, rl_index, kCoreReg);
+ int reg_ptr = INVALID_REG;
+ if (IsTemp(cu, rl_array.low_reg)) {
+ Clobber(cu, rl_array.low_reg);
+ reg_ptr = rl_array.low_reg;
+ } else {
+ reg_ptr = AllocTemp(cu);
+ OpRegCopy(cu, reg_ptr, rl_array.low_reg);
+ }
+
+ /* null object? */
+ GenNullCheck(cu, rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+
+ bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
+ int reg_len = INVALID_REG;
+ if (needs_range_check) {
+ reg_len = AllocTemp(cu);
+ //NOTE: max live temps(4) here.
+ /* Get len */
+ LoadWordDisp(cu, rl_array.low_reg, len_offset, reg_len);
+ }
+ /* reg_ptr -> array data */
+ OpRegImm(cu, kOpAdd, reg_ptr, data_offset);
+ /* at this point, reg_ptr points to array, 2 live temps */
+ if ((size == kLong) || (size == kDouble)) {
+ //TUNING: specific wide routine that can handle fp regs
+ if (scale) {
+ int r_new_index = AllocTemp(cu);
+ OpRegRegImm(cu, kOpLsl, r_new_index, rl_index.low_reg, scale);
+ OpRegReg(cu, kOpAdd, reg_ptr, r_new_index);
+ FreeTemp(cu, r_new_index);
+ } else {
+ OpRegReg(cu, kOpAdd, reg_ptr, rl_index.low_reg);
+ }
+ rl_src = LoadValueWide(cu, rl_src, reg_class);
+
+ if (needs_range_check) {
+ GenRegRegCheck(cu, kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+ FreeTemp(cu, reg_len);
+ }
+
+ StoreBaseDispWide(cu, reg_ptr, 0, rl_src.low_reg, rl_src.high_reg);
+
+ FreeTemp(cu, reg_ptr);
+ } else {
+ rl_src = LoadValue(cu, rl_src, reg_class);
+ if (needs_range_check) {
+ GenRegRegCheck(cu, kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
+ FreeTemp(cu, reg_len);
+ }
+ StoreBaseIndexed(cu, reg_ptr, rl_index.low_reg, rl_src.low_reg,
+ scale, size);
+ }
+}
+
+/*
+ * Generate array store of an object reference, including the type check and
+ * GC card mark.
+ */
+void MipsCodegen::GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_src, int scale)
+{
+ int len_offset = mirror::Array::LengthOffset().Int32Value();
+ int data_offset = mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value();
+
+ FlushAllRegs(cu); // Use explicit registers
+ LockCallTemps(cu);
+
+ int r_value = TargetReg(kArg0); // Register holding value
+ int r_array_class = TargetReg(kArg1); // Register holding array's Class
+ int r_array = TargetReg(kArg2); // Register holding array
+ int r_index = TargetReg(kArg3); // Register holding index into array
+
+ LoadValueDirectFixed(cu, rl_array, r_array); // Grab array
+ LoadValueDirectFixed(cu, rl_src, r_value); // Grab value
+ LoadValueDirectFixed(cu, rl_index, r_index); // Grab index
+
+ GenNullCheck(cu, rl_array.s_reg_low, r_array, opt_flags); // NPE?
+
+ // Store of null?
+ LIR* null_value_check = OpCmpImmBranch(cu, kCondEq, r_value, 0, NULL);
+
+ // Get the array's class.
+ LoadWordDisp(cu, r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
+ CallRuntimeHelperRegReg(cu, ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
+ r_array_class, true);
+ // Redo LoadValues in case they didn't survive the call.
+ LoadValueDirectFixed(cu, rl_array, r_array); // Reload array
+ LoadValueDirectFixed(cu, rl_index, r_index); // Reload index
+ LoadValueDirectFixed(cu, rl_src, r_value); // Reload value
+ r_array_class = INVALID_REG;
+
+ // Branch here if value to be stored == null
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+ null_value_check->target = target;
+
+ bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
+ int reg_len = INVALID_REG;
+ if (needs_range_check) {
+ reg_len = TargetReg(kArg1);
+ LoadWordDisp(cu, r_array, len_offset, reg_len); // Get len
+ }
+ /* r_ptr -> array data */
+ int r_ptr = AllocTemp(cu);
+ OpRegRegImm(cu, kOpAdd, r_ptr, r_array, data_offset);
+ if (needs_range_check) {
+ GenRegRegCheck(cu, kCondCs, r_index, reg_len, kThrowArrayBounds);
+ }
+ StoreBaseIndexed(cu, r_ptr, r_index, r_value, scale, kWord);
+ FreeTemp(cu, r_ptr);
+ FreeTemp(cu, r_index);
+ if (!IsConstantNullRef(cu, rl_src)) {
+ MarkGCCard(cu, r_value, r_array);
+ }
+}
+
+void MipsCodegen::GenShiftImmOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_shift)
+{
+ // Default implementation is just to ignore the constant case.
+ GenShiftOpLong(cu, opcode, rl_dest, rl_src1, rl_shift);
+}
+
+void MipsCodegen::GenArithImmOpLong(CompilationUnit* cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
+{
+ // Default - bail to non-const handler.
+ GenArithOpLong(cu, opcode, rl_dest, rl_src1, rl_src2);
+}
+
+} // namespace art
diff --git a/src/compiler/dex/quick/mips/mips_lir.h b/src/compiler/dex/quick/mips/mips_lir.h
new file mode 100644
index 0000000..9771ea9
--- /dev/null
+++ b/src/compiler/dex/quick/mips/mips_lir.h
@@ -0,0 +1,432 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_QUICK_MIPS_MIPSLIR_H_
+#define ART_SRC_COMPILER_DEX_QUICK_MIPS_MIPSLIR_H_
+
+#include "compiler/dex/compiler_internals.h"
+
+namespace art {
+
+/*
+ * Runtime register conventions.
+ *
+ * zero is always the value 0
+ * at is scratch (normally used as temp reg by assembler)
+ * v0, v1 are scratch (normally hold subroutine return values)
+ * a0-a3 are scratch (normally hold subroutine arguments)
+ * t0-t8 are scratch
+ * t9 is scratch (normally used for function calls)
+ * s0 (rMIPS_SUSPEND) is reserved [holds suspend-check counter]
+ * s1 (rMIPS_SELF) is reserved [holds current &Thread]
+ * s2-s7 are callee save (promotion target)
+ * k0, k1 are reserved for use by interrupt handlers
+ * gp is reserved for global pointer
+ * sp is reserved
+ * s8 is callee save (promotion target)
+ * ra is scratch (normally holds the return addr)
+ *
+ * Preserved across C calls: s0-s8
+ * Trashed across C calls: at, v0-v1, a0-a3, t0-t9, gp, ra
+ *
+ * Floating point registers
+ * NOTE: there are 32 fp registers (16 df pairs), but currently we
+ * only support 16 fp registers (8 df pairs).
+ * f0-f15
+ * df0-df7, where df0={f0,f1}, df1={f2,f3}, ... , df7={f14,f15}
+ *
+ * f0-f15 (df0-df7) trashed across C calls
+ *
+ * For mips32 code use:
+ * a0-a3 to hold operands
+ * v0-v1 to hold results
+ * t0-t9 for temps
+ *
+ * All jump/branch instructions are followed by a delay slot.
+ *
+ * Stack frame diagram (stack grows down, higher addresses at top):
+ *
+ * +------------------------+
+ * | IN[ins-1] | {Note: resides in caller's frame}
+ * | . |
+ * | IN[0] |
+ * | caller's Method* |
+ * +========================+ {Note: start of callee's frame}
+ * | spill region | {variable sized - will include lr if non-leaf.}
+ * +------------------------+
+ * | ...filler word... | {Note: used as 2nd word of V[locals-1] if long}
+ * +------------------------+
+ * | V[locals-1] |
+ * | V[locals-2] |
+ * | . |
+ * | . |
+ * | V[1] |
+ * | V[0] |
+ * +------------------------+
+ * | 0 to 3 words padding |
+ * +------------------------+
+ * | OUT[outs-1] |
+ * | OUT[outs-2] |
+ * | . |
+ * | OUT[0] |
+ * | cur_method* | <<== sp w/ 16-byte alignment
+ * +========================+
+ */
+
+// Offset to distinguish FP regs.
+#define MIPS_FP_REG_OFFSET 32
+// Offset to distinguish DP FP regs.
+#define MIPS_FP_DOUBLE 64
+// Offset to distinguish the extra regs.
+#define MIPS_EXTRA_REG_OFFSET 128
+// Reg types.
+#define MIPS_REGTYPE(x) (x & (MIPS_FP_REG_OFFSET | MIPS_FP_DOUBLE))
+#define MIPS_FPREG(x) ((x & MIPS_FP_REG_OFFSET) == MIPS_FP_REG_OFFSET)
+#define MIPS_EXTRAREG(x) ((x & MIPS_EXTRA_REG_OFFSET) == MIPS_EXTRA_REG_OFFSET)
+#define MIPS_DOUBLEREG(x) ((x & MIPS_FP_DOUBLE) == MIPS_FP_DOUBLE)
+#define MIPS_SINGLEREG(x) (MIPS_FPREG(x) && !MIPS_DOUBLEREG(x))
+/*
+ * Note: the low register of a floating point pair is sufficient to
+ * create the name of a double, but we require both names to be passed so
+ * that asserts can verify that the pair is consecutive if significant
+ * rework is done in this area. Also, it is a good reminder in the calling
+ * code that reg locations always describe doubles as a pair of singles.
+ */
+#define MIPS_S2D(x,y) ((x) | MIPS_FP_DOUBLE)
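+// For example, MIPS_S2D(r_F2, r_F3) evaluates to r_DF1, the {f2,f3} pair.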
+// Mask to strip off fp flags.
+#define MIPS_FP_REG_MASK (MIPS_FP_REG_OFFSET-1)
+
+#ifdef HAVE_LITTLE_ENDIAN
+#define LOWORD_OFFSET 0
+#define HIWORD_OFFSET 4
+#define r_ARG0 r_A0
+#define r_ARG1 r_A1
+#define r_ARG2 r_A2
+#define r_ARG3 r_A3
+#define r_RESULT0 r_V0
+#define r_RESULT1 r_V1
+#else
+#define LOWORD_OFFSET 4
+#define HIWORD_OFFSET 0
+#define r_ARG0 r_A1
+#define r_ARG1 r_A0
+#define r_ARG2 r_A3
+#define r_ARG3 r_A2
+#define r_RESULT0 r_V1
+#define r_RESULT1 r_V0
+#endif
+
+// These are the same for both big and little endian.
+#define r_FARG0 r_F12
+#define r_FARG1 r_F13
+#define r_FARG2 r_F14
+#define r_FARG3 r_F15
+#define r_FRESULT0 r_F0
+#define r_FRESULT1 r_F1
+
+// Regs not used for Mips.
+#define rMIPS_LR INVALID_REG
+#define rMIPS_PC INVALID_REG
+
+// RegisterLocation templates return values (r_V0, or r_V0/r_V1).
+#define MIPS_LOC_C_RETURN {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, r_V0, INVALID_REG, \
+ INVALID_SREG, INVALID_SREG}
+#define MIPS_LOC_C_RETURN_FLOAT {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, r_FRESULT0, \
+ INVALID_REG, INVALID_SREG, INVALID_SREG}
+#define MIPS_LOC_C_RETURN_WIDE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r_RESULT0, \
+ r_RESULT1, INVALID_SREG, INVALID_SREG}
+#define MIPS_LOC_C_RETURN_DOUBLE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r_FRESULT0,\
+ r_FRESULT1, INVALID_SREG, INVALID_SREG}
+
+enum MipsResourceEncodingPos {
+ kMipsGPReg0 = 0,
+ kMipsRegSP = 29,
+ kMipsRegLR = 31,
+ kMipsFPReg0 = 32, // only 16 fp regs supported currently.
+ kMipsFPRegEnd = 48,
+ kMipsRegHI = kMipsFPRegEnd,
+ kMipsRegLO,
+ kMipsRegPC,
+ kMipsRegEnd = 51,
+};
+
+#define ENCODE_MIPS_REG_LIST(N) (static_cast<uint64_t>(N))
+#define ENCODE_MIPS_REG_SP (1ULL << kMipsRegSP)
+#define ENCODE_MIPS_REG_LR (1ULL << kMipsRegLR)
+#define ENCODE_MIPS_REG_PC (1ULL << kMipsRegPC)
+
+enum MipsNativeRegisterPool {
+ r_ZERO = 0,
+ r_AT = 1,
+ r_V0 = 2,
+ r_V1 = 3,
+ r_A0 = 4,
+ r_A1 = 5,
+ r_A2 = 6,
+ r_A3 = 7,
+ r_T0 = 8,
+ r_T1 = 9,
+ r_T2 = 10,
+ r_T3 = 11,
+ r_T4 = 12,
+ r_T5 = 13,
+ r_T6 = 14,
+ r_T7 = 15,
+ r_S0 = 16,
+ r_S1 = 17,
+ r_S2 = 18,
+ r_S3 = 19,
+ r_S4 = 20,
+ r_S5 = 21,
+ r_S6 = 22,
+ r_S7 = 23,
+ r_T8 = 24,
+ r_T9 = 25,
+ r_K0 = 26,
+ r_K1 = 27,
+ r_GP = 28,
+ r_SP = 29,
+ r_FP = 30,
+ r_RA = 31,
+
+ r_F0 = 0 + MIPS_FP_REG_OFFSET,
+ r_F1,
+ r_F2,
+ r_F3,
+ r_F4,
+ r_F5,
+ r_F6,
+ r_F7,
+ r_F8,
+ r_F9,
+ r_F10,
+ r_F11,
+ r_F12,
+ r_F13,
+ r_F14,
+ r_F15,
+#if 0
+ /*
+ * TODO: The shared resource mask doesn't have enough bit positions to describe all
+ * MIPS registers. Expand it and enable use of fp registers 16 through 31.
+ */
+ r_F16,
+ r_F17,
+ r_F18,
+ r_F19,
+ r_F20,
+ r_F21,
+ r_F22,
+ r_F23,
+ r_F24,
+ r_F25,
+ r_F26,
+ r_F27,
+ r_F28,
+ r_F29,
+ r_F30,
+ r_F31,
+#endif
+ r_DF0 = r_F0 + MIPS_FP_DOUBLE,
+ r_DF1 = r_F2 + MIPS_FP_DOUBLE,
+ r_DF2 = r_F4 + MIPS_FP_DOUBLE,
+ r_DF3 = r_F6 + MIPS_FP_DOUBLE,
+ r_DF4 = r_F8 + MIPS_FP_DOUBLE,
+ r_DF5 = r_F10 + MIPS_FP_DOUBLE,
+ r_DF6 = r_F12 + MIPS_FP_DOUBLE,
+ r_DF7 = r_F14 + MIPS_FP_DOUBLE,
+#if 0 // TODO: expand resource mask to enable use of all MIPS fp registers.
+ r_DF8 = r_F16 + MIPS_FP_DOUBLE,
+ r_DF9 = r_F18 + MIPS_FP_DOUBLE,
+ r_DF10 = r_F20 + MIPS_FP_DOUBLE,
+ r_DF11 = r_F22 + MIPS_FP_DOUBLE,
+ r_DF12 = r_F24 + MIPS_FP_DOUBLE,
+ r_DF13 = r_F26 + MIPS_FP_DOUBLE,
+ r_DF14 = r_F28 + MIPS_FP_DOUBLE,
+ r_DF15 = r_F30 + MIPS_FP_DOUBLE,
+#endif
+ r_HI = MIPS_EXTRA_REG_OFFSET,
+ r_LO,
+ r_PC,
+};
+
+#define rMIPS_SUSPEND r_S0
+#define rMIPS_SELF r_S1
+#define rMIPS_SP r_SP
+#define rMIPS_ARG0 r_ARG0
+#define rMIPS_ARG1 r_ARG1
+#define rMIPS_ARG2 r_ARG2
+#define rMIPS_ARG3 r_ARG3
+#define rMIPS_FARG0 r_FARG0
+#define rMIPS_FARG1 r_FARG1
+#define rMIPS_FARG2 r_FARG2
+#define rMIPS_FARG3 r_FARG3
+#define rMIPS_RET0 r_RESULT0
+#define rMIPS_RET1 r_RESULT1
+#define rMIPS_INVOKE_TGT r_T9
+#define rMIPS_COUNT INVALID_REG
+
+enum MipsShiftEncodings {
+ kMipsLsl = 0x0,
+ kMipsLsr = 0x1,
+ kMipsAsr = 0x2,
+ kMipsRor = 0x3
+};
+
+// MIPS sync kinds (Note: support for kinds other than kSYNC0 may not exist).
+#define kSYNC0 0x00
+#define kSYNC_WMB 0x04
+#define kSYNC_MB 0x01
+#define kSYNC_ACQUIRE 0x11
+#define kSYNC_RELEASE 0x12
+#define kSYNC_RMB 0x13
+
+// TODO: Use smaller hammer when appropriate for target CPU.
+#define kST kSYNC0
+#define kSY kSYNC0
+
+/*
+ * The following enum defines the list of MIPS instructions supported by the
+ * assembler. Their corresponding EncodingMap positions will be defined in
+ * Assemble.cc.
+ */
+enum MipsOpCode {
+ kMipsFirst = 0,
+ kMips32BitData = kMipsFirst, // data [31..0].
+ kMipsAddiu, // addiu t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
+  kMipsAddu, // addu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100001].
+ kMipsAnd, // and d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100100].
+ kMipsAndi, // andi t,s,imm16 [001100] s[25..21] t[20..16] imm16[15..0].
+ kMipsB, // b o [0001000000000000] o[15..0].
+ kMipsBal, // bal o [0000010000010001] o[15..0].
+ // NOTE: the code tests the range kMipsBeq thru kMipsBne, so adding an instruction in this
+ // range may require updates.
+ kMipsBeq, // beq s,t,o [000100] s[25..21] t[20..16] o[15..0].
+ kMipsBeqz, // beqz s,o [000100] s[25..21] [00000] o[15..0].
+ kMipsBgez, // bgez s,o [000001] s[25..21] [00001] o[15..0].
+ kMipsBgtz, // bgtz s,o [000111] s[25..21] [00000] o[15..0].
+ kMipsBlez, // blez s,o [000110] s[25..21] [00000] o[15..0].
+ kMipsBltz, // bltz s,o [000001] s[25..21] [00000] o[15..0].
+ kMipsBnez, // bnez s,o [000101] s[25..21] [00000] o[15..0].
+ kMipsBne, // bne s,t,o [000101] s[25..21] t[20..16] o[15..0].
+ kMipsDiv, // div s,t [000000] s[25..21] t[20..16] [0000000000011010].
+#if __mips_isa_rev>=2
+ kMipsExt, // ext t,s,p,z [011111] s[25..21] t[20..16] z[15..11] p[10..6] [000000].
+#endif
+ kMipsJal, // jal t [000011] t[25..0].
+ kMipsJalr, // jalr d,s [000000] s[25..21] [00000] d[15..11] hint[10..6] [001001].
+ kMipsJr, // jr s [000000] s[25..21] [0000000000] hint[10..6] [001000].
+ kMipsLahi, // lui t,imm16 [00111100000] t[20..16] imm16[15..0] load addr hi.
+  kMipsLalo, // ori t,s,imm16 [001101] s[25..21] t[20..16] imm16[15..0] load addr lo.
+ kMipsLui, // lui t,imm16 [00111100000] t[20..16] imm16[15..0].
+ kMipsLb, // lb t,o(b) [100000] b[25..21] t[20..16] o[15..0].
+ kMipsLbu, // lbu t,o(b) [100100] b[25..21] t[20..16] o[15..0].
+ kMipsLh, // lh t,o(b) [100001] b[25..21] t[20..16] o[15..0].
+ kMipsLhu, // lhu t,o(b) [100101] b[25..21] t[20..16] o[15..0].
+ kMipsLw, // lw t,o(b) [100011] b[25..21] t[20..16] o[15..0].
+ kMipsMfhi, // mfhi d [0000000000000000] d[15..11] [00000010000].
+ kMipsMflo, // mflo d [0000000000000000] d[15..11] [00000010010].
+ kMipsMove, // move d,s [000000] s[25..21] [00000] d[15..11] [00000100101].
+ kMipsMovz, // movz d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000001010].
+ kMipsMul, // mul d,s,t [011100] s[25..21] t[20..16] d[15..11] [00000000010].
+ kMipsNop, // nop [00000000000000000000000000000000].
+ kMipsNor, // nor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100111].
+ kMipsOr, // or d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100101].
+  kMipsOri, // ori t,s,imm16 [001101] s[25..21] t[20..16] imm16[15..0].
+  kMipsPref, // pref h,o(b) [110011] b[25..21] h[20..16] o[15..0].
+ kMipsSb, // sb t,o(b) [101000] b[25..21] t[20..16] o[15..0].
+#if __mips_isa_rev>=2
+ kMipsSeb, // seb d,t [01111100000] t[20..16] d[15..11] [10000100000].
+ kMipsSeh, // seh d,t [01111100000] t[20..16] d[15..11] [11000100000].
+#endif
+ kMipsSh, // sh t,o(b) [101001] b[25..21] t[20..16] o[15..0].
+ kMipsSll, // sll d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000000].
+ kMipsSllv, // sllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000100].
+ kMipsSlt, // slt d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101010].
+ kMipsSlti, // slti t,s,imm16 [001010] s[25..21] t[20..16] imm16[15..0].
+ kMipsSltu, // sltu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101011].
+  kMipsSra, // sra d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000011].
+ kMipsSrav, // srav d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000111].
+  kMipsSrl, // srl d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000010].
+ kMipsSrlv, // srlv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000110].
+ kMipsSubu, // subu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100011].
+ kMipsSw, // sw t,o(b) [101011] b[25..21] t[20..16] o[15..0].
+ kMipsXor, // xor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100110].
+ kMipsXori, // xori t,s,imm16 [001110] s[25..21] t[20..16] imm16[15..0].
+ kMipsFadds, // add.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000000].
+ kMipsFsubs, // sub.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000001].
+ kMipsFmuls, // mul.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000010].
+ kMipsFdivs, // div.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000011].
+ kMipsFaddd, // add.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000000].
+ kMipsFsubd, // sub.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000001].
+ kMipsFmuld, // mul.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000010].
+ kMipsFdivd, // div.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000011].
+ kMipsFcvtsd,// cvt.s.d d,s [01000110001] [00000] s[15..11] d[10..6] [100000].
+ kMipsFcvtsw,// cvt.s.w d,s [01000110100] [00000] s[15..11] d[10..6] [100000].
+ kMipsFcvtds,// cvt.d.s d,s [01000110000] [00000] s[15..11] d[10..6] [100001].
+ kMipsFcvtdw,// cvt.d.w d,s [01000110100] [00000] s[15..11] d[10..6] [100001].
+  kMipsFcvtws,// cvt.w.s d,s [01000110000] [00000] s[15..11] d[10..6] [100100].
+ kMipsFcvtwd,// cvt.w.d d,s [01000110001] [00000] s[15..11] d[10..6] [100100].
+ kMipsFmovs, // mov.s d,s [01000110000] [00000] s[15..11] d[10..6] [000110].
+ kMipsFmovd, // mov.d d,s [01000110001] [00000] s[15..11] d[10..6] [000110].
+ kMipsFlwc1, // lwc1 t,o(b) [110001] b[25..21] t[20..16] o[15..0].
+ kMipsFldc1, // ldc1 t,o(b) [110101] b[25..21] t[20..16] o[15..0].
+ kMipsFswc1, // swc1 t,o(b) [111001] b[25..21] t[20..16] o[15..0].
+ kMipsFsdc1, // sdc1 t,o(b) [111101] b[25..21] t[20..16] o[15..0].
+ kMipsMfc1, // mfc1 t,s [01000100000] t[20..16] s[15..11] [00000000000].
+ kMipsMtc1, // mtc1 t,s [01000100100] t[20..16] s[15..11] [00000000000].
+  kMipsDelta, // Pseudo for ori t, s, <label>-<label>.
+ kMipsDeltaHi, // Pseudo for lui t, high16(<label>-<label>).
+ kMipsDeltaLo, // Pseudo for ori t, s, low16(<label>-<label>).
+ kMipsCurrPC, // jal to .+8 to materialize pc.
+ kMipsSync, // sync kind [000000] [0000000000000000] s[10..6] [001111].
+ kMipsUndefined, // undefined [011001xxxxxxxxxxxxxxxx].
+ kMipsLast
+};
+
+// Instruction assembly field_loc kind.
+enum MipsEncodingKind {
+ kFmtUnused,
+ kFmtBitBlt, /* Bit string using end/start */
+ kFmtDfp, /* Double FP reg */
+ kFmtSfp, /* Single FP reg */
+ kFmtBlt5_2, /* Same 5-bit field to 2 locations */
+};
+
+// Struct used to define the snippet positions for each MIPS opcode.
+struct MipsEncodingMap {
+ uint32_t skeleton;
+ struct {
+ MipsEncodingKind kind;
+ int end; // end for kFmtBitBlt, 1-bit slice end for FP regs.
+ int start; // start for kFmtBitBlt, 4-bit slice end for FP regs.
+ } field_loc[4];
+ MipsOpCode opcode;
+ uint64_t flags;
+ const char *name;
+ const char* fmt;
+ int size; // Note: size is in bytes.
+};
+
+extern MipsEncodingMap EncodingMap[kMipsLast];
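+// A sketch of what one entry might look like (illustrative only; the real
+// table and its flag values live in the MIPS assembler file, and the fmt
+// string below is an assumption):
+//   {0x24000000,  // addiu skeleton: opcode 001001, all operand fields zero.
+//    {{kFmtBitBlt, 20, 16}, {kFmtBitBlt, 25, 21},
+//     {kFmtBitBlt, 15, 0}, {kFmtUnused, -1, -1}},
+//    kMipsAddiu, /* flags */ 0, "addiu", "!0r,!1r,0x!2h(!2d)", 4},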
+
+#define IS_UIMM16(v) ((0 <= (v)) && ((v) <= 65535))
+#define IS_SIMM16(v) ((-32768 <= (v)) && ((v) <= 32766))
+#define IS_SIMM16_2WORD(v) ((-32764 <= (v)) && ((v) <= 32763)) // 2 offsets must fit.
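+// For example, IS_SIMM16(-32768) and IS_SIMM16(32766) hold while
+// IS_SIMM16(40000) does not; IS_SIMM16_2WORD tightens both bounds so that
+// both halves of a two-word access still fit in signed 16-bit offsets.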
+
+} // namespace art
+
+#endif // ART_SRC_COMPILER_DEX_QUICK_MIPS_MIPSLIR_H_
diff --git a/src/compiler/dex/quick/mips/target_mips.cc b/src/compiler/dex/quick/mips/target_mips.cc
new file mode 100644
index 0000000..ab6517c
--- /dev/null
+++ b/src/compiler/dex/quick/mips/target_mips.cc
@@ -0,0 +1,608 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_mips.h"
+#include "compiler/dex/quick/codegen_util.h"
+#include "compiler/dex/compiler_internals.h"
+#include "compiler/dex/quick/ralloc_util.h"
+#include "mips_lir.h"
+
+#include <string>
+
+namespace art {
+
+static int core_regs[] = {r_ZERO, r_AT, r_V0, r_V1, r_A0, r_A1, r_A2, r_A3,
+ r_T0, r_T1, r_T2, r_T3, r_T4, r_T5, r_T6, r_T7,
+ r_S0, r_S1, r_S2, r_S3, r_S4, r_S5, r_S6, r_S7, r_T8,
+ r_T9, r_K0, r_K1, r_GP, r_SP, r_FP, r_RA};
+static int ReservedRegs[] = {r_ZERO, r_AT, r_S0, r_S1, r_K0, r_K1, r_GP, r_SP,
+ r_RA};
+static int core_temps[] = {r_V0, r_V1, r_A0, r_A1, r_A2, r_A3, r_T0, r_T1, r_T2,
+ r_T3, r_T4, r_T5, r_T6, r_T7, r_T8};
+static int FpRegs[] = {r_F0, r_F1, r_F2, r_F3, r_F4, r_F5, r_F6, r_F7,
+ r_F8, r_F9, r_F10, r_F11, r_F12, r_F13, r_F14, r_F15};
+static int fp_temps[] = {r_F0, r_F1, r_F2, r_F3, r_F4, r_F5, r_F6, r_F7,
+ r_F8, r_F9, r_F10, r_F11, r_F12, r_F13, r_F14, r_F15};
+
+RegLocation MipsCodegen::LocCReturn()
+{
+ RegLocation res = MIPS_LOC_C_RETURN;
+ return res;
+}
+
+RegLocation MipsCodegen::LocCReturnWide()
+{
+ RegLocation res = MIPS_LOC_C_RETURN_WIDE;
+ return res;
+}
+
+RegLocation MipsCodegen::LocCReturnFloat()
+{
+ RegLocation res = MIPS_LOC_C_RETURN_FLOAT;
+ return res;
+}
+
+RegLocation MipsCodegen::LocCReturnDouble()
+{
+ RegLocation res = MIPS_LOC_C_RETURN_DOUBLE;
+ return res;
+}
+
+// Return a target-dependent special register.
+int MipsCodegen::TargetReg(SpecialTargetRegister reg) {
+ int res = INVALID_REG;
+ switch (reg) {
+ case kSelf: res = rMIPS_SELF; break;
+ case kSuspend: res = rMIPS_SUSPEND; break;
+ case kLr: res = rMIPS_LR; break;
+ case kPc: res = rMIPS_PC; break;
+ case kSp: res = rMIPS_SP; break;
+ case kArg0: res = rMIPS_ARG0; break;
+ case kArg1: res = rMIPS_ARG1; break;
+ case kArg2: res = rMIPS_ARG2; break;
+ case kArg3: res = rMIPS_ARG3; break;
+ case kFArg0: res = rMIPS_FARG0; break;
+ case kFArg1: res = rMIPS_FARG1; break;
+ case kFArg2: res = rMIPS_FARG2; break;
+ case kFArg3: res = rMIPS_FARG3; break;
+ case kRet0: res = rMIPS_RET0; break;
+ case kRet1: res = rMIPS_RET1; break;
+ case kInvokeTgt: res = rMIPS_INVOKE_TGT; break;
+ case kCount: res = rMIPS_COUNT; break;
+ }
+ return res;
+}
+
+// Create a double from a pair of singles.
+int MipsCodegen::S2d(int low_reg, int high_reg)
+{
+ return MIPS_S2D(low_reg, high_reg);
+}
+
+// Return mask to strip off fp reg flags and bias.
+uint32_t MipsCodegen::FpRegMask()
+{
+ return MIPS_FP_REG_MASK;
+}
+
+// True if both regs single, both core or both double.
+bool MipsCodegen::SameRegType(int reg1, int reg2)
+{
+ return (MIPS_REGTYPE(reg1) == MIPS_REGTYPE(reg2));
+}
+
+/*
+ * Decode the register id.
+ */
+uint64_t MipsCodegen::GetRegMaskCommon(CompilationUnit* cu, int reg)
+{
+ uint64_t seed;
+ int shift;
+ int reg_id;
+
+ reg_id = reg & 0x1f;
+ /* Each double register is equal to a pair of single-precision FP registers */
+ seed = MIPS_DOUBLEREG(reg) ? 3 : 1;
+  /* FP register mask bits start at kMipsFPReg0 (bit position 32) */
+ shift = MIPS_FPREG(reg) ? kMipsFPReg0 : 0;
+ /* Expand the double register id into single offset */
+ shift += reg_id;
+ return (seed << shift);
+}
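+// Illustrative decoding (assuming the FP/double flag bits live above bit 4,
+// so that "reg & 0x1f" recovers the register number): a core register such
+// as r_A0 produces seed 1 shifted by 4, i.e. bit 4; a single FP register
+// such as r_F2 produces bit kMipsFPReg0 + 2; a double register contributes
+// two adjacent bits because its seed is 3.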
+
+uint64_t MipsCodegen::GetPCUseDefEncoding()
+{
+ return ENCODE_MIPS_REG_PC;
+}
+
+
+void MipsCodegen::SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir)
+{
+ DCHECK_EQ(cu->instruction_set, kMips);
+
+ // Mips-specific resource map setup here.
+ uint64_t flags = MipsCodegen::EncodingMap[lir->opcode].flags;
+
+ if (flags & REG_DEF_SP) {
+ lir->def_mask |= ENCODE_MIPS_REG_SP;
+ }
+
+ if (flags & REG_USE_SP) {
+ lir->use_mask |= ENCODE_MIPS_REG_SP;
+ }
+
+ if (flags & REG_DEF_LR) {
+ lir->def_mask |= ENCODE_MIPS_REG_LR;
+ }
+}
+
+/* For dumping instructions */
+#define MIPS_REG_COUNT 32
+static const char *mips_reg_name[MIPS_REG_COUNT] = {
+ "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra"
+};
+
+/*
+ * Interpret a format string and build the decoded instruction string.
+ * See the format key in Assemble.cc.
+ */
+std::string MipsCodegen::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr)
+{
+ std::string buf;
+ int i;
+ const char *fmt_end = &fmt[strlen(fmt)];
+ char tbuf[256];
+ char nc;
+ while (fmt < fmt_end) {
+ int operand;
+ if (*fmt == '!') {
+ fmt++;
+ DCHECK_LT(fmt, fmt_end);
+ nc = *fmt++;
+ if (nc=='!') {
+ strcpy(tbuf, "!");
+ } else {
+ DCHECK_LT(fmt, fmt_end);
+ DCHECK_LT(static_cast<unsigned>(nc-'0'), 4u);
+ operand = lir->operands[nc-'0'];
+ switch (*fmt++) {
+ case 'b':
+ strcpy(tbuf,"0000");
+ for (i=3; i>= 0; i--) {
+ tbuf[i] += operand & 1;
+ operand >>= 1;
+ }
+ break;
+ case 's':
+ sprintf(tbuf,"$f%d",operand & MIPS_FP_REG_MASK);
+ break;
+ case 'S':
+ DCHECK_EQ(((operand & MIPS_FP_REG_MASK) & 1), 0);
+ sprintf(tbuf,"$f%d",operand & MIPS_FP_REG_MASK);
+ break;
+ case 'h':
+ sprintf(tbuf,"%04x", operand);
+ break;
+ case 'M':
+ case 'd':
+ sprintf(tbuf,"%d", operand);
+ break;
+ case 'D':
+ sprintf(tbuf,"%d", operand+1);
+ break;
+ case 'E':
+ sprintf(tbuf,"%d", operand*4);
+ break;
+ case 'F':
+ sprintf(tbuf,"%d", operand*2);
+ break;
+ case 't':
+ sprintf(tbuf,"0x%08x (L%p)", reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4 +
+ (operand << 2), lir->target);
+ break;
+ case 'T':
+ sprintf(tbuf,"0x%08x", operand << 2);
+ break;
+ case 'u': {
+ int offset_1 = lir->operands[0];
+ int offset_2 = NEXT_LIR(lir)->operands[0];
+ uintptr_t target =
+ (((reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4) & ~3) +
+ (offset_1 << 21 >> 9) + (offset_2 << 1)) & 0xfffffffc;
+ sprintf(tbuf, "%p", reinterpret_cast<void*>(target));
+ break;
+ }
+
+ /* Nothing to print for BLX_2 */
+ case 'v':
+ strcpy(tbuf, "see above");
+ break;
+ case 'r':
+ DCHECK(operand >= 0 && operand < MIPS_REG_COUNT);
+ strcpy(tbuf, mips_reg_name[operand]);
+ break;
+ case 'N':
+ // Placeholder for delay slot handling
+ strcpy(tbuf, "; nop");
+ break;
+ default:
+ strcpy(tbuf,"DecodeError");
+ break;
+ }
+ buf += tbuf;
+ }
+ } else {
+ buf += *fmt++;
+ }
+ }
+ return buf;
+}
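+// Illustrative use (the fmt strings come from EncodingMap; this one is an
+// assumption): with fmt "!0r,!1r,!2r" and operands {4, 5, 6}, each "!<n><key>"
+// escape selects operand <n> and the 'r' key prints mips_reg_name[], so the
+// resulting string is "a0,a1,a2".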
+
+// FIXME: need to redo resource maps for MIPS - fix this at that time
+void MipsCodegen::DumpResourceMask(LIR *mips_lir, uint64_t mask, const char *prefix)
+{
+ char buf[256];
+ buf[0] = 0;
+
+ if (mask == ENCODE_ALL) {
+ strcpy(buf, "all");
+ } else {
+ char num[8];
+ int i;
+
+ for (i = 0; i < kMipsRegEnd; i++) {
+ if (mask & (1ULL << i)) {
+ sprintf(num, "%d ", i);
+ strcat(buf, num);
+ }
+ }
+
+ if (mask & ENCODE_CCODE) {
+ strcat(buf, "cc ");
+ }
+ if (mask & ENCODE_FP_STATUS) {
+ strcat(buf, "fpcc ");
+ }
+ /* Memory bits */
+ if (mips_lir && (mask & ENCODE_DALVIK_REG)) {
+ sprintf(buf + strlen(buf), "dr%d%s", mips_lir->alias_info & 0xffff,
+ (mips_lir->alias_info & 0x80000000) ? "(+1)" : "");
+ }
+ if (mask & ENCODE_LITERAL) {
+ strcat(buf, "lit ");
+ }
+
+ if (mask & ENCODE_HEAP_REF) {
+ strcat(buf, "heap ");
+ }
+ if (mask & ENCODE_MUST_NOT_ALIAS) {
+ strcat(buf, "noalias ");
+ }
+ }
+ if (buf[0]) {
+ LOG(INFO) << prefix << ": " << buf;
+ }
+}
+
+/*
+ * TUNING: is leaf?  We can't rely on "has_invoke" alone, because some
+ * instructions call out to C/assembly helper functions.  Until that
+ * machinery is in place, always spill the return address (r_RA).
+ */
+
+void MipsCodegen::AdjustSpillMask(CompilationUnit* cu)
+{
+ cu->core_spill_mask |= (1 << r_RA);
+ cu->num_core_spills++;
+}
+
+/*
+ * Mark a callee-save fp register as promoted.  The contiguous-list
+ * bookkeeping described here (include any holes in the mask and associate
+ * them with Dalvik register INVALID_VREG (0xFFFFU)) is inherited from the
+ * ARM vpush/vpop convention; MIPS has no promoted FP register support yet.
+ */
+void MipsCodegen::MarkPreservedSingle(CompilationUnit* cu, int s_reg, int reg)
+{
+ LOG(FATAL) << "No support yet for promoted FP regs";
+}
+
+void MipsCodegen::FlushRegWide(CompilationUnit* cu, int reg1, int reg2)
+{
+ RegisterInfo* info1 = GetRegInfo(cu, reg1);
+ RegisterInfo* info2 = GetRegInfo(cu, reg2);
+ DCHECK(info1 && info2 && info1->pair && info2->pair &&
+ (info1->partner == info2->reg) &&
+ (info2->partner == info1->reg));
+ if ((info1->live && info1->dirty) || (info2->live && info2->dirty)) {
+ if (!(info1->is_temp && info2->is_temp)) {
+ /* Should not happen. If it does, there's a problem in eval_loc */
+ LOG(FATAL) << "Long half-temp, half-promoted";
+ }
+
+ info1->dirty = false;
+ info2->dirty = false;
+ if (SRegToVReg(cu, info2->s_reg) < SRegToVReg(cu, info1->s_reg))
+ info1 = info2;
+ int v_reg = SRegToVReg(cu, info1->s_reg);
+ StoreBaseDispWide(cu, rMIPS_SP, VRegOffset(cu, v_reg), info1->reg, info1->partner);
+ }
+}
+
+void MipsCodegen::FlushReg(CompilationUnit* cu, int reg)
+{
+ RegisterInfo* info = GetRegInfo(cu, reg);
+ if (info->live && info->dirty) {
+ info->dirty = false;
+ int v_reg = SRegToVReg(cu, info->s_reg);
+ StoreBaseDisp(cu, rMIPS_SP, VRegOffset(cu, v_reg), reg, kWord);
+ }
+}
+
+/* Give access to the target-dependent FP register encoding to common code */
+bool MipsCodegen::IsFpReg(int reg) {
+ return MIPS_FPREG(reg);
+}
+
+/* Clobber all regs that might be used by an external C call */
+void MipsCodegen::ClobberCalleeSave(CompilationUnit *cu)
+{
+ Clobber(cu, r_ZERO);
+ Clobber(cu, r_AT);
+ Clobber(cu, r_V0);
+ Clobber(cu, r_V1);
+ Clobber(cu, r_A0);
+ Clobber(cu, r_A1);
+ Clobber(cu, r_A2);
+ Clobber(cu, r_A3);
+ Clobber(cu, r_T0);
+ Clobber(cu, r_T1);
+ Clobber(cu, r_T2);
+ Clobber(cu, r_T3);
+ Clobber(cu, r_T4);
+ Clobber(cu, r_T5);
+ Clobber(cu, r_T6);
+ Clobber(cu, r_T7);
+ Clobber(cu, r_T8);
+ Clobber(cu, r_T9);
+ Clobber(cu, r_K0);
+ Clobber(cu, r_K1);
+ Clobber(cu, r_GP);
+ Clobber(cu, r_FP);
+ Clobber(cu, r_RA);
+ Clobber(cu, r_F0);
+ Clobber(cu, r_F1);
+ Clobber(cu, r_F2);
+ Clobber(cu, r_F3);
+ Clobber(cu, r_F4);
+ Clobber(cu, r_F5);
+ Clobber(cu, r_F6);
+ Clobber(cu, r_F7);
+ Clobber(cu, r_F8);
+ Clobber(cu, r_F9);
+ Clobber(cu, r_F10);
+ Clobber(cu, r_F11);
+ Clobber(cu, r_F12);
+ Clobber(cu, r_F13);
+ Clobber(cu, r_F14);
+ Clobber(cu, r_F15);
+}
+
+RegLocation MipsCodegen::GetReturnWideAlt(CompilationUnit* cu)
+{
+ UNIMPLEMENTED(FATAL) << "No GetReturnWideAlt for MIPS";
+ RegLocation res = LocCReturnWide();
+ return res;
+}
+
+RegLocation MipsCodegen::GetReturnAlt(CompilationUnit* cu)
+{
+ UNIMPLEMENTED(FATAL) << "No GetReturnAlt for MIPS";
+ RegLocation res = LocCReturn();
+ return res;
+}
+
+RegisterInfo* MipsCodegen::GetRegInfo(CompilationUnit* cu, int reg)
+{
+ return MIPS_FPREG(reg) ? &cu->reg_pool->FPRegs[reg & MIPS_FP_REG_MASK]
+ : &cu->reg_pool->core_regs[reg];
+}
+
+/* To be used when explicitly managing register use */
+void MipsCodegen::LockCallTemps(CompilationUnit* cu)
+{
+ LockTemp(cu, rMIPS_ARG0);
+ LockTemp(cu, rMIPS_ARG1);
+ LockTemp(cu, rMIPS_ARG2);
+ LockTemp(cu, rMIPS_ARG3);
+}
+
+/* To be used when explicitly managing register use */
+void MipsCodegen::FreeCallTemps(CompilationUnit* cu)
+{
+ FreeTemp(cu, rMIPS_ARG0);
+ FreeTemp(cu, rMIPS_ARG1);
+ FreeTemp(cu, rMIPS_ARG2);
+ FreeTemp(cu, rMIPS_ARG3);
+}
+
+void MipsCodegen::GenMemBarrier(CompilationUnit *cu, MemBarrierKind barrier_kind)
+{
+#if ANDROID_SMP != 0
+ NewLIR1(cu, kMipsSync, 0 /* Only stype currently supported */);
+#endif
+}
+
+/*
+ * Alloc a pair of core registers, or a double. Low reg in low byte,
+ * high reg in next byte.
+ */
+int MipsCodegen::AllocTypedTempPair(CompilationUnit *cu, bool fp_hint,
+ int reg_class)
+{
+ int high_reg;
+ int low_reg;
+ int res = 0;
+
+ if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
+ low_reg = AllocTempDouble(cu);
+ high_reg = low_reg + 1;
+ res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
+ return res;
+ }
+
+ low_reg = AllocTemp(cu);
+ high_reg = AllocTemp(cu);
+ res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
+ return res;
+}
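+// The packed result keeps the low register in bits [7:0] and the high
+// register in bits [15:8], so a caller can unpack it with
+//   int low_reg  = res & 0xff;
+//   int high_reg = (res >> 8) & 0xff;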
+
+int MipsCodegen::AllocTypedTemp(CompilationUnit *cu, bool fp_hint, int reg_class)
+{
+  if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
+    return AllocTempFloat(cu);
+  }
+  return AllocTemp(cu);
+}
+
+void MipsCodegen::CompilerInitializeRegAlloc(CompilationUnit* cu)
+{
+ int num_regs = sizeof(core_regs)/sizeof(*core_regs);
+ int num_reserved = sizeof(ReservedRegs)/sizeof(*ReservedRegs);
+ int num_temps = sizeof(core_temps)/sizeof(*core_temps);
+ int num_fp_regs = sizeof(FpRegs)/sizeof(*FpRegs);
+ int num_fp_temps = sizeof(fp_temps)/sizeof(*fp_temps);
+ RegisterPool *pool =
+ static_cast<RegisterPool*>(NewMem(cu, sizeof(*pool), true, kAllocRegAlloc));
+ cu->reg_pool = pool;
+ pool->num_core_regs = num_regs;
+ pool->core_regs = static_cast<RegisterInfo*>
+ (NewMem(cu, num_regs * sizeof(*cu->reg_pool->core_regs), true, kAllocRegAlloc));
+ pool->num_fp_regs = num_fp_regs;
+ pool->FPRegs = static_cast<RegisterInfo*>
+ (NewMem(cu, num_fp_regs * sizeof(*cu->reg_pool->FPRegs), true, kAllocRegAlloc));
+ CompilerInitPool(pool->core_regs, core_regs, pool->num_core_regs);
+ CompilerInitPool(pool->FPRegs, FpRegs, pool->num_fp_regs);
+ // Keep special registers from being allocated
+ for (int i = 0; i < num_reserved; i++) {
+ if (NO_SUSPEND && (ReservedRegs[i] == rMIPS_SUSPEND)) {
+      // NO_SUSPEND: leave the suspend register allocatable so the cost of the suspend check can be measured.
+ continue;
+ }
+ MarkInUse(cu, ReservedRegs[i]);
+ }
+ // Mark temp regs - all others not in use can be used for promotion
+ for (int i = 0; i < num_temps; i++) {
+ MarkTemp(cu, core_temps[i]);
+ }
+ for (int i = 0; i < num_fp_temps; i++) {
+ MarkTemp(cu, fp_temps[i]);
+ }
+}
+
+void MipsCodegen::FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep, RegLocation rl_free)
+{
+ if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) &&
+ (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) {
+ // No overlap, free both
+ FreeTemp(cu, rl_free.low_reg);
+ FreeTemp(cu, rl_free.high_reg);
+ }
+}
+/*
+ * In the ARM code it is typical to use the link register
+ * to hold the target address.  However, for MIPS we must
+ * ensure that all branch instructions can be restarted if
+ * there is a trap in the shadow, so allocate a temp register instead.
+ */
+int MipsCodegen::LoadHelper(CompilationUnit* cu, int offset)
+{
+ LoadWordDisp(cu, rMIPS_SELF, offset, r_T9);
+ return r_T9;
+}
+
+void MipsCodegen::SpillCoreRegs(CompilationUnit* cu)
+{
+ if (cu->num_core_spills == 0) {
+ return;
+ }
+ uint32_t mask = cu->core_spill_mask;
+ int offset = cu->num_core_spills * 4;
+ OpRegImm(cu, kOpSub, rMIPS_SP, offset);
+ for (int reg = 0; mask; mask >>= 1, reg++) {
+ if (mask & 0x1) {
+ offset -= 4;
+ StoreWordDisp(cu, rMIPS_SP, offset, reg);
+ }
+ }
+}
+
+void MipsCodegen::UnSpillCoreRegs(CompilationUnit* cu)
+{
+ if (cu->num_core_spills == 0) {
+ return;
+ }
+ uint32_t mask = cu->core_spill_mask;
+ int offset = cu->frame_size;
+ for (int reg = 0; mask; mask >>= 1, reg++) {
+ if (mask & 0x1) {
+ offset -= 4;
+ LoadWordDisp(cu, rMIPS_SP, offset, reg);
+ }
+ }
+ OpRegImm(cu, kOpAdd, rMIPS_SP, cu->frame_size);
+}
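+// Illustrative walk of the spill mask: with core_spill_mask == (1 << r_RA)
+// and num_core_spills == 1 (the defaults established by AdjustSpillMask),
+// SpillCoreRegs drops SP by 4 and stores ra at offset 0, and UnSpillCoreRegs
+// reloads it from frame_size - 4 before adding frame_size back to SP.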
+
+bool MipsCodegen::IsUnconditionalBranch(LIR* lir)
+{
+ return (lir->opcode == kMipsB);
+}
+
+/* Common initialization routine for an architecture family */
+bool InitMipsCodegen(CompilationUnit* cu)
+{
+ cu->cg.reset(new MipsCodegen());
+ for (int i = 0; i < kMipsLast; i++) {
+ if (MipsCodegen::EncodingMap[i].opcode != i) {
+ LOG(FATAL) << "Encoding order for " << MipsCodegen::EncodingMap[i].name
+ << " is wrong: expecting " << i << ", seeing "
+ << static_cast<int>(MipsCodegen::EncodingMap[i].opcode);
+ }
+ }
+ return true;
+}
+
+uint64_t MipsCodegen::GetTargetInstFlags(int opcode)
+{
+ return MipsCodegen::EncodingMap[opcode].flags;
+}
+
+const char* MipsCodegen::GetTargetInstName(int opcode)
+{
+ return MipsCodegen::EncodingMap[opcode].name;
+}
+
+const char* MipsCodegen::GetTargetInstFmt(int opcode)
+{
+ return MipsCodegen::EncodingMap[opcode].fmt;
+}
+
+} // namespace art
diff --git a/src/compiler/dex/quick/mips/utility_mips.cc b/src/compiler/dex/quick/mips/utility_mips.cc
new file mode 100644
index 0000000..168eb68
--- /dev/null
+++ b/src/compiler/dex/quick/mips/utility_mips.cc
@@ -0,0 +1,705 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_mips.h"
+#include "compiler/dex/quick/codegen_util.h"
+#include "compiler/dex/quick/ralloc_util.h"
+#include "mips_lir.h"
+
+namespace art {
+
+/* This file contains codegen for the MIPS32 ISA. */
+LIR* MipsCodegen::OpFpRegCopy(CompilationUnit *cu, int r_dest, int r_src)
+{
+ int opcode;
+  /* Both registers must be DOUBLE, or neither may be. */
+ DCHECK_EQ(MIPS_DOUBLEREG(r_dest),MIPS_DOUBLEREG(r_src));
+ if (MIPS_DOUBLEREG(r_dest)) {
+ opcode = kMipsFmovd;
+ } else {
+ if (MIPS_SINGLEREG(r_dest)) {
+ if (MIPS_SINGLEREG(r_src)) {
+ opcode = kMipsFmovs;
+ } else {
+ /* note the operands are swapped for the mtc1 instr */
+ int t_opnd = r_src;
+ r_src = r_dest;
+ r_dest = t_opnd;
+ opcode = kMipsMtc1;
+ }
+ } else {
+ DCHECK(MIPS_SINGLEREG(r_src));
+ opcode = kMipsMfc1;
+ }
+ }
+ LIR* res = RawLIR(cu, cu->current_dalvik_offset, opcode, r_src, r_dest);
+ if (!(cu->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
+ res->flags.is_nop = true;
+ }
+ return res;
+}
+
+bool MipsCodegen::InexpensiveConstantInt(int32_t value)
+{
+ return ((value == 0) || IsUint(16, value) || ((value < 0) && (value >= -32768)));
+}
+
+bool MipsCodegen::InexpensiveConstantFloat(int32_t value)
+{
+ return false; // TUNING
+}
+
+bool MipsCodegen::InexpensiveConstantLong(int64_t value)
+{
+ return false; // TUNING
+}
+
+bool MipsCodegen::InexpensiveConstantDouble(int64_t value)
+{
+ return false; // TUNING
+}
+
+/*
+ * Load an immediate using a shortcut if possible; otherwise
+ * grab from the per-translation literal pool. If target is
+ * a high register, build constant into a low register and copy.
+ *
+ * No additional register clobbering operation performed. Use this version when
+ * 1) r_dest is freshly returned from AllocTemp or
+ * 2) The codegen is under fixed register usage
+ */
+LIR* MipsCodegen::LoadConstantNoClobber(CompilationUnit *cu, int r_dest, int value)
+{
+ LIR *res;
+
+ int r_dest_save = r_dest;
+ int is_fp_reg = MIPS_FPREG(r_dest);
+ if (is_fp_reg) {
+ DCHECK(MIPS_SINGLEREG(r_dest));
+ r_dest = AllocTemp(cu);
+ }
+
+ /* See if the value can be constructed cheaply */
+ if (value == 0) {
+ res = NewLIR2(cu, kMipsMove, r_dest, r_ZERO);
+ } else if ((value > 0) && (value <= 65535)) {
+ res = NewLIR3(cu, kMipsOri, r_dest, r_ZERO, value);
+ } else if ((value < 0) && (value >= -32768)) {
+ res = NewLIR3(cu, kMipsAddiu, r_dest, r_ZERO, value);
+ } else {
+ res = NewLIR2(cu, kMipsLui, r_dest, value>>16);
+ if (value & 0xffff)
+ NewLIR3(cu, kMipsOri, r_dest, r_dest, value);
+ }
+
+ if (is_fp_reg) {
+ NewLIR2(cu, kMipsMtc1, r_dest, r_dest_save);
+ FreeTemp(cu, r_dest);
+ }
+
+ return res;
+}
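+// Illustrative expansions of the shortcuts above: 0 becomes "move r_dest,
+// zero"; a small negative value such as -4 becomes "addiu r_dest, zero, -4";
+// and a value like 0x12345678 takes the long form
+//   lui r_dest, 0x1234
+//   ori r_dest, r_dest, 0x5678
+// with an extra mtc1/temp shuffle when the destination is an FP register.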
+
+LIR* MipsCodegen::OpUnconditionalBranch(CompilationUnit* cu, LIR* target)
+{
+  LIR* res = NewLIR1(cu, kMipsB, 0 /* offset to be patched during assembly */);
+ res->target = target;
+ return res;
+}
+
+LIR* MipsCodegen::OpReg(CompilationUnit *cu, OpKind op, int r_dest_src)
+{
+ MipsOpCode opcode = kMipsNop;
+ switch (op) {
+ case kOpBlx:
+ opcode = kMipsJalr;
+ break;
+ case kOpBx:
+ return NewLIR1(cu, kMipsJr, r_dest_src);
+ break;
+ default:
+ LOG(FATAL) << "Bad case in OpReg";
+ }
+ return NewLIR2(cu, opcode, r_RA, r_dest_src);
+}
+
+LIR* MipsCodegen::OpRegImm(CompilationUnit *cu, OpKind op, int r_dest_src1,
+ int value)
+{
+ LIR *res;
+ bool neg = (value < 0);
+ int abs_value = (neg) ? -value : value;
+ bool short_form = (abs_value & 0xff) == abs_value;
+ MipsOpCode opcode = kMipsNop;
+ switch (op) {
+ case kOpAdd:
+ return OpRegRegImm(cu, op, r_dest_src1, r_dest_src1, value);
+ break;
+ case kOpSub:
+ return OpRegRegImm(cu, op, r_dest_src1, r_dest_src1, value);
+ break;
+ default:
+ LOG(FATAL) << "Bad case in OpRegImm";
+ break;
+ }
+ if (short_form)
+ res = NewLIR2(cu, opcode, r_dest_src1, abs_value);
+ else {
+ int r_scratch = AllocTemp(cu);
+ res = LoadConstant(cu, r_scratch, value);
+ if (op == kOpCmp)
+ NewLIR2(cu, opcode, r_dest_src1, r_scratch);
+ else
+ NewLIR3(cu, opcode, r_dest_src1, r_dest_src1, r_scratch);
+ }
+ return res;
+}
+
+LIR* MipsCodegen::OpRegRegReg(CompilationUnit *cu, OpKind op, int r_dest, int r_src1, int r_src2)
+{
+ MipsOpCode opcode = kMipsNop;
+ switch (op) {
+ case kOpAdd:
+ opcode = kMipsAddu;
+ break;
+ case kOpSub:
+ opcode = kMipsSubu;
+ break;
+ case kOpAnd:
+ opcode = kMipsAnd;
+ break;
+ case kOpMul:
+ opcode = kMipsMul;
+ break;
+ case kOpOr:
+ opcode = kMipsOr;
+ break;
+ case kOpXor:
+ opcode = kMipsXor;
+ break;
+ case kOpLsl:
+ opcode = kMipsSllv;
+ break;
+ case kOpLsr:
+ opcode = kMipsSrlv;
+ break;
+ case kOpAsr:
+ opcode = kMipsSrav;
+ break;
+ case kOpAdc:
+ case kOpSbc:
+ LOG(FATAL) << "No carry bit on MIPS";
+ break;
+ default:
+ LOG(FATAL) << "bad case in OpRegRegReg";
+ break;
+ }
+ return NewLIR3(cu, opcode, r_dest, r_src1, r_src2);
+}
+
+LIR* MipsCodegen::OpRegRegImm(CompilationUnit *cu, OpKind op, int r_dest, int r_src1, int value)
+{
+ LIR *res;
+ MipsOpCode opcode = kMipsNop;
+ bool short_form = true;
+
+ switch (op) {
+ case kOpAdd:
+ if (IS_SIMM16(value)) {
+ opcode = kMipsAddiu;
+ }
+ else {
+ short_form = false;
+ opcode = kMipsAddu;
+ }
+ break;
+ case kOpSub:
+ if (IS_SIMM16((-value))) {
+ value = -value;
+ opcode = kMipsAddiu;
+ }
+ else {
+ short_form = false;
+ opcode = kMipsSubu;
+ }
+ break;
+ case kOpLsl:
+ DCHECK(value >= 0 && value <= 31);
+ opcode = kMipsSll;
+ break;
+ case kOpLsr:
+ DCHECK(value >= 0 && value <= 31);
+ opcode = kMipsSrl;
+ break;
+ case kOpAsr:
+ DCHECK(value >= 0 && value <= 31);
+ opcode = kMipsSra;
+ break;
+ case kOpAnd:
+ if (IS_UIMM16((value))) {
+ opcode = kMipsAndi;
+ }
+ else {
+ short_form = false;
+ opcode = kMipsAnd;
+ }
+ break;
+ case kOpOr:
+ if (IS_UIMM16((value))) {
+ opcode = kMipsOri;
+ }
+ else {
+ short_form = false;
+ opcode = kMipsOr;
+ }
+ break;
+ case kOpXor:
+ if (IS_UIMM16((value))) {
+ opcode = kMipsXori;
+ }
+ else {
+ short_form = false;
+ opcode = kMipsXor;
+ }
+ break;
+ case kOpMul:
+ short_form = false;
+ opcode = kMipsMul;
+ break;
+ default:
+ LOG(FATAL) << "Bad case in OpRegRegImm";
+ break;
+ }
+
+ if (short_form)
+ res = NewLIR3(cu, opcode, r_dest, r_src1, value);
+ else {
+ if (r_dest != r_src1) {
+ res = LoadConstant(cu, r_dest, value);
+ NewLIR3(cu, opcode, r_dest, r_src1, r_dest);
+ } else {
+ int r_scratch = AllocTemp(cu);
+ res = LoadConstant(cu, r_scratch, value);
+ NewLIR3(cu, opcode, r_dest, r_src1, r_scratch);
+ }
+ }
+ return res;
+}
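+// For example, kOpAdd with a 16-bit signed immediate emits a single addiu,
+// while kOpAdd with a value such as 0x12345 falls through to the long form:
+// the constant is materialized (into r_dest itself when it differs from
+// r_src1, otherwise into a scratch temp) and then combined with addu.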
+
+LIR* MipsCodegen::OpRegReg(CompilationUnit *cu, OpKind op, int r_dest_src1, int r_src2)
+{
+ MipsOpCode opcode = kMipsNop;
+ LIR *res;
+ switch (op) {
+ case kOpMov:
+ opcode = kMipsMove;
+ break;
+ case kOpMvn:
+ return NewLIR3(cu, kMipsNor, r_dest_src1, r_src2, r_ZERO);
+ case kOpNeg:
+ return NewLIR3(cu, kMipsSubu, r_dest_src1, r_ZERO, r_src2);
+ case kOpAdd:
+ case kOpAnd:
+ case kOpMul:
+ case kOpOr:
+ case kOpSub:
+ case kOpXor:
+ return OpRegRegReg(cu, op, r_dest_src1, r_dest_src1, r_src2);
+ case kOp2Byte:
+#if __mips_isa_rev>=2
+ res = NewLIR2(cu, kMipsSeb, r_dest_src1, r_src2);
+#else
+ res = OpRegRegImm(cu, kOpLsl, r_dest_src1, r_src2, 24);
+ OpRegRegImm(cu, kOpAsr, r_dest_src1, r_dest_src1, 24);
+#endif
+ return res;
+ case kOp2Short:
+#if __mips_isa_rev>=2
+ res = NewLIR2(cu, kMipsSeh, r_dest_src1, r_src2);
+#else
+ res = OpRegRegImm(cu, kOpLsl, r_dest_src1, r_src2, 16);
+ OpRegRegImm(cu, kOpAsr, r_dest_src1, r_dest_src1, 16);
+#endif
+ return res;
+ case kOp2Char:
+ return NewLIR3(cu, kMipsAndi, r_dest_src1, r_src2, 0xFFFF);
+ default:
+ LOG(FATAL) << "Bad case in OpRegReg";
+ break;
+ }
+ return NewLIR2(cu, opcode, r_dest_src1, r_src2);
+}
+
+LIR* MipsCodegen::LoadConstantWide(CompilationUnit *cu, int r_dest_lo, int r_dest_hi, int64_t value)
+{
+ LIR *res;
+ res = LoadConstantNoClobber(cu, r_dest_lo, Low32Bits(value));
+ LoadConstantNoClobber(cu, r_dest_hi, High32Bits(value));
+ return res;
+}
+
+/* Load value from base + scaled index. */
+LIR* MipsCodegen::LoadBaseIndexed(CompilationUnit *cu, int rBase, int r_index, int r_dest,
+ int scale, OpSize size)
+{
+ LIR *first = NULL;
+ LIR *res;
+ MipsOpCode opcode = kMipsNop;
+ int t_reg = AllocTemp(cu);
+
+ if (MIPS_FPREG(r_dest)) {
+ DCHECK(MIPS_SINGLEREG(r_dest));
+ DCHECK((size == kWord) || (size == kSingle));
+ size = kSingle;
+ } else {
+ if (size == kSingle)
+ size = kWord;
+ }
+
+ if (!scale) {
+ first = NewLIR3(cu, kMipsAddu, t_reg , rBase, r_index);
+ } else {
+ first = OpRegRegImm(cu, kOpLsl, t_reg, r_index, scale);
+ NewLIR3(cu, kMipsAddu, t_reg , rBase, t_reg);
+ }
+
+ switch (size) {
+ case kSingle:
+ opcode = kMipsFlwc1;
+ break;
+ case kWord:
+ opcode = kMipsLw;
+ break;
+ case kUnsignedHalf:
+ opcode = kMipsLhu;
+ break;
+ case kSignedHalf:
+ opcode = kMipsLh;
+ break;
+ case kUnsignedByte:
+ opcode = kMipsLbu;
+ break;
+ case kSignedByte:
+ opcode = kMipsLb;
+ break;
+ default:
+ LOG(FATAL) << "Bad case in LoadBaseIndexed";
+ }
+
+ res = NewLIR3(cu, opcode, r_dest, 0, t_reg);
+ FreeTemp(cu, t_reg);
+ return (first) ? first : res;
+}
+
+/* Store value to base + scaled index. */
+LIR* MipsCodegen::StoreBaseIndexed(CompilationUnit *cu, int rBase, int r_index, int r_src,
+ int scale, OpSize size)
+{
+ LIR *first = NULL;
+ MipsOpCode opcode = kMipsNop;
+ int r_new_index = r_index;
+ int t_reg = AllocTemp(cu);
+
+ if (MIPS_FPREG(r_src)) {
+ DCHECK(MIPS_SINGLEREG(r_src));
+ DCHECK((size == kWord) || (size == kSingle));
+ size = kSingle;
+ } else {
+ if (size == kSingle)
+ size = kWord;
+ }
+
+ if (!scale) {
+ first = NewLIR3(cu, kMipsAddu, t_reg , rBase, r_index);
+ } else {
+ first = OpRegRegImm(cu, kOpLsl, t_reg, r_index, scale);
+ NewLIR3(cu, kMipsAddu, t_reg , rBase, t_reg);
+ }
+
+ switch (size) {
+ case kSingle:
+ opcode = kMipsFswc1;
+ break;
+ case kWord:
+ opcode = kMipsSw;
+ break;
+ case kUnsignedHalf:
+ case kSignedHalf:
+ opcode = kMipsSh;
+ break;
+ case kUnsignedByte:
+ case kSignedByte:
+ opcode = kMipsSb;
+ break;
+ default:
+ LOG(FATAL) << "Bad case in StoreBaseIndexed";
+ }
+ NewLIR3(cu, opcode, r_src, 0, t_reg);
+ FreeTemp(cu, r_new_index);
+ return first;
+}
+
+LIR* MipsCodegen::LoadBaseDispBody(CompilationUnit *cu, int rBase, int displacement, int r_dest,
+ int r_dest_hi, OpSize size, int s_reg)
+/*
+ * Load value from base + displacement. Optionally perform null check
+ * on base (which must have an associated s_reg and MIR). If not
+ * performing null check, incoming MIR can be null. IMPORTANT: this
+ * code must not allocate any new temps. If a new register is needed
+ * and base and dest are the same, spill some other register to
+ * rlp and then restore.
+ */
+{
+ LIR *res;
+ LIR *load = NULL;
+ LIR *load2 = NULL;
+ MipsOpCode opcode = kMipsNop;
+ bool short_form = IS_SIMM16(displacement);
+ bool pair = false;
+
+ switch (size) {
+ case kLong:
+ case kDouble:
+ pair = true;
+ opcode = kMipsLw;
+ if (MIPS_FPREG(r_dest)) {
+ opcode = kMipsFlwc1;
+ if (MIPS_DOUBLEREG(r_dest)) {
+ r_dest = r_dest - MIPS_FP_DOUBLE;
+ } else {
+ DCHECK(MIPS_FPREG(r_dest_hi));
+ DCHECK(r_dest == (r_dest_hi - 1));
+ }
+ r_dest_hi = r_dest + 1;
+ }
+ short_form = IS_SIMM16_2WORD(displacement);
+ DCHECK_EQ((displacement & 0x3), 0);
+ break;
+ case kWord:
+ case kSingle:
+ opcode = kMipsLw;
+ if (MIPS_FPREG(r_dest)) {
+ opcode = kMipsFlwc1;
+ DCHECK(MIPS_SINGLEREG(r_dest));
+ }
+ DCHECK_EQ((displacement & 0x3), 0);
+ break;
+ case kUnsignedHalf:
+ opcode = kMipsLhu;
+ DCHECK_EQ((displacement & 0x1), 0);
+ break;
+ case kSignedHalf:
+ opcode = kMipsLh;
+ DCHECK_EQ((displacement & 0x1), 0);
+ break;
+ case kUnsignedByte:
+ opcode = kMipsLbu;
+ break;
+ case kSignedByte:
+ opcode = kMipsLb;
+ break;
+ default:
+ LOG(FATAL) << "Bad case in LoadBaseIndexedBody";
+ }
+
+ if (short_form) {
+ if (!pair) {
+ load = res = NewLIR3(cu, opcode, r_dest, displacement, rBase);
+ } else {
+ load = res = NewLIR3(cu, opcode, r_dest,
+ displacement + LOWORD_OFFSET, rBase);
+ load2 = NewLIR3(cu, opcode, r_dest_hi,
+ displacement + HIWORD_OFFSET, rBase);
+ }
+ } else {
+ if (pair) {
+ int r_tmp = AllocFreeTemp(cu);
+ res = OpRegRegImm(cu, kOpAdd, r_tmp, rBase, displacement);
+ load = NewLIR3(cu, opcode, r_dest, LOWORD_OFFSET, r_tmp);
+ load2 = NewLIR3(cu, opcode, r_dest_hi, HIWORD_OFFSET, r_tmp);
+ FreeTemp(cu, r_tmp);
+ } else {
+ int r_tmp = (rBase == r_dest) ? AllocFreeTemp(cu) : r_dest;
+ res = OpRegRegImm(cu, kOpAdd, r_tmp, rBase, displacement);
+ load = NewLIR3(cu, opcode, r_dest, 0, r_tmp);
+ if (r_tmp != r_dest)
+ FreeTemp(cu, r_tmp);
+ }
+ }
+
+ if (rBase == rMIPS_SP) {
+ AnnotateDalvikRegAccess(cu, load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
+ true /* is_load */, pair /* is64bit */);
+ if (pair) {
+ AnnotateDalvikRegAccess(cu, load2, (displacement + HIWORD_OFFSET) >> 2,
+ true /* is_load */, pair /* is64bit */);
+ }
+ }
+ return load;
+}
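+// Illustrative sketch, assuming LOWORD_OFFSET/HIWORD_OFFSET are 0 and 4
+// (they are defined elsewhere): a short-form kLong load from SP+16 emits
+//   lw r_dest,    16(sp)
+//   lw r_dest_hi, 20(sp)
+// and both LIRs receive Dalvik register annotations because rBase == rMIPS_SP.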
+
+LIR* MipsCodegen::LoadBaseDisp(CompilationUnit *cu, int rBase, int displacement, int r_dest,
+ OpSize size, int s_reg)
+{
+ return LoadBaseDispBody(cu, rBase, displacement, r_dest, -1,
+ size, s_reg);
+}
+
+LIR* MipsCodegen::LoadBaseDispWide(CompilationUnit *cu, int rBase, int displacement,
+ int r_dest_lo, int r_dest_hi, int s_reg)
+{
+ return LoadBaseDispBody(cu, rBase, displacement, r_dest_lo, r_dest_hi, kLong, s_reg);
+}
+
+LIR* MipsCodegen::StoreBaseDispBody(CompilationUnit *cu, int rBase, int displacement,
+ int r_src, int r_src_hi, OpSize size)
+{
+ LIR *res;
+ LIR *store = NULL;
+ LIR *store2 = NULL;
+ MipsOpCode opcode = kMipsNop;
+ bool short_form = IS_SIMM16(displacement);
+ bool pair = false;
+
+ switch (size) {
+ case kLong:
+ case kDouble:
+ pair = true;
+ opcode = kMipsSw;
+ if (MIPS_FPREG(r_src)) {
+ opcode = kMipsFswc1;
+ if (MIPS_DOUBLEREG(r_src)) {
+ r_src = r_src - MIPS_FP_DOUBLE;
+ } else {
+ DCHECK(MIPS_FPREG(r_src_hi));
+ DCHECK_EQ(r_src, (r_src_hi - 1));
+ }
+ r_src_hi = r_src + 1;
+ }
+ short_form = IS_SIMM16_2WORD(displacement);
+ DCHECK_EQ((displacement & 0x3), 0);
+ break;
+ case kWord:
+ case kSingle:
+ opcode = kMipsSw;
+ if (MIPS_FPREG(r_src)) {
+ opcode = kMipsFswc1;
+ DCHECK(MIPS_SINGLEREG(r_src));
+ }
+ DCHECK_EQ((displacement & 0x3), 0);
+ break;
+ case kUnsignedHalf:
+ case kSignedHalf:
+ opcode = kMipsSh;
+ DCHECK_EQ((displacement & 0x1), 0);
+ break;
+ case kUnsignedByte:
+ case kSignedByte:
+ opcode = kMipsSb;
+ break;
+ default:
+ LOG(FATAL) << "Bad case in StoreBaseIndexedBody";
+ }
+
+ if (short_form) {
+ if (!pair) {
+ store = res = NewLIR3(cu, opcode, r_src, displacement, rBase);
+ } else {
+ store = res = NewLIR3(cu, opcode, r_src, displacement + LOWORD_OFFSET,
+ rBase);
+ store2 = NewLIR3(cu, opcode, r_src_hi, displacement + HIWORD_OFFSET,
+ rBase);
+ }
+ } else {
+ int r_scratch = AllocTemp(cu);
+ res = OpRegRegImm(cu, kOpAdd, r_scratch, rBase, displacement);
+ if (!pair) {
+ store = NewLIR3(cu, opcode, r_src, 0, r_scratch);
+ } else {
+ store = NewLIR3(cu, opcode, r_src, LOWORD_OFFSET, r_scratch);
+ store2 = NewLIR3(cu, opcode, r_src_hi, HIWORD_OFFSET, r_scratch);
+ }
+ FreeTemp(cu, r_scratch);
+ }
+
+ if (rBase == rMIPS_SP) {
+ AnnotateDalvikRegAccess(cu, store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
+ false /* is_load */, pair /* is64bit */);
+ if (pair) {
+ AnnotateDalvikRegAccess(cu, store2, (displacement + HIWORD_OFFSET) >> 2,
+ false /* is_load */, pair /* is64bit */);
+ }
+ }
+
+ return res;
+}
+
+LIR* MipsCodegen::StoreBaseDisp(CompilationUnit *cu, int rBase, int displacement, int r_src,
+ OpSize size)
+{
+ return StoreBaseDispBody(cu, rBase, displacement, r_src, -1, size);
+}
+
+LIR* MipsCodegen::StoreBaseDispWide(CompilationUnit *cu, int rBase, int displacement,
+ int r_src_lo, int r_src_hi)
+{
+ return StoreBaseDispBody(cu, rBase, displacement, r_src_lo, r_src_hi, kLong);
+}
+
+LIR* MipsCodegen::OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset)
+{
+ LOG(FATAL) << "Unexpected use of OpThreadMem for MIPS";
+ return NULL;
+}
+
+LIR* MipsCodegen::OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp)
+{
+ LOG(FATAL) << "Unexpected use of OpMem for MIPS";
+ return NULL;
+}
+
+LIR* MipsCodegen::StoreBaseIndexedDisp(CompilationUnit *cu,
+ int rBase, int r_index, int scale, int displacement,
+ int r_src, int r_src_hi,
+ OpSize size, int s_reg)
+{
+ LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for MIPS";
+ return NULL;
+}
+
+LIR* MipsCodegen::OpRegMem(CompilationUnit *cu, OpKind op, int r_dest, int rBase,
+ int offset)
+{
+ LOG(FATAL) << "Unexpected use of OpRegMem for MIPS";
+ return NULL;
+}
+
+LIR* MipsCodegen::LoadBaseIndexedDisp(CompilationUnit *cu,
+ int rBase, int r_index, int scale, int displacement,
+ int r_dest, int r_dest_hi,
+ OpSize size, int s_reg)
+{
+ LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for MIPS";
+ return NULL;
+}
+
+LIR* MipsCodegen::OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target)
+{
+ LOG(FATAL) << "Unexpected use of OpCondBranch for MIPS";
+ return NULL;
+}
+
+} // namespace art
diff --git a/src/compiler/dex/quick/mir_to_lir.cc b/src/compiler/dex/quick/mir_to_lir.cc
new file mode 100644
index 0000000..267f61e
--- /dev/null
+++ b/src/compiler/dex/quick/mir_to_lir.cc
@@ -0,0 +1,851 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "object_utils.h"
+
+#include "compiler/dex/compiler_internals.h"
+#include "local_optimizations.h"
+#include "codegen_util.h"
+#include "ralloc_util.h"
+
+namespace art {
+
+/*
+ * Target-independent code generation. Use only high-level
+ * load/store utilities here, or target-dependent genXX() handlers
+ * when necessary.
+ */
+static void CompileDalvikInstruction(CompilationUnit* cu, MIR* mir, BasicBlock* bb,
+ LIR* label_list)
+{
+ Codegen* cg = cu->cg.get();
+ RegLocation rl_src[3];
+ RegLocation rl_dest = GetBadLoc();
+ RegLocation rl_result = GetBadLoc();
+ Instruction::Code opcode = mir->dalvikInsn.opcode;
+ int opt_flags = mir->optimization_flags;
+ uint32_t vB = mir->dalvikInsn.vB;
+ uint32_t vC = mir->dalvikInsn.vC;
+
+ // Prep Src and Dest locations.
+ int next_sreg = 0;
+ int next_loc = 0;
+ int attrs = oat_data_flow_attributes[opcode];
+ rl_src[0] = rl_src[1] = rl_src[2] = GetBadLoc();
+ if (attrs & DF_UA) {
+ if (attrs & DF_A_WIDE) {
+ rl_src[next_loc++] = GetSrcWide(cu, mir, next_sreg);
+ next_sreg+= 2;
+ } else {
+ rl_src[next_loc++] = GetSrc(cu, mir, next_sreg);
+ next_sreg++;
+ }
+ }
+ if (attrs & DF_UB) {
+ if (attrs & DF_B_WIDE) {
+ rl_src[next_loc++] = GetSrcWide(cu, mir, next_sreg);
+ next_sreg+= 2;
+ } else {
+ rl_src[next_loc++] = GetSrc(cu, mir, next_sreg);
+ next_sreg++;
+ }
+ }
+ if (attrs & DF_UC) {
+ if (attrs & DF_C_WIDE) {
+ rl_src[next_loc++] = GetSrcWide(cu, mir, next_sreg);
+ } else {
+ rl_src[next_loc++] = GetSrc(cu, mir, next_sreg);
+ }
+ }
+ if (attrs & DF_DA) {
+ if (attrs & DF_A_WIDE) {
+ rl_dest = GetDestWide(cu, mir);
+ } else {
+ rl_dest = GetDest(cu, mir);
+ }
+ }
+ switch (opcode) {
+ case Instruction::NOP:
+ break;
+
+ case Instruction::MOVE_EXCEPTION:
+ cg->GenMoveException(cu, rl_dest);
+ break;
+
+ case Instruction::RETURN_VOID:
+ if (((cu->access_flags & kAccConstructor) != 0) &&
+ cu->compiler->RequiresConstructorBarrier(Thread::Current(), cu->dex_file,
+ cu->class_def_idx)) {
+ cg->GenMemBarrier(cu, kStoreStore);
+ }
+ if (!(cu->attrs & METHOD_IS_LEAF)) {
+ cg->GenSuspendTest(cu, opt_flags);
+ }
+ break;
+
+ case Instruction::RETURN:
+ case Instruction::RETURN_OBJECT:
+ if (!(cu->attrs & METHOD_IS_LEAF)) {
+ cg->GenSuspendTest(cu, opt_flags);
+ }
+ cg->StoreValue(cu, GetReturn(cu, cu->shorty[0] == 'F'), rl_src[0]);
+ break;
+
+ case Instruction::RETURN_WIDE:
+ if (!(cu->attrs & METHOD_IS_LEAF)) {
+ cg->GenSuspendTest(cu, opt_flags);
+ }
+ cg->StoreValueWide(cu, GetReturnWide(cu,
+ cu->shorty[0] == 'D'), rl_src[0]);
+ break;
+
+ case Instruction::MOVE_RESULT_WIDE:
+ if (opt_flags & MIR_INLINED)
+ break; // Nop - combined w/ previous invoke.
+ cg->StoreValueWide(cu, rl_dest, GetReturnWide(cu, rl_dest.fp));
+ break;
+
+ case Instruction::MOVE_RESULT:
+ case Instruction::MOVE_RESULT_OBJECT:
+ if (opt_flags & MIR_INLINED)
+ break; // Nop - combined w/ previous invoke.
+ cg->StoreValue(cu, rl_dest, GetReturn(cu, rl_dest.fp));
+ break;
+
+ case Instruction::MOVE:
+ case Instruction::MOVE_OBJECT:
+ case Instruction::MOVE_16:
+ case Instruction::MOVE_OBJECT_16:
+ case Instruction::MOVE_FROM16:
+ case Instruction::MOVE_OBJECT_FROM16:
+ cg->StoreValue(cu, rl_dest, rl_src[0]);
+ break;
+
+ case Instruction::MOVE_WIDE:
+ case Instruction::MOVE_WIDE_16:
+ case Instruction::MOVE_WIDE_FROM16:
+ cg->StoreValueWide(cu, rl_dest, rl_src[0]);
+ break;
+
+ case Instruction::CONST:
+ case Instruction::CONST_4:
+ case Instruction::CONST_16:
+ rl_result = EvalLoc(cu, rl_dest, kAnyReg, true);
+ cg->LoadConstantNoClobber(cu, rl_result.low_reg, vB);
+ cg->StoreValue(cu, rl_dest, rl_result);
+ if (vB == 0) {
+ cg->Workaround7250540(cu, rl_dest, rl_result.low_reg);
+ }
+ break;
+
+ case Instruction::CONST_HIGH16:
+ rl_result = EvalLoc(cu, rl_dest, kAnyReg, true);
+ cg->LoadConstantNoClobber(cu, rl_result.low_reg, vB << 16);
+ cg->StoreValue(cu, rl_dest, rl_result);
+ if (vB == 0) {
+ cg->Workaround7250540(cu, rl_dest, rl_result.low_reg);
+ }
+ break;
+
+ case Instruction::CONST_WIDE_16:
+ case Instruction::CONST_WIDE_32:
+ rl_result = EvalLoc(cu, rl_dest, kAnyReg, true);
+ cg->LoadConstantWide(cu, rl_result.low_reg, rl_result.high_reg,
+ static_cast<int64_t>(static_cast<int32_t>(vB)));
+ cg->StoreValueWide(cu, rl_dest, rl_result);
+ break;
+
+ case Instruction::CONST_WIDE:
+ rl_result = EvalLoc(cu, rl_dest, kAnyReg, true);
+ cg->LoadConstantWide(cu, rl_result.low_reg, rl_result.high_reg, mir->dalvikInsn.vB_wide);
+ cg->StoreValueWide(cu, rl_dest, rl_result);
+ break;
+
+ case Instruction::CONST_WIDE_HIGH16:
+ rl_result = EvalLoc(cu, rl_dest, kAnyReg, true);
+ cg->LoadConstantWide(cu, rl_result.low_reg, rl_result.high_reg,
+ static_cast<int64_t>(vB) << 48);
+ cg->StoreValueWide(cu, rl_dest, rl_result);
+ break;
+
+ case Instruction::MONITOR_ENTER:
+ cg->GenMonitorEnter(cu, opt_flags, rl_src[0]);
+ break;
+
+ case Instruction::MONITOR_EXIT:
+ cg->GenMonitorExit(cu, opt_flags, rl_src[0]);
+ break;
+
+ case Instruction::CHECK_CAST:
+ cg->GenCheckCast(cu, vB, rl_src[0]);
+ break;
+
+ case Instruction::INSTANCE_OF:
+ cg->GenInstanceof(cu, vC, rl_dest, rl_src[0]);
+ break;
+
+ case Instruction::NEW_INSTANCE:
+ cg->GenNewInstance(cu, vB, rl_dest);
+ break;
+
+ case Instruction::THROW:
+ cg->GenThrow(cu, rl_src[0]);
+ break;
+
+ case Instruction::ARRAY_LENGTH:
+ int len_offset;
+ len_offset = mirror::Array::LengthOffset().Int32Value();
+ rl_src[0] = cg->LoadValue(cu, rl_src[0], kCoreReg);
+ cg->GenNullCheck(cu, rl_src[0].s_reg_low, rl_src[0].low_reg, opt_flags);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ cg->LoadWordDisp(cu, rl_src[0].low_reg, len_offset, rl_result.low_reg);
+ cg->StoreValue(cu, rl_dest, rl_result);
+ break;
+
+ case Instruction::CONST_STRING:
+ case Instruction::CONST_STRING_JUMBO:
+ cg->GenConstString(cu, vB, rl_dest);
+ break;
+
+ case Instruction::CONST_CLASS:
+ cg->GenConstClass(cu, vB, rl_dest);
+ break;
+
+ case Instruction::FILL_ARRAY_DATA:
+ cg->GenFillArrayData(cu, vB, rl_src[0]);
+ break;
+
+ case Instruction::FILLED_NEW_ARRAY:
+ cg->GenFilledNewArray(cu, cg->NewMemCallInfo(cu, bb, mir, kStatic,
+ false /* not range */));
+ break;
+
+ case Instruction::FILLED_NEW_ARRAY_RANGE:
+ cg->GenFilledNewArray(cu, cg->NewMemCallInfo(cu, bb, mir, kStatic,
+ true /* range */));
+ break;
+
+ case Instruction::NEW_ARRAY:
+ cg->GenNewArray(cu, vC, rl_dest, rl_src[0]);
+ break;
+
+ case Instruction::GOTO:
+ case Instruction::GOTO_16:
+ case Instruction::GOTO_32:
+ if (bb->taken->start_offset <= mir->offset) {
+ cg->GenSuspendTestAndBranch(cu, opt_flags, &label_list[bb->taken->id]);
+ } else {
+ cg->OpUnconditionalBranch(cu, &label_list[bb->taken->id]);
+ }
+ break;
+
+ case Instruction::PACKED_SWITCH:
+ cg->GenPackedSwitch(cu, vB, rl_src[0]);
+ break;
+
+ case Instruction::SPARSE_SWITCH:
+ cg->GenSparseSwitch(cu, vB, rl_src[0]);
+ break;
+
+ case Instruction::CMPL_FLOAT:
+ case Instruction::CMPG_FLOAT:
+ case Instruction::CMPL_DOUBLE:
+ case Instruction::CMPG_DOUBLE:
+ cg->GenCmpFP(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
+ break;
+
+ case Instruction::CMP_LONG:
+ cg->GenCmpLong(cu, rl_dest, rl_src[0], rl_src[1]);
+ break;
+
+ case Instruction::IF_EQ:
+ case Instruction::IF_NE:
+ case Instruction::IF_LT:
+ case Instruction::IF_GE:
+ case Instruction::IF_GT:
+ case Instruction::IF_LE: {
+ LIR* taken = &label_list[bb->taken->id];
+ LIR* fall_through = &label_list[bb->fall_through->id];
+ bool backward_branch;
+ backward_branch = (bb->taken->start_offset <= mir->offset);
+ // Result known at compile time?
+ if (rl_src[0].is_const && rl_src[1].is_const) {
+ bool is_taken = EvaluateBranch(opcode, cu->constant_values[rl_src[0].orig_sreg],
+ cu->constant_values[rl_src[1].orig_sreg]);
+ if (is_taken && backward_branch) {
+ cg->GenSuspendTest(cu, opt_flags);
+ }
+ int id = is_taken ? bb->taken->id : bb->fall_through->id;
+ cg->OpUnconditionalBranch(cu, &label_list[id]);
+ } else {
+ if (backward_branch) {
+ cg->GenSuspendTest(cu, opt_flags);
+ }
+ cg->GenCompareAndBranch(cu, opcode, rl_src[0], rl_src[1], taken,
+ fall_through);
+ }
+ break;
+ }
+
+ case Instruction::IF_EQZ:
+ case Instruction::IF_NEZ:
+ case Instruction::IF_LTZ:
+ case Instruction::IF_GEZ:
+ case Instruction::IF_GTZ:
+ case Instruction::IF_LEZ: {
+ LIR* taken = &label_list[bb->taken->id];
+ LIR* fall_through = &label_list[bb->fall_through->id];
+ bool backward_branch;
+ backward_branch = (bb->taken->start_offset <= mir->offset);
+ // Result known at compile time?
+ if (rl_src[0].is_const) {
+ bool is_taken = EvaluateBranch(opcode, cu->constant_values[rl_src[0].orig_sreg], 0);
+ if (is_taken && backward_branch) {
+ cg->GenSuspendTest(cu, opt_flags);
+ }
+ int id = is_taken ? bb->taken->id : bb->fall_through->id;
+ cg->OpUnconditionalBranch(cu, &label_list[id]);
+ } else {
+ if (backward_branch) {
+ cg->GenSuspendTest(cu, opt_flags);
+ }
+ cg->GenCompareZeroAndBranch(cu, opcode, rl_src[0], taken, fall_through);
+ }
+ break;
+ }
+
+ case Instruction::AGET_WIDE:
+ cg->GenArrayGet(cu, opt_flags, kLong, rl_src[0], rl_src[1], rl_dest, 3);
+ break;
+ case Instruction::AGET:
+ case Instruction::AGET_OBJECT:
+ cg->GenArrayGet(cu, opt_flags, kWord, rl_src[0], rl_src[1], rl_dest, 2);
+ break;
+ case Instruction::AGET_BOOLEAN:
+ cg->GenArrayGet(cu, opt_flags, kUnsignedByte, rl_src[0], rl_src[1], rl_dest, 0);
+ break;
+ case Instruction::AGET_BYTE:
+ cg->GenArrayGet(cu, opt_flags, kSignedByte, rl_src[0], rl_src[1], rl_dest, 0);
+ break;
+ case Instruction::AGET_CHAR:
+ cg->GenArrayGet(cu, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], rl_dest, 1);
+ break;
+ case Instruction::AGET_SHORT:
+ cg->GenArrayGet(cu, opt_flags, kSignedHalf, rl_src[0], rl_src[1], rl_dest, 1);
+ break;
+ case Instruction::APUT_WIDE:
+ cg->GenArrayPut(cu, opt_flags, kLong, rl_src[1], rl_src[2], rl_src[0], 3);
+ break;
+ case Instruction::APUT:
+ cg->GenArrayPut(cu, opt_flags, kWord, rl_src[1], rl_src[2], rl_src[0], 2);
+ break;
+ case Instruction::APUT_OBJECT:
+ cg->GenArrayObjPut(cu, opt_flags, rl_src[1], rl_src[2], rl_src[0], 2);
+ break;
+ case Instruction::APUT_SHORT:
+ case Instruction::APUT_CHAR:
+ cg->GenArrayPut(cu, opt_flags, kUnsignedHalf, rl_src[1], rl_src[2], rl_src[0], 1);
+ break;
+ case Instruction::APUT_BYTE:
+ case Instruction::APUT_BOOLEAN:
+ cg->GenArrayPut(cu, opt_flags, kUnsignedByte, rl_src[1], rl_src[2],
+ rl_src[0], 0);
+ break;
+
+ case Instruction::IGET_OBJECT:
+ cg->GenIGet(cu, vC, opt_flags, kWord, rl_dest, rl_src[0], false, true);
+ break;
+
+ case Instruction::IGET_WIDE:
+ cg->GenIGet(cu, vC, opt_flags, kLong, rl_dest, rl_src[0], true, false);
+ break;
+
+ case Instruction::IGET:
+ cg->GenIGet(cu, vC, opt_flags, kWord, rl_dest, rl_src[0], false, false);
+ break;
+
+ case Instruction::IGET_CHAR:
+ cg->GenIGet(cu, vC, opt_flags, kUnsignedHalf, rl_dest, rl_src[0], false, false);
+ break;
+
+ case Instruction::IGET_SHORT:
+ cg->GenIGet(cu, vC, opt_flags, kSignedHalf, rl_dest, rl_src[0], false, false);
+ break;
+
+ case Instruction::IGET_BOOLEAN:
+ case Instruction::IGET_BYTE:
+ cg->GenIGet(cu, vC, opt_flags, kUnsignedByte, rl_dest, rl_src[0], false, false);
+ break;
+
+ case Instruction::IPUT_WIDE:
+ cg->GenIPut(cu, vC, opt_flags, kLong, rl_src[0], rl_src[1], true, false);
+ break;
+
+ case Instruction::IPUT_OBJECT:
+ cg->GenIPut(cu, vC, opt_flags, kWord, rl_src[0], rl_src[1], false, true);
+ break;
+
+ case Instruction::IPUT:
+ cg->GenIPut(cu, vC, opt_flags, kWord, rl_src[0], rl_src[1], false, false);
+ break;
+
+ case Instruction::IPUT_BOOLEAN:
+ case Instruction::IPUT_BYTE:
+ cg->GenIPut(cu, vC, opt_flags, kUnsignedByte, rl_src[0], rl_src[1], false, false);
+ break;
+
+ case Instruction::IPUT_CHAR:
+ cg->GenIPut(cu, vC, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], false, false);
+ break;
+
+ case Instruction::IPUT_SHORT:
+ cg->GenIPut(cu, vC, opt_flags, kSignedHalf, rl_src[0], rl_src[1], false, false);
+ break;
+
+ case Instruction::SGET_OBJECT:
+ cg->GenSget(cu, vB, rl_dest, false, true);
+ break;
+ case Instruction::SGET:
+ case Instruction::SGET_BOOLEAN:
+ case Instruction::SGET_BYTE:
+ case Instruction::SGET_CHAR:
+ case Instruction::SGET_SHORT:
+ cg->GenSget(cu, vB, rl_dest, false, false);
+ break;
+
+ case Instruction::SGET_WIDE:
+ cg->GenSget(cu, vB, rl_dest, true, false);
+ break;
+
+ case Instruction::SPUT_OBJECT:
+ cg->GenSput(cu, vB, rl_src[0], false, true);
+ break;
+
+ case Instruction::SPUT:
+ case Instruction::SPUT_BOOLEAN:
+ case Instruction::SPUT_BYTE:
+ case Instruction::SPUT_CHAR:
+ case Instruction::SPUT_SHORT:
+ cg->GenSput(cu, vB, rl_src[0], false, false);
+ break;
+
+ case Instruction::SPUT_WIDE:
+ cg->GenSput(cu, vB, rl_src[0], true, false);
+ break;
+
+ case Instruction::INVOKE_STATIC_RANGE:
+ cg->GenInvoke(cu, cg->NewMemCallInfo(cu, bb, mir, kStatic, true));
+ break;
+ case Instruction::INVOKE_STATIC:
+ cg->GenInvoke(cu, cg->NewMemCallInfo(cu, bb, mir, kStatic, false));
+ break;
+
+ case Instruction::INVOKE_DIRECT:
+ cg->GenInvoke(cu, cg->NewMemCallInfo(cu, bb, mir, kDirect, false));
+ break;
+ case Instruction::INVOKE_DIRECT_RANGE:
+ cg->GenInvoke(cu, cg->NewMemCallInfo(cu, bb, mir, kDirect, true));
+ break;
+
+ case Instruction::INVOKE_VIRTUAL:
+ cg->GenInvoke(cu, cg->NewMemCallInfo(cu, bb, mir, kVirtual, false));
+ break;
+ case Instruction::INVOKE_VIRTUAL_RANGE:
+ cg->GenInvoke(cu, cg->NewMemCallInfo(cu, bb, mir, kVirtual, true));
+ break;
+
+ case Instruction::INVOKE_SUPER:
+ cg->GenInvoke(cu, cg->NewMemCallInfo(cu, bb, mir, kSuper, false));
+ break;
+ case Instruction::INVOKE_SUPER_RANGE:
+ cg->GenInvoke(cu, cg->NewMemCallInfo(cu, bb, mir, kSuper, true));
+ break;
+
+ case Instruction::INVOKE_INTERFACE:
+ cg->GenInvoke(cu, cg->NewMemCallInfo(cu, bb, mir, kInterface, false));
+ break;
+ case Instruction::INVOKE_INTERFACE_RANGE:
+ cg->GenInvoke(cu, cg->NewMemCallInfo(cu, bb, mir, kInterface, true));
+ break;
+
+ case Instruction::NEG_INT:
+ case Instruction::NOT_INT:
+ cg->GenArithOpInt(cu, opcode, rl_dest, rl_src[0], rl_src[0]);
+ break;
+
+ case Instruction::NEG_LONG:
+ case Instruction::NOT_LONG:
+ cg->GenArithOpLong(cu, opcode, rl_dest, rl_src[0], rl_src[0]);
+ break;
+
+ case Instruction::NEG_FLOAT:
+ cg->GenArithOpFloat(cu, opcode, rl_dest, rl_src[0], rl_src[0]);
+ break;
+
+ case Instruction::NEG_DOUBLE:
+ cg->GenArithOpDouble(cu, opcode, rl_dest, rl_src[0], rl_src[0]);
+ break;
+
+ case Instruction::INT_TO_LONG:
+ cg->GenIntToLong(cu, rl_dest, rl_src[0]);
+ break;
+
+ case Instruction::LONG_TO_INT:
+ rl_src[0] = UpdateLocWide(cu, rl_src[0]);
+ rl_src[0] = WideToNarrow(cu, rl_src[0]);
+ cg->StoreValue(cu, rl_dest, rl_src[0]);
+ break;
+
+ case Instruction::INT_TO_BYTE:
+ case Instruction::INT_TO_SHORT:
+ case Instruction::INT_TO_CHAR:
+ cg->GenIntNarrowing(cu, opcode, rl_dest, rl_src[0]);
+ break;
+
+ case Instruction::INT_TO_FLOAT:
+ case Instruction::INT_TO_DOUBLE:
+ case Instruction::LONG_TO_FLOAT:
+ case Instruction::LONG_TO_DOUBLE:
+ case Instruction::FLOAT_TO_INT:
+ case Instruction::FLOAT_TO_LONG:
+ case Instruction::FLOAT_TO_DOUBLE:
+ case Instruction::DOUBLE_TO_INT:
+ case Instruction::DOUBLE_TO_LONG:
+ case Instruction::DOUBLE_TO_FLOAT:
+ cg->GenConversion(cu, opcode, rl_dest, rl_src[0]);
+ break;
+
+
+ case Instruction::ADD_INT:
+ case Instruction::ADD_INT_2ADDR:
+ case Instruction::MUL_INT:
+ case Instruction::MUL_INT_2ADDR:
+ case Instruction::AND_INT:
+ case Instruction::AND_INT_2ADDR:
+ case Instruction::OR_INT:
+ case Instruction::OR_INT_2ADDR:
+ case Instruction::XOR_INT:
+ case Instruction::XOR_INT_2ADDR:
+ if (rl_src[0].is_const &&
+ cu->cg->InexpensiveConstantInt(ConstantValue(cu, rl_src[0]))) {
+ cg->GenArithOpIntLit(cu, opcode, rl_dest, rl_src[1],
+ cu->constant_values[rl_src[0].orig_sreg]);
+ } else if (rl_src[1].is_const &&
+ cu->cg->InexpensiveConstantInt(ConstantValue(cu, rl_src[1]))) {
+ cg->GenArithOpIntLit(cu, opcode, rl_dest, rl_src[0],
+ cu->constant_values[rl_src[1].orig_sreg]);
+ } else {
+ cg->GenArithOpInt(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
+ }
+ break;
+
+ case Instruction::SUB_INT:
+ case Instruction::SUB_INT_2ADDR:
+ case Instruction::DIV_INT:
+ case Instruction::DIV_INT_2ADDR:
+ case Instruction::REM_INT:
+ case Instruction::REM_INT_2ADDR:
+ case Instruction::SHL_INT:
+ case Instruction::SHL_INT_2ADDR:
+ case Instruction::SHR_INT:
+ case Instruction::SHR_INT_2ADDR:
+ case Instruction::USHR_INT:
+ case Instruction::USHR_INT_2ADDR:
+ if (rl_src[1].is_const &&
+ cu->cg->InexpensiveConstantInt(ConstantValue(cu, rl_src[1]))) {
+ cg->GenArithOpIntLit(cu, opcode, rl_dest, rl_src[0], ConstantValue(cu, rl_src[1]));
+ } else {
+ cg->GenArithOpInt(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
+ }
+ break;
+
+ case Instruction::ADD_LONG:
+ case Instruction::SUB_LONG:
+ case Instruction::AND_LONG:
+ case Instruction::OR_LONG:
+ case Instruction::XOR_LONG:
+ case Instruction::ADD_LONG_2ADDR:
+ case Instruction::SUB_LONG_2ADDR:
+ case Instruction::AND_LONG_2ADDR:
+ case Instruction::OR_LONG_2ADDR:
+ case Instruction::XOR_LONG_2ADDR:
+ if (rl_src[0].is_const || rl_src[1].is_const) {
+ cg->GenArithImmOpLong(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
+ break;
+ }
+ // Note: intentional fallthrough.
+
+ case Instruction::MUL_LONG:
+ case Instruction::DIV_LONG:
+ case Instruction::REM_LONG:
+ case Instruction::MUL_LONG_2ADDR:
+ case Instruction::DIV_LONG_2ADDR:
+ case Instruction::REM_LONG_2ADDR:
+ cg->GenArithOpLong(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
+ break;
+
+ case Instruction::SHL_LONG:
+ case Instruction::SHR_LONG:
+ case Instruction::USHR_LONG:
+ case Instruction::SHL_LONG_2ADDR:
+ case Instruction::SHR_LONG_2ADDR:
+ case Instruction::USHR_LONG_2ADDR:
+ if (rl_src[1].is_const) {
+ cg->GenShiftImmOpLong(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
+ } else {
+ cg->GenShiftOpLong(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
+ }
+ break;
+
+ case Instruction::ADD_FLOAT:
+ case Instruction::SUB_FLOAT:
+ case Instruction::MUL_FLOAT:
+ case Instruction::DIV_FLOAT:
+ case Instruction::REM_FLOAT:
+ case Instruction::ADD_FLOAT_2ADDR:
+ case Instruction::SUB_FLOAT_2ADDR:
+ case Instruction::MUL_FLOAT_2ADDR:
+ case Instruction::DIV_FLOAT_2ADDR:
+ case Instruction::REM_FLOAT_2ADDR:
+ cg->GenArithOpFloat(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
+ break;
+
+ case Instruction::ADD_DOUBLE:
+ case Instruction::SUB_DOUBLE:
+ case Instruction::MUL_DOUBLE:
+ case Instruction::DIV_DOUBLE:
+ case Instruction::REM_DOUBLE:
+ case Instruction::ADD_DOUBLE_2ADDR:
+ case Instruction::SUB_DOUBLE_2ADDR:
+ case Instruction::MUL_DOUBLE_2ADDR:
+ case Instruction::DIV_DOUBLE_2ADDR:
+ case Instruction::REM_DOUBLE_2ADDR:
+ cg->GenArithOpDouble(cu, opcode, rl_dest, rl_src[0], rl_src[1]);
+ break;
+
+ case Instruction::RSUB_INT:
+ case Instruction::ADD_INT_LIT16:
+ case Instruction::MUL_INT_LIT16:
+ case Instruction::DIV_INT_LIT16:
+ case Instruction::REM_INT_LIT16:
+ case Instruction::AND_INT_LIT16:
+ case Instruction::OR_INT_LIT16:
+ case Instruction::XOR_INT_LIT16:
+ case Instruction::ADD_INT_LIT8:
+ case Instruction::RSUB_INT_LIT8:
+ case Instruction::MUL_INT_LIT8:
+ case Instruction::DIV_INT_LIT8:
+ case Instruction::REM_INT_LIT8:
+ case Instruction::AND_INT_LIT8:
+ case Instruction::OR_INT_LIT8:
+ case Instruction::XOR_INT_LIT8:
+ case Instruction::SHL_INT_LIT8:
+ case Instruction::SHR_INT_LIT8:
+ case Instruction::USHR_INT_LIT8:
+ cg->GenArithOpIntLit(cu, opcode, rl_dest, rl_src[0], vC);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected opcode: " << opcode;
+ }
+}
+
+// Process extended MIR instructions
+static void HandleExtendedMethodMIR(CompilationUnit* cu, BasicBlock* bb, MIR* mir)
+{
+ Codegen* cg = cu->cg.get();
+ switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
+ case kMirOpCopy: {
+ RegLocation rl_src = GetSrc(cu, mir, 0);
+ RegLocation rl_dest = GetDest(cu, mir);
+ cg->StoreValue(cu, rl_dest, rl_src);
+ break;
+ }
+ case kMirOpFusedCmplFloat:
+ cg->GenFusedFPCmpBranch(cu, bb, mir, false /*gt bias*/, false /*double*/);
+ break;
+ case kMirOpFusedCmpgFloat:
+ cg->GenFusedFPCmpBranch(cu, bb, mir, true /*gt bias*/, false /*double*/);
+ break;
+ case kMirOpFusedCmplDouble:
+ cg->GenFusedFPCmpBranch(cu, bb, mir, false /*gt bias*/, true /*double*/);
+ break;
+ case kMirOpFusedCmpgDouble:
+ cg->GenFusedFPCmpBranch(cu, bb, mir, true /*gt bias*/, true /*double*/);
+ break;
+ case kMirOpFusedCmpLong:
+ cg->GenFusedLongCmpBranch(cu, bb, mir);
+ break;
+ case kMirOpSelect:
+ cg->GenSelect(cu, bb, mir);
+ break;
+ default:
+ break;
+ }
+}
+
+// Handle the content in each basic block.
+static bool MethodBlockCodeGen(CompilationUnit* cu, BasicBlock* bb)
+{
+ if (bb->block_type == kDead) return false;
+ Codegen* cg = cu->cg.get();
+ cu->current_dalvik_offset = bb->start_offset;
+ MIR* mir;
+ LIR* label_list = cu->block_label_list;
+ int block_id = bb->id;
+
+ cu->cur_block = bb;
+ label_list[block_id].operands[0] = bb->start_offset;
+
+ // Insert the block label.
+ label_list[block_id].opcode = kPseudoNormalBlockLabel;
+ AppendLIR(cu, &label_list[block_id]);
+
+ LIR* head_lir = NULL;
+
+ // If this is a catch block, export the start address.
+ if (bb->catch_entry) {
+ head_lir = NewLIR0(cu, kPseudoExportedPC);
+ }
+
+ // Free temp registers and reset redundant store tracking.
+ ResetRegPool(cu);
+ ResetDefTracking(cu);
+
+ ClobberAllRegs(cu);
+
+ if (bb->block_type == kEntryBlock) {
+ int start_vreg = cu->num_dalvik_registers - cu->num_ins;
+ cg->GenEntrySequence(cu, &cu->reg_location[start_vreg],
+ cu->reg_location[cu->method_sreg]);
+ } else if (bb->block_type == kExitBlock) {
+ cg->GenExitSequence(cu);
+ }
+
+ for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ ResetRegPool(cu);
+ if (cu->disable_opt & (1 << kTrackLiveTemps)) {
+ ClobberAllRegs(cu);
+ }
+
+ if (cu->disable_opt & (1 << kSuppressLoads)) {
+ ResetDefTracking(cu);
+ }
+
+#ifndef NDEBUG
+ // Reset temp tracking sanity check.
+ cu->live_sreg = INVALID_SREG;
+#endif
+
+ cu->current_dalvik_offset = mir->offset;
+ int opcode = mir->dalvikInsn.opcode;
+ LIR* boundary_lir;
+
+ // Mark the beginning of a Dalvik instruction for line tracking.
+ char* inst_str = cu->verbose ?
+ GetDalvikDisassembly(cu, mir) : NULL;
+ boundary_lir = MarkBoundary(cu, mir->offset, inst_str);
+ // Remember the first LIR for this block.
+ if (head_lir == NULL) {
+ head_lir = boundary_lir;
+ // Set the first boundary_lir as a scheduling barrier.
+ head_lir->def_mask = ENCODE_ALL;
+ }
+
+ if (opcode == kMirOpCheck) {
+ // Combine check and work halves of throwing instruction.
+ MIR* work_half = mir->meta.throw_insn;
+ mir->dalvikInsn.opcode = work_half->dalvikInsn.opcode;
+ opcode = work_half->dalvikInsn.opcode;
+ SSARepresentation* ssa_rep = work_half->ssa_rep;
+ work_half->ssa_rep = mir->ssa_rep;
+ mir->ssa_rep = ssa_rep;
+ work_half->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpCheckPart2);
+ }
+
+ if (opcode >= kMirOpFirst) {
+ HandleExtendedMethodMIR(cu, bb, mir);
+ continue;
+ }
+
+ CompileDalvikInstruction(cu, mir, bb, label_list);
+ }
+
+ if (head_lir) {
+ // Eliminate redundant loads/stores and delay stores into later slots.
+ ApplyLocalOptimizations(cu, head_lir, cu->last_lir_insn);
+
+ // Generate an unconditional branch to the fallthrough block.
+ if (bb->fall_through) {
+ cg->OpUnconditionalBranch(cu, &label_list[bb->fall_through->id]);
+ }
+ }
+ return false;
+}
+
+void SpecialMIR2LIR(CompilationUnit* cu, SpecialCaseHandler special_case)
+{
+ Codegen* cg = cu->cg.get();
+ // Find the first DalvikByteCode block.
+ int num_reachable_blocks = cu->num_reachable_blocks;
+ const GrowableList *block_list = &cu->block_list;
+  BasicBlock* bb = NULL;
+ for (int idx = 0; idx < num_reachable_blocks; idx++) {
+ int dfs_index = cu->dfs_order.elem_list[idx];
+ bb = reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, dfs_index));
+ if (bb->block_type == kDalvikByteCode) {
+ break;
+ }
+ }
+ if (bb == NULL) {
+ return;
+ }
+ DCHECK_EQ(bb->start_offset, 0);
+ DCHECK(bb->first_mir_insn != NULL);
+
+ // Get the first instruction.
+ MIR* mir = bb->first_mir_insn;
+
+ // Free temp registers and reset redundant store tracking.
+ ResetRegPool(cu);
+ ResetDefTracking(cu);
+ ClobberAllRegs(cu);
+
+ cg->GenSpecialCase(cu, bb, mir, special_case);
+}
+
+void MethodMIR2LIR(CompilationUnit* cu)
+{
+ Codegen* cg = cu->cg.get();
+ // Hold the labels of each block.
+ cu->block_label_list =
+ static_cast<LIR*>(NewMem(cu, sizeof(LIR) * cu->num_blocks, true, kAllocLIR));
+
+ DataFlowAnalysisDispatcher(cu, MethodBlockCodeGen,
+ kPreOrderDFSTraversal, false /* Iterative */);
+
+ cg->HandleSuspendLaunchPads(cu);
+
+ cg->HandleThrowLaunchPads(cu);
+
+ cg->HandleIntrinsicLaunchPads(cu);
+
+ if (!(cu->disable_opt & (1 << kSafeOptimizations))) {
+ RemoveRedundantBranches(cu);
+ }
+}
+
+} // namespace art
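For reference, the constant-folding choice in the integer ALU cases above differs between commutative and non-commutative opcodes: ADD/MUL/AND/OR/XOR probe both operands for a cheap constant, while SUB/DIV/REM and the shifts only fold the second operand. The standalone sketch below illustrates just that selection; the Operand struct and Emit helpers are simplified stand-ins (not the real RegLocation/Codegen types), and the InexpensiveConstantInt cost check is omitted.

#include <cstdio>

struct Operand {
  bool is_const;
  int value;  // Only meaningful when is_const is true.
};

static void EmitRegRegLit(const char* op, int lit) {
  std::printf("%s dest, src, #%d\n", op, lit);
}

static void EmitRegRegReg(const char* op) {
  std::printf("%s dest, src0, src1\n", op);
}

// Commutative ops may take their literal from either operand; the others
// only from the second operand.
static void GenIntArith(const char* op, bool commutative,
                        Operand src0, Operand src1) {
  if (commutative && src0.is_const) {
    EmitRegRegLit(op, src0.value);
  } else if (src1.is_const) {
    EmitRegRegLit(op, src1.value);
  } else {
    EmitRegRegReg(op);
  }
}

int main() {
  GenIntArith("add", true,  Operand{true, 7},  Operand{false, 0});  // literal form, constant taken from operand 0
  GenIntArith("sub", false, Operand{true, 7},  Operand{false, 0});  // reg/reg form: sub is not commutative
  GenIntArith("shl", false, Operand{false, 0}, Operand{true, 3});   // literal shift amount
  return 0;
}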
diff --git a/src/compiler/dex/quick/mir_to_lir.h b/src/compiler/dex/quick/mir_to_lir.h
new file mode 100644
index 0000000..b2767ba
--- /dev/null
+++ b/src/compiler/dex/quick/mir_to_lir.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_QUICK_MIRTOLIR_H_
+#define ART_SRC_COMPILER_DEX_QUICK_MIRTOLIR_H_
+
+namespace art {
+void SpecialMIR2LIR(CompilationUnit* cu, SpecialCaseHandler special_case);
+void MethodMIR2LIR(CompilationUnit* cu);
+
+
+} // namespace art
+
+#endif // ART_SRC_COMPILER_DEX_QUICK_MIRTOLIR_H_
diff --git a/src/compiler/dex/quick/ralloc_util.cc b/src/compiler/dex/quick/ralloc_util.cc
new file mode 100644
index 0000000..a782264
--- /dev/null
+++ b/src/compiler/dex/quick/ralloc_util.cc
@@ -0,0 +1,1334 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This file contains register allocation support. */
+
+#include "codegen_util.h"
+#include "compiler/dex/compiler_ir.h"
+#include "compiler/dex/compiler_utility.h"
+#include "compiler/dex/dataflow.h"
+#include "ralloc_util.h"
+
+namespace art {
+
+static const RegLocation bad_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0,
+ INVALID_REG, INVALID_REG, INVALID_SREG,
+ INVALID_SREG};
+
+/*
+ * Free all allocated temps in the temp pools. Note that this does
+ * not affect the "liveness" of a temp register, which will stay
+ * live until it is either explicitly killed or reallocated.
+ */
+void ResetRegPool(CompilationUnit* cu)
+{
+ int i;
+ for (i=0; i < cu->reg_pool->num_core_regs; i++) {
+ if (cu->reg_pool->core_regs[i].is_temp)
+ cu->reg_pool->core_regs[i].in_use = false;
+ }
+ for (i=0; i < cu->reg_pool->num_fp_regs; i++) {
+ if (cu->reg_pool->FPRegs[i].is_temp)
+ cu->reg_pool->FPRegs[i].in_use = false;
+ }
+}
+
+ /*
+ * Set up temp & preserved register pools specialized by target.
+ * Note: num_regs may be zero.
+ */
+void CompilerInitPool(RegisterInfo* regs, int* reg_nums, int num)
+{
+ int i;
+ for (i=0; i < num; i++) {
+ regs[i].reg = reg_nums[i];
+ regs[i].in_use = false;
+ regs[i].is_temp = false;
+ regs[i].pair = false;
+ regs[i].live = false;
+ regs[i].dirty = false;
+ regs[i].s_reg = INVALID_SREG;
+ }
+}
+
+void DumpRegPool(RegisterInfo* p, int num_regs)
+{
+ LOG(INFO) << "================================================";
+ for (int i = 0; i < num_regs; i++) {
+ LOG(INFO) << StringPrintf(
+ "R[%d]: T:%d, U:%d, P:%d, p:%d, LV:%d, D:%d, SR:%d, ST:%x, EN:%x",
+ p[i].reg, p[i].is_temp, p[i].in_use, p[i].pair, p[i].partner,
+ p[i].live, p[i].dirty, p[i].s_reg, reinterpret_cast<uintptr_t>(p[i].def_start),
+ reinterpret_cast<uintptr_t>(p[i].def_end));
+ }
+ LOG(INFO) << "================================================";
+}
+
+void DumpCoreRegPool(CompilationUnit* cu)
+{
+ DumpRegPool(cu->reg_pool->core_regs, cu->reg_pool->num_core_regs);
+}
+
+void DumpFpRegPool(CompilationUnit* cu)
+{
+ DumpRegPool(cu->reg_pool->FPRegs, cu->reg_pool->num_fp_regs);
+}
+
+/* Mark a temp register as dead. Does not affect allocation state. */
+static void ClobberBody(CompilationUnit *cu, RegisterInfo* p)
+{
+ if (p->is_temp) {
+ DCHECK(!(p->live && p->dirty)) << "Live & dirty temp in clobber";
+ p->live = false;
+ p->s_reg = INVALID_SREG;
+ p->def_start = NULL;
+ p->def_end = NULL;
+ if (p->pair) {
+ p->pair = false;
+ Clobber(cu, p->partner);
+ }
+ }
+}
+
+/* Mark a temp register as dead. Does not affect allocation state. */
+void Clobber(CompilationUnit* cu, int reg)
+{
+ Codegen* cg = cu->cg.get();
+ ClobberBody(cu, cg->GetRegInfo(cu, reg));
+}
+
+static void ClobberSRegBody(RegisterInfo* p, int num_regs, int s_reg)
+{
+ int i;
+ for (i=0; i< num_regs; i++) {
+ if (p[i].s_reg == s_reg) {
+ if (p[i].is_temp) {
+ p[i].live = false;
+ }
+ p[i].def_start = NULL;
+ p[i].def_end = NULL;
+ }
+ }
+}
+
+/*
+ * Break the association between a Dalvik vreg and a physical temp register of either register
+ * class.
+ * TODO: Ideally, the public version of this code should not exist. Besides its local usage
+ * in the register utilities, it is also used by code gen routines to work around a deficiency in
+ * local register allocation, which fails to distinguish between the "in" and "out" identities
+ * of Dalvik vregs. This can result in useless register copies when the same Dalvik vreg
+ * is used both as the source and destination register of an operation in which the type
+ * changes (for example: INT_TO_FLOAT v1, v1). Revisit when improved register allocation is
+ * addressed.
+ */
+void ClobberSReg(CompilationUnit* cu, int s_reg)
+{
+#ifndef NDEBUG
+ /* Reset live temp tracking sanity checker */
+ if (s_reg == cu->live_sreg) {
+ cu->live_sreg = INVALID_SREG;
+ }
+#endif
+ ClobberSRegBody(cu->reg_pool->core_regs, cu->reg_pool->num_core_regs, s_reg);
+ ClobberSRegBody(cu->reg_pool->FPRegs, cu->reg_pool->num_fp_regs, s_reg);
+}
+
+/*
+ * SSA names associated with the initial definitions of Dalvik
+ * registers are the same as the Dalvik register number (and
+ * thus take the same position in the promotion_map). However,
+ * the special Method* and compiler temp registers use negative
+ * v_reg numbers to distinguish them and can have an arbitrary
+ * SSA name (above the last original Dalvik register). This function
+ * maps SSA names to positions in the promotion_map array.
+ */
+int SRegToPMap(CompilationUnit* cu, int s_reg)
+{
+ DCHECK_LT(s_reg, cu->num_ssa_regs);
+ DCHECK_GE(s_reg, 0);
+ int v_reg = SRegToVReg(cu, s_reg);
+ if (v_reg >= 0) {
+ DCHECK_LT(v_reg, cu->num_dalvik_registers);
+ return v_reg;
+ } else {
+ int pos = std::abs(v_reg) - std::abs(SSA_METHOD_BASEREG);
+ DCHECK_LE(pos, cu->num_compiler_temps);
+ return cu->num_dalvik_registers + pos;
+ }
+}
+
+void RecordCorePromotion(CompilationUnit* cu, int reg, int s_reg)
+{
+ Codegen* cg = cu->cg.get();
+ int p_map_idx = SRegToPMap(cu, s_reg);
+ int v_reg = SRegToVReg(cu, s_reg);
+ cg->GetRegInfo(cu, reg)->in_use = true;
+ cu->core_spill_mask |= (1 << reg);
+ // Include reg for later sort
+ cu->core_vmap_table.push_back(reg << VREG_NUM_WIDTH |
+ (v_reg & ((1 << VREG_NUM_WIDTH) - 1)));
+ cu->num_core_spills++;
+ cu->promotion_map[p_map_idx].core_location = kLocPhysReg;
+ cu->promotion_map[p_map_idx].core_reg = reg;
+}
+
+/* Reserve a callee-save register. Return -1 if none available */
+static int AllocPreservedCoreReg(CompilationUnit* cu, int s_reg)
+{
+ int res = -1;
+ RegisterInfo* core_regs = cu->reg_pool->core_regs;
+ for (int i = 0; i < cu->reg_pool->num_core_regs; i++) {
+ if (!core_regs[i].is_temp && !core_regs[i].in_use) {
+ res = core_regs[i].reg;
+ RecordCorePromotion(cu, res, s_reg);
+ break;
+ }
+ }
+ return res;
+}
+
+void RecordFpPromotion(CompilationUnit* cu, int reg, int s_reg)
+{
+ Codegen* cg = cu->cg.get();
+ int p_map_idx = SRegToPMap(cu, s_reg);
+ int v_reg = SRegToVReg(cu, s_reg);
+ cg->GetRegInfo(cu, reg)->in_use = true;
+ cg->MarkPreservedSingle(cu, v_reg, reg);
+ cu->promotion_map[p_map_idx].fp_location = kLocPhysReg;
+ cu->promotion_map[p_map_idx].FpReg = reg;
+}
+
+/*
+ * Reserve a callee-save fp single register. Try to fulfill the request for
+ * even/odd allocation, but go ahead and allocate anything if not
+ * available. If nothing's available, return -1.
+ */
+static int AllocPreservedSingle(CompilationUnit* cu, int s_reg, bool even)
+{
+ int res = -1;
+ RegisterInfo* FPRegs = cu->reg_pool->FPRegs;
+ for (int i = 0; i < cu->reg_pool->num_fp_regs; i++) {
+ if (!FPRegs[i].is_temp && !FPRegs[i].in_use &&
+ ((FPRegs[i].reg & 0x1) == 0) == even) {
+ res = FPRegs[i].reg;
+ RecordFpPromotion(cu, res, s_reg);
+ break;
+ }
+ }
+ return res;
+}
+
+/*
+ * Somewhat messy code here. We want to allocate a pair of contiguous
+ * physical single-precision floating point registers starting with
+ * an even numbered reg. It is possible that the paired s_reg (s_reg+1)
+ * has already been allocated - try to fit if possible. Fail to
+ * allocate if we can't meet the requirements for the pair of
+ * s_reg<=sX[even] & (s_reg+1)<= sX+1.
+ */
+static int AllocPreservedDouble(CompilationUnit* cu, int s_reg)
+{
+ Codegen* cg = cu->cg.get();
+ int res = -1; // Assume failure
+ int v_reg = SRegToVReg(cu, s_reg);
+ int p_map_idx = SRegToPMap(cu, s_reg);
+ if (cu->promotion_map[p_map_idx+1].fp_location == kLocPhysReg) {
+ // Upper reg is already allocated. Can we fit?
+ int high_reg = cu->promotion_map[p_map_idx+1].FpReg;
+ if ((high_reg & 1) == 0) {
+ // High reg is even - fail.
+ return res;
+ }
+ // Is the low reg of the pair free?
+ RegisterInfo* p = cg->GetRegInfo(cu, high_reg-1);
+ if (p->in_use || p->is_temp) {
+ // Already allocated or not preserved - fail.
+ return res;
+ }
+ // OK - good to go.
+ res = p->reg;
+ p->in_use = true;
+ DCHECK_EQ((res & 1), 0);
+ cg->MarkPreservedSingle(cu, v_reg, res);
+ } else {
+ RegisterInfo* FPRegs = cu->reg_pool->FPRegs;
+ for (int i = 0; i < cu->reg_pool->num_fp_regs; i++) {
+ if (!FPRegs[i].is_temp && !FPRegs[i].in_use &&
+ ((FPRegs[i].reg & 0x1) == 0x0) &&
+ !FPRegs[i+1].is_temp && !FPRegs[i+1].in_use &&
+ ((FPRegs[i+1].reg & 0x1) == 0x1) &&
+ (FPRegs[i].reg + 1) == FPRegs[i+1].reg) {
+ res = FPRegs[i].reg;
+ FPRegs[i].in_use = true;
+ cg->MarkPreservedSingle(cu, v_reg, res);
+ FPRegs[i+1].in_use = true;
+ DCHECK_EQ(res + 1, FPRegs[i+1].reg);
+ cg->MarkPreservedSingle(cu, v_reg+1, res+1);
+ break;
+ }
+ }
+ }
+ if (res != -1) {
+ cu->promotion_map[p_map_idx].fp_location = kLocPhysReg;
+ cu->promotion_map[p_map_idx].FpReg = res;
+ cu->promotion_map[p_map_idx+1].fp_location = kLocPhysReg;
+ cu->promotion_map[p_map_idx+1].FpReg = res + 1;
+ }
+ return res;
+}
+
+
+/*
+ * Reserve a callee-save fp register. If this register can be used
+ * as the first of a double, attempt to allocate an even pair of fp
+ * single regs. If that fails, still attempt to allocate a single,
+ * preferring an odd register first.
+ */
+static int AllocPreservedFPReg(CompilationUnit* cu, int s_reg, bool double_start)
+{
+ int res = -1;
+ if (double_start) {
+ res = AllocPreservedDouble(cu, s_reg);
+ }
+ if (res == -1) {
+ res = AllocPreservedSingle(cu, s_reg, false /* try odd # */);
+ }
+ if (res == -1)
+ res = AllocPreservedSingle(cu, s_reg, true /* try even # */);
+ return res;
+}
+
+static int AllocTempBody(CompilationUnit* cu, RegisterInfo* p, int num_regs, int* next_temp,
+ bool required)
+{
+ int i;
+ int next = *next_temp;
+ for (i=0; i< num_regs; i++) {
+ if (next >= num_regs)
+ next = 0;
+ if (p[next].is_temp && !p[next].in_use && !p[next].live) {
+ Clobber(cu, p[next].reg);
+ p[next].in_use = true;
+ p[next].pair = false;
+ *next_temp = next + 1;
+ return p[next].reg;
+ }
+ next++;
+ }
+ next = *next_temp;
+ for (i=0; i< num_regs; i++) {
+ if (next >= num_regs)
+ next = 0;
+ if (p[next].is_temp && !p[next].in_use) {
+ Clobber(cu, p[next].reg);
+ p[next].in_use = true;
+ p[next].pair = false;
+ *next_temp = next + 1;
+ return p[next].reg;
+ }
+ next++;
+ }
+ if (required) {
+ CodegenDump(cu);
+ DumpRegPool(cu->reg_pool->core_regs,
+ cu->reg_pool->num_core_regs);
+ LOG(FATAL) << "No free temp registers";
+ }
+ return -1; // No register available
+}
+
+//REDO: too many assumptions.
+int AllocTempDouble(CompilationUnit* cu)
+{
+ RegisterInfo* p = cu->reg_pool->FPRegs;
+ int num_regs = cu->reg_pool->num_fp_regs;
+ /* Start looking at an even reg */
+ int next = cu->reg_pool->next_fp_reg & ~0x1;
+
+ // First try to avoid allocating live registers
+ for (int i=0; i < num_regs; i+=2) {
+ if (next >= num_regs)
+ next = 0;
+ if ((p[next].is_temp && !p[next].in_use && !p[next].live) &&
+ (p[next+1].is_temp && !p[next+1].in_use && !p[next+1].live)) {
+ Clobber(cu, p[next].reg);
+ Clobber(cu, p[next+1].reg);
+ p[next].in_use = true;
+ p[next+1].in_use = true;
+ DCHECK_EQ((p[next].reg+1), p[next+1].reg);
+ DCHECK_EQ((p[next].reg & 0x1), 0);
+ cu->reg_pool->next_fp_reg = next + 2;
+ if (cu->reg_pool->next_fp_reg >= num_regs) {
+ cu->reg_pool->next_fp_reg = 0;
+ }
+ return p[next].reg;
+ }
+ next += 2;
+ }
+ next = cu->reg_pool->next_fp_reg & ~0x1;
+
+ // No choice - find a pair and kill it.
+ for (int i=0; i < num_regs; i+=2) {
+ if (next >= num_regs)
+ next = 0;
+ if (p[next].is_temp && !p[next].in_use && p[next+1].is_temp &&
+ !p[next+1].in_use) {
+ Clobber(cu, p[next].reg);
+ Clobber(cu, p[next+1].reg);
+ p[next].in_use = true;
+ p[next+1].in_use = true;
+ DCHECK_EQ((p[next].reg+1), p[next+1].reg);
+ DCHECK_EQ((p[next].reg & 0x1), 0);
+ cu->reg_pool->next_fp_reg = next + 2;
+ if (cu->reg_pool->next_fp_reg >= num_regs) {
+ cu->reg_pool->next_fp_reg = 0;
+ }
+ return p[next].reg;
+ }
+ next += 2;
+ }
+ LOG(FATAL) << "No free temp registers (pair)";
+ return -1;
+}
+
+/* Return a temp if one is available, -1 otherwise */
+int AllocFreeTemp(CompilationUnit* cu)
+{
+  // Non-fatal variant: pass required = false so -1 is returned when the
+  // pool is exhausted, matching the comment above.
+  return AllocTempBody(cu, cu->reg_pool->core_regs,
+                       cu->reg_pool->num_core_regs,
+                       &cu->reg_pool->next_core_reg, false);
+}
+
+int AllocTemp(CompilationUnit* cu)
+{
+ return AllocTempBody(cu, cu->reg_pool->core_regs,
+ cu->reg_pool->num_core_regs,
+ &cu->reg_pool->next_core_reg, true);
+}
+
+int AllocTempFloat(CompilationUnit* cu)
+{
+ return AllocTempBody(cu, cu->reg_pool->FPRegs,
+ cu->reg_pool->num_fp_regs,
+ &cu->reg_pool->next_fp_reg, true);
+}
+
+static RegisterInfo* AllocLiveBody(RegisterInfo* p, int num_regs, int s_reg)
+{
+ int i;
+ if (s_reg == -1)
+ return NULL;
+ for (i=0; i < num_regs; i++) {
+ if (p[i].live && (p[i].s_reg == s_reg)) {
+ if (p[i].is_temp)
+ p[i].in_use = true;
+ return &p[i];
+ }
+ }
+ return NULL;
+}
+
+RegisterInfo* AllocLive(CompilationUnit* cu, int s_reg, int reg_class)
+{
+ RegisterInfo* res = NULL;
+ switch (reg_class) {
+ case kAnyReg:
+ res = AllocLiveBody(cu->reg_pool->FPRegs,
+ cu->reg_pool->num_fp_regs, s_reg);
+ if (res)
+ break;
+ /* Intentional fallthrough */
+ case kCoreReg:
+ res = AllocLiveBody(cu->reg_pool->core_regs,
+ cu->reg_pool->num_core_regs, s_reg);
+ break;
+ case kFPReg:
+ res = AllocLiveBody(cu->reg_pool->FPRegs,
+ cu->reg_pool->num_fp_regs, s_reg);
+ break;
+ default:
+ LOG(FATAL) << "Invalid register type";
+ }
+ return res;
+}
+
+void FreeTemp(CompilationUnit* cu, int reg)
+{
+ RegisterInfo* p = cu->reg_pool->core_regs;
+ int num_regs = cu->reg_pool->num_core_regs;
+ int i;
+ for (i=0; i< num_regs; i++) {
+ if (p[i].reg == reg) {
+ if (p[i].is_temp) {
+ p[i].in_use = false;
+ }
+ p[i].pair = false;
+ return;
+ }
+ }
+ p = cu->reg_pool->FPRegs;
+ num_regs = cu->reg_pool->num_fp_regs;
+ for (i=0; i< num_regs; i++) {
+ if (p[i].reg == reg) {
+ if (p[i].is_temp) {
+ p[i].in_use = false;
+ }
+ p[i].pair = false;
+ return;
+ }
+ }
+ LOG(FATAL) << "Tried to free a non-existant temp: r" << reg;
+}
+
+RegisterInfo* IsLive(CompilationUnit* cu, int reg)
+{
+ RegisterInfo* p = cu->reg_pool->core_regs;
+ int num_regs = cu->reg_pool->num_core_regs;
+ int i;
+ for (i=0; i< num_regs; i++) {
+ if (p[i].reg == reg) {
+ return p[i].live ? &p[i] : NULL;
+ }
+ }
+ p = cu->reg_pool->FPRegs;
+ num_regs = cu->reg_pool->num_fp_regs;
+ for (i=0; i< num_regs; i++) {
+ if (p[i].reg == reg) {
+ return p[i].live ? &p[i] : NULL;
+ }
+ }
+ return NULL;
+}
+
+RegisterInfo* IsTemp(CompilationUnit* cu, int reg)
+{
+ Codegen* cg = cu->cg.get();
+ RegisterInfo* p = cg->GetRegInfo(cu, reg);
+ return (p->is_temp) ? p : NULL;
+}
+
+RegisterInfo* IsPromoted(CompilationUnit* cu, int reg)
+{
+ Codegen* cg = cu->cg.get();
+ RegisterInfo* p = cg->GetRegInfo(cu, reg);
+ return (p->is_temp) ? NULL : p;
+}
+
+bool IsDirty(CompilationUnit* cu, int reg)
+{
+ Codegen* cg = cu->cg.get();
+ RegisterInfo* p = cg->GetRegInfo(cu, reg);
+ return p->dirty;
+}
+
+/*
+ * Similar to AllocTemp(), but forces the allocation of a specific
+ * register. No check is made to see if the register was previously
+ * allocated. Use with caution.
+ */
+void LockTemp(CompilationUnit* cu, int reg)
+{
+ RegisterInfo* p = cu->reg_pool->core_regs;
+ int num_regs = cu->reg_pool->num_core_regs;
+ int i;
+ for (i=0; i< num_regs; i++) {
+ if (p[i].reg == reg) {
+ DCHECK(p[i].is_temp);
+ p[i].in_use = true;
+ p[i].live = false;
+ return;
+ }
+ }
+ p = cu->reg_pool->FPRegs;
+ num_regs = cu->reg_pool->num_fp_regs;
+ for (i=0; i< num_regs; i++) {
+ if (p[i].reg == reg) {
+ DCHECK(p[i].is_temp);
+ p[i].in_use = true;
+ p[i].live = false;
+ return;
+ }
+ }
+ LOG(FATAL) << "Tried to lock a non-existant temp: r" << reg;
+}
+
+static void ResetDefBody(RegisterInfo* p)
+{
+ p->def_start = NULL;
+ p->def_end = NULL;
+}
+
+void ResetDef(CompilationUnit* cu, int reg)
+{
+ Codegen* cg = cu->cg.get();
+ ResetDefBody(cg->GetRegInfo(cu, reg));
+}
+
+static void NullifyRange(CompilationUnit* cu, LIR *start, LIR *finish, int s_reg1, int s_reg2)
+{
+ if (start && finish) {
+ LIR *p;
+ DCHECK_EQ(s_reg1, s_reg2);
+ for (p = start; ;p = p->next) {
+ NopLIR(p);
+ if (p == finish)
+ break;
+ }
+ }
+}
+
+/*
+ * Mark the beginning and end LIR of a def sequence. Note that
+ * on entry start points to the LIR prior to the beginning of the
+ * sequence.
+ */
+void MarkDef(CompilationUnit* cu, RegLocation rl,
+ LIR *start, LIR *finish)
+{
+ DCHECK(!rl.wide);
+ DCHECK(start && start->next);
+ DCHECK(finish);
+ Codegen* cg = cu->cg.get();
+ RegisterInfo* p = cg->GetRegInfo(cu, rl.low_reg);
+ p->def_start = start->next;
+ p->def_end = finish;
+}
+
+/*
+ * Mark the beginning and end LIR of a def sequence. Note that
+ * on entry start points to the LIR prior to the beginning of the
+ * sequence.
+ */
+void MarkDefWide(CompilationUnit* cu, RegLocation rl,
+ LIR *start, LIR *finish)
+{
+ DCHECK(rl.wide);
+ DCHECK(start && start->next);
+ DCHECK(finish);
+ Codegen* cg = cu->cg.get();
+ RegisterInfo* p = cg->GetRegInfo(cu, rl.low_reg);
+ ResetDef(cu, rl.high_reg); // Only track low of pair
+ p->def_start = start->next;
+ p->def_end = finish;
+}
+
+RegLocation WideToNarrow(CompilationUnit* cu, RegLocation rl)
+{
+ DCHECK(rl.wide);
+ Codegen* cg = cu->cg.get();
+ if (rl.location == kLocPhysReg) {
+ RegisterInfo* info_lo = cg->GetRegInfo(cu, rl.low_reg);
+ RegisterInfo* info_hi = cg->GetRegInfo(cu, rl.high_reg);
+ if (info_lo->is_temp) {
+ info_lo->pair = false;
+ info_lo->def_start = NULL;
+ info_lo->def_end = NULL;
+ }
+ if (info_hi->is_temp) {
+ info_hi->pair = false;
+ info_hi->def_start = NULL;
+ info_hi->def_end = NULL;
+ }
+ }
+ rl.wide = false;
+ return rl;
+}
+
+void ResetDefLoc(CompilationUnit* cu, RegLocation rl)
+{
+ DCHECK(!rl.wide);
+ RegisterInfo* p = IsTemp(cu, rl.low_reg);
+ if (p && !(cu->disable_opt & (1 << kSuppressLoads))) {
+ DCHECK(!p->pair);
+ NullifyRange(cu, p->def_start, p->def_end, p->s_reg, rl.s_reg_low);
+ }
+ ResetDef(cu, rl.low_reg);
+}
+
+void ResetDefLocWide(CompilationUnit* cu, RegLocation rl)
+{
+ DCHECK(rl.wide);
+ RegisterInfo* p_low = IsTemp(cu, rl.low_reg);
+ RegisterInfo* p_high = IsTemp(cu, rl.high_reg);
+ if (p_low && !(cu->disable_opt & (1 << kSuppressLoads))) {
+ DCHECK(p_low->pair);
+ NullifyRange(cu, p_low->def_start, p_low->def_end, p_low->s_reg, rl.s_reg_low);
+ }
+ if (p_high && !(cu->disable_opt & (1 << kSuppressLoads))) {
+ DCHECK(p_high->pair);
+ }
+ ResetDef(cu, rl.low_reg);
+ ResetDef(cu, rl.high_reg);
+}
+
+void ResetDefTracking(CompilationUnit* cu)
+{
+ int i;
+ for (i=0; i< cu->reg_pool->num_core_regs; i++) {
+ ResetDefBody(&cu->reg_pool->core_regs[i]);
+ }
+ for (i=0; i< cu->reg_pool->num_fp_regs; i++) {
+ ResetDefBody(&cu->reg_pool->FPRegs[i]);
+ }
+}
+
+void ClobberAllRegs(CompilationUnit* cu)
+{
+ int i;
+ for (i=0; i< cu->reg_pool->num_core_regs; i++) {
+ ClobberBody(cu, &cu->reg_pool->core_regs[i]);
+ }
+ for (i=0; i< cu->reg_pool->num_fp_regs; i++) {
+ ClobberBody(cu, &cu->reg_pool->FPRegs[i]);
+ }
+}
+
+// Make sure nothing is live and dirty
+static void FlushAllRegsBody(CompilationUnit* cu, RegisterInfo* info, int num_regs)
+{
+ Codegen* cg = cu->cg.get();
+ int i;
+ for (i=0; i < num_regs; i++) {
+ if (info[i].live && info[i].dirty) {
+ if (info[i].pair) {
+ cg->FlushRegWide(cu, info[i].reg, info[i].partner);
+ } else {
+ cg->FlushReg(cu, info[i].reg);
+ }
+ }
+ }
+}
+
+void FlushAllRegs(CompilationUnit* cu)
+{
+ FlushAllRegsBody(cu, cu->reg_pool->core_regs,
+ cu->reg_pool->num_core_regs);
+ FlushAllRegsBody(cu, cu->reg_pool->FPRegs,
+ cu->reg_pool->num_fp_regs);
+ ClobberAllRegs(cu);
+}
+
+
+//TUNING: rewrite all of this reg stuff. Probably use an attribute table
+static bool RegClassMatches(CompilationUnit* cu, int reg_class, int reg)
+{
+ Codegen* cg = cu->cg.get();
+ if (reg_class == kAnyReg) {
+ return true;
+ } else if (reg_class == kCoreReg) {
+ return !cg->IsFpReg(reg);
+ } else {
+ return cg->IsFpReg(reg);
+ }
+}
+
+void MarkLive(CompilationUnit* cu, int reg, int s_reg)
+{
+ Codegen* cg = cu->cg.get();
+ RegisterInfo* info = cg->GetRegInfo(cu, reg);
+ if ((info->reg == reg) && (info->s_reg == s_reg) && info->live) {
+ return; /* already live */
+ } else if (s_reg != INVALID_SREG) {
+ ClobberSReg(cu, s_reg);
+ if (info->is_temp) {
+ info->live = true;
+ }
+ } else {
+ /* Can't be live if no associated s_reg */
+ DCHECK(info->is_temp);
+ info->live = false;
+ }
+ info->s_reg = s_reg;
+}
+
+void MarkTemp(CompilationUnit* cu, int reg)
+{
+ Codegen* cg = cu->cg.get();
+ RegisterInfo* info = cg->GetRegInfo(cu, reg);
+ info->is_temp = true;
+}
+
+void UnmarkTemp(CompilationUnit* cu, int reg)
+{
+ Codegen* cg = cu->cg.get();
+ RegisterInfo* info = cg->GetRegInfo(cu, reg);
+ info->is_temp = false;
+}
+
+void MarkPair(CompilationUnit* cu, int low_reg, int high_reg)
+{
+ Codegen* cg = cu->cg.get();
+ RegisterInfo* info_lo = cg->GetRegInfo(cu, low_reg);
+ RegisterInfo* info_hi = cg->GetRegInfo(cu, high_reg);
+ info_lo->pair = info_hi->pair = true;
+ info_lo->partner = high_reg;
+ info_hi->partner = low_reg;
+}
+
+void MarkClean(CompilationUnit* cu, RegLocation loc)
+{
+ Codegen* cg = cu->cg.get();
+ RegisterInfo* info = cg->GetRegInfo(cu, loc.low_reg);
+ info->dirty = false;
+ if (loc.wide) {
+ info = cg->GetRegInfo(cu, loc.high_reg);
+ info->dirty = false;
+ }
+}
+
+void MarkDirty(CompilationUnit* cu, RegLocation loc)
+{
+ if (loc.home) {
+ // If already home, can't be dirty
+ return;
+ }
+ Codegen* cg = cu->cg.get();
+ RegisterInfo* info = cg->GetRegInfo(cu, loc.low_reg);
+ info->dirty = true;
+ if (loc.wide) {
+ info = cg->GetRegInfo(cu, loc.high_reg);
+ info->dirty = true;
+ }
+}
+
+void MarkInUse(CompilationUnit* cu, int reg)
+{
+ Codegen* cg = cu->cg.get();
+ RegisterInfo* info = cg->GetRegInfo(cu, reg);
+ info->in_use = true;
+}
+
+static void CopyRegInfo(CompilationUnit* cu, int new_reg, int old_reg)
+{
+ Codegen* cg = cu->cg.get();
+ RegisterInfo* new_info = cg->GetRegInfo(cu, new_reg);
+ RegisterInfo* old_info = cg->GetRegInfo(cu, old_reg);
+ // Target temp status must not change
+ bool is_temp = new_info->is_temp;
+ *new_info = *old_info;
+ // Restore target's temp status
+ new_info->is_temp = is_temp;
+ new_info->reg = new_reg;
+}
+
+static bool CheckCorePoolSanity(CompilationUnit* cu)
+{
+ Codegen* cg = cu->cg.get();
+  for (int i = 0; i < cu->reg_pool->num_core_regs; i++) {
+    if (cu->reg_pool->core_regs[i].pair) {
+      int my_reg = cu->reg_pool->core_regs[i].reg;
+      int my_sreg = cu->reg_pool->core_regs[i].s_reg;
+      int partner_reg = cu->reg_pool->core_regs[i].partner;
+      RegisterInfo* partner = cg->GetRegInfo(cu, partner_reg);
+      DCHECK(partner != NULL);
+      DCHECK(partner->pair);
+      DCHECK_EQ(my_reg, partner->partner);
+      int partner_sreg = partner->s_reg;
+ if (my_sreg == INVALID_SREG) {
+ DCHECK_EQ(partner_sreg, INVALID_SREG);
+ } else {
+ int diff = my_sreg - partner_sreg;
+ DCHECK((diff == -1) || (diff == 1));
+ }
+ }
+ if (!cu->reg_pool->core_regs[i].live) {
+ DCHECK(cu->reg_pool->core_regs[i].def_start == NULL);
+ DCHECK(cu->reg_pool->core_regs[i].def_end == NULL);
+ }
+ }
+ return true;
+}
+
+/*
+ * Return an updated location record with current in-register status.
+ * If the value lives in live temps, reflect that fact. No code
+ * is generated. If the live value is part of an older pair,
+ * clobber both low and high.
+ * TUNING: clobbering both is a bit heavy-handed, but the alternative
+ * is a bit complex when dealing with FP regs. Examine code to see
+ * if it's worthwhile trying to be more clever here.
+ */
+
+RegLocation UpdateLoc(CompilationUnit* cu, RegLocation loc)
+{
+ DCHECK(!loc.wide);
+ DCHECK(CheckCorePoolSanity(cu));
+ if (loc.location != kLocPhysReg) {
+ DCHECK((loc.location == kLocDalvikFrame) ||
+ (loc.location == kLocCompilerTemp));
+ RegisterInfo* info_lo = AllocLive(cu, loc.s_reg_low, kAnyReg);
+ if (info_lo) {
+ if (info_lo->pair) {
+ Clobber(cu, info_lo->reg);
+ Clobber(cu, info_lo->partner);
+ FreeTemp(cu, info_lo->reg);
+ } else {
+ loc.low_reg = info_lo->reg;
+ loc.location = kLocPhysReg;
+ }
+ }
+ }
+
+ return loc;
+}
+
+/* See comments for UpdateLoc(). */
+RegLocation UpdateLocWide(CompilationUnit* cu, RegLocation loc)
+{
+ DCHECK(loc.wide);
+ DCHECK(CheckCorePoolSanity(cu));
+ Codegen* cg = cu->cg.get();
+ if (loc.location != kLocPhysReg) {
+ DCHECK((loc.location == kLocDalvikFrame) ||
+ (loc.location == kLocCompilerTemp));
+ // Are the dalvik regs already live in physical registers?
+ RegisterInfo* info_lo = AllocLive(cu, loc.s_reg_low, kAnyReg);
+ RegisterInfo* info_hi = AllocLive(cu,
+ GetSRegHi(loc.s_reg_low), kAnyReg);
+ bool match = true;
+ match = match && (info_lo != NULL);
+ match = match && (info_hi != NULL);
+ // Are they both core or both FP?
+ match = match && (cg->IsFpReg(info_lo->reg) == cg->IsFpReg(info_hi->reg));
+ // If a pair of floating point singles, are they properly aligned?
+ if (match && cg->IsFpReg(info_lo->reg)) {
+ match &= ((info_lo->reg & 0x1) == 0);
+ match &= ((info_hi->reg - info_lo->reg) == 1);
+ }
+    // If previously used as a pair, is it the same pair?
+ if (match && (info_lo->pair || info_hi->pair)) {
+ match = (info_lo->pair == info_hi->pair);
+ match &= ((info_lo->reg == info_hi->partner) &&
+ (info_hi->reg == info_lo->partner));
+ }
+ if (match) {
+ // Can reuse - update the register usage info
+ loc.low_reg = info_lo->reg;
+ loc.high_reg = info_hi->reg;
+ loc.location = kLocPhysReg;
+ MarkPair(cu, loc.low_reg, loc.high_reg);
+ DCHECK(!cg->IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
+ return loc;
+ }
+ // Can't easily reuse - clobber and free any overlaps
+ if (info_lo) {
+ Clobber(cu, info_lo->reg);
+ FreeTemp(cu, info_lo->reg);
+ if (info_lo->pair)
+ Clobber(cu, info_lo->partner);
+ }
+ if (info_hi) {
+ Clobber(cu, info_hi->reg);
+ FreeTemp(cu, info_hi->reg);
+ if (info_hi->pair)
+ Clobber(cu, info_hi->partner);
+ }
+ }
+ return loc;
+}
+
+
+/* For use in cases where we don't know (or care about) the width. */
+RegLocation UpdateRawLoc(CompilationUnit* cu, RegLocation loc)
+{
+ if (loc.wide)
+ return UpdateLocWide(cu, loc);
+ else
+ return UpdateLoc(cu, loc);
+}
+
+RegLocation EvalLocWide(CompilationUnit* cu, RegLocation loc, int reg_class, bool update)
+{
+ DCHECK(loc.wide);
+ int new_regs;
+ int low_reg;
+ int high_reg;
+ Codegen* cg = cu->cg.get();
+
+ loc = UpdateLocWide(cu, loc);
+
+ /* If already in registers, we can assume proper form. Right reg class? */
+ if (loc.location == kLocPhysReg) {
+ DCHECK_EQ(cg->IsFpReg(loc.low_reg), cg->IsFpReg(loc.high_reg));
+ DCHECK(!cg->IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
+ if (!RegClassMatches(cu, reg_class, loc.low_reg)) {
+ /* Wrong register class. Reallocate and copy */
+ new_regs = cg->AllocTypedTempPair(cu, loc.fp, reg_class);
+ low_reg = new_regs & 0xff;
+ high_reg = (new_regs >> 8) & 0xff;
+ cg->OpRegCopyWide(cu, low_reg, high_reg, loc.low_reg, loc.high_reg);
+ CopyRegInfo(cu, low_reg, loc.low_reg);
+ CopyRegInfo(cu, high_reg, loc.high_reg);
+ Clobber(cu, loc.low_reg);
+ Clobber(cu, loc.high_reg);
+ loc.low_reg = low_reg;
+ loc.high_reg = high_reg;
+ MarkPair(cu, loc.low_reg, loc.high_reg);
+ DCHECK(!cg->IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
+ }
+ return loc;
+ }
+
+ DCHECK_NE(loc.s_reg_low, INVALID_SREG);
+ DCHECK_NE(GetSRegHi(loc.s_reg_low), INVALID_SREG);
+
+ new_regs = cg->AllocTypedTempPair(cu, loc.fp, reg_class);
+ loc.low_reg = new_regs & 0xff;
+ loc.high_reg = (new_regs >> 8) & 0xff;
+
+ MarkPair(cu, loc.low_reg, loc.high_reg);
+ if (update) {
+ loc.location = kLocPhysReg;
+ MarkLive(cu, loc.low_reg, loc.s_reg_low);
+ MarkLive(cu, loc.high_reg, GetSRegHi(loc.s_reg_low));
+ }
+ DCHECK(!cg->IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
+ return loc;
+}
+
+RegLocation EvalLoc(CompilationUnit* cu, RegLocation loc,
+ int reg_class, bool update)
+{
+ int new_reg;
+
+ if (loc.wide)
+ return EvalLocWide(cu, loc, reg_class, update);
+
+ Codegen* cg = cu->cg.get();
+ loc = UpdateLoc(cu, loc);
+
+ if (loc.location == kLocPhysReg) {
+ if (!RegClassMatches(cu, reg_class, loc.low_reg)) {
+ /* Wrong register class. Realloc, copy and transfer ownership */
+ new_reg = cg->AllocTypedTemp(cu, loc.fp, reg_class);
+ cg->OpRegCopy(cu, new_reg, loc.low_reg);
+ CopyRegInfo(cu, new_reg, loc.low_reg);
+ Clobber(cu, loc.low_reg);
+ loc.low_reg = new_reg;
+ }
+ return loc;
+ }
+
+ DCHECK_NE(loc.s_reg_low, INVALID_SREG);
+
+ new_reg = cg->AllocTypedTemp(cu, loc.fp, reg_class);
+ loc.low_reg = new_reg;
+
+ if (update) {
+ loc.location = kLocPhysReg;
+ MarkLive(cu, loc.low_reg, loc.s_reg_low);
+ }
+ return loc;
+}
+
+RegLocation GetRawSrc(CompilationUnit* cu, MIR* mir, int num)
+{
+ DCHECK(num < mir->ssa_rep->num_uses);
+ RegLocation res = cu->reg_location[mir->ssa_rep->uses[num]];
+ return res;
+}
+
+RegLocation GetRawDest(CompilationUnit* cu, MIR* mir)
+{
+ DCHECK_GT(mir->ssa_rep->num_defs, 0);
+ RegLocation res = cu->reg_location[mir->ssa_rep->defs[0]];
+ return res;
+}
+
+RegLocation GetDest(CompilationUnit* cu, MIR* mir)
+{
+ RegLocation res = GetRawDest(cu, mir);
+ DCHECK(!res.wide);
+ return res;
+}
+
+RegLocation GetSrc(CompilationUnit* cu, MIR* mir, int num)
+{
+ RegLocation res = GetRawSrc(cu, mir, num);
+ DCHECK(!res.wide);
+ return res;
+}
+
+RegLocation GetDestWide(CompilationUnit* cu, MIR* mir)
+{
+ RegLocation res = GetRawDest(cu, mir);
+ DCHECK(res.wide);
+ return res;
+}
+
+RegLocation GetSrcWide(CompilationUnit* cu, MIR* mir,
+ int low)
+{
+ RegLocation res = GetRawSrc(cu, mir, low);
+ DCHECK(res.wide);
+ return res;
+}
+
+/* Use SSA names to count references of base Dalvik v_regs. */
+static void CountRefs(CompilationUnit *cu, BasicBlock* bb, RefCounts* core_counts,
+ RefCounts* fp_counts)
+{
+ // TUNING: this routine could use some tweaking.
+ if ((cu->disable_opt & (1 << kPromoteRegs)) ||
+ !((bb->block_type == kEntryBlock) || (bb->block_type == kExitBlock) ||
+ (bb->block_type == kDalvikByteCode))) {
+ return;
+ }
+ for (int i = 0; i < cu->num_ssa_regs; i++) {
+ RegLocation loc = cu->reg_location[i];
+ RefCounts* counts = loc.fp ? fp_counts : core_counts;
+ int p_map_idx = SRegToPMap(cu, loc.s_reg_low);
+ //Don't count easily regenerated immediates
+ if (loc.fp || !IsInexpensiveConstant(cu, loc)) {
+ counts[p_map_idx].count += cu->raw_use_counts.elem_list[i];
+ }
+ if (loc.wide && loc.fp && !loc.high_word) {
+ counts[p_map_idx].double_start = true;
+ }
+ }
+}
+
+/* qsort callback function, sort descending */
+static int SortCounts(const void *val1, const void *val2)
+{
+ const RefCounts* op1 = reinterpret_cast<const RefCounts*>(val1);
+ const RefCounts* op2 = reinterpret_cast<const RefCounts*>(val2);
+ return (op1->count == op2->count) ? 0 : (op1->count < op2->count ? 1 : -1);
+}
+
+static void DumpCounts(const RefCounts* arr, int size, const char* msg)
+{
+ LOG(INFO) << msg;
+ for (int i = 0; i < size; i++) {
+ LOG(INFO) << "s_reg[" << arr[i].s_reg << "]: " << arr[i].count;
+ }
+}
+
+/*
+ * Note: some portions of this code required even if the kPromoteRegs
+ * optimization is disabled.
+ */
+void DoPromotion(CompilationUnit* cu)
+{
+ Codegen* cg = cu->cg.get();
+ int reg_bias = cu->num_compiler_temps + 1;
+ int dalvik_regs = cu->num_dalvik_registers;
+ int num_regs = dalvik_regs + reg_bias;
+ const int promotion_threshold = 2;
+
+ // Allow target code to add any special registers
+ cg->AdjustSpillMask(cu);
+
+ /*
+ * Simple register promotion. Just do a static count of the uses
+ * of Dalvik registers. Note that we examine the SSA names, but
+ * count based on original Dalvik register name. Count refs
+ * separately based on type in order to give allocation
+ * preference to fp doubles - which must be allocated as sequential
+ * physical single fp registers starting with an even-numbered
+ * reg.
+ * TUNING: replace with linear scan once we have the ability
+ * to describe register live ranges for GC.
+ */
+ RefCounts *core_regs = static_cast<RefCounts*>(NewMem(cu, sizeof(RefCounts) * num_regs,
+ true, kAllocRegAlloc));
+ RefCounts *FpRegs = static_cast<RefCounts *>(NewMem(cu, sizeof(RefCounts) * num_regs,
+ true, kAllocRegAlloc));
+ // Set ssa names for original Dalvik registers
+ for (int i = 0; i < dalvik_regs; i++) {
+ core_regs[i].s_reg = FpRegs[i].s_reg = i;
+ }
+ // Set ssa name for Method*
+ core_regs[dalvik_regs].s_reg = cu->method_sreg;
+  FpRegs[dalvik_regs].s_reg = cu->method_sreg; // For consistency.
+ // Set ssa names for compiler_temps
+ for (int i = 1; i <= cu->num_compiler_temps; i++) {
+ CompilerTemp* ct = reinterpret_cast<CompilerTemp*>(cu->compiler_temps.elem_list[i]);
+ core_regs[dalvik_regs + i].s_reg = ct->s_reg;
+ FpRegs[dalvik_regs + i].s_reg = ct->s_reg;
+ }
+
+ GrowableListIterator iterator;
+ GrowableListIteratorInit(&cu->block_list, &iterator);
+ while (true) {
+ BasicBlock* bb;
+ bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iterator));
+ if (bb == NULL) break;
+ CountRefs(cu, bb, core_regs, FpRegs);
+ }
+
+ /*
+ * Ideally, we'd allocate doubles starting with an even-numbered
+ * register. Bias the counts to try to allocate any vreg that's
+ * used as the start of a pair first.
+ */
+ for (int i = 0; i < num_regs; i++) {
+ if (FpRegs[i].double_start) {
+ FpRegs[i].count *= 2;
+ }
+ }
+
+ // Sort the count arrays
+ qsort(core_regs, num_regs, sizeof(RefCounts), SortCounts);
+ qsort(FpRegs, num_regs, sizeof(RefCounts), SortCounts);
+
+ if (cu->verbose) {
+ DumpCounts(core_regs, num_regs, "Core regs after sort");
+ DumpCounts(FpRegs, num_regs, "Fp regs after sort");
+ }
+
+ if (!(cu->disable_opt & (1 << kPromoteRegs))) {
+ // Promote FpRegs
+ for (int i = 0; (i < num_regs) &&
+ (FpRegs[i].count >= promotion_threshold ); i++) {
+ int p_map_idx = SRegToPMap(cu, FpRegs[i].s_reg);
+ if (cu->promotion_map[p_map_idx].fp_location != kLocPhysReg) {
+ int reg = AllocPreservedFPReg(cu, FpRegs[i].s_reg,
+ FpRegs[i].double_start);
+ if (reg < 0) {
+ break; // No more left
+ }
+ }
+ }
+
+ // Promote core regs
+ for (int i = 0; (i < num_regs) &&
+ (core_regs[i].count > promotion_threshold); i++) {
+ int p_map_idx = SRegToPMap(cu, core_regs[i].s_reg);
+ if (cu->promotion_map[p_map_idx].core_location !=
+ kLocPhysReg) {
+ int reg = AllocPreservedCoreReg(cu, core_regs[i].s_reg);
+ if (reg < 0) {
+ break; // No more left
+ }
+ }
+ }
+ } else if (cu->qd_mode) {
+ AllocPreservedCoreReg(cu, cu->method_sreg);
+ for (int i = 0; i < num_regs; i++) {
+ int reg = AllocPreservedCoreReg(cu, i);
+ if (reg < 0) {
+ break; // No more left
+ }
+ }
+ }
+
+
+ // Now, update SSA names to new home locations
+ for (int i = 0; i < cu->num_ssa_regs; i++) {
+ RegLocation *curr = &cu->reg_location[i];
+ int p_map_idx = SRegToPMap(cu, curr->s_reg_low);
+ if (!curr->wide) {
+ if (curr->fp) {
+ if (cu->promotion_map[p_map_idx].fp_location == kLocPhysReg) {
+ curr->location = kLocPhysReg;
+ curr->low_reg = cu->promotion_map[p_map_idx].FpReg;
+ curr->home = true;
+ }
+ } else {
+ if (cu->promotion_map[p_map_idx].core_location == kLocPhysReg) {
+ curr->location = kLocPhysReg;
+ curr->low_reg = cu->promotion_map[p_map_idx].core_reg;
+ curr->home = true;
+ }
+ }
+ curr->high_reg = INVALID_REG;
+ } else {
+ if (curr->high_word) {
+ continue;
+ }
+ if (curr->fp) {
+ if ((cu->promotion_map[p_map_idx].fp_location == kLocPhysReg) &&
+ (cu->promotion_map[p_map_idx+1].fp_location ==
+ kLocPhysReg)) {
+ int low_reg = cu->promotion_map[p_map_idx].FpReg;
+ int high_reg = cu->promotion_map[p_map_idx+1].FpReg;
+ // Doubles require pair of singles starting at even reg
+ if (((low_reg & 0x1) == 0) && ((low_reg + 1) == high_reg)) {
+ curr->location = kLocPhysReg;
+ curr->low_reg = low_reg;
+ curr->high_reg = high_reg;
+ curr->home = true;
+ }
+ }
+ } else {
+ if ((cu->promotion_map[p_map_idx].core_location == kLocPhysReg)
+ && (cu->promotion_map[p_map_idx+1].core_location ==
+ kLocPhysReg)) {
+ curr->location = kLocPhysReg;
+ curr->low_reg = cu->promotion_map[p_map_idx].core_reg;
+ curr->high_reg = cu->promotion_map[p_map_idx+1].core_reg;
+ curr->home = true;
+ }
+ }
+ }
+ }
+ if (cu->verbose) {
+ DumpPromotionMap(cu);
+ }
+}
+
+/* Returns sp-relative offset in bytes for a VReg */
+int VRegOffset(CompilationUnit* cu, int v_reg)
+{
+ return StackVisitor::GetVRegOffset(cu->code_item, cu->core_spill_mask,
+ cu->fp_spill_mask, cu->frame_size, v_reg);
+}
+
+/* Returns sp-relative offset in bytes for a SReg */
+int SRegOffset(CompilationUnit* cu, int s_reg)
+{
+ return VRegOffset(cu, SRegToVReg(cu, s_reg));
+}
+
+RegLocation GetBadLoc()
+{
+ RegLocation res = bad_loc;
+ return res;
+}
+
+/* Mark register usage state and return long retloc */
+RegLocation GetReturnWide(CompilationUnit* cu, bool is_double)
+{
+ Codegen* cg = cu->cg.get();
+ RegLocation gpr_res = cg->LocCReturnWide();
+ RegLocation fpr_res = cg->LocCReturnDouble();
+ RegLocation res = is_double ? fpr_res : gpr_res;
+ Clobber(cu, res.low_reg);
+ Clobber(cu, res.high_reg);
+ LockTemp(cu, res.low_reg);
+ LockTemp(cu, res.high_reg);
+ MarkPair(cu, res.low_reg, res.high_reg);
+ return res;
+}
+
+RegLocation GetReturn(CompilationUnit* cu, bool is_float)
+{
+ Codegen* cg = cu->cg.get();
+ RegLocation gpr_res = cg->LocCReturn();
+ RegLocation fpr_res = cg->LocCReturnFloat();
+ RegLocation res = is_float ? fpr_res : gpr_res;
+ Clobber(cu, res.low_reg);
+ if (cu->instruction_set == kMips) {
+ MarkInUse(cu, res.low_reg);
+ } else {
+ LockTemp(cu, res.low_reg);
+ }
+ return res;
+}
+
+} // namespace art
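As a worked illustration of the s_reg to promotion_map index mapping documented above SRegToPMap(), the standalone sketch below takes an already-translated v_reg directly (rather than going through SRegToVReg) and assumes a stand-in kMethodBaseVReg of -1 in place of SSA_METHOD_BASEREG; the register counts in main() are made up for the example.

#include <cassert>
#include <cstdlib>
#include <iostream>

// Stand-in for SSA_METHOD_BASEREG; the real value comes from the compiler headers.
constexpr int kMethodBaseVReg = -1;

// Ordinary Dalvik vregs (>= 0) map to their own index; the Method* register
// and compiler temps use negative v_reg numbers and are appended after the
// Dalvik registers, as the comment above SRegToPMap() describes.
int PromotionMapIndex(int v_reg, int num_dalvik_registers) {
  if (v_reg >= 0) {
    assert(v_reg < num_dalvik_registers);
    return v_reg;
  }
  int pos = std::abs(v_reg) - std::abs(kMethodBaseVReg);
  return num_dalvik_registers + pos;
}

int main() {
  const int num_dalvik_regs = 10;
  std::cout << PromotionMapIndex(3, num_dalvik_regs)  << "\n";  // 3: plain vreg keeps its index
  std::cout << PromotionMapIndex(-1, num_dalvik_regs) << "\n";  // 10: Method* slot
  std::cout << PromotionMapIndex(-2, num_dalvik_regs) << "\n";  // 11: first compiler temp
  return 0;
}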
diff --git a/src/compiler/dex/quick/ralloc_util.h b/src/compiler/dex/quick/ralloc_util.h
new file mode 100644
index 0000000..3287047
--- /dev/null
+++ b/src/compiler/dex/quick/ralloc_util.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_QUICK_RALLOCUTIL_H_
+#define ART_SRC_COMPILER_DEX_QUICK_RALLOCUTIL_H_
+
+/*
+ * This file contains target-independent register allocation support.
+ */
+
+#include "compiler/dex/compiler_ir.h"
+#include "compiler/dex/compiler_utility.h"
+#include "compiler/dex/dataflow.h"
+
+namespace art {
+
+/* Static register use counts */
+struct RefCounts {
+ int count;
+ int s_reg;
+  bool double_start; // True if this s_reg is the starting v_reg of a double.
+};
+
+/*
+ * Get the "real" sreg number associated with an s_reg slot. In general,
+ * s_reg values passed through codegen are the SSA names created by
+ * dataflow analysis and refer to slot numbers in the cu->reg_location
+ * array. However, renaming is accomplished by simply replacing RegLocation
+ * entries in the cu->reg_location[] array. Therefore, when location
+ * records for operands are first created, we need to ask the locRecord
+ * identified by the dataflow pass what its new name is.
+ */
+inline int GetSRegHi(int lowSreg) {
+ return (lowSreg == INVALID_SREG) ? INVALID_SREG : lowSreg + 1;
+}
+
+inline bool oat_live_out(CompilationUnit* cu, int s_reg) {
+ //For now.
+ return true;
+}
+
+inline int oatSSASrc(MIR* mir, int num) {
+ DCHECK_GT(mir->ssa_rep->num_uses, num);
+ return mir->ssa_rep->uses[num];
+}
+
+void ClobberSReg(CompilationUnit* cu, int s_reg);
+RegLocation EvalLoc(CompilationUnit* cu, RegLocation loc,
+ int reg_class, bool update);
+// Mark a temp register as dead. Does not affect allocation state.
+void Clobber(CompilationUnit* cu, int reg);
+
+RegLocation UpdateLoc(CompilationUnit* cu, RegLocation loc);
+RegLocation UpdateLocWide(CompilationUnit* cu, RegLocation loc);
+RegLocation UpdateRawLoc(CompilationUnit* cu, RegLocation loc);
+
+void MarkLive(CompilationUnit* cu, int reg, int s_reg);
+void MarkTemp(CompilationUnit* cu, int reg);
+void UnmarkTemp(CompilationUnit* cu, int reg);
+void MarkDirty(CompilationUnit* cu, RegLocation loc);
+void MarkPair(CompilationUnit* cu, int low_reg, int high_reg);
+void MarkClean(CompilationUnit* cu, RegLocation loc);
+void ResetDef(CompilationUnit* cu, int reg);
+void ResetDefLoc(CompilationUnit* cu, RegLocation rl);
+
+// Set up temp & preserved register pools specialized by target.
+void CompilerInitPool(RegisterInfo* regs, int* reg_nums, int num);
+
+/*
+ * Mark the beginning and end LIR of a def sequence. Note that
+ * on entry start points to the LIR prior to the beginning of the
+ * sequence.
+ */
+void MarkDef(CompilationUnit* cu, RegLocation rl, LIR* start, LIR* finish);
+void MarkDefWide(CompilationUnit* cu, RegLocation rl, LIR* start, LIR* finish);
+void ResetDefLocWide(CompilationUnit* cu, RegLocation rl);
+void ResetDefTracking(CompilationUnit* cu);
+
+
+// Get the LocRecord associated with an SSA name use.
+RegLocation GetSrc(CompilationUnit* cu, MIR* mir, int num);
+RegLocation GetSrcWide(CompilationUnit* cu, MIR* mir, int low);
+// Non-width checking version.
+RegLocation GetRawSrc(CompilationUnit* cu, MIR* mir, int num);
+
+// Get the LocRecord associated with an SSA name def.
+RegLocation GetDest(CompilationUnit* cu, MIR* mir);
+RegLocation GetDestWide(CompilationUnit* cu, MIR* mir);
+// Non-width checking version.
+RegLocation GetRawDest(CompilationUnit* cu, MIR* mir);
+
+// Clobber all regs that might be used by an external C call.
+void ClobberCalleeSave(CompilationUnit* cu);
+
+RegisterInfo* IsTemp(CompilationUnit* cu, int reg);
+RegisterInfo* IsPromoted(CompilationUnit* cu, int reg);
+RegisterInfo* IsLive(CompilationUnit* cu, int reg);
+bool IsDirty(CompilationUnit* cu, int reg);
+
+void MarkInUse(CompilationUnit* cu, int reg);
+
+int AllocTemp(CompilationUnit* cu);
+int AllocTempFloat(CompilationUnit* cu);
+int AllocTempDouble(CompilationUnit* cu);
+void FreeTemp(CompilationUnit* cu, int reg);
+// Return a temp if one is available, -1 otherwise.
+int AllocFreeTemp(CompilationUnit* cu);
+/*
+ * Similar to AllocTemp(), but forces the allocation of a specific
+ * register. No check is made to see if the register was previously
+ * allocated. Use with caution.
+ */
+void LockTemp(CompilationUnit* cu, int reg);
+
+/* To be used when explicitly managing register use */
+void LockCallTemps(CompilationUnit* cu);
+void FreeCallTemps(CompilationUnit* cu);
+
+void FlushAllRegs(CompilationUnit* cu);
+
+RegLocation GetReturn(CompilationUnit* cu, bool is_float);
+RegLocation GetReturnWide(CompilationUnit* cu, bool is_double);
+RegLocation GetBadLoc();
+RegLocation WideToNarrow(CompilationUnit* cu, RegLocation rl);
+
+/*
+ * Free all allocated temps in the temp pools. Note that this does
+ * not affect the "liveness" of a temp register, which will stay
+ * live until it is either explicitly killed or reallocated.
+ */
+void ResetRegPool(CompilationUnit* cu);
+
+void ClobberAllRegs(CompilationUnit* cu);
+
+void FlushRegWide(CompilationUnit* cu, int reg1, int reg2);
+
+void FlushReg(CompilationUnit* cu, int reg);
+
+void DoPromotion(CompilationUnit* cu);
+int VRegOffset(CompilationUnit* cu, int reg);
+int SRegOffset(CompilationUnit* cu, int reg);
+void RecordCorePromotion(CompilationUnit* cu, int reg, int s_reg);
+void RecordFpPromotion(CompilationUnit* cu, int reg, int s_reg);
+int ComputeFrameSize(CompilationUnit* cu);
+int SRegToPMap(CompilationUnit* cu, int s_reg);
+void DumpRegPool(RegisterInfo* p, int num_regs);
+
+} // namespace art
+
+#endif // ART_SRC_COMPILER_DEX_QUICK_RALLOCUTIL_H_
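A small usage sketch of the GetSRegHi() convention declared above: a wide (64-bit) value occupies two consecutive SSA names, and an invalid low name stays invalid. The INVALID_SREG constant here is a local stand-in for the compiler's sentinel, so the snippet builds on its own.

#include <iostream>

constexpr int INVALID_SREG = -1;  // Stand-in for the compiler's sentinel.

// Mirrors the inline GetSRegHi() above: the high half of a wide value is
// simply the next SSA name; an invalid low name stays invalid.
inline int GetSRegHi(int low_sreg) {
  return (low_sreg == INVALID_SREG) ? INVALID_SREG : low_sreg + 1;
}

int main() {
  std::cout << GetSRegHi(12) << "\n";            // 13: high half of the pair
  std::cout << GetSRegHi(INVALID_SREG) << "\n";  // -1: stays invalid
  return 0;
}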
diff --git a/src/compiler/dex/quick/x86/assemble_x86.cc b/src/compiler/dex/quick/x86/assemble_x86.cc
new file mode 100644
index 0000000..9ee0cb5
--- /dev/null
+++ b/src/compiler/dex/quick/x86/assemble_x86.cc
@@ -0,0 +1,1400 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_x86.h"
+#include "compiler/dex/quick/codegen_util.h"
+#include "x86_lir.h"
+
+namespace art {
+
+#define MAX_ASSEMBLER_RETRIES 50
+
+const X86EncodingMap X86Codegen::EncodingMap[kX86Last] = {
+ { kX8632BitData, kData, IS_UNARY_OP, { 0, 0, 0x00, 0, 0, 0, 0, 4 }, "data", "0x!0d" },
+ { kX86Bkpt, kNullary, NO_OPERAND | IS_BRANCH, { 0, 0, 0xCC, 0, 0, 0, 0, 0 }, "int 3", "" },
+ { kX86Nop, kNop, IS_UNARY_OP, { 0, 0, 0x90, 0, 0, 0, 0, 0 }, "nop", "" },
+
+#define ENCODING_MAP(opname, mem_use, reg_def, uses_ccodes, \
+ rm8_r8, rm32_r32, \
+ r8_rm8, r32_rm32, \
+ ax8_i8, ax32_i32, \
+ rm8_i8, rm8_i8_modrm, \
+ rm32_i32, rm32_i32_modrm, \
+ rm32_i8, rm32_i8_modrm) \
+{ kX86 ## opname ## 8MR, kMemReg, mem_use | IS_TERTIARY_OP | REG_USE02 | SETS_CCODES | uses_ccodes, { 0, 0, rm8_r8, 0, 0, 0, 0, 0 }, #opname "8MR", "[!0r+!1d],!2r" }, \
+{ kX86 ## opname ## 8AR, kArrayReg, mem_use | IS_QUIN_OP | REG_USE014 | SETS_CCODES | uses_ccodes, { 0, 0, rm8_r8, 0, 0, 0, 0, 0 }, #opname "8AR", "[!0r+!1r<<!2d+!3d],!4r" }, \
+{ kX86 ## opname ## 8TR, kThreadReg, mem_use | IS_BINARY_OP | REG_USE1 | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm8_r8, 0, 0, 0, 0, 0 }, #opname "8TR", "fs:[!0d],!1r" }, \
+{ kX86 ## opname ## 8RR, kRegReg, IS_BINARY_OP | reg_def | REG_USE01 | SETS_CCODES | uses_ccodes, { 0, 0, r8_rm8, 0, 0, 0, 0, 0 }, #opname "8RR", "!0r,!1r" }, \
+{ kX86 ## opname ## 8RM, kRegMem, IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE01 | SETS_CCODES | uses_ccodes, { 0, 0, r8_rm8, 0, 0, 0, 0, 0 }, #opname "8RM", "!0r,[!1r+!2d]" }, \
+{ kX86 ## opname ## 8RA, kRegArray, IS_LOAD | IS_QUIN_OP | reg_def | REG_USE012 | SETS_CCODES | uses_ccodes, { 0, 0, r8_rm8, 0, 0, 0, 0, 0 }, #opname "8RA", "!0r,[!1r+!2r<<!3d+!4d]" }, \
+{ kX86 ## opname ## 8RT, kRegThread, IS_LOAD | IS_BINARY_OP | reg_def | REG_USE0 | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, r8_rm8, 0, 0, 0, 0, 0 }, #opname "8RT", "!0r,fs:[!1d]" }, \
+{ kX86 ## opname ## 8RI, kRegImm, IS_BINARY_OP | reg_def | REG_USE0 | SETS_CCODES | uses_ccodes, { 0, 0, rm8_i8, 0, 0, rm8_i8_modrm, ax8_i8, 1 }, #opname "8RI", "!0r,!1d" }, \
+{ kX86 ## opname ## 8MI, kMemImm, mem_use | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES | uses_ccodes, { 0, 0, rm8_i8, 0, 0, rm8_i8_modrm, 0, 1 }, #opname "8MI", "[!0r+!1d],!2d" }, \
+{ kX86 ## opname ## 8AI, kArrayImm, mem_use | IS_QUIN_OP | REG_USE01 | SETS_CCODES | uses_ccodes, { 0, 0, rm8_i8, 0, 0, rm8_i8_modrm, 0, 1 }, #opname "8AI", "[!0r+!1r<<!2d+!3d],!4d" }, \
+{ kX86 ## opname ## 8TI, kThreadImm, mem_use | IS_BINARY_OP | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm8_i8, 0, 0, rm8_i8_modrm, 0, 1 }, #opname "8TI", "fs:[!0d],!1d" }, \
+ \
+{ kX86 ## opname ## 16MR, kMemReg, mem_use | IS_TERTIARY_OP | REG_USE02 | SETS_CCODES | uses_ccodes, { 0x66, 0, rm32_r32, 0, 0, 0, 0, 0 }, #opname "16MR", "[!0r+!1d],!2r" }, \
+{ kX86 ## opname ## 16AR, kArrayReg, mem_use | IS_QUIN_OP | REG_USE014 | SETS_CCODES | uses_ccodes, { 0x66, 0, rm32_r32, 0, 0, 0, 0, 0 }, #opname "16AR", "[!0r+!1r<<!2d+!3d],!4r" }, \
+{ kX86 ## opname ## 16TR, kThreadReg, mem_use | IS_BINARY_OP | REG_USE1 | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0x66, rm32_r32, 0, 0, 0, 0, 0 }, #opname "16TR", "fs:[!0d],!1r" }, \
+{ kX86 ## opname ## 16RR, kRegReg, IS_BINARY_OP | reg_def | REG_USE01 | SETS_CCODES | uses_ccodes, { 0x66, 0, r32_rm32, 0, 0, 0, 0, 0 }, #opname "16RR", "!0r,!1r" }, \
+{ kX86 ## opname ## 16RM, kRegMem, IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE01 | SETS_CCODES | uses_ccodes, { 0x66, 0, r32_rm32, 0, 0, 0, 0, 0 }, #opname "16RM", "!0r,[!1r+!2d]" }, \
+{ kX86 ## opname ## 16RA, kRegArray, IS_LOAD | IS_QUIN_OP | reg_def | REG_USE012 | SETS_CCODES | uses_ccodes, { 0x66, 0, r32_rm32, 0, 0, 0, 0, 0 }, #opname "16RA", "!0r,[!1r+!2r<<!3d+!4d]" }, \
+{ kX86 ## opname ## 16RT, kRegThread, IS_LOAD | IS_BINARY_OP | reg_def | REG_USE0 | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0x66, r32_rm32, 0, 0, 0, 0, 0 }, #opname "16RT", "!0r,fs:[!1d]" }, \
+{ kX86 ## opname ## 16RI, kRegImm, IS_BINARY_OP | reg_def | REG_USE0 | SETS_CCODES | uses_ccodes, { 0x66, 0, rm32_i32, 0, 0, rm32_i32_modrm, ax32_i32, 2 }, #opname "16RI", "!0r,!1d" }, \
+{ kX86 ## opname ## 16MI, kMemImm, mem_use | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES | uses_ccodes, { 0x66, 0, rm32_i32, 0, 0, rm32_i32_modrm, 0, 2 }, #opname "16MI", "[!0r+!1d],!2d" }, \
+{ kX86 ## opname ## 16AI, kArrayImm, mem_use | IS_QUIN_OP | REG_USE01 | SETS_CCODES | uses_ccodes, { 0x66, 0, rm32_i32, 0, 0, rm32_i32_modrm, 0, 2 }, #opname "16AI", "[!0r+!1r<<!2d+!3d],!4d" }, \
+{ kX86 ## opname ## 16TI, kThreadImm, mem_use | IS_BINARY_OP | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0x66, rm32_i32, 0, 0, rm32_i32_modrm, 0, 2 }, #opname "16TI", "fs:[!0d],!1d" }, \
+{ kX86 ## opname ## 16RI8, kRegImm, IS_BINARY_OP | reg_def | REG_USE0 | SETS_CCODES | uses_ccodes, { 0x66, 0, rm32_i8, 0, 0, rm32_i8_modrm, 0, 1 }, #opname "16RI8", "!0r,!1d" }, \
+{ kX86 ## opname ## 16MI8, kMemImm, mem_use | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES | uses_ccodes, { 0x66, 0, rm32_i8, 0, 0, rm32_i8_modrm, 0, 1 }, #opname "16MI8", "[!0r+!1d],!2d" }, \
+{ kX86 ## opname ## 16AI8, kArrayImm, mem_use | IS_QUIN_OP | REG_USE01 | SETS_CCODES | uses_ccodes, { 0x66, 0, rm32_i8, 0, 0, rm32_i8_modrm, 0, 1 }, #opname "16AI8", "[!0r+!1r<<!2d+!3d],!4d" }, \
+{ kX86 ## opname ## 16TI8, kThreadImm, mem_use | IS_BINARY_OP | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0x66, rm32_i8, 0, 0, rm32_i8_modrm, 0, 1 }, #opname "16TI8", "fs:[!0d],!1d" }, \
+ \
+{ kX86 ## opname ## 32MR, kMemReg, mem_use | IS_TERTIARY_OP | REG_USE02 | SETS_CCODES | uses_ccodes, { 0, 0, rm32_r32, 0, 0, 0, 0, 0 }, #opname "32MR", "[!0r+!1d],!2r" }, \
+{ kX86 ## opname ## 32AR, kArrayReg, mem_use | IS_QUIN_OP | REG_USE014 | SETS_CCODES | uses_ccodes, { 0, 0, rm32_r32, 0, 0, 0, 0, 0 }, #opname "32AR", "[!0r+!1r<<!2d+!3d],!4r" }, \
+{ kX86 ## opname ## 32TR, kThreadReg, mem_use | IS_BINARY_OP | REG_USE1 | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm32_r32, 0, 0, 0, 0, 0 }, #opname "32TR", "fs:[!0d],!1r" }, \
+{ kX86 ## opname ## 32RR, kRegReg, IS_BINARY_OP | reg_def | REG_USE01 | SETS_CCODES | uses_ccodes, { 0, 0, r32_rm32, 0, 0, 0, 0, 0 }, #opname "32RR", "!0r,!1r" }, \
+{ kX86 ## opname ## 32RM, kRegMem, IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE01 | SETS_CCODES | uses_ccodes, { 0, 0, r32_rm32, 0, 0, 0, 0, 0 }, #opname "32RM", "!0r,[!1r+!2d]" }, \
+{ kX86 ## opname ## 32RA, kRegArray, IS_LOAD | IS_QUIN_OP | reg_def | REG_USE012 | SETS_CCODES | uses_ccodes, { 0, 0, r32_rm32, 0, 0, 0, 0, 0 }, #opname "32RA", "!0r,[!1r+!2r<<!3d+!4d]" }, \
+{ kX86 ## opname ## 32RT, kRegThread, IS_LOAD | IS_BINARY_OP | reg_def | REG_USE0 | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, r32_rm32, 0, 0, 0, 0, 0 }, #opname "32RT", "!0r,fs:[!1d]" }, \
+{ kX86 ## opname ## 32RI, kRegImm, IS_BINARY_OP | reg_def | REG_USE0 | SETS_CCODES | uses_ccodes, { 0, 0, rm32_i32, 0, 0, rm32_i32_modrm, ax32_i32, 4 }, #opname "32RI", "!0r,!1d" }, \
+{ kX86 ## opname ## 32MI, kMemImm, mem_use | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES | uses_ccodes, { 0, 0, rm32_i32, 0, 0, rm32_i32_modrm, 0, 4 }, #opname "32MI", "[!0r+!1d],!2d" }, \
+{ kX86 ## opname ## 32AI, kArrayImm, mem_use | IS_QUIN_OP | REG_USE01 | SETS_CCODES | uses_ccodes, { 0, 0, rm32_i32, 0, 0, rm32_i32_modrm, 0, 4 }, #opname "32AI", "[!0r+!1r<<!2d+!3d],!4d" }, \
+{ kX86 ## opname ## 32TI, kThreadImm, mem_use | IS_BINARY_OP | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm32_i32, 0, 0, rm32_i32_modrm, 0, 4 }, #opname "32TI", "fs:[!0d],!1d" }, \
+{ kX86 ## opname ## 32RI8, kRegImm, IS_BINARY_OP | reg_def | REG_USE0 | SETS_CCODES | uses_ccodes, { 0, 0, rm32_i8, 0, 0, rm32_i8_modrm, 0, 1 }, #opname "32RI8", "!0r,!1d" }, \
+{ kX86 ## opname ## 32MI8, kMemImm, mem_use | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES | uses_ccodes, { 0, 0, rm32_i8, 0, 0, rm32_i8_modrm, 0, 1 }, #opname "32MI8", "[!0r+!1d],!2d" }, \
+{ kX86 ## opname ## 32AI8, kArrayImm, mem_use | IS_QUIN_OP | REG_USE01 | SETS_CCODES | uses_ccodes, { 0, 0, rm32_i8, 0, 0, rm32_i8_modrm, 0, 1 }, #opname "32AI8", "[!0r+!1r<<!2d+!3d],!4d" }, \
+{ kX86 ## opname ## 32TI8, kThreadImm, mem_use | IS_BINARY_OP | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm32_i8, 0, 0, rm32_i8_modrm, 0, 1 }, #opname "32TI8", "fs:[!0d],!1d" }
+
+ENCODING_MAP(Add, IS_LOAD | IS_STORE, REG_DEF0, 0,
+ 0x00 /* RegMem8/Reg8 */, 0x01 /* RegMem32/Reg32 */,
+ 0x02 /* Reg8/RegMem8 */, 0x03 /* Reg32/RegMem32 */,
+ 0x04 /* Rax8/imm8 opcode */, 0x05 /* Rax32/imm32 */,
+ 0x80, 0x0 /* RegMem8/imm8 */,
+ 0x81, 0x0 /* RegMem32/imm32 */, 0x83, 0x0 /* RegMem32/imm8 */),
+ENCODING_MAP(Or, IS_LOAD | IS_STORE, REG_DEF0, 0,
+ 0x08 /* RegMem8/Reg8 */, 0x09 /* RegMem32/Reg32 */,
+ 0x0A /* Reg8/RegMem8 */, 0x0B /* Reg32/RegMem32 */,
+ 0x0C /* Rax8/imm8 opcode */, 0x0D /* Rax32/imm32 */,
+ 0x80, 0x1 /* RegMem8/imm8 */,
+ 0x81, 0x1 /* RegMem32/imm32 */, 0x83, 0x1 /* RegMem32/imm8 */),
+ENCODING_MAP(Adc, IS_LOAD | IS_STORE, REG_DEF0, USES_CCODES,
+ 0x10 /* RegMem8/Reg8 */, 0x11 /* RegMem32/Reg32 */,
+ 0x12 /* Reg8/RegMem8 */, 0x13 /* Reg32/RegMem32 */,
+ 0x14 /* Rax8/imm8 opcode */, 0x15 /* Rax32/imm32 */,
+ 0x80, 0x2 /* RegMem8/imm8 */,
+ 0x81, 0x2 /* RegMem32/imm32 */, 0x83, 0x2 /* RegMem32/imm8 */),
+ENCODING_MAP(Sbb, IS_LOAD | IS_STORE, REG_DEF0, USES_CCODES,
+ 0x18 /* RegMem8/Reg8 */, 0x19 /* RegMem32/Reg32 */,
+ 0x1A /* Reg8/RegMem8 */, 0x1B /* Reg32/RegMem32 */,
+ 0x1C /* Rax8/imm8 opcode */, 0x1D /* Rax32/imm32 */,
+ 0x80, 0x3 /* RegMem8/imm8 */,
+ 0x81, 0x3 /* RegMem32/imm32 */, 0x83, 0x3 /* RegMem32/imm8 */),
+ENCODING_MAP(And, IS_LOAD | IS_STORE, REG_DEF0, 0,
+ 0x20 /* RegMem8/Reg8 */, 0x21 /* RegMem32/Reg32 */,
+ 0x22 /* Reg8/RegMem8 */, 0x23 /* Reg32/RegMem32 */,
+ 0x24 /* Rax8/imm8 opcode */, 0x25 /* Rax32/imm32 */,
+ 0x80, 0x4 /* RegMem8/imm8 */,
+ 0x81, 0x4 /* RegMem32/imm32 */, 0x83, 0x4 /* RegMem32/imm8 */),
+ENCODING_MAP(Sub, IS_LOAD | IS_STORE, REG_DEF0, 0,
+ 0x28 /* RegMem8/Reg8 */, 0x29 /* RegMem32/Reg32 */,
+ 0x2A /* Reg8/RegMem8 */, 0x2B /* Reg32/RegMem32 */,
+ 0x2C /* Rax8/imm8 opcode */, 0x2D /* Rax32/imm32 */,
+ 0x80, 0x5 /* RegMem8/imm8 */,
+ 0x81, 0x5 /* RegMem32/imm32 */, 0x83, 0x5 /* RegMem32/imm8 */),
+ENCODING_MAP(Xor, IS_LOAD | IS_STORE, REG_DEF0, 0,
+ 0x30 /* RegMem8/Reg8 */, 0x31 /* RegMem32/Reg32 */,
+ 0x32 /* Reg8/RegMem8 */, 0x33 /* Reg32/RegMem32 */,
+ 0x34 /* Rax8/imm8 opcode */, 0x35 /* Rax32/imm32 */,
+ 0x80, 0x6 /* RegMem8/imm8 */,
+ 0x81, 0x6 /* RegMem32/imm32 */, 0x83, 0x6 /* RegMem32/imm8 */),
+ENCODING_MAP(Cmp, IS_LOAD, 0, 0,
+ 0x38 /* RegMem8/Reg8 */, 0x39 /* RegMem32/Reg32 */,
+ 0x3A /* Reg8/RegMem8 */, 0x3B /* Reg32/RegMem32 */,
+ 0x3C /* Rax8/imm8 opcode */, 0x3D /* Rax32/imm32 */,
+ 0x80, 0x7 /* RegMem8/imm8 */,
+ 0x81, 0x7 /* RegMem32/imm32 */, 0x83, 0x7 /* RegMem32/imm8 */),
+#undef ENCODING_MAP
+
+ { kX86Imul16RRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES, { 0x66, 0, 0x69, 0, 0, 0, 0, 2 }, "Imul16RRI", "!0r,!1r,!2d" },
+ { kX86Imul16RMI, kRegMemImm, IS_LOAD | IS_QUAD_OP | REG_DEF0_USE1 | SETS_CCODES, { 0x66, 0, 0x69, 0, 0, 0, 0, 2 }, "Imul16RMI", "!0r,[!1r+!2d],!3d" },
+ { kX86Imul16RAI, kRegArrayImm, IS_LOAD | IS_SEXTUPLE_OP | REG_DEF0_USE12 | SETS_CCODES, { 0x66, 0, 0x69, 0, 0, 0, 0, 2 }, "Imul16RAI", "!0r,[!1r+!2r<<!3d+!4d],!5d" },
+
+ { kX86Imul32RRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES, { 0, 0, 0x69, 0, 0, 0, 0, 4 }, "Imul32RRI", "!0r,!1r,!2d" },
+ { kX86Imul32RMI, kRegMemImm, IS_LOAD | IS_QUAD_OP | REG_DEF0_USE1 | SETS_CCODES, { 0, 0, 0x69, 0, 0, 0, 0, 4 }, "Imul32RMI", "!0r,[!1r+!2d],!3d" },
+ { kX86Imul32RAI, kRegArrayImm, IS_LOAD | IS_SEXTUPLE_OP | REG_DEF0_USE12 | SETS_CCODES, { 0, 0, 0x69, 0, 0, 0, 0, 4 }, "Imul32RAI", "!0r,[!1r+!2r<<!3d+!4d],!5d" },
+ { kX86Imul32RRI8, kRegRegImm, IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES, { 0, 0, 0x6B, 0, 0, 0, 0, 1 }, "Imul32RRI8", "!0r,!1r,!2d" },
+ { kX86Imul32RMI8, kRegMemImm, IS_LOAD | IS_QUAD_OP | REG_DEF0_USE1 | SETS_CCODES, { 0, 0, 0x6B, 0, 0, 0, 0, 1 }, "Imul32RMI8", "!0r,[!1r+!2d],!3d" },
+ { kX86Imul32RAI8, kRegArrayImm, IS_LOAD | IS_SEXTUPLE_OP | REG_DEF0_USE12 | SETS_CCODES, { 0, 0, 0x6B, 0, 0, 0, 0, 1 }, "Imul32RAI8", "!0r,[!1r+!2r<<!3d+!4d],!5d" },
+
+ { kX86Mov8MR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02, { 0, 0, 0x88, 0, 0, 0, 0, 0 }, "Mov8MR", "[!0r+!1d],!2r" },
+ { kX86Mov8AR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014, { 0, 0, 0x88, 0, 0, 0, 0, 0 }, "Mov8AR", "[!0r+!1r<<!2d+!3d],!4r" },
+ { kX86Mov8TR, kThreadReg, IS_STORE | IS_BINARY_OP | REG_USE1, { THREAD_PREFIX, 0, 0x88, 0, 0, 0, 0, 0 }, "Mov8TR", "fs:[!0d],!1r" },
+ { kX86Mov8RR, kRegReg, IS_BINARY_OP | REG_DEF0_USE1, { 0, 0, 0x8A, 0, 0, 0, 0, 0 }, "Mov8RR", "!0r,!1r" },
+ { kX86Mov8RM, kRegMem, IS_LOAD | IS_TERTIARY_OP | REG_DEF0_USE1, { 0, 0, 0x8A, 0, 0, 0, 0, 0 }, "Mov8RM", "!0r,[!1r+!2d]" },
+ { kX86Mov8RA, kRegArray, IS_LOAD | IS_QUIN_OP | REG_DEF0_USE12, { 0, 0, 0x8A, 0, 0, 0, 0, 0 }, "Mov8RA", "!0r,[!1r+!2r<<!3d+!4d]" },
+ { kX86Mov8RT, kRegThread, IS_LOAD | IS_BINARY_OP | REG_DEF0, { THREAD_PREFIX, 0, 0x8A, 0, 0, 0, 0, 0 }, "Mov8RT", "!0r,fs:[!1d]" },
+ { kX86Mov8RI, kMovRegImm, IS_BINARY_OP | REG_DEF0, { 0, 0, 0xB0, 0, 0, 0, 0, 1 }, "Mov8RI", "!0r,!1d" },
+ { kX86Mov8MI, kMemImm, IS_STORE | IS_TERTIARY_OP | REG_USE0, { 0, 0, 0xC6, 0, 0, 0, 0, 1 }, "Mov8MI", "[!0r+!1d],!2d" },
+ { kX86Mov8AI, kArrayImm, IS_STORE | IS_QUIN_OP | REG_USE01, { 0, 0, 0xC6, 0, 0, 0, 0, 1 }, "Mov8AI", "[!0r+!1r<<!2d+!3d],!4d" },
+ { kX86Mov8TI, kThreadImm, IS_STORE | IS_BINARY_OP, { THREAD_PREFIX, 0, 0xC6, 0, 0, 0, 0, 1 }, "Mov8TI", "fs:[!0d],!1d" },
+
+ { kX86Mov16MR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02, { 0x66, 0, 0x89, 0, 0, 0, 0, 0 }, "Mov16MR", "[!0r+!1d],!2r" },
+ { kX86Mov16AR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014, { 0x66, 0, 0x89, 0, 0, 0, 0, 0 }, "Mov16AR", "[!0r+!1r<<!2d+!3d],!4r" },
+ { kX86Mov16TR, kThreadReg, IS_STORE | IS_BINARY_OP | REG_USE1, { THREAD_PREFIX, 0x66, 0x89, 0, 0, 0, 0, 0 }, "Mov16TR", "fs:[!0d],!1r" },
+ { kX86Mov16RR, kRegReg, IS_BINARY_OP | REG_DEF0_USE1, { 0x66, 0, 0x8B, 0, 0, 0, 0, 0 }, "Mov16RR", "!0r,!1r" },
+ { kX86Mov16RM, kRegMem, IS_LOAD | IS_TERTIARY_OP | REG_DEF0_USE1, { 0x66, 0, 0x8B, 0, 0, 0, 0, 0 }, "Mov16RM", "!0r,[!1r+!2d]" },
+ { kX86Mov16RA, kRegArray, IS_LOAD | IS_QUIN_OP | REG_DEF0_USE12, { 0x66, 0, 0x8B, 0, 0, 0, 0, 0 }, "Mov16RA", "!0r,[!1r+!2r<<!3d+!4d]" },
+ { kX86Mov16RT, kRegThread, IS_LOAD | IS_BINARY_OP | REG_DEF0, { THREAD_PREFIX, 0x66, 0x8B, 0, 0, 0, 0, 0 }, "Mov16RT", "!0r,fs:[!1d]" },
+ { kX86Mov16RI, kMovRegImm, IS_BINARY_OP | REG_DEF0, { 0x66, 0, 0xB8, 0, 0, 0, 0, 2 }, "Mov16RI", "!0r,!1d" },
+ { kX86Mov16MI, kMemImm, IS_STORE | IS_TERTIARY_OP | REG_USE0, { 0x66, 0, 0xC7, 0, 0, 0, 0, 2 }, "Mov16MI", "[!0r+!1d],!2d" },
+ { kX86Mov16AI, kArrayImm, IS_STORE | IS_QUIN_OP | REG_USE01, { 0x66, 0, 0xC7, 0, 0, 0, 0, 2 }, "Mov16AI", "[!0r+!1r<<!2d+!3d],!4d" },
+ { kX86Mov16TI, kThreadImm, IS_STORE | IS_BINARY_OP, { THREAD_PREFIX, 0x66, 0xC7, 0, 0, 0, 0, 2 }, "Mov16TI", "fs:[!0d],!1d" },
+
+ { kX86Mov32MR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02, { 0, 0, 0x89, 0, 0, 0, 0, 0 }, "Mov32MR", "[!0r+!1d],!2r" },
+ { kX86Mov32AR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014, { 0, 0, 0x89, 0, 0, 0, 0, 0 }, "Mov32AR", "[!0r+!1r<<!2d+!3d],!4r" },
+ { kX86Mov32TR, kThreadReg, IS_STORE | IS_BINARY_OP | REG_USE1, { THREAD_PREFIX, 0, 0x89, 0, 0, 0, 0, 0 }, "Mov32TR", "fs:[!0d],!1r" },
+ { kX86Mov32RR, kRegReg, IS_BINARY_OP | REG_DEF0_USE1, { 0, 0, 0x8B, 0, 0, 0, 0, 0 }, "Mov32RR", "!0r,!1r" },
+ { kX86Mov32RM, kRegMem, IS_LOAD | IS_TERTIARY_OP | REG_DEF0_USE1, { 0, 0, 0x8B, 0, 0, 0, 0, 0 }, "Mov32RM", "!0r,[!1r+!2d]" },
+ { kX86Mov32RA, kRegArray, IS_LOAD | IS_QUIN_OP | REG_DEF0_USE12, { 0, 0, 0x8B, 0, 0, 0, 0, 0 }, "Mov32RA", "!0r,[!1r+!2r<<!3d+!4d]" },
+ { kX86Mov32RT, kRegThread, IS_LOAD | IS_BINARY_OP | REG_DEF0, { THREAD_PREFIX, 0, 0x8B, 0, 0, 0, 0, 0 }, "Mov32RT", "!0r,fs:[!1d]" },
+ { kX86Mov32RI, kMovRegImm, IS_BINARY_OP | REG_DEF0, { 0, 0, 0xB8, 0, 0, 0, 0, 4 }, "Mov32RI", "!0r,!1d" },
+ { kX86Mov32MI, kMemImm, IS_STORE | IS_TERTIARY_OP | REG_USE0, { 0, 0, 0xC7, 0, 0, 0, 0, 4 }, "Mov32MI", "[!0r+!1d],!2d" },
+ { kX86Mov32AI, kArrayImm, IS_STORE | IS_QUIN_OP | REG_USE01, { 0, 0, 0xC7, 0, 0, 0, 0, 4 }, "Mov32AI", "[!0r+!1r<<!2d+!3d],!4d" },
+ { kX86Mov32TI, kThreadImm, IS_STORE | IS_BINARY_OP, { THREAD_PREFIX, 0, 0xC7, 0, 0, 0, 0, 4 }, "Mov32TI", "fs:[!0d],!1d" },
+
+ { kX86Lea32RA, kRegArray, IS_QUIN_OP | REG_DEF0_USE12, { 0, 0, 0x8D, 0, 0, 0, 0, 0 }, "Lea32RA", "!0r,[!1r+!2r<<!3d+!4d]" },
+
+#define SHIFT_ENCODING_MAP(opname, modrm_opcode) \
+{ kX86 ## opname ## 8RI, kShiftRegImm, IS_BINARY_OP | REG_DEF0_USE0 | SETS_CCODES, { 0, 0, 0xC0, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "8RI", "!0r,!1d" }, \
+{ kX86 ## opname ## 8MI, kShiftMemImm, IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xC0, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "8MI", "[!0r+!1d],!2d" }, \
+{ kX86 ## opname ## 8AI, kShiftArrayImm, IS_LOAD | IS_STORE | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0xC0, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "8AI", "[!0r+!1r<<!2d+!3d],!4d" }, \
+{ kX86 ## opname ## 8RC, kShiftRegCl, IS_BINARY_OP | REG_DEF0_USE0 | REG_USEC | SETS_CCODES, { 0, 0, 0xD2, 0, 0, modrm_opcode, 0, 1 }, #opname "8RC", "!0r,cl" }, \
+{ kX86 ## opname ## 8MC, kShiftMemCl, IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0 | REG_USEC | SETS_CCODES, { 0, 0, 0xD2, 0, 0, modrm_opcode, 0, 1 }, #opname "8MC", "[!0r+!1d],cl" }, \
+{ kX86 ## opname ## 8AC, kShiftArrayCl, IS_LOAD | IS_STORE | IS_QUIN_OP | REG_USE01 | REG_USEC | SETS_CCODES, { 0, 0, 0xD2, 0, 0, modrm_opcode, 0, 1 }, #opname "8AC", "[!0r+!1r<<!2d+!3d],cl" }, \
+ \
+{ kX86 ## opname ## 16RI, kShiftRegImm, IS_BINARY_OP | REG_DEF0_USE0 | SETS_CCODES, { 0x66, 0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "16RI", "!0r,!1d" }, \
+{ kX86 ## opname ## 16MI, kShiftMemImm, IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0x66, 0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "16MI", "[!0r+!1d],!2d" }, \
+{ kX86 ## opname ## 16AI, kShiftArrayImm, IS_LOAD | IS_STORE | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0x66, 0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "16AI", "[!0r+!1r<<!2d+!3d],!4d" }, \
+{ kX86 ## opname ## 16RC, kShiftRegCl, IS_BINARY_OP | REG_DEF0_USE0 | REG_USEC | SETS_CCODES, { 0x66, 0, 0xD3, 0, 0, modrm_opcode, 0, 1 }, #opname "16RC", "!0r,cl" }, \
+{ kX86 ## opname ## 16MC, kShiftMemCl, IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0 | REG_USEC | SETS_CCODES, { 0x66, 0, 0xD3, 0, 0, modrm_opcode, 0, 1 }, #opname "16MC", "[!0r+!1d],cl" }, \
+{ kX86 ## opname ## 16AC, kShiftArrayCl, IS_LOAD | IS_STORE | IS_QUIN_OP | REG_USE01 | REG_USEC | SETS_CCODES, { 0x66, 0, 0xD3, 0, 0, modrm_opcode, 0, 1 }, #opname "16AC", "[!0r+!1r<<!2d+!3d],cl" }, \
+ \
+{ kX86 ## opname ## 32RI, kShiftRegImm, IS_BINARY_OP | REG_DEF0_USE0 | SETS_CCODES, { 0, 0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "32RI", "!0r,!1d" }, \
+{ kX86 ## opname ## 32MI, kShiftMemImm, IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "32MI", "[!0r+!1d],!2d" }, \
+{ kX86 ## opname ## 32AI, kShiftArrayImm, IS_LOAD | IS_STORE | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "32AI", "[!0r+!1r<<!2d+!3d],!4d" }, \
+{ kX86 ## opname ## 32RC, kShiftRegCl, IS_BINARY_OP | REG_DEF0_USE0 | REG_USEC | SETS_CCODES, { 0, 0, 0xD3, 0, 0, modrm_opcode, 0, 0 }, #opname "32RC", "!0r,cl" }, \
+{ kX86 ## opname ## 32MC, kShiftMemCl, IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0 | REG_USEC | SETS_CCODES, { 0, 0, 0xD3, 0, 0, modrm_opcode, 0, 0 }, #opname "32MC", "[!0r+!1d],cl" }, \
+{ kX86 ## opname ## 32AC, kShiftArrayCl, IS_LOAD | IS_STORE | IS_QUIN_OP | REG_USE01 | REG_USEC | SETS_CCODES, { 0, 0, 0xD3, 0, 0, modrm_opcode, 0, 0 }, #opname "32AC", "[!0r+!1r<<!2d+!3d],cl" }
+
+ SHIFT_ENCODING_MAP(Rol, 0x0),
+ SHIFT_ENCODING_MAP(Ror, 0x1),
+ SHIFT_ENCODING_MAP(Rcl, 0x2),
+ SHIFT_ENCODING_MAP(Rcr, 0x3),
+ SHIFT_ENCODING_MAP(Sal, 0x4),
+ SHIFT_ENCODING_MAP(Shr, 0x5),
+ SHIFT_ENCODING_MAP(Sar, 0x7),
+#undef SHIFT_ENCODING_MAP
+
+ { kX86Cmc, kNullary, NO_OPERAND, { 0, 0, 0xF5, 0, 0, 0, 0, 0}, "Cmc", "" },
+
+ { kX86Test8RI, kRegImm, IS_BINARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF6, 0, 0, 0, 0, 1}, "Test8RI", "!0r,!1d" },
+ { kX86Test8MI, kMemImm, IS_LOAD | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF6, 0, 0, 0, 0, 1}, "Test8MI", "[!0r+!1d],!2d" },
+ { kX86Test8AI, kArrayImm, IS_LOAD | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0xF6, 0, 0, 0, 0, 1}, "Test8AI", "[!0r+!1r<<!2d+!3d],!4d" },
+ { kX86Test16RI, kRegImm, IS_BINARY_OP | REG_USE0 | SETS_CCODES, { 0x66, 0, 0xF7, 0, 0, 0, 0, 2}, "Test16RI", "!0r,!1d" },
+ { kX86Test16MI, kMemImm, IS_LOAD | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0x66, 0, 0xF7, 0, 0, 0, 0, 2}, "Test16MI", "[!0r+!1d],!2d" },
+ { kX86Test16AI, kArrayImm, IS_LOAD | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0x66, 0, 0xF7, 0, 0, 0, 0, 2}, "Test16AI", "[!0r+!1r<<!2d+!3d],!4d" },
+ { kX86Test32RI, kRegImm, IS_BINARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF7, 0, 0, 0, 0, 4}, "Test32RI", "!0r,!1d" },
+ { kX86Test32MI, kMemImm, IS_LOAD | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF7, 0, 0, 0, 0, 4}, "Test32MI", "[!0r+!1d],!2d" },
+ { kX86Test32AI, kArrayImm, IS_LOAD | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0xF7, 0, 0, 0, 0, 4}, "Test32AI", "[!0r+!1r<<!2d+!3d],!4d" },
+ { kX86Test32RR, kRegReg, IS_BINARY_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0x85, 0, 0, 0, 0, 0}, "Test32RR", "!0r,!1r" },
+
+#define UNARY_ENCODING_MAP(opname, modrm, is_store, sets_ccodes, \
+ reg, reg_kind, reg_flags, \
+ mem, mem_kind, mem_flags, \
+ arr, arr_kind, arr_flags, imm, \
+ b_flags, hw_flags, w_flags, \
+ b_format, hw_format, w_format) \
+{ kX86 ## opname ## 8 ## reg, reg_kind, reg_flags | b_flags | sets_ccodes, { 0, 0, 0xF6, 0, 0, modrm, 0, imm << 0}, #opname "8" #reg, #b_format "!0r" }, \
+{ kX86 ## opname ## 8 ## mem, mem_kind, IS_LOAD | is_store | mem_flags | b_flags | sets_ccodes, { 0, 0, 0xF6, 0, 0, modrm, 0, imm << 0}, #opname "8" #mem, #b_format "[!0r+!1d]" }, \
+{ kX86 ## opname ## 8 ## arr, arr_kind, IS_LOAD | is_store | arr_flags | b_flags | sets_ccodes, { 0, 0, 0xF6, 0, 0, modrm, 0, imm << 0}, #opname "8" #arr, #b_format "[!0r+!1r<<!2d+!3d]" }, \
+{ kX86 ## opname ## 16 ## reg, reg_kind, reg_flags | hw_flags | sets_ccodes, { 0x66, 0, 0xF7, 0, 0, modrm, 0, imm << 1}, #opname "16" #reg, #hw_format "!0r" }, \
+{ kX86 ## opname ## 16 ## mem, mem_kind, IS_LOAD | is_store | mem_flags | hw_flags | sets_ccodes, { 0x66, 0, 0xF7, 0, 0, modrm, 0, imm << 1}, #opname "16" #mem, #hw_format "[!0r+!1d]" }, \
+{ kX86 ## opname ## 16 ## arr, arr_kind, IS_LOAD | is_store | arr_flags | hw_flags | sets_ccodes, { 0x66, 0, 0xF7, 0, 0, modrm, 0, imm << 1}, #opname "16" #arr, #hw_format "[!0r+!1r<<!2d+!3d]" }, \
+{ kX86 ## opname ## 32 ## reg, reg_kind, reg_flags | w_flags | sets_ccodes, { 0, 0, 0xF7, 0, 0, modrm, 0, imm << 2}, #opname "32" #reg, #w_format "!0r" }, \
+{ kX86 ## opname ## 32 ## mem, mem_kind, IS_LOAD | is_store | mem_flags | w_flags | sets_ccodes, { 0, 0, 0xF7, 0, 0, modrm, 0, imm << 2}, #opname "32" #mem, #w_format "[!0r+!1d]" }, \
+{ kX86 ## opname ## 32 ## arr, arr_kind, IS_LOAD | is_store | arr_flags | w_flags | sets_ccodes, { 0, 0, 0xF7, 0, 0, modrm, 0, imm << 2}, #opname "32" #arr, #w_format "[!0r+!1r<<!2d+!3d]" }
+
+ UNARY_ENCODING_MAP(Not, 0x2, IS_STORE, 0, R, kReg, IS_UNARY_OP | REG_DEF0_USE0, M, kMem, IS_BINARY_OP | REG_USE0, A, kArray, IS_QUAD_OP | REG_USE01, 0, 0, 0, 0, "", "", ""),
+ UNARY_ENCODING_MAP(Neg, 0x3, IS_STORE, SETS_CCODES, R, kReg, IS_UNARY_OP | REG_DEF0_USE0, M, kMem, IS_BINARY_OP | REG_USE0, A, kArray, IS_QUAD_OP | REG_USE01, 0, 0, 0, 0, "", "", ""),
+
+ UNARY_ENCODING_MAP(Mul, 0x4, 0, SETS_CCODES, DaR, kRegRegReg, IS_UNARY_OP | REG_USE0, DaM, kRegRegMem, IS_BINARY_OP | REG_USE0, DaA, kRegRegArray, IS_QUAD_OP | REG_USE01, 0, REG_DEFA_USEA, REG_DEFAD_USEA, REG_DEFAD_USEA, "ax,al,", "dx:ax,ax,", "edx:eax,eax,"),
+ UNARY_ENCODING_MAP(Imul, 0x5, 0, SETS_CCODES, DaR, kRegRegReg, IS_UNARY_OP | REG_USE0, DaM, kRegRegMem, IS_BINARY_OP | REG_USE0, DaA, kRegRegArray, IS_QUAD_OP | REG_USE01, 0, REG_DEFA_USEA, REG_DEFAD_USEA, REG_DEFAD_USEA, "ax,al,", "dx:ax,ax,", "edx:eax,eax,"),
+ UNARY_ENCODING_MAP(Divmod, 0x6, 0, SETS_CCODES, DaR, kRegRegReg, IS_UNARY_OP | REG_USE0, DaM, kRegRegMem, IS_BINARY_OP | REG_USE0, DaA, kRegRegArray, IS_QUAD_OP | REG_USE01, 0, REG_DEFA_USEA, REG_DEFAD_USEAD, REG_DEFAD_USEAD, "ah:al,ax,", "dx:ax,dx:ax,", "edx:eax,edx:eax,"),
+ UNARY_ENCODING_MAP(Idivmod, 0x7, 0, SETS_CCODES, DaR, kRegRegReg, IS_UNARY_OP | REG_USE0, DaM, kRegRegMem, IS_BINARY_OP | REG_USE0, DaA, kRegRegArray, IS_QUAD_OP | REG_USE01, 0, REG_DEFA_USEA, REG_DEFAD_USEAD, REG_DEFAD_USEAD, "ah:al,ax,", "dx:ax,dx:ax,", "edx:eax,edx:eax,"),
+#undef UNARY_ENCODING_MAP
+
+#define EXT_0F_ENCODING_MAP(opname, prefix, opcode, reg_def) \
+{ kX86 ## opname ## RR, kRegReg, IS_BINARY_OP | reg_def | REG_USE01, { prefix, 0, 0x0F, opcode, 0, 0, 0, 0 }, #opname "RR", "!0r,!1r" }, \
+{ kX86 ## opname ## RM, kRegMem, IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE01, { prefix, 0, 0x0F, opcode, 0, 0, 0, 0 }, #opname "RM", "!0r,[!1r+!2d]" }, \
+{ kX86 ## opname ## RA, kRegArray, IS_LOAD | IS_QUIN_OP | reg_def | REG_USE012, { prefix, 0, 0x0F, opcode, 0, 0, 0, 0 }, #opname "RA", "!0r,[!1r+!2r<<!3d+!4d]" }
+
+ EXT_0F_ENCODING_MAP(Movsd, 0xF2, 0x10, REG_DEF0),
+ { kX86MovsdMR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02, { 0xF2, 0, 0x0F, 0x11, 0, 0, 0, 0 }, "MovsdMR", "[!0r+!1d],!2r" },
+ { kX86MovsdAR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014, { 0xF2, 0, 0x0F, 0x11, 0, 0, 0, 0 }, "MovsdAR", "[!0r+!1r<<!2d+!3d],!4r" },
+
+ EXT_0F_ENCODING_MAP(Movss, 0xF3, 0x10, REG_DEF0),
+ { kX86MovssMR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02, { 0xF3, 0, 0x0F, 0x11, 0, 0, 0, 0 }, "MovssMR", "[!0r+!1d],!2r" },
+ { kX86MovssAR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014, { 0xF3, 0, 0x0F, 0x11, 0, 0, 0, 0 }, "MovssAR", "[!0r+!1r<<!2d+!3d],!4r" },
+
+ EXT_0F_ENCODING_MAP(Cvtsi2sd, 0xF2, 0x2A, REG_DEF0),
+ EXT_0F_ENCODING_MAP(Cvtsi2ss, 0xF3, 0x2A, REG_DEF0),
+ EXT_0F_ENCODING_MAP(Cvttsd2si, 0xF2, 0x2C, REG_DEF0),
+ EXT_0F_ENCODING_MAP(Cvttss2si, 0xF3, 0x2C, REG_DEF0),
+ EXT_0F_ENCODING_MAP(Cvtsd2si, 0xF2, 0x2D, REG_DEF0),
+ EXT_0F_ENCODING_MAP(Cvtss2si, 0xF3, 0x2D, REG_DEF0),
+ EXT_0F_ENCODING_MAP(Ucomisd, 0x66, 0x2E, SETS_CCODES),
+ EXT_0F_ENCODING_MAP(Ucomiss, 0x00, 0x2E, SETS_CCODES),
+ EXT_0F_ENCODING_MAP(Comisd, 0x66, 0x2F, SETS_CCODES),
+ EXT_0F_ENCODING_MAP(Comiss, 0x00, 0x2F, SETS_CCODES),
+ EXT_0F_ENCODING_MAP(Orps, 0x00, 0x56, REG_DEF0),
+ EXT_0F_ENCODING_MAP(Xorps, 0x00, 0x57, REG_DEF0),
+ EXT_0F_ENCODING_MAP(Addsd, 0xF2, 0x58, REG_DEF0),
+ EXT_0F_ENCODING_MAP(Addss, 0xF3, 0x58, REG_DEF0),
+ EXT_0F_ENCODING_MAP(Mulsd, 0xF2, 0x59, REG_DEF0),
+ EXT_0F_ENCODING_MAP(Mulss, 0xF3, 0x59, REG_DEF0),
+ EXT_0F_ENCODING_MAP(Cvtsd2ss, 0xF2, 0x5A, REG_DEF0),
+ EXT_0F_ENCODING_MAP(Cvtss2sd, 0xF3, 0x5A, REG_DEF0),
+ EXT_0F_ENCODING_MAP(Subsd, 0xF2, 0x5C, REG_DEF0),
+ EXT_0F_ENCODING_MAP(Subss, 0xF3, 0x5C, REG_DEF0),
+ EXT_0F_ENCODING_MAP(Divsd, 0xF2, 0x5E, REG_DEF0),
+ EXT_0F_ENCODING_MAP(Divss, 0xF3, 0x5E, REG_DEF0),
+
+ { kX86PsrlqRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x73, 0, 2, 0, 1 }, "PsrlqRI", "!0r,!1d" },
+ { kX86PsllqRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x73, 0, 6, 0, 1 }, "PsllqRI", "!0r,!1d" },
+
+ EXT_0F_ENCODING_MAP(Movdxr, 0x66, 0x6E, REG_DEF0),
+ { kX86MovdrxRR, kRegRegStore, IS_BINARY_OP | REG_DEF0 | REG_USE01, { 0x66, 0, 0x0F, 0x7E, 0, 0, 0, 0 }, "MovdrxRR", "!0r,!1r" },
+ { kX86MovdrxMR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02, { 0x66, 0, 0x0F, 0x7E, 0, 0, 0, 0 }, "MovdrxMR", "[!0r+!1d],!2r" },
+ { kX86MovdrxAR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014, { 0x66, 0, 0x0F, 0x7E, 0, 0, 0, 0 }, "MovdrxAR", "[!0r+!1r<<!2d+!3d],!4r" },
+
+ { kX86Set8R, kRegCond, IS_BINARY_OP | REG_DEF0 | USES_CCODES, { 0, 0, 0x0F, 0x90, 0, 0, 0, 0 }, "Set8R", "!1c !0r" },
+ { kX86Set8M, kMemCond, IS_STORE | IS_TERTIARY_OP | REG_USE0 | USES_CCODES, { 0, 0, 0x0F, 0x90, 0, 0, 0, 0 }, "Set8M", "!2c [!0r+!1d]" },
+ { kX86Set8A, kArrayCond, IS_STORE | IS_QUIN_OP | REG_USE01 | USES_CCODES, { 0, 0, 0x0F, 0x90, 0, 0, 0, 0 }, "Set8A", "!4c [!0r+!1r<<!2d+!3d]" },
+
+ // TODO: load/store?
+ // Encode the modrm opcode as an extra opcode byte to avoid computation during assembly.
+ { kX86Mfence, kReg, NO_OPERAND, { 0, 0, 0x0F, 0xAE, 0, 6, 0, 0 }, "Mfence", "" },
+
+ EXT_0F_ENCODING_MAP(Imul16, 0x66, 0xAF, REG_DEF0 | SETS_CCODES),
+ EXT_0F_ENCODING_MAP(Imul32, 0x00, 0xAF, REG_DEF0 | SETS_CCODES),
+
+ { kX86CmpxchgRR, kRegRegStore, IS_BINARY_OP | REG_DEF0 | REG_USE01 | REG_DEFA_USEA | SETS_CCODES, { 0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Cmpxchg", "!0r,!1r" },
+ { kX86CmpxchgMR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02 | REG_DEFA_USEA | SETS_CCODES, { 0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Cmpxchg", "[!0r+!1d],!2r" },
+ { kX86CmpxchgAR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014 | REG_DEFA_USEA | SETS_CCODES, { 0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Cmpxchg", "[!0r+!1r<<!2d+!3d],!4r" },
+ { kX86LockCmpxchgRR, kRegRegStore, IS_BINARY_OP | REG_DEF0 | REG_USE01 | REG_DEFA_USEA | SETS_CCODES, { 0xF0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Lock Cmpxchg", "!0r,!1r" },
+ { kX86LockCmpxchgMR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02 | REG_DEFA_USEA | SETS_CCODES, { 0xF0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Lock Cmpxchg", "[!0r+!1d],!2r" },
+ { kX86LockCmpxchgAR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014 | REG_DEFA_USEA | SETS_CCODES, { 0xF0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Lock Cmpxchg", "[!0r+!1r<<!2d+!3d],!4r" },
+
+ EXT_0F_ENCODING_MAP(Movzx8, 0x00, 0xB6, REG_DEF0),
+ EXT_0F_ENCODING_MAP(Movzx16, 0x00, 0xB7, REG_DEF0),
+ EXT_0F_ENCODING_MAP(Movsx8, 0x00, 0xBE, REG_DEF0),
+ EXT_0F_ENCODING_MAP(Movsx16, 0x00, 0xBF, REG_DEF0),
+#undef EXT_0F_ENCODING_MAP
+
+ { kX86Jcc8, kJcc, IS_BINARY_OP | IS_BRANCH | NEEDS_FIXUP | USES_CCODES, { 0, 0, 0x70, 0, 0, 0, 0, 0 }, "Jcc8", "!1c !0t" },
+ { kX86Jcc32, kJcc, IS_BINARY_OP | IS_BRANCH | NEEDS_FIXUP | USES_CCODES, { 0, 0, 0x0F, 0x80, 0, 0, 0, 0 }, "Jcc32", "!1c !0t" },
+ { kX86Jmp8, kJmp, IS_UNARY_OP | IS_BRANCH | NEEDS_FIXUP, { 0, 0, 0xEB, 0, 0, 0, 0, 0 }, "Jmp8", "!0t" },
+ { kX86Jmp32, kJmp, IS_UNARY_OP | IS_BRANCH | NEEDS_FIXUP, { 0, 0, 0xE9, 0, 0, 0, 0, 0 }, "Jmp32", "!0t" },
+ { kX86JmpR, kJmp, IS_UNARY_OP | IS_BRANCH | REG_USE0, { 0, 0, 0xFF, 0, 0, 4, 0, 0 }, "JmpR", "!0r" },
+ { kX86CallR, kCall, IS_UNARY_OP | IS_BRANCH | REG_USE0, { 0, 0, 0xE8, 0, 0, 0, 0, 0 }, "CallR", "!0r" },
+ { kX86CallM, kCall, IS_BINARY_OP | IS_BRANCH | IS_LOAD | REG_USE0, { 0, 0, 0xFF, 0, 0, 2, 0, 0 }, "CallM", "[!0r+!1d]" },
+ { kX86CallA, kCall, IS_QUAD_OP | IS_BRANCH | IS_LOAD | REG_USE01, { 0, 0, 0xFF, 0, 0, 2, 0, 0 }, "CallA", "[!0r+!1r<<!2d+!3d]" },
+ { kX86CallT, kCall, IS_UNARY_OP | IS_BRANCH | IS_LOAD, { THREAD_PREFIX, 0, 0xFF, 0, 0, 2, 0, 0 }, "CallT", "fs:[!0d]" },
+  { kX86Ret, kNullary, NO_OPERAND | IS_BRANCH, { 0, 0, 0xC3, 0, 0, 0, 0, 0 }, "Ret", "" },
+
+ { kX86StartOfMethod, kMacro, IS_UNARY_OP | SETS_CCODES, { 0, 0, 0, 0, 0, 0, 0, 0 }, "StartOfMethod", "!0r" },
+ { kX86PcRelLoadRA, kPcRel, IS_LOAD | IS_QUIN_OP | REG_DEF0_USE12, { 0, 0, 0x8B, 0, 0, 0, 0, 0 }, "PcRelLoadRA", "!0r,[!1r+!2r<<!3d+!4p]" },
+ { kX86PcRelAdr, kPcRel, IS_LOAD | IS_BINARY_OP | REG_DEF0, { 0, 0, 0xB8, 0, 0, 0, 0, 4 }, "PcRelAdr", "!0r,!1d" },
+};
+
+static size_t ComputeSize(const X86EncodingMap* entry, int base, int displacement, bool has_sib) {
+ size_t size = 0;
+ if (entry->skeleton.prefix1 > 0) {
+ ++size;
+ if (entry->skeleton.prefix2 > 0) {
+ ++size;
+ }
+ }
+ ++size; // opcode
+ if (entry->skeleton.opcode == 0x0F) {
+ ++size;
+ if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
+ ++size;
+ }
+ }
+ ++size; // modrm
+ if (has_sib || base == rX86_SP) {
+ // SP requires a SIB byte.
+ ++size;
+ }
+ if (displacement != 0 || base == rBP) {
+ // BP requires an explicit displacement, even when it's 0.
+ if (entry->opcode != kX86Lea32RA) {
+ DCHECK_NE(entry->flags & (IS_LOAD | IS_STORE), 0ULL) << entry->name;
+ }
+ size += IS_SIMM8(displacement) ? 1 : 4;
+ }
+ size += entry->skeleton.immediate_bytes;
+ return size;
+}
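+// Worked example (illustrative) for ComputeSize(): an Add32MR with base == rX86_SP
+// and an 8-bit displacement counts one opcode byte, one modrm byte, one SIB byte
+// (forced by the ESP base) and one disp8 byte, 4 bytes total, matching the native
+// encoding "01 44 24 xx" (add [esp+disp8], reg).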
+
+int X86Codegen::GetInsnSize(LIR* lir) {
+ const X86EncodingMap* entry = &X86Codegen::EncodingMap[lir->opcode];
+ switch (entry->kind) {
+ case kData:
+ return 4; // 4 bytes of data
+ case kNop:
+ return lir->operands[0]; // length of nop is sole operand
+ case kNullary:
+ return 1; // 1 byte of opcode
+ case kReg: // lir operands - 0: reg
+ return ComputeSize(entry, 0, 0, false);
+ case kMem: // lir operands - 0: base, 1: disp
+ return ComputeSize(entry, lir->operands[0], lir->operands[1], false);
+ case kArray: // lir operands - 0: base, 1: index, 2: scale, 3: disp
+ return ComputeSize(entry, lir->operands[0], lir->operands[3], true);
+ case kMemReg: // lir operands - 0: base, 1: disp, 2: reg
+ return ComputeSize(entry, lir->operands[0], lir->operands[1], false);
+ case kArrayReg: // lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
+ return ComputeSize(entry, lir->operands[0], lir->operands[3], true);
+ case kThreadReg: // lir operands - 0: disp, 1: reg
+ return ComputeSize(entry, 0, lir->operands[0], false);
+ case kRegReg:
+ return ComputeSize(entry, 0, 0, false);
+ case kRegRegStore:
+ return ComputeSize(entry, 0, 0, false);
+ case kRegMem: // lir operands - 0: reg, 1: base, 2: disp
+ return ComputeSize(entry, lir->operands[1], lir->operands[2], false);
+ case kRegArray: // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp
+ return ComputeSize(entry, lir->operands[1], lir->operands[4], true);
+ case kRegThread: // lir operands - 0: reg, 1: disp
+ return ComputeSize(entry, 0, 0x12345678, false); // displacement size is always 32bit
+ case kRegImm: { // lir operands - 0: reg, 1: immediate
+ size_t size = ComputeSize(entry, 0, 0, false);
+ if (entry->skeleton.ax_opcode == 0) {
+ return size;
+ } else {
+ // AX opcodes don't require the modrm byte.
+ int reg = lir->operands[0];
+ return size - (reg == rAX ? 1 : 0);
+ }
+ }
+ case kMemImm: // lir operands - 0: base, 1: disp, 2: immediate
+ return ComputeSize(entry, lir->operands[0], lir->operands[1], false);
+ case kArrayImm: // lir operands - 0: base, 1: index, 2: scale, 3: disp 4: immediate
+ return ComputeSize(entry, lir->operands[0], lir->operands[3], true);
+ case kThreadImm: // lir operands - 0: disp, 1: imm
+ return ComputeSize(entry, 0, 0x12345678, false); // displacement size is always 32bit
+ case kRegRegImm: // lir operands - 0: reg, 1: reg, 2: imm
+ return ComputeSize(entry, 0, 0, false);
+ case kRegMemImm: // lir operands - 0: reg, 1: base, 2: disp, 3: imm
+ return ComputeSize(entry, lir->operands[1], lir->operands[2], false);
+ case kRegArrayImm: // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp, 5: imm
+ return ComputeSize(entry, lir->operands[1], lir->operands[4], true);
+ case kMovRegImm: // lir operands - 0: reg, 1: immediate
+ return 1 + entry->skeleton.immediate_bytes;
+ case kShiftRegImm: // lir operands - 0: reg, 1: immediate
+ // Shift by immediate one has a shorter opcode.
+ return ComputeSize(entry, 0, 0, false) - (lir->operands[1] == 1 ? 1 : 0);
+ case kShiftMemImm: // lir operands - 0: base, 1: disp, 2: immediate
+ // Shift by immediate one has a shorter opcode.
+ return ComputeSize(entry, lir->operands[0], lir->operands[1], false) -
+ (lir->operands[2] == 1 ? 1 : 0);
+ case kShiftArrayImm: // lir operands - 0: base, 1: index, 2: scale, 3: disp 4: immediate
+ // Shift by immediate one has a shorter opcode.
+ return ComputeSize(entry, lir->operands[0], lir->operands[3], true) -
+ (lir->operands[4] == 1 ? 1 : 0);
+ case kShiftRegCl:
+ return ComputeSize(entry, 0, 0, false);
+ case kShiftMemCl: // lir operands - 0: base, 1: disp, 2: cl
+ return ComputeSize(entry, lir->operands[0], lir->operands[1], false);
+ case kShiftArrayCl: // lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
+ return ComputeSize(entry, lir->operands[0], lir->operands[3], true);
+ case kRegCond: // lir operands - 0: reg, 1: cond
+ return ComputeSize(entry, 0, 0, false);
+ case kMemCond: // lir operands - 0: base, 1: disp, 2: cond
+ return ComputeSize(entry, lir->operands[0], lir->operands[1], false);
+ case kArrayCond: // lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: cond
+ return ComputeSize(entry, lir->operands[0], lir->operands[3], true);
+ case kJcc:
+ if (lir->opcode == kX86Jcc8) {
+ return 2; // opcode + rel8
+ } else {
+ DCHECK(lir->opcode == kX86Jcc32);
+ return 6; // 2 byte opcode + rel32
+ }
+ case kJmp:
+ if (lir->opcode == kX86Jmp8) {
+ return 2; // opcode + rel8
+ } else if (lir->opcode == kX86Jmp32) {
+ return 5; // opcode + rel32
+ } else {
+ DCHECK(lir->opcode == kX86JmpR);
+ return 2; // opcode + modrm
+ }
+ case kCall:
+ switch (lir->opcode) {
+ case kX86CallR: return 2; // opcode modrm
+ case kX86CallM: // lir operands - 0: base, 1: disp
+ return ComputeSize(entry, lir->operands[0], lir->operands[1], false);
+ case kX86CallA: // lir operands - 0: base, 1: index, 2: scale, 3: disp
+ return ComputeSize(entry, lir->operands[0], lir->operands[3], true);
+ case kX86CallT: // lir operands - 0: disp
+ return ComputeSize(entry, 0, 0x12345678, false); // displacement size is always 32bit
+ default:
+ break;
+ }
+ break;
+ case kPcRel:
+ if (entry->opcode == kX86PcRelLoadRA) {
+ // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: table
+ return ComputeSize(entry, lir->operands[1], 0x12345678, true);
+ } else {
+ DCHECK(entry->opcode == kX86PcRelAdr);
+ return 5; // opcode with reg + 4 byte immediate
+ }
+ case kMacro:
+ DCHECK_EQ(lir->opcode, static_cast<int>(kX86StartOfMethod));
+ return 5 /* call opcode + 4 byte displacement */ + 1 /* pop reg */ +
+ ComputeSize(&X86Codegen::EncodingMap[kX86Sub32RI], 0, 0, false) -
+ (lir->operands[0] == rAX ? 1 : 0); // shorter ax encoding
+ default:
+ break;
+ }
+ UNIMPLEMENTED(FATAL) << "Unimplemented size encoding for: " << entry->name;
+ return 0;
+}
+
+static uint8_t ModrmForDisp(int base, int disp) {
+ // BP requires an explicit disp, so do not omit it in the 0 case
+ if (disp == 0 && base != rBP) {
+ return 0;
+ } else if (IS_SIMM8(disp)) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
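+// ModrmForDisp() returns the x86 "mod" field: 0 means no displacement byte,
+// 1 an 8-bit displacement and 2 a 32-bit displacement. An EBP base with
+// mod == 0 would instead select disp32-only addressing, hence the rBP special case.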
+
+static void EmitDisp(CompilationUnit* cu, int base, int disp) {
+ // BP requires an explicit disp, so do not omit it in the 0 case
+ if (disp == 0 && base != rBP) {
+ return;
+ } else if (IS_SIMM8(disp)) {
+ cu->code_buffer.push_back(disp & 0xFF);
+ } else {
+ cu->code_buffer.push_back(disp & 0xFF);
+ cu->code_buffer.push_back((disp >> 8) & 0xFF);
+ cu->code_buffer.push_back((disp >> 16) & 0xFF);
+ cu->code_buffer.push_back((disp >> 24) & 0xFF);
+ }
+}
+
+static void EmitOpReg(CompilationUnit* cu, const X86EncodingMap* entry, uint8_t reg) {
+ if (entry->skeleton.prefix1 != 0) {
+ cu->code_buffer.push_back(entry->skeleton.prefix1);
+ if (entry->skeleton.prefix2 != 0) {
+ cu->code_buffer.push_back(entry->skeleton.prefix2);
+ }
+ } else {
+ DCHECK_EQ(0, entry->skeleton.prefix2);
+ }
+ cu->code_buffer.push_back(entry->skeleton.opcode);
+ if (entry->skeleton.opcode == 0x0F) {
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
+ if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
+ } else {
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+ }
+ } else {
+ DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+ }
+ if (X86_FPREG(reg)) {
+ reg = reg & X86_FP_REG_MASK;
+ }
+ if (reg >= 4) {
+ DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg)
+ << " in " << PrettyMethod(cu->method_idx, *cu->dex_file);
+ }
+ DCHECK_LT(reg, 8);
+ uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
+ cu->code_buffer.push_back(modrm);
+ DCHECK_EQ(0, entry->skeleton.ax_opcode);
+ DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
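+// Illustrative byte trace for EmitOpReg (assuming the Not32R entry: opcode 0xF6/0xF7
+// family, 32-bit form 0xF7 with modrm opcode /2): for reg == rAX the modrm byte is
+// (3 << 6) | (2 << 3) | 0 = 0xD0, giving the two-byte sequence "F7 D0" (not eax).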
+
+static void EmitOpMem(CompilationUnit* cu, const X86EncodingMap* entry, uint8_t base, int disp) {
+ if (entry->skeleton.prefix1 != 0) {
+ cu->code_buffer.push_back(entry->skeleton.prefix1);
+ if (entry->skeleton.prefix2 != 0) {
+ cu->code_buffer.push_back(entry->skeleton.prefix2);
+ }
+ } else {
+ DCHECK_EQ(0, entry->skeleton.prefix2);
+ }
+ cu->code_buffer.push_back(entry->skeleton.opcode);
+ DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+ DCHECK_LT(entry->skeleton.modrm_opcode, 8);
+ DCHECK_LT(base, 8);
+ uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (entry->skeleton.modrm_opcode << 3) | base;
+ cu->code_buffer.push_back(modrm);
+ EmitDisp(cu, base, disp);
+ DCHECK_EQ(0, entry->skeleton.ax_opcode);
+ DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
+static void EmitMemReg(CompilationUnit* cu, const X86EncodingMap* entry,
+ uint8_t base, int disp, uint8_t reg) {
+ if (entry->skeleton.prefix1 != 0) {
+ cu->code_buffer.push_back(entry->skeleton.prefix1);
+ if (entry->skeleton.prefix2 != 0) {
+ cu->code_buffer.push_back(entry->skeleton.prefix2);
+ }
+ } else {
+ DCHECK_EQ(0, entry->skeleton.prefix2);
+ }
+ cu->code_buffer.push_back(entry->skeleton.opcode);
+ if (entry->skeleton.opcode == 0x0F) {
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
+ if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
+ } else {
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+ }
+ } else {
+ DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+ }
+ if (X86_FPREG(reg)) {
+ reg = reg & X86_FP_REG_MASK;
+ }
+ if (reg >= 4) {
+ DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg)
+ << " in " << PrettyMethod(cu->method_idx, *cu->dex_file);
+ }
+ DCHECK_LT(reg, 8);
+ DCHECK_LT(base, 8);
+ uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (reg << 3) | base;
+ cu->code_buffer.push_back(modrm);
+ if (base == rX86_SP) {
+ // Special SIB for SP base
+ cu->code_buffer.push_back(0 << 6 | (rX86_SP << 3) | rX86_SP);
+ }
+ EmitDisp(cu, base, disp);
+ DCHECK_EQ(0, entry->skeleton.modrm_opcode);
+ DCHECK_EQ(0, entry->skeleton.ax_opcode);
+ DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
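+// Illustrative byte trace for EmitMemReg (assuming Mov32MR: opcode 0x89) with
+// base == rX86_SP, disp == 8, reg == rAX: modrm = (1 << 6) | (0 << 3) | 4 = 0x44,
+// followed by the mandatory ESP SIB byte 0x24 and disp8 0x08, i.e.
+// "89 44 24 08" (mov [esp+8], eax).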
+
+static void EmitRegMem(CompilationUnit* cu, const X86EncodingMap* entry,
+ uint8_t reg, uint8_t base, int disp) {
+ // Opcode will flip operands.
+ EmitMemReg(cu, entry, base, disp, reg);
+}
+
+static void EmitRegArray(CompilationUnit* cu, const X86EncodingMap* entry, uint8_t reg,
+ uint8_t base, uint8_t index, int scale, int disp) {
+ if (entry->skeleton.prefix1 != 0) {
+ cu->code_buffer.push_back(entry->skeleton.prefix1);
+ if (entry->skeleton.prefix2 != 0) {
+ cu->code_buffer.push_back(entry->skeleton.prefix2);
+ }
+ } else {
+ DCHECK_EQ(0, entry->skeleton.prefix2);
+ }
+ cu->code_buffer.push_back(entry->skeleton.opcode);
+ if (entry->skeleton.opcode == 0x0F) {
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
+ if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
+ } else {
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+ }
+ } else {
+ DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+ }
+ if (X86_FPREG(reg)) {
+ reg = reg & X86_FP_REG_MASK;
+ }
+ DCHECK_LT(reg, 8);
+ uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (reg << 3) | rX86_SP;
+ cu->code_buffer.push_back(modrm);
+ DCHECK_LT(scale, 4);
+ DCHECK_LT(index, 8);
+ DCHECK_LT(base, 8);
+ uint8_t sib = (scale << 6) | (index << 3) | base;
+ cu->code_buffer.push_back(sib);
+ EmitDisp(cu, base, disp);
+ DCHECK_EQ(0, entry->skeleton.modrm_opcode);
+ DCHECK_EQ(0, entry->skeleton.ax_opcode);
+ DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
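+// Illustrative byte trace for EmitRegArray (assuming Mov32RA: opcode 0x8B and the
+// native register numbering rAX == 0, rCX == 1, rBX == 3): reg == rAX, base == rBX,
+// index == rCX, scale == 2, disp == 16 yields modrm 0x44 (mod = 1, reg = eax, rm = SIB),
+// sib = (2 << 6) | (1 << 3) | 3 = 0x8B and disp8 0x10, i.e. "8B 44 8B 10"
+// (mov eax, [ebx+ecx*4+16]).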
+
+static void EmitArrayReg(CompilationUnit* cu, const X86EncodingMap* entry,
+ uint8_t base, uint8_t index, int scale, int disp, uint8_t reg) {
+ // Opcode will flip operands.
+ EmitRegArray(cu, entry, reg, base, index, scale, disp);
+}
+
+static void EmitRegThread(CompilationUnit* cu, const X86EncodingMap* entry,
+ uint8_t reg, int disp) {
+ DCHECK_NE(entry->skeleton.prefix1, 0);
+ cu->code_buffer.push_back(entry->skeleton.prefix1);
+ if (entry->skeleton.prefix2 != 0) {
+ cu->code_buffer.push_back(entry->skeleton.prefix2);
+ }
+ cu->code_buffer.push_back(entry->skeleton.opcode);
+ if (entry->skeleton.opcode == 0x0F) {
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
+ if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
+ } else {
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+ }
+ } else {
+ DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+ }
+ if (X86_FPREG(reg)) {
+ reg = reg & X86_FP_REG_MASK;
+ }
+ if (reg >= 4) {
+ DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg)
+ << " in " << PrettyMethod(cu->method_idx, *cu->dex_file);
+ }
+ DCHECK_LT(reg, 8);
+ uint8_t modrm = (0 << 6) | (reg << 3) | rBP;
+ cu->code_buffer.push_back(modrm);
+ cu->code_buffer.push_back(disp & 0xFF);
+ cu->code_buffer.push_back((disp >> 8) & 0xFF);
+ cu->code_buffer.push_back((disp >> 16) & 0xFF);
+ cu->code_buffer.push_back((disp >> 24) & 0xFF);
+ DCHECK_EQ(0, entry->skeleton.modrm_opcode);
+ DCHECK_EQ(0, entry->skeleton.ax_opcode);
+ DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
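+// Illustrative byte trace for EmitRegThread (assuming THREAD_PREFIX is the 0x64 fs
+// segment override and Mov32RT: opcode 0x8B): reg == rAX, disp == 0x9C gives
+// modrm = (0 << 6) | (0 << 3) | rBP = 0x05, which selects disp32-only addressing,
+// so the emitted bytes are "64 8B 05 9C 00 00 00" (mov eax, fs:[0x9c]).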
+
+static void EmitRegReg(CompilationUnit* cu, const X86EncodingMap* entry,
+ uint8_t reg1, uint8_t reg2) {
+ if (entry->skeleton.prefix1 != 0) {
+ cu->code_buffer.push_back(entry->skeleton.prefix1);
+ if (entry->skeleton.prefix2 != 0) {
+ cu->code_buffer.push_back(entry->skeleton.prefix2);
+ }
+ } else {
+ DCHECK_EQ(0, entry->skeleton.prefix2);
+ }
+ cu->code_buffer.push_back(entry->skeleton.opcode);
+ if (entry->skeleton.opcode == 0x0F) {
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
+ if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
+ } else {
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+ }
+ } else {
+ DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+ }
+ if (X86_FPREG(reg1)) {
+ reg1 = reg1 & X86_FP_REG_MASK;
+ }
+ if (X86_FPREG(reg2)) {
+ reg2 = reg2 & X86_FP_REG_MASK;
+ }
+ DCHECK_LT(reg1, 8);
+ DCHECK_LT(reg2, 8);
+ uint8_t modrm = (3 << 6) | (reg1 << 3) | reg2;
+ cu->code_buffer.push_back(modrm);
+ DCHECK_EQ(0, entry->skeleton.modrm_opcode);
+ DCHECK_EQ(0, entry->skeleton.ax_opcode);
+ DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
+static void EmitRegRegImm(CompilationUnit* cu, const X86EncodingMap* entry,
+ uint8_t reg1, uint8_t reg2, int32_t imm) {
+ if (entry->skeleton.prefix1 != 0) {
+ cu->code_buffer.push_back(entry->skeleton.prefix1);
+ if (entry->skeleton.prefix2 != 0) {
+ cu->code_buffer.push_back(entry->skeleton.prefix2);
+ }
+ } else {
+ DCHECK_EQ(0, entry->skeleton.prefix2);
+ }
+ cu->code_buffer.push_back(entry->skeleton.opcode);
+ if (entry->skeleton.opcode == 0x0F) {
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
+ if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
+ } else {
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+ }
+ } else {
+ DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+ }
+ if (X86_FPREG(reg1)) {
+ reg1 = reg1 & X86_FP_REG_MASK;
+ }
+ if (X86_FPREG(reg2)) {
+ reg2 = reg2 & X86_FP_REG_MASK;
+ }
+ DCHECK_LT(reg1, 8);
+ DCHECK_LT(reg2, 8);
+ uint8_t modrm = (3 << 6) | (reg1 << 3) | reg2;
+ cu->code_buffer.push_back(modrm);
+ DCHECK_EQ(0, entry->skeleton.modrm_opcode);
+ DCHECK_EQ(0, entry->skeleton.ax_opcode);
+ switch (entry->skeleton.immediate_bytes) {
+ case 1:
+ DCHECK(IS_SIMM8(imm));
+ cu->code_buffer.push_back(imm & 0xFF);
+ break;
+ case 2:
+ DCHECK(IS_SIMM16(imm));
+ cu->code_buffer.push_back(imm & 0xFF);
+ cu->code_buffer.push_back((imm >> 8) & 0xFF);
+ break;
+ case 4:
+ cu->code_buffer.push_back(imm & 0xFF);
+ cu->code_buffer.push_back((imm >> 8) & 0xFF);
+ cu->code_buffer.push_back((imm >> 16) & 0xFF);
+ cu->code_buffer.push_back((imm >> 24) & 0xFF);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected immediate bytes (" << entry->skeleton.immediate_bytes
+ << ") for instruction: " << entry->name;
+ break;
+ }
+}
+
+static void EmitRegImm(CompilationUnit* cu, const X86EncodingMap* entry,
+ uint8_t reg, int imm) {
+ if (entry->skeleton.prefix1 != 0) {
+ cu->code_buffer.push_back(entry->skeleton.prefix1);
+ if (entry->skeleton.prefix2 != 0) {
+ cu->code_buffer.push_back(entry->skeleton.prefix2);
+ }
+ } else {
+ DCHECK_EQ(0, entry->skeleton.prefix2);
+ }
+ if (reg == rAX && entry->skeleton.ax_opcode != 0) {
+ cu->code_buffer.push_back(entry->skeleton.ax_opcode);
+ } else {
+ cu->code_buffer.push_back(entry->skeleton.opcode);
+ if (entry->skeleton.opcode == 0x0F) {
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
+ if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
+ } else {
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+ }
+ } else {
+ DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+ }
+ if (X86_FPREG(reg)) {
+ reg = reg & X86_FP_REG_MASK;
+ }
+ uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
+ cu->code_buffer.push_back(modrm);
+ }
+ switch (entry->skeleton.immediate_bytes) {
+ case 1:
+ DCHECK(IS_SIMM8(imm));
+ cu->code_buffer.push_back(imm & 0xFF);
+ break;
+ case 2:
+ DCHECK(IS_SIMM16(imm));
+ cu->code_buffer.push_back(imm & 0xFF);
+ cu->code_buffer.push_back((imm >> 8) & 0xFF);
+ break;
+ case 4:
+ cu->code_buffer.push_back(imm & 0xFF);
+ cu->code_buffer.push_back((imm >> 8) & 0xFF);
+ cu->code_buffer.push_back((imm >> 16) & 0xFF);
+ cu->code_buffer.push_back((imm >> 24) & 0xFF);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected immediate bytes (" << entry->skeleton.immediate_bytes
+ << ") for instruction: " << entry->name;
+ break;
+ }
+}
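+// Illustrative byte trace for the rAX shortcut in EmitRegImm (assuming Add32RI:
+// opcode 0x81, ax_opcode 0x05): EmitRegImm(cu, entry, rAX, 0x12345678) emits
+// "05 78 56 34 12" (add eax, 0x12345678). Other registers take the generic form,
+// e.g. "81 C1 78 56 34 12" for ecx, which is one byte longer.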
+
+static void EmitThreadImm(CompilationUnit* cu, const X86EncodingMap* entry,
+ int disp, int imm) {
+ if (entry->skeleton.prefix1 != 0) {
+ cu->code_buffer.push_back(entry->skeleton.prefix1);
+ if (entry->skeleton.prefix2 != 0) {
+ cu->code_buffer.push_back(entry->skeleton.prefix2);
+ }
+ } else {
+ DCHECK_EQ(0, entry->skeleton.prefix2);
+ }
+ cu->code_buffer.push_back(entry->skeleton.opcode);
+ if (entry->skeleton.opcode == 0x0F) {
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
+ if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
+ } else {
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+ }
+ } else {
+ DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+ }
+ uint8_t modrm = (0 << 6) | (entry->skeleton.modrm_opcode << 3) | rBP;
+ cu->code_buffer.push_back(modrm);
+ cu->code_buffer.push_back(disp & 0xFF);
+ cu->code_buffer.push_back((disp >> 8) & 0xFF);
+ cu->code_buffer.push_back((disp >> 16) & 0xFF);
+ cu->code_buffer.push_back((disp >> 24) & 0xFF);
+ switch (entry->skeleton.immediate_bytes) {
+ case 1:
+ DCHECK(IS_SIMM8(imm));
+ cu->code_buffer.push_back(imm & 0xFF);
+ break;
+ case 2:
+ DCHECK(IS_SIMM16(imm));
+ cu->code_buffer.push_back(imm & 0xFF);
+ cu->code_buffer.push_back((imm >> 8) & 0xFF);
+ break;
+ case 4:
+ cu->code_buffer.push_back(imm & 0xFF);
+ cu->code_buffer.push_back((imm >> 8) & 0xFF);
+ cu->code_buffer.push_back((imm >> 16) & 0xFF);
+ cu->code_buffer.push_back((imm >> 24) & 0xFF);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected immediate bytes (" << entry->skeleton.immediate_bytes
+ << ") for instruction: " << entry->name;
+ break;
+ }
+ DCHECK_EQ(entry->skeleton.ax_opcode, 0);
+}
+
+static void EmitMovRegImm(CompilationUnit* cu, const X86EncodingMap* entry,
+ uint8_t reg, int imm) {
+ DCHECK_LT(reg, 8);
+ cu->code_buffer.push_back(0xB8 + reg);
+ cu->code_buffer.push_back(imm & 0xFF);
+ cu->code_buffer.push_back((imm >> 8) & 0xFF);
+ cu->code_buffer.push_back((imm >> 16) & 0xFF);
+ cu->code_buffer.push_back((imm >> 24) & 0xFF);
+}
+
+static void EmitShiftRegImm(CompilationUnit* cu, const X86EncodingMap* entry,
+ uint8_t reg, int imm) {
+ if (entry->skeleton.prefix1 != 0) {
+ cu->code_buffer.push_back(entry->skeleton.prefix1);
+ if (entry->skeleton.prefix2 != 0) {
+ cu->code_buffer.push_back(entry->skeleton.prefix2);
+ }
+ } else {
+ DCHECK_EQ(0, entry->skeleton.prefix2);
+ }
+ if (imm != 1) {
+ cu->code_buffer.push_back(entry->skeleton.opcode);
+ } else {
+ // Shorter encoding for 1 bit shift
+ cu->code_buffer.push_back(entry->skeleton.ax_opcode);
+ }
+ if (entry->skeleton.opcode == 0x0F) {
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
+ if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
+ } else {
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+ }
+ } else {
+ DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+ }
+ if (reg >= 4) {
+ DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg)
+ << " in " << PrettyMethod(cu->method_idx, *cu->dex_file);
+ }
+ DCHECK_LT(reg, 8);
+ uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
+ cu->code_buffer.push_back(modrm);
+ if (imm != 1) {
+ DCHECK_EQ(entry->skeleton.immediate_bytes, 1);
+ DCHECK(IS_SIMM8(imm));
+ cu->code_buffer.push_back(imm & 0xFF);
+ }
+}
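+// Illustrative byte trace for EmitShiftRegImm (assuming Sar32RI: opcode 0xC1,
+// modrm opcode /7, ax_opcode 0xD1): a shift count of 1 emits "D1 F8" (sar eax, 1),
+// while any other count, e.g. 4, emits "C1 F8 04" (sar eax, 4).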
+
+static void EmitShiftRegCl(CompilationUnit* cu, const X86EncodingMap* entry,
+ uint8_t reg, uint8_t cl) {
+ DCHECK_EQ(cl, static_cast<uint8_t>(rCX));
+ if (entry->skeleton.prefix1 != 0) {
+ cu->code_buffer.push_back(entry->skeleton.prefix1);
+ if (entry->skeleton.prefix2 != 0) {
+ cu->code_buffer.push_back(entry->skeleton.prefix2);
+ }
+ } else {
+ DCHECK_EQ(0, entry->skeleton.prefix2);
+ }
+ cu->code_buffer.push_back(entry->skeleton.opcode);
+ DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+ DCHECK_LT(reg, 8);
+ uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
+ cu->code_buffer.push_back(modrm);
+ DCHECK_EQ(0, entry->skeleton.ax_opcode);
+ DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
+static void EmitRegCond(CompilationUnit* cu, const X86EncodingMap* entry,
+ uint8_t reg, uint8_t condition) {
+ if (entry->skeleton.prefix1 != 0) {
+ cu->code_buffer.push_back(entry->skeleton.prefix1);
+ if (entry->skeleton.prefix2 != 0) {
+ cu->code_buffer.push_back(entry->skeleton.prefix2);
+ }
+ } else {
+ DCHECK_EQ(0, entry->skeleton.prefix2);
+ }
+ DCHECK_EQ(0, entry->skeleton.ax_opcode);
+ DCHECK_EQ(0x0F, entry->skeleton.opcode);
+ cu->code_buffer.push_back(0x0F);
+ DCHECK_EQ(0x90, entry->skeleton.extra_opcode1);
+ cu->code_buffer.push_back(0x90 | condition);
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+ DCHECK_LT(reg, 8);
+ uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
+ cu->code_buffer.push_back(modrm);
+ DCHECK_EQ(entry->skeleton.immediate_bytes, 0);
+}
+
+static void EmitJmp(CompilationUnit* cu, const X86EncodingMap* entry, int rel) {
+ if (entry->opcode == kX86Jmp8) {
+ DCHECK(IS_SIMM8(rel));
+ cu->code_buffer.push_back(0xEB);
+ cu->code_buffer.push_back(rel & 0xFF);
+ } else if (entry->opcode == kX86Jmp32) {
+ cu->code_buffer.push_back(0xE9);
+ cu->code_buffer.push_back(rel & 0xFF);
+ cu->code_buffer.push_back((rel >> 8) & 0xFF);
+ cu->code_buffer.push_back((rel >> 16) & 0xFF);
+ cu->code_buffer.push_back((rel >> 24) & 0xFF);
+ } else {
+ DCHECK(entry->opcode == kX86JmpR);
+ cu->code_buffer.push_back(entry->skeleton.opcode);
+ uint8_t reg = static_cast<uint8_t>(rel);
+ DCHECK_LT(reg, 8);
+ uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
+ cu->code_buffer.push_back(modrm);
+ }
+}
+
+static void EmitJcc(CompilationUnit* cu, const X86EncodingMap* entry,
+ int rel, uint8_t cc) {
+ DCHECK_LT(cc, 16);
+ if (entry->opcode == kX86Jcc8) {
+ DCHECK(IS_SIMM8(rel));
+ cu->code_buffer.push_back(0x70 | cc);
+ cu->code_buffer.push_back(rel & 0xFF);
+ } else {
+ DCHECK(entry->opcode == kX86Jcc32);
+ cu->code_buffer.push_back(0x0F);
+ cu->code_buffer.push_back(0x80 | cc);
+ cu->code_buffer.push_back(rel & 0xFF);
+ cu->code_buffer.push_back((rel >> 8) & 0xFF);
+ cu->code_buffer.push_back((rel >> 16) & 0xFF);
+ cu->code_buffer.push_back((rel >> 24) & 0xFF);
+ }
+}
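+// Illustrative byte trace for EmitJcc: with the Intel condition-code numbering
+// (where 4 encodes equal/zero), a kX86Jcc32 with cc == 4 emits "0F 84" followed
+// by the little-endian rel32, i.e. a je rel32 instruction.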
+
+static void EmitCallMem(CompilationUnit* cu, const X86EncodingMap* entry,
+ uint8_t base, int disp) {
+ if (entry->skeleton.prefix1 != 0) {
+ cu->code_buffer.push_back(entry->skeleton.prefix1);
+ if (entry->skeleton.prefix2 != 0) {
+ cu->code_buffer.push_back(entry->skeleton.prefix2);
+ }
+ } else {
+ DCHECK_EQ(0, entry->skeleton.prefix2);
+ }
+ cu->code_buffer.push_back(entry->skeleton.opcode);
+ if (entry->skeleton.opcode == 0x0F) {
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
+ if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode2 == 0x3A) {
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
+ } else {
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+ }
+ } else {
+ DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+ }
+ uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (entry->skeleton.modrm_opcode << 3) | base;
+ cu->code_buffer.push_back(modrm);
+ if (base == rX86_SP) {
+ // Special SIB for SP base
+ cu->code_buffer.push_back(0 << 6 | (rX86_SP << 3) | rX86_SP);
+ }
+ EmitDisp(cu, base, disp);
+ DCHECK_EQ(0, entry->skeleton.ax_opcode);
+ DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
+static void EmitCallThread(CompilationUnit* cu, const X86EncodingMap* entry, int disp) {
+ DCHECK_NE(entry->skeleton.prefix1, 0);
+ cu->code_buffer.push_back(entry->skeleton.prefix1);
+ if (entry->skeleton.prefix2 != 0) {
+ cu->code_buffer.push_back(entry->skeleton.prefix2);
+ }
+ cu->code_buffer.push_back(entry->skeleton.opcode);
+ if (entry->skeleton.opcode == 0x0F) {
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
+ if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
+ } else {
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+ }
+ } else {
+ DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+ }
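+ // mod == 0 with rBP in the r/m field encodes a bare disp32; together with
+ // the required segment prefix above this addresses the current Thread.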
+ uint8_t modrm = (0 << 6) | (entry->skeleton.modrm_opcode << 3) | rBP;
+ cu->code_buffer.push_back(modrm);
+ cu->code_buffer.push_back(disp & 0xFF);
+ cu->code_buffer.push_back((disp >> 8) & 0xFF);
+ cu->code_buffer.push_back((disp >> 16) & 0xFF);
+ cu->code_buffer.push_back((disp >> 24) & 0xFF);
+ DCHECK_EQ(0, entry->skeleton.ax_opcode);
+ DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
+static void EmitPcRel(CompilationUnit* cu, const X86EncodingMap* entry, uint8_t reg,
+ int base_or_table, uint8_t index, int scale, int table_or_disp) {
+ int disp;
+ if (entry->opcode == kX86PcRelLoadRA) {
+ SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(table_or_disp);
+ disp = tab_rec->offset;
+ } else {
+ DCHECK(entry->opcode == kX86PcRelAdr);
+ FillArrayData *tab_rec = reinterpret_cast<FillArrayData*>(base_or_table);
+ disp = tab_rec->offset;
+ }
+ if (entry->skeleton.prefix1 != 0) {
+ cu->code_buffer.push_back(entry->skeleton.prefix1);
+ if (entry->skeleton.prefix2 != 0) {
+ cu->code_buffer.push_back(entry->skeleton.prefix2);
+ }
+ } else {
+ DCHECK_EQ(0, entry->skeleton.prefix2);
+ }
+ if (X86_FPREG(reg)) {
+ reg = reg & X86_FP_REG_MASK;
+ }
+ DCHECK_LT(reg, 8);
+ if (entry->opcode == kX86PcRelLoadRA) {
+ cu->code_buffer.push_back(entry->skeleton.opcode);
+ DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
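+ // mod == 2 with rX86_SP in the r/m field means a SIB byte and a 32-bit
+ // displacement follow.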
+ uint8_t modrm = (2 << 6) | (reg << 3) | rX86_SP;
+ cu->code_buffer.push_back(modrm);
+ DCHECK_LT(scale, 4);
+ DCHECK_LT(index, 8);
+ DCHECK_LT(base_or_table, 8);
+ uint8_t base = static_cast<uint8_t>(base_or_table);
+ uint8_t sib = (scale << 6) | (index << 3) | base;
+ cu->code_buffer.push_back(sib);
+ DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+ } else {
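+ // kX86PcRelAdr folds the register into the opcode byte; only the 32-bit
+ // displacement follows.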
+ cu->code_buffer.push_back(entry->skeleton.opcode + reg);
+ }
+ cu->code_buffer.push_back(disp & 0xFF);
+ cu->code_buffer.push_back((disp >> 8) & 0xFF);
+ cu->code_buffer.push_back((disp >> 16) & 0xFF);
+ cu->code_buffer.push_back((disp >> 24) & 0xFF);
+ DCHECK_EQ(0, entry->skeleton.modrm_opcode);
+ DCHECK_EQ(0, entry->skeleton.ax_opcode);
+}
+
+static void EmitMacro(CompilationUnit* cu, const X86EncodingMap* entry,
+ uint8_t reg, int offset) {
+ DCHECK(entry->opcode == kX86StartOfMethod) << entry->name;
+ cu->code_buffer.push_back(0xE8); // call +0
+ cu->code_buffer.push_back(0);
+ cu->code_buffer.push_back(0);
+ cu->code_buffer.push_back(0);
+ cu->code_buffer.push_back(0);
+
+ DCHECK_LT(reg, 8);
+ cu->code_buffer.push_back(0x58 + reg); // pop reg
+
+ EmitRegImm(cu, &X86Codegen::EncodingMap[kX86Sub32RI], reg, offset + 5 /* size of call +0 */);
+}
+
+static void EmitUnimplemented(CompilationUnit* cu, const X86EncodingMap* entry, LIR* lir) {
+ Codegen* cg = cu->cg.get();
+ UNIMPLEMENTED(WARNING) << "encoding kind for " << entry->name << " "
+ << cg->BuildInsnString(entry->fmt, lir, 0);
+ for (int i = 0; i < cg->GetInsnSize(lir); ++i) {
+ cu->code_buffer.push_back(0xCC); // push breakpoint instruction - int 3
+ }
+}
+
+/*
+ * Assemble the LIR into binary instruction format. Note that we may
+ * discover that a pc-relative displacement does not fit the selected
+ * instruction. In that case we substitute a wider code sequence or
+ * request that the trace be shortened and retried.
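+ *
+ * For example, a kX86Jcc8 whose recomputed displacement no longer fits in
+ * a signed byte is widened to kX86Jcc32 and kRetryAll is returned so that
+ * all offsets are recomputed on the next pass.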
+ */
+AssemblerStatus X86Codegen::AssembleInstructions(CompilationUnit *cu, uintptr_t start_addr) {
+ LIR *lir;
+ AssemblerStatus res = kSuccess; // Assume success
+
+ const bool kVerbosePcFixup = false;
+ for (lir = cu->first_lir_insn; lir != NULL; lir = NEXT_LIR(lir)) {
+ if (lir->opcode < 0) {
+ continue;
+ }
+
+ if (lir->flags.is_nop) {
+ continue;
+ }
+
+ if (lir->flags.pcRelFixup) {
+ switch (lir->opcode) {
+ case kX86Jcc8: {
+ LIR *target_lir = lir->target;
+ DCHECK(target_lir != NULL);
+ int delta = 0;
+ uintptr_t pc;
+ if (IS_SIMM8(lir->operands[0])) {
+ pc = lir->offset + 2 /* opcode + rel8 */;
+ } else {
+ pc = lir->offset + 6 /* 2 byte opcode + rel32 */;
+ }
+ uintptr_t target = target_lir->offset;
+ delta = target - pc;
+ if (IS_SIMM8(delta) != IS_SIMM8(lir->operands[0])) {
+ if (kVerbosePcFixup) {
+ LOG(INFO) << "Retry for JCC growth at " << lir->offset
+ << " delta: " << delta << " old delta: " << lir->operands[0];
+ }
+ lir->opcode = kX86Jcc32;
+ SetupResourceMasks(cu, lir);
+ res = kRetryAll;
+ }
+ if (kVerbosePcFixup) {
+ LOG(INFO) << "Source:";
+ DumpLIRInsn(cu, lir, 0);
+ LOG(INFO) << "Target:";
+ DumpLIRInsn(cu, target_lir, 0);
+ LOG(INFO) << "Delta " << delta;
+ }
+ lir->operands[0] = delta;
+ break;
+ }
+ case kX86Jcc32: {
+ LIR *target_lir = lir->target;
+ DCHECK(target_lir != NULL);
+ uintptr_t pc = lir->offset + 6 /* 2 byte opcode + rel32 */;
+ uintptr_t target = target_lir->offset;
+ int delta = target - pc;
+ if (kVerbosePcFixup) {
+ LOG(INFO) << "Source:";
+ DumpLIRInsn(cu, lir, 0);
+ LOG(INFO) << "Target:";
+ DumpLIRInsn(cu, target_lir, 0);
+ LOG(INFO) << "Delta " << delta;
+ }
+ lir->operands[0] = delta;
+ break;
+ }
+ case kX86Jmp8: {
+ LIR *target_lir = lir->target;
+ DCHECK(target_lir != NULL);
+ int delta = 0;
+ uintptr_t pc;
+ if (IS_SIMM8(lir->operands[0])) {
+ pc = lir->offset + 2 /* opcode + rel8 */;
+ } else {
+ pc = lir->offset + 5 /* opcode + rel32 */;
+ }
+ uintptr_t target = target_lir->offset;
+ delta = target - pc;
+ if (!(cu->disable_opt & (1 << kSafeOptimizations)) && delta == 0) {
+ // Useless branch
+ lir->flags.is_nop = true;
+ if (kVerbosePcFixup) {
+ LOG(INFO) << "Retry for useless branch at " << lir->offset;
+ }
+ res = kRetryAll;
+ } else if (IS_SIMM8(delta) != IS_SIMM8(lir->operands[0])) {
+ if (kVerbosePcFixup) {
+ LOG(INFO) << "Retry for JMP growth at " << lir->offset;
+ }
+ lir->opcode = kX86Jmp32;
+ SetupResourceMasks(cu, lir);
+ res = kRetryAll;
+ }
+ lir->operands[0] = delta;
+ break;
+ }
+ case kX86Jmp32: {
+ LIR *target_lir = lir->target;
+ DCHECK(target_lir != NULL);
+ uintptr_t pc = lir->offset + 5 /* opcode + rel32 */;
+ uintptr_t target = target_lir->offset;
+ int delta = target - pc;
+ lir->operands[0] = delta;
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ /*
+ * If one of the pc-relative instructions expanded we'll have
+ * to make another pass. Don't bother to fully assemble the
+ * instruction.
+ */
+ if (res != kSuccess) {
+ continue;
+ }
+ CHECK_EQ(static_cast<size_t>(lir->offset), cu->code_buffer.size());
+ const X86EncodingMap *entry = &X86Codegen::EncodingMap[lir->opcode];
+ size_t starting_cbuf_size = cu->code_buffer.size();
+ switch (entry->kind) {
+ case kData: // 4 bytes of data
+ cu->code_buffer.push_back(lir->operands[0]);
+ break;
+ case kNullary: // 1 byte of opcode
+ DCHECK_EQ(0, entry->skeleton.prefix1);
+ DCHECK_EQ(0, entry->skeleton.prefix2);
+ cu->code_buffer.push_back(entry->skeleton.opcode);
+ if (entry->skeleton.extra_opcode1 != 0) {
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode1);
+ if (entry->skeleton.extra_opcode2 != 0) {
+ cu->code_buffer.push_back(entry->skeleton.extra_opcode2);
+ }
+ } else {
+ DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+ }
+ DCHECK_EQ(0, entry->skeleton.modrm_opcode);
+ DCHECK_EQ(0, entry->skeleton.ax_opcode);
+ DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+ break;
+ case kReg: // lir operands - 0: reg
+ EmitOpReg(cu, entry, lir->operands[0]);
+ break;
+ case kMem: // lir operands - 0: base, 1: disp
+ EmitOpMem(cu, entry, lir->operands[0], lir->operands[1]);
+ break;
+ case kMemReg: // lir operands - 0: base, 1: disp, 2: reg
+ EmitMemReg(cu, entry, lir->operands[0], lir->operands[1], lir->operands[2]);
+ break;
+ case kArrayReg: // lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
+ EmitArrayReg(cu, entry, lir->operands[0], lir->operands[1], lir->operands[2],
+ lir->operands[3], lir->operands[4]);
+ break;
+ case kRegMem: // lir operands - 0: reg, 1: base, 2: disp
+ EmitRegMem(cu, entry, lir->operands[0], lir->operands[1], lir->operands[2]);
+ break;
+ case kRegArray: // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp
+ EmitRegArray(cu, entry, lir->operands[0], lir->operands[1], lir->operands[2],
+ lir->operands[3], lir->operands[4]);
+ break;
+ case kRegThread: // lir operands - 0: reg, 1: disp
+ EmitRegThread(cu, entry, lir->operands[0], lir->operands[1]);
+ break;
+ case kRegReg: // lir operands - 0: reg1, 1: reg2
+ EmitRegReg(cu, entry, lir->operands[0], lir->operands[1]);
+ break;
+ case kRegRegStore: // lir operands - 0: reg2, 1: reg1
+ EmitRegReg(cu, entry, lir->operands[1], lir->operands[0]);
+ break;
+ case kRegRegImm:
+ EmitRegRegImm(cu, entry, lir->operands[0], lir->operands[1], lir->operands[2]);
+ break;
+ case kRegImm: // lir operands - 0: reg, 1: immediate
+ EmitRegImm(cu, entry, lir->operands[0], lir->operands[1]);
+ break;
+ case kThreadImm: // lir operands - 0: disp, 1: immediate
+ EmitThreadImm(cu, entry, lir->operands[0], lir->operands[1]);
+ break;
+ case kMovRegImm: // lir operands - 0: reg, 1: immediate
+ EmitMovRegImm(cu, entry, lir->operands[0], lir->operands[1]);
+ break;
+ case kShiftRegImm: // lir operands - 0: reg, 1: immediate
+ EmitShiftRegImm(cu, entry, lir->operands[0], lir->operands[1]);
+ break;
+ case kShiftRegCl: // lir operands - 0: reg, 1: cl
+ EmitShiftRegCl(cu, entry, lir->operands[0], lir->operands[1]);
+ break;
+ case kRegCond: // lir operands - 0: reg, 1: condition
+ EmitRegCond(cu, entry, lir->operands[0], lir->operands[1]);
+ break;
+ case kJmp: // lir operands - 0: rel
+ EmitJmp(cu, entry, lir->operands[0]);
+ break;
+ case kJcc: // lir operands - 0: rel, 1: CC, target assigned
+ EmitJcc(cu, entry, lir->operands[0], lir->operands[1]);
+ break;
+ case kCall:
+ switch (entry->opcode) {
+ case kX86CallM: // lir operands - 0: base, 1: disp
+ EmitCallMem(cu, entry, lir->operands[0], lir->operands[1]);
+ break;
+ case kX86CallT: // lir operands - 0: disp
+ EmitCallThread(cu, entry, lir->operands[0]);
+ break;
+ default:
+ EmitUnimplemented(cu, entry, lir);
+ break;
+ }
+ break;
+ case kPcRel: // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: table
+ EmitPcRel(cu, entry, lir->operands[0], lir->operands[1], lir->operands[2],
+ lir->operands[3], lir->operands[4]);
+ break;
+ case kMacro:
+ EmitMacro(cu, entry, lir->operands[0], lir->offset);
+ break;
+ default:
+ EmitUnimplemented(cu, entry, lir);
+ break;
+ }
+ CHECK_EQ(static_cast<size_t>(GetInsnSize(lir)),
+ cu->code_buffer.size() - starting_cbuf_size)
+ << "Instruction size mismatch for entry: " << X86Codegen::EncodingMap[lir->opcode].name;
+ }
+ return res;
+}
+
+} // namespace art
diff --git a/src/compiler/dex/quick/x86/call_x86.cc b/src/compiler/dex/quick/x86/call_x86.cc
new file mode 100644
index 0000000..7b1a7fb
--- /dev/null
+++ b/src/compiler/dex/quick/x86/call_x86.cc
@@ -0,0 +1,279 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This file contains codegen for the X86 ISA */
+
+#include "codegen_x86.h"
+#include "compiler/dex/quick/codegen_util.h"
+#include "compiler/dex/quick/ralloc_util.h"
+#include "x86_lir.h"
+
+namespace art {
+
+void X86Codegen::GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+ SpecialCaseHandler special_case)
+{
+ // TODO
+}
+
+/*
+ * The sparse table in the literal pool is an array of <key,displacement>
+ * pairs.
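+ * As read below, table[1] holds the entry count, which is followed by the
+ * key array and then a parallel array of branch displacements.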
+ */
+void X86Codegen::GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
+{
+ const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
+ if (cu->verbose) {
+ DumpSparseSwitchTable(table);
+ }
+ int entries = table[1];
+ const int* keys = reinterpret_cast<const int*>(&table[2]);
+ const int* targets = &keys[entries];
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+ for (int i = 0; i < entries; i++) {
+ int key = keys[i];
+ BasicBlock* case_block = FindBlock(cu, cu->current_dalvik_offset + targets[i]);
+ LIR* label_list = cu->block_label_list;
+ OpCmpImmBranch(cu, kCondEq, rl_src.low_reg, key,
+ &label_list[case_block->id]);
+ }
+}
+
+/*
+ * Code pattern will look something like:
+ *
+ * mov r_val, ..
+ * call 0
+ * pop r_start_of_method
+ * sub r_start_of_method, ..
+ * mov r_key_reg, r_val
+ * sub r_key_reg, low_key
+ * cmp r_key_reg, size-1 ; bound check
+ * ja done
+ * mov r_disp, [r_start_of_method + r_key_reg * 4 + table_offset]
+ * add r_start_of_method, r_disp
+ * jmp r_start_of_method
+ * done:
+ */
+void X86Codegen::GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
+{
+ const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
+ if (cu->verbose) {
+ DumpPackedSwitchTable(table);
+ }
+ // Add the table to the list - we'll process it later
+ SwitchTable *tab_rec =
+ static_cast<SwitchTable *>(NewMem(cu, sizeof(SwitchTable), true, kAllocData));
+ tab_rec->table = table;
+ tab_rec->vaddr = cu->current_dalvik_offset;
+ int size = table[1];
+ tab_rec->targets = static_cast<LIR**>(NewMem(cu, size * sizeof(LIR*), true, kAllocLIR));
+ InsertGrowableList(cu, &cu->switch_tables, reinterpret_cast<uintptr_t>(tab_rec));
+
+ // Get the switch value
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+ int start_of_method_reg = AllocTemp(cu);
+ // Materialize a pointer to the switch table
+ //NewLIR0(cu, kX86Bkpt);
+ NewLIR1(cu, kX86StartOfMethod, start_of_method_reg);
+ int low_key = s4FromSwitchData(&table[2]);
+ int keyReg;
+ // Remove the bias, if necessary
+ if (low_key == 0) {
+ keyReg = rl_src.low_reg;
+ } else {
+ keyReg = AllocTemp(cu);
+ OpRegRegImm(cu, kOpSub, keyReg, rl_src.low_reg, low_key);
+ }
+ // Bounds check - if < 0 or >= size continue following switch
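+ // (a single unsigned "above" compare covers both, since a negative key
+ // becomes a large unsigned value)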
+ OpRegImm(cu, kOpCmp, keyReg, size-1);
+ LIR* branch_over = OpCondBranch(cu, kCondHi, NULL);
+
+ // Load the displacement from the switch table
+ int disp_reg = AllocTemp(cu);
+ NewLIR5(cu, kX86PcRelLoadRA, disp_reg, start_of_method_reg, keyReg, 2,
+ reinterpret_cast<uintptr_t>(tab_rec));
+ // Add displacement to start of method
+ OpRegReg(cu, kOpAdd, start_of_method_reg, disp_reg);
+ // ..and go!
+ LIR* switch_branch = NewLIR1(cu, kX86JmpR, start_of_method_reg);
+ tab_rec->anchor = switch_branch;
+
+ /* branch_over target here */
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+ branch_over->target = target;
+}
+
+/*
+ * Array data table format:
+ * ushort ident = 0x0300 magic value
+ * ushort width width of each element in the table
+ * uint size number of elements in the table
+ * ubyte data[size*width] table of data values (may contain a single-byte
+ * padding at the end)
+ *
+ * Total size is 4+(width * size + 1)/2 16-bit code units.
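+ *
+ * For example (hypothetical values): width = 4 and size = 3 give
+ * 4 + (4 * 3 + 1) / 2 = 10 16-bit code units, i.e. the 8-byte header
+ * plus 12 data bytes.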
+ */
+void X86Codegen::GenFillArrayData(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
+{
+ const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
+ // Add the table to the list - we'll process it later
+ FillArrayData *tab_rec =
+ static_cast<FillArrayData*>(NewMem(cu, sizeof(FillArrayData), true, kAllocData));
+ tab_rec->table = table;
+ tab_rec->vaddr = cu->current_dalvik_offset;
+ uint16_t width = tab_rec->table[1];
+ uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
+ tab_rec->size = (size * width) + 8;
+
+ InsertGrowableList(cu, &cu->fill_array_data, reinterpret_cast<uintptr_t>(tab_rec));
+
+ // Making a call - use explicit registers
+ FlushAllRegs(cu); /* Everything to home location */
+ LoadValueDirectFixed(cu, rl_src, rX86_ARG0);
+ // Materialize a pointer to the fill data image
+ NewLIR1(cu, kX86StartOfMethod, rX86_ARG2);
+ NewLIR2(cu, kX86PcRelAdr, rX86_ARG1, reinterpret_cast<uintptr_t>(tab_rec));
+ NewLIR2(cu, kX86Add32RR, rX86_ARG1, rX86_ARG2);
+ CallRuntimeHelperRegReg(cu, ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), rX86_ARG0,
+ rX86_ARG1, true);
+}
+
+void X86Codegen::GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
+{
+ FlushAllRegs(cu);
+ LoadValueDirectFixed(cu, rl_src, rCX); // Get obj
+ LockCallTemps(cu); // Prepare for explicit register usage
+ GenNullCheck(cu, rl_src.s_reg_low, rCX, opt_flags);
+ // If lock is unheld, try to grab it quickly with compare and exchange
+ // TODO: copy and clear hash state?
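+ // The cmpxchg below succeeds only when the monitor word equals rAX (zero,
+ // i.e. unlocked), in which case it installs this thread's shifted thin-lock
+ // id; otherwise execution falls through to the slow path call.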
+ NewLIR2(cu, kX86Mov32RT, rDX, Thread::ThinLockIdOffset().Int32Value());
+ NewLIR2(cu, kX86Sal32RI, rDX, LW_LOCK_OWNER_SHIFT);
+ NewLIR2(cu, kX86Xor32RR, rAX, rAX);
+ NewLIR3(cu, kX86LockCmpxchgMR, rCX, mirror::Object::MonitorOffset().Int32Value(), rDX);
+ LIR* branch = NewLIR2(cu, kX86Jcc8, 0, kX86CondEq);
+ // If lock is held, go the expensive route - artLockObjectFromCode(self, obj);
+ CallRuntimeHelperReg(cu, ENTRYPOINT_OFFSET(pLockObjectFromCode), rCX, true);
+ branch->target = NewLIR0(cu, kPseudoTargetLabel);
+}
+
+void X86Codegen::GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
+{
+ FlushAllRegs(cu);
+ LoadValueDirectFixed(cu, rl_src, rAX); // Get obj
+ LockCallTemps(cu); // Prepare for explicit register usage
+ GenNullCheck(cu, rl_src.s_reg_low, rAX, opt_flags);
+ // If lock is held by the current thread, clear it to quickly release it
+ // TODO: clear hash state?
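+ // If the monitor word is exactly this thread's shifted thin-lock id, the
+ // subtraction below leaves zero, which is stored back to release the lock;
+ // any other value (contention, recursion, hash state) takes the slow path.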
+ NewLIR2(cu, kX86Mov32RT, rDX, Thread::ThinLockIdOffset().Int32Value());
+ NewLIR2(cu, kX86Sal32RI, rDX, LW_LOCK_OWNER_SHIFT);
+ NewLIR3(cu, kX86Mov32RM, rCX, rAX, mirror::Object::MonitorOffset().Int32Value());
+ OpRegReg(cu, kOpSub, rCX, rDX);
+ LIR* branch = NewLIR2(cu, kX86Jcc8, 0, kX86CondNe);
+ NewLIR3(cu, kX86Mov32MR, rAX, mirror::Object::MonitorOffset().Int32Value(), rCX);
+ LIR* branch2 = NewLIR1(cu, kX86Jmp8, 0);
+ branch->target = NewLIR0(cu, kPseudoTargetLabel);
+ // Otherwise, go the expensive route - UnlockObjectFromCode(obj);
+ CallRuntimeHelperReg(cu, ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rAX, true);
+ branch2->target = NewLIR0(cu, kPseudoTargetLabel);
+}
+
+void X86Codegen::GenMoveException(CompilationUnit* cu, RegLocation rl_dest)
+{
+ int ex_offset = Thread::ExceptionOffset().Int32Value();
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ NewLIR2(cu, kX86Mov32RT, rl_result.low_reg, ex_offset);
+ NewLIR2(cu, kX86Mov32TI, ex_offset, 0);
+ StoreValue(cu, rl_dest, rl_result);
+}
+
+/*
+ * Mark garbage collection card. Skip if the value we're storing is null.
+ */
+void X86Codegen::MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg)
+{
+ int reg_card_base = AllocTemp(cu);
+ int reg_card_no = AllocTemp(cu);
+ LIR* branch_over = OpCmpImmBranch(cu, kCondEq, val_reg, 0, NULL);
+ NewLIR2(cu, kX86Mov32RT, reg_card_base, Thread::CardTableOffset().Int32Value());
+ OpRegRegImm(cu, kOpLsr, reg_card_no, tgt_addr_reg, CardTable::kCardShift);
+ StoreBaseIndexed(cu, reg_card_base, reg_card_no, reg_card_base, 0,
+ kUnsignedByte);
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+ branch_over->target = target;
+ FreeTemp(cu, reg_card_base);
+ FreeTemp(cu, reg_card_no);
+}
+
+void X86Codegen::GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs, RegLocation rl_method)
+{
+ /*
+ * On entry, rX86_ARG0, rX86_ARG1, rX86_ARG2 are live. Let the register
+ * allocation mechanism know so it doesn't try to use any of them when
+ * expanding the frame or flushing. This leaves the utility
+ * code with no spare temps.
+ */
+ LockTemp(cu, rX86_ARG0);
+ LockTemp(cu, rX86_ARG1);
+ LockTemp(cu, rX86_ARG2);
+
+ /* Build frame, return address already on stack */
+ OpRegImm(cu, kOpSub, rX86_SP, cu->frame_size - 4);
+
+ /*
+ * We can safely skip the stack overflow check if we're
+ * a leaf *and* our frame size < fudge factor.
+ */
+ bool skip_overflow_check = ((cu->attrs & METHOD_IS_LEAF) &&
+ (static_cast<size_t>(cu->frame_size) <
+ Thread::kStackOverflowReservedBytes));
+ NewLIR0(cu, kPseudoMethodEntry);
+ /* Spill core callee saves */
+ SpillCoreRegs(cu);
+ /* NOTE: promotion of FP regs currently unsupported, thus no FP spill */
+ DCHECK_EQ(cu->num_fp_spills, 0);
+ if (!skip_overflow_check) {
+ // cmp rX86_SP, fs:[stack_end_]; jcc throw_launchpad
+ LIR* tgt = RawLIR(cu, 0, kPseudoThrowTarget, kThrowStackOverflow, 0, 0, 0, 0);
+ OpRegThreadMem(cu, kOpCmp, rX86_SP, Thread::StackEndOffset().Int32Value());
+ OpCondBranch(cu, kCondUlt, tgt);
+ // Remember branch target - will process later
+ InsertGrowableList(cu, &cu->throw_launchpads, reinterpret_cast<uintptr_t>(tgt));
+ }
+
+ FlushIns(cu, ArgLocs, rl_method);
+
+ FreeTemp(cu, rX86_ARG0);
+ FreeTemp(cu, rX86_ARG1);
+ FreeTemp(cu, rX86_ARG2);
+}
+
+void X86Codegen::GenExitSequence(CompilationUnit* cu) {
+ /*
+ * In the exit path, rX86_RET0/rX86_RET1 are live - make sure they aren't
+ * allocated by the register utilities as temps.
+ */
+ LockTemp(cu, rX86_RET0);
+ LockTemp(cu, rX86_RET1);
+
+ NewLIR0(cu, kPseudoMethodExit);
+ UnSpillCoreRegs(cu);
+ /* Remove frame except for return address */
+ OpRegImm(cu, kOpAdd, rX86_SP, cu->frame_size - 4);
+ NewLIR0(cu, kX86Ret);
+}
+
+} // namespace art
diff --git a/src/compiler/dex/quick/x86/codegen_x86.h b/src/compiler/dex/quick/x86/codegen_x86.h
new file mode 100644
index 0000000..c1e8fb3
--- /dev/null
+++ b/src/compiler/dex/quick/x86/codegen_x86.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_QUICK_X86_CODEGENX86_H_
+#define ART_SRC_COMPILER_DEX_QUICK_X86_CODEGENX86_H_
+
+#include "compiler/dex/compiler_internals.h"
+#include "x86_lir.h"
+
+namespace art {
+
+class X86Codegen : public Codegen {
+ public:
+ // Required for target - codegen helpers.
+ virtual bool SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
+ RegLocation rl_src, RegLocation rl_dest, int lit);
+ virtual int LoadHelper(CompilationUnit* cu, int offset);
+ virtual LIR* LoadBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_dest,
+ OpSize size, int s_reg);
+ virtual LIR* LoadBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_dest_lo,
+ int r_dest_hi, int s_reg);
+ virtual LIR* LoadBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_dest, int scale,
+ OpSize size);
+ virtual LIR* LoadBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
+ int displacement, int r_dest, int r_dest_hi, OpSize size,
+ int s_reg);
+ virtual LIR* LoadConstantNoClobber(CompilationUnit* cu, int r_dest, int value);
+ virtual LIR* LoadConstantWide(CompilationUnit* cu, int r_dest_lo, int r_dest_hi, int64_t value);
+ virtual LIR* StoreBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_src,
+ OpSize size);
+ virtual LIR* StoreBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_src_lo,
+ int r_src_hi);
+ virtual LIR* StoreBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_src, int scale,
+ OpSize size);
+ virtual LIR* StoreBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
+ int displacement, int r_src, int r_src_hi, OpSize size,
+ int s_reg);
+ virtual void MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg);
+
+ // Required for target - register utilities.
+ virtual bool IsFpReg(int reg);
+ virtual bool SameRegType(int reg1, int reg2);
+ virtual int AllocTypedTemp(CompilationUnit* cu, bool fp_hint, int reg_class);
+ virtual int AllocTypedTempPair(CompilationUnit* cu, bool fp_hint, int reg_class);
+ virtual int S2d(int low_reg, int high_reg);
+ virtual int TargetReg(SpecialTargetRegister reg);
+ virtual RegisterInfo* GetRegInfo(CompilationUnit* cu, int reg);
+ virtual RegLocation GetReturnAlt(CompilationUnit* cu);
+ virtual RegLocation GetReturnWideAlt(CompilationUnit* cu);
+ virtual RegLocation LocCReturn();
+ virtual RegLocation LocCReturnDouble();
+ virtual RegLocation LocCReturnFloat();
+ virtual RegLocation LocCReturnWide();
+ virtual uint32_t FpRegMask();
+ virtual uint64_t GetRegMaskCommon(CompilationUnit* cu, int reg);
+ virtual void AdjustSpillMask(CompilationUnit* cu);
+ virtual void ClobberCalleeSave(CompilationUnit *cu);
+ virtual void FlushReg(CompilationUnit* cu, int reg);
+ virtual void FlushRegWide(CompilationUnit* cu, int reg1, int reg2);
+ virtual void FreeCallTemps(CompilationUnit* cu);
+ virtual void FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep, RegLocation rl_free);
+ virtual void LockCallTemps(CompilationUnit* cu);
+ virtual void MarkPreservedSingle(CompilationUnit* cu, int v_reg, int reg);
+ virtual void CompilerInitializeRegAlloc(CompilationUnit* cu);
+
+ // Required for target - miscellaneous.
+ virtual AssemblerStatus AssembleInstructions(CompilationUnit* cu, uintptr_t start_addr);
+ virtual void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
+ virtual void SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir);
+ virtual const char* GetTargetInstFmt(int opcode);
+ virtual const char* GetTargetInstName(int opcode);
+ virtual std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
+ virtual uint64_t GetPCUseDefEncoding();
+ virtual uint64_t GetTargetInstFlags(int opcode);
+ virtual int GetInsnSize(LIR* lir);
+ virtual bool IsUnconditionalBranch(LIR* lir);
+
+ // Required for target - Dalvik-level generators.
+ virtual void GenArithImmOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2);
+ virtual void GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_src, int scale);
+ virtual void GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_dest, int scale);
+ virtual void GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_src, int scale);
+ virtual void GenShiftImmOpLong(CompilationUnit* cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_shift);
+ virtual void GenMulLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ virtual void GenAddLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ virtual void GenAndLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ virtual void GenArithOpDouble(CompilationUnit* cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ virtual void GenArithOpFloat(CompilationUnit *cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2);
+ virtual void GenCmpFP(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2);
+ virtual void GenConversion(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src);
+ virtual bool GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier);
+ virtual bool GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min);
+ virtual bool GenInlinedSqrt(CompilationUnit* cu, CallInfo* info);
+ virtual void GenNegLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+ virtual void GenOrLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ virtual void GenSubLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ virtual void GenXorLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ virtual LIR* GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code, int reg1, int base,
+ int offset, ThrowKind kind);
+ virtual RegLocation GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int reg_hi,
+ bool is_div);
+ virtual RegLocation GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int lit,
+ bool is_div);
+ virtual void GenCmpLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ virtual void GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi);
+ virtual void GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs,
+ RegLocation rl_method);
+ virtual void GenExitSequence(CompilationUnit* cu);
+ virtual void GenFillArrayData(CompilationUnit* cu, uint32_t table_offset,
+ RegLocation rl_src);
+ virtual void GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir, bool gt_bias,
+ bool is_double);
+ virtual void GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir);
+ virtual void GenSelect(CompilationUnit* cu, BasicBlock* bb, MIR* mir);
+ virtual void GenMemBarrier(CompilationUnit* cu, MemBarrierKind barrier_kind);
+ virtual void GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src);
+ virtual void GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src);
+ virtual void GenMoveException(CompilationUnit* cu, RegLocation rl_dest);
+ virtual void GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src,
+ RegLocation rl_result, int lit, int first_bit,
+ int second_bit);
+ virtual void GenNegDouble(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+ virtual void GenNegFloat(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+ virtual void GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset,
+ RegLocation rl_src);
+ virtual void GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset,
+ RegLocation rl_src);
+ virtual void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+ SpecialCaseHandler special_case);
+
+ // Single operation generators.
+ virtual LIR* OpUnconditionalBranch(CompilationUnit* cu, LIR* target);
+ virtual LIR* OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1, int src2,
+ LIR* target);
+ virtual LIR* OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg, int check_value,
+ LIR* target);
+ virtual LIR* OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target);
+ virtual LIR* OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg,
+ LIR* target);
+ virtual LIR* OpFpRegCopy(CompilationUnit* cu, int r_dest, int r_src);
+ virtual LIR* OpIT(CompilationUnit* cu, ConditionCode cond, const char* guide);
+ virtual LIR* OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp);
+ virtual LIR* OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target);
+ virtual LIR* OpReg(CompilationUnit* cu, OpKind op, int r_dest_src);
+ virtual LIR* OpRegCopy(CompilationUnit* cu, int r_dest, int r_src);
+ virtual LIR* OpRegCopyNoInsert(CompilationUnit* cu, int r_dest, int r_src);
+ virtual LIR* OpRegImm(CompilationUnit* cu, OpKind op, int r_dest_src1, int value);
+ virtual LIR* OpRegMem(CompilationUnit* cu, OpKind op, int r_dest, int rBase, int offset);
+ virtual LIR* OpRegReg(CompilationUnit* cu, OpKind op, int r_dest_src1, int r_src2);
+ virtual LIR* OpRegRegImm(CompilationUnit* cu, OpKind op, int r_dest, int r_src1, int value);
+ virtual LIR* OpRegRegReg(CompilationUnit* cu, OpKind op, int r_dest, int r_src1,
+ int r_src2);
+ virtual LIR* OpTestSuspend(CompilationUnit* cu, LIR* target);
+ virtual LIR* OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset);
+ virtual LIR* OpVldm(CompilationUnit* cu, int rBase, int count);
+ virtual LIR* OpVstm(CompilationUnit* cu, int rBase, int count);
+ virtual void OpLea(CompilationUnit* cu, int rBase, int reg1, int reg2, int scale,
+ int offset);
+ virtual void OpRegCopyWide(CompilationUnit* cu, int dest_lo, int dest_hi, int src_lo,
+ int src_hi);
+ virtual void OpTlsCmp(CompilationUnit* cu, int offset, int val);
+
+ void OpRegThreadMem(CompilationUnit* cu, OpKind op, int r_dest, int thread_offset);
+ void SpillCoreRegs(CompilationUnit* cu);
+ void UnSpillCoreRegs(CompilationUnit* cu);
+ static const X86EncodingMap EncodingMap[kX86Last];
+ bool InexpensiveConstantInt(int32_t value);
+ bool InexpensiveConstantFloat(int32_t value);
+ bool InexpensiveConstantLong(int64_t value);
+ bool InexpensiveConstantDouble(int64_t value);
+};
+
+} // namespace art
+
+#endif // ART_SRC_COMPILER_DEX_QUICK_X86_CODEGENX86_H_
diff --git a/src/compiler/dex/quick/x86/fp_x86.cc b/src/compiler/dex/quick/x86/fp_x86.cc
new file mode 100644
index 0000000..888a4a6
--- /dev/null
+++ b/src/compiler/dex/quick/x86/fp_x86.cc
@@ -0,0 +1,380 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_x86.h"
+#include "compiler/dex/quick/codegen_util.h"
+#include "compiler/dex/quick/ralloc_util.h"
+#include "x86_lir.h"
+
+namespace art {
+
+void X86Codegen::GenArithOpFloat(CompilationUnit *cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
+ X86OpCode op = kX86Nop;
+ RegLocation rl_result;
+
+ /*
+ * Don't attempt to optimize register usage since these opcodes call out to
+ * the handlers.
+ */
+ switch (opcode) {
+ case Instruction::ADD_FLOAT_2ADDR:
+ case Instruction::ADD_FLOAT:
+ op = kX86AddssRR;
+ break;
+ case Instruction::SUB_FLOAT_2ADDR:
+ case Instruction::SUB_FLOAT:
+ op = kX86SubssRR;
+ break;
+ case Instruction::DIV_FLOAT_2ADDR:
+ case Instruction::DIV_FLOAT:
+ op = kX86DivssRR;
+ break;
+ case Instruction::MUL_FLOAT_2ADDR:
+ case Instruction::MUL_FLOAT:
+ op = kX86MulssRR;
+ break;
+ case Instruction::REM_FLOAT_2ADDR:
+ case Instruction::REM_FLOAT:
+ FlushAllRegs(cu); // Send everything to home location
+ CallRuntimeHelperRegLocationRegLocation(cu, ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, false);
+ rl_result = GetReturn(cu, true);
+ StoreValue(cu, rl_dest, rl_result);
+ return;
+ case Instruction::NEG_FLOAT:
+ GenNegFloat(cu, rl_dest, rl_src1);
+ return;
+ default:
+ LOG(FATAL) << "Unexpected opcode: " << opcode;
+ }
+ rl_src1 = LoadValue(cu, rl_src1, kFPReg);
+ rl_src2 = LoadValue(cu, rl_src2, kFPReg);
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ int r_dest = rl_result.low_reg;
+ int r_src1 = rl_src1.low_reg;
+ int r_src2 = rl_src2.low_reg;
+ if (r_dest == r_src2) {
+ r_src2 = AllocTempFloat(cu);
+ OpRegCopy(cu, r_src2, r_dest);
+ }
+ OpRegCopy(cu, r_dest, r_src1);
+ NewLIR2(cu, op, r_dest, r_src2);
+ StoreValue(cu, rl_dest, rl_result);
+}
+
+void X86Codegen::GenArithOpDouble(CompilationUnit *cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
+ X86OpCode op = kX86Nop;
+ RegLocation rl_result;
+
+ switch (opcode) {
+ case Instruction::ADD_DOUBLE_2ADDR:
+ case Instruction::ADD_DOUBLE:
+ op = kX86AddsdRR;
+ break;
+ case Instruction::SUB_DOUBLE_2ADDR:
+ case Instruction::SUB_DOUBLE:
+ op = kX86SubsdRR;
+ break;
+ case Instruction::DIV_DOUBLE_2ADDR:
+ case Instruction::DIV_DOUBLE:
+ op = kX86DivsdRR;
+ break;
+ case Instruction::MUL_DOUBLE_2ADDR:
+ case Instruction::MUL_DOUBLE:
+ op = kX86MulsdRR;
+ break;
+ case Instruction::REM_DOUBLE_2ADDR:
+ case Instruction::REM_DOUBLE:
+ FlushAllRegs(cu); // Send everything to home location
+ CallRuntimeHelperRegLocationRegLocation(cu, ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, false);
+ rl_result = GetReturnWide(cu, true);
+ StoreValueWide(cu, rl_dest, rl_result);
+ return;
+ case Instruction::NEG_DOUBLE:
+ GenNegDouble(cu, rl_dest, rl_src1);
+ return;
+ default:
+ LOG(FATAL) << "Unexpected opcode: " << opcode;
+ }
+ rl_src1 = LoadValueWide(cu, rl_src1, kFPReg);
+ DCHECK(rl_src1.wide);
+ rl_src2 = LoadValueWide(cu, rl_src2, kFPReg);
+ DCHECK(rl_src2.wide);
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ DCHECK(rl_dest.wide);
+ DCHECK(rl_result.wide);
+ int r_dest = S2d(rl_result.low_reg, rl_result.high_reg);
+ int r_src1 = S2d(rl_src1.low_reg, rl_src1.high_reg);
+ int r_src2 = S2d(rl_src2.low_reg, rl_src2.high_reg);
+ if (r_dest == r_src2) {
+ r_src2 = AllocTempDouble(cu) | X86_FP_DOUBLE;
+ OpRegCopy(cu, r_src2, r_dest);
+ }
+ OpRegCopy(cu, r_dest, r_src1);
+ NewLIR2(cu, op, r_dest, r_src2);
+ StoreValueWide(cu, rl_dest, rl_result);
+}
+
+void X86Codegen::GenConversion(CompilationUnit *cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src) {
+ RegisterClass rcSrc = kFPReg;
+ X86OpCode op = kX86Nop;
+ int src_reg;
+ RegLocation rl_result;
+ switch (opcode) {
+ case Instruction::INT_TO_FLOAT:
+ rcSrc = kCoreReg;
+ op = kX86Cvtsi2ssRR;
+ break;
+ case Instruction::DOUBLE_TO_FLOAT:
+ rcSrc = kFPReg;
+ op = kX86Cvtsd2ssRR;
+ break;
+ case Instruction::FLOAT_TO_DOUBLE:
+ rcSrc = kFPReg;
+ op = kX86Cvtss2sdRR;
+ break;
+ case Instruction::INT_TO_DOUBLE:
+ rcSrc = kCoreReg;
+ op = kX86Cvtsi2sdRR;
+ break;
+ case Instruction::FLOAT_TO_INT: {
+ rl_src = LoadValue(cu, rl_src, kFPReg);
+ src_reg = rl_src.low_reg;
+ // In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
+ ClobberSReg(cu, rl_dest.s_reg_low);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ int temp_reg = AllocTempFloat(cu);
+
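+ // Implement the Java narrowing rules in line: NaN goes to 0, values above
+ // the pre-loaded INT_MAX constant go to INT_MAX, and everything else takes
+ // the truncating cvttss2si.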
+ LoadConstant(cu, rl_result.low_reg, 0x7fffffff);
+ NewLIR2(cu, kX86Cvtsi2ssRR, temp_reg, rl_result.low_reg);
+ NewLIR2(cu, kX86ComissRR, src_reg, temp_reg);
+ LIR* branch_pos_overflow = NewLIR2(cu, kX86Jcc8, 0, kX86CondA);
+ LIR* branch_na_n = NewLIR2(cu, kX86Jcc8, 0, kX86CondP);
+ NewLIR2(cu, kX86Cvttss2siRR, rl_result.low_reg, src_reg);
+ LIR* branch_normal = NewLIR1(cu, kX86Jmp8, 0);
+ branch_na_n->target = NewLIR0(cu, kPseudoTargetLabel);
+ NewLIR2(cu, kX86Xor32RR, rl_result.low_reg, rl_result.low_reg);
+ branch_pos_overflow->target = NewLIR0(cu, kPseudoTargetLabel);
+ branch_normal->target = NewLIR0(cu, kPseudoTargetLabel);
+ StoreValue(cu, rl_dest, rl_result);
+ return;
+ }
+ case Instruction::DOUBLE_TO_INT: {
+ rl_src = LoadValueWide(cu, rl_src, kFPReg);
+ src_reg = rl_src.low_reg;
+ // In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
+ ClobberSReg(cu, rl_dest.s_reg_low);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ int temp_reg = AllocTempDouble(cu) | X86_FP_DOUBLE;
+
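+ // Same narrowing pattern as FLOAT_TO_INT above, using the double-precision
+ // compare and convert.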
+ LoadConstant(cu, rl_result.low_reg, 0x7fffffff);
+ NewLIR2(cu, kX86Cvtsi2sdRR, temp_reg, rl_result.low_reg);
+ NewLIR2(cu, kX86ComisdRR, src_reg, temp_reg);
+ LIR* branch_pos_overflow = NewLIR2(cu, kX86Jcc8, 0, kX86CondA);
+ LIR* branch_na_n = NewLIR2(cu, kX86Jcc8, 0, kX86CondP);
+ NewLIR2(cu, kX86Cvttsd2siRR, rl_result.low_reg, src_reg);
+ LIR* branch_normal = NewLIR1(cu, kX86Jmp8, 0);
+ branch_na_n->target = NewLIR0(cu, kPseudoTargetLabel);
+ NewLIR2(cu, kX86Xor32RR, rl_result.low_reg, rl_result.low_reg);
+ branch_pos_overflow->target = NewLIR0(cu, kPseudoTargetLabel);
+ branch_normal->target = NewLIR0(cu, kPseudoTargetLabel);
+ StoreValue(cu, rl_dest, rl_result);
+ return;
+ }
+ case Instruction::LONG_TO_DOUBLE:
+ GenConversionCall(cu, ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src);
+ return;
+ case Instruction::LONG_TO_FLOAT:
+ // TODO: inline by using memory as a 64-bit source. Be careful about promoted registers.
+ GenConversionCall(cu, ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src);
+ return;
+ case Instruction::FLOAT_TO_LONG:
+ GenConversionCall(cu, ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src);
+ return;
+ case Instruction::DOUBLE_TO_LONG:
+ GenConversionCall(cu, ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src);
+ return;
+ default:
+ LOG(INFO) << "Unexpected opcode: " << opcode;
+ }
+ if (rl_src.wide) {
+ rl_src = LoadValueWide(cu, rl_src, rcSrc);
+ src_reg = S2d(rl_src.low_reg, rl_src.high_reg);
+ } else {
+ rl_src = LoadValue(cu, rl_src, rcSrc);
+ src_reg = rl_src.low_reg;
+ }
+ if (rl_dest.wide) {
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ NewLIR2(cu, op, S2d(rl_result.low_reg, rl_result.high_reg), src_reg);
+ StoreValueWide(cu, rl_dest, rl_result);
+ } else {
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ NewLIR2(cu, op, rl_result.low_reg, src_reg);
+ StoreValue(cu, rl_dest, rl_result);
+ }
+}
+
+void X86Codegen::GenCmpFP(CompilationUnit *cu, Instruction::Code code, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2) {
+ bool single = (code == Instruction::CMPL_FLOAT) || (code == Instruction::CMPG_FLOAT);
+ bool unordered_gt = (code == Instruction::CMPG_DOUBLE) || (code == Instruction::CMPG_FLOAT);
+ int src_reg1;
+ int src_reg2;
+ if (single) {
+ rl_src1 = LoadValue(cu, rl_src1, kFPReg);
+ src_reg1 = rl_src1.low_reg;
+ rl_src2 = LoadValue(cu, rl_src2, kFPReg);
+ src_reg2 = rl_src2.low_reg;
+ } else {
+ rl_src1 = LoadValueWide(cu, rl_src1, kFPReg);
+ src_reg1 = S2d(rl_src1.low_reg, rl_src1.high_reg);
+ rl_src2 = LoadValueWide(cu, rl_src2, kFPReg);
+ src_reg2 = S2d(rl_src2.low_reg, rl_src2.high_reg);
+ }
+ // In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
+ ClobberSReg(cu, rl_dest.s_reg_low);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
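+ // Build the 1/0/-1 result from the flags of the unordered compare: "above"
+ // yields 1 (via set8 or the jump/move fallback when the result register is
+ // not byte addressable) and the trailing sbb of 0 turns a set carry into -1.
+ // For cmpg, NaN is routed around the sequence by the parity branch, leaving
+ // the pre-loaded 1; for cmpl, NaN sets the carry and ends up as -1.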
+ LoadConstantNoClobber(cu, rl_result.low_reg, unordered_gt ? 1 : 0);
+ if (single) {
+ NewLIR2(cu, kX86UcomissRR, src_reg1, src_reg2);
+ } else {
+ NewLIR2(cu, kX86UcomisdRR, src_reg1, src_reg2);
+ }
+ LIR* branch = NULL;
+ if (unordered_gt) {
+ branch = NewLIR2(cu, kX86Jcc8, 0, kX86CondPE);
+ }
+ // If the result reg can't be byte accessed, use a jump and move instead of a set.
+ if (rl_result.low_reg >= 4) {
+ LIR* branch2 = NULL;
+ if (unordered_gt) {
+ branch2 = NewLIR2(cu, kX86Jcc8, 0, kX86CondA);
+ NewLIR2(cu, kX86Mov32RI, rl_result.low_reg, 0x0);
+ } else {
+ branch2 = NewLIR2(cu, kX86Jcc8, 0, kX86CondBe);
+ NewLIR2(cu, kX86Mov32RI, rl_result.low_reg, 0x1);
+ }
+ branch2->target = NewLIR0(cu, kPseudoTargetLabel);
+ } else {
+ NewLIR2(cu, kX86Set8R, rl_result.low_reg, kX86CondA /* above - unsigned > */);
+ }
+ NewLIR2(cu, kX86Sbb32RI, rl_result.low_reg, 0);
+ if (unordered_gt) {
+ branch->target = NewLIR0(cu, kPseudoTargetLabel);
+ }
+ StoreValue(cu, rl_dest, rl_result);
+}
+
+void X86Codegen::GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir, bool gt_bias,
+ bool is_double) {
+ LIR* label_list = cu->block_label_list;
+ LIR* taken = &label_list[bb->taken->id];
+ LIR* not_taken = &label_list[bb->fall_through->id];
+ LIR* branch = NULL;
+ RegLocation rl_src1;
+ RegLocation rl_src2;
+ if (is_double) {
+ rl_src1 = GetSrcWide(cu, mir, 0);
+ rl_src2 = GetSrcWide(cu, mir, 2);
+ rl_src1 = LoadValueWide(cu, rl_src1, kFPReg);
+ rl_src2 = LoadValueWide(cu, rl_src2, kFPReg);
+ NewLIR2(cu, kX86UcomisdRR, S2d(rl_src1.low_reg, rl_src1.high_reg),
+ S2d(rl_src2.low_reg, rl_src2.high_reg));
+ } else {
+ rl_src1 = GetSrc(cu, mir, 0);
+ rl_src2 = GetSrc(cu, mir, 1);
+ rl_src1 = LoadValue(cu, rl_src1, kFPReg);
+ rl_src2 = LoadValue(cu, rl_src2, kFPReg);
+ NewLIR2(cu, kX86UcomissRR, rl_src1.low_reg, rl_src2.low_reg);
+ }
+ ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
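+ // ucomiss/ucomisd set CF/ZF like an unsigned compare, so the signed Dalvik
+ // conditions are remapped to their unsigned counterparts below; the parity
+ // branches route NaN to taken or not_taken according to the gt_bias of the
+ // original cmpl/cmpg.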
+ switch (ccode) {
+ case kCondEq:
+ if (!gt_bias) {
+ branch = NewLIR2(cu, kX86Jcc8, 0, kX86CondPE);
+ branch->target = not_taken;
+ }
+ break;
+ case kCondNe:
+ if (!gt_bias) {
+ branch = NewLIR2(cu, kX86Jcc8, 0, kX86CondPE);
+ branch->target = taken;
+ }
+ break;
+ case kCondLt:
+ if (gt_bias) {
+ branch = NewLIR2(cu, kX86Jcc8, 0, kX86CondPE);
+ branch->target = not_taken;
+ }
+ ccode = kCondCs;
+ break;
+ case kCondLe:
+ if (gt_bias) {
+ branch = NewLIR2(cu, kX86Jcc8, 0, kX86CondPE);
+ branch->target = not_taken;
+ }
+ ccode = kCondLs;
+ break;
+ case kCondGt:
+ if (gt_bias) {
+ branch = NewLIR2(cu, kX86Jcc8, 0, kX86CondPE);
+ branch->target = taken;
+ }
+ ccode = kCondHi;
+ break;
+ case kCondGe:
+ if (gt_bias) {
+ branch = NewLIR2(cu, kX86Jcc8, 0, kX86CondPE);
+ branch->target = taken;
+ }
+ ccode = kCondCc;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected ccode: " << ccode;
+ }
+ OpCondBranch(cu, ccode, taken);
+}
+
+void X86Codegen::GenNegFloat(CompilationUnit *cu, RegLocation rl_dest, RegLocation rl_src)
+{
+ RegLocation rl_result;
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ OpRegRegImm(cu, kOpAdd, rl_result.low_reg, rl_src.low_reg, 0x80000000);
+ StoreValue(cu, rl_dest, rl_result);
+}
+
+void X86Codegen::GenNegDouble(CompilationUnit *cu, RegLocation rl_dest, RegLocation rl_src)
+{
+ RegLocation rl_result;
+ rl_src = LoadValueWide(cu, rl_src, kCoreReg);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ OpRegRegImm(cu, kOpAdd, rl_result.high_reg, rl_src.high_reg, 0x80000000);
+ OpRegCopy(cu, rl_result.low_reg, rl_src.low_reg);
+ StoreValueWide(cu, rl_dest, rl_result);
+}
+
+bool X86Codegen::GenInlinedSqrt(CompilationUnit* cu, CallInfo* info) {
+ DCHECK_NE(cu->instruction_set, kThumb2);
+ return false;
+}
+
+
+
+} // namespace art
diff --git a/src/compiler/dex/quick/x86/int_x86.cc b/src/compiler/dex/quick/x86/int_x86.cc
new file mode 100644
index 0000000..81b1d72
--- /dev/null
+++ b/src/compiler/dex/quick/x86/int_x86.cc
@@ -0,0 +1,607 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This file contains codegen for the X86 ISA */
+
+#include "codegen_x86.h"
+#include "compiler/dex/quick/codegen_util.h"
+#include "compiler/dex/quick/ralloc_util.h"
+#include "x86_lir.h"
+
+namespace art {
+
+/*
+ * Perform register memory operation.
+ */
+LIR* X86Codegen::GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code,
+ int reg1, int base, int offset, ThrowKind kind)
+{
+ LIR* tgt = RawLIR(cu, 0, kPseudoThrowTarget, kind,
+ cu->current_dalvik_offset, reg1, base, offset);
+ OpRegMem(cu, kOpCmp, reg1, base, offset);
+ LIR* branch = OpCondBranch(cu, c_code, tgt);
+ // Remember branch target - will process later
+ InsertGrowableList(cu, &cu->throw_launchpads, reinterpret_cast<uintptr_t>(tgt));
+ return branch;
+}
+
+/*
+ * Compare two 64-bit values
+ * x = y return 0
+ * x < y return -1
+ * x > y return 1
+ */
+void X86Codegen::GenCmpLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2)
+{
+ FlushAllRegs(cu);
+ LockCallTemps(cu); // Prepare for explicit register usage
+ LoadValueDirectWideFixed(cu, rl_src1, r0, r1);
+ LoadValueDirectWideFixed(cu, rl_src2, r2, r3);
+ // Compute (r1:r0) = (r1:r0) - (r3:r2)
+ OpRegReg(cu, kOpSub, r0, r2); // r0 = r0 - r2
+ OpRegReg(cu, kOpSbc, r1, r3); // r1 = r1 - r3 - CF
+ NewLIR2(cu, kX86Set8R, r2, kX86CondL); // r2 = (r1:r0) < (r3:r2) ? 1 : 0
+ NewLIR2(cu, kX86Movzx8RR, r2, r2);
+ OpReg(cu, kOpNeg, r2); // r2 = -r2
+ OpRegReg(cu, kOpOr, r0, r1); // r0 = high | low - sets ZF
+ NewLIR2(cu, kX86Set8R, r0, kX86CondNz); // r0 = (r1:r0) != (r3:r2) ? 1 : 0
+ NewLIR2(cu, kX86Movzx8RR, r0, r0);
+ OpRegReg(cu, kOpOr, r0, r2); // r0 = r0 | r2
+ RegLocation rl_result = LocCReturn();
+ StoreValue(cu, rl_dest, rl_result);
+}
+
+X86ConditionCode X86ConditionEncoding(ConditionCode cond) {
+ switch (cond) {
+ case kCondEq: return kX86CondEq;
+ case kCondNe: return kX86CondNe;
+ case kCondCs: return kX86CondC;
+ case kCondCc: return kX86CondNc;
+ case kCondMi: return kX86CondS;
+ case kCondPl: return kX86CondNs;
+ case kCondVs: return kX86CondO;
+ case kCondVc: return kX86CondNo;
+ case kCondHi: return kX86CondA;
+ case kCondLs: return kX86CondBe;
+ case kCondGe: return kX86CondGe;
+ case kCondLt: return kX86CondL;
+ case kCondGt: return kX86CondG;
+ case kCondLe: return kX86CondLe;
+ case kCondAl:
+ case kCondNv: LOG(FATAL) << "Should not reach here";
+ }
+ return kX86CondO;
+}
+
+LIR* X86Codegen::OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1, int src2,
+ LIR* target)
+{
+ NewLIR2(cu, kX86Cmp32RR, src1, src2);
+ X86ConditionCode cc = X86ConditionEncoding(cond);
+ LIR* branch = NewLIR2(cu, kX86Jcc8, 0 /* lir operand for Jcc offset */, cc);
+ branch->target = target;
+ return branch;
+}
+
+LIR* X86Codegen::OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg,
+ int check_value, LIR* target)
+{
+ if ((check_value == 0) && (cond == kCondEq || cond == kCondNe)) {
+ // TODO: when check_value == 0 and reg is rCX, use the jcxz/nz opcode
+ NewLIR2(cu, kX86Test32RR, reg, reg);
+ } else {
+ NewLIR2(cu, IS_SIMM8(check_value) ? kX86Cmp32RI8 : kX86Cmp32RI, reg, check_value);
+ }
+ X86ConditionCode cc = X86ConditionEncoding(cond);
+ LIR* branch = NewLIR2(cu, kX86Jcc8, 0 /* lir operand for Jcc offset */, cc);
+ branch->target = target;
+ return branch;
+}
+
+LIR* X86Codegen::OpRegCopyNoInsert(CompilationUnit *cu, int r_dest, int r_src)
+{
+ if (X86_FPREG(r_dest) || X86_FPREG(r_src))
+ return OpFpRegCopy(cu, r_dest, r_src);
+ LIR* res = RawLIR(cu, cu->current_dalvik_offset, kX86Mov32RR,
+ r_dest, r_src);
+ if (r_dest == r_src) {
+ res->flags.is_nop = true;
+ }
+ return res;
+}
+
+LIR* X86Codegen::OpRegCopy(CompilationUnit *cu, int r_dest, int r_src)
+{
+ LIR *res = OpRegCopyNoInsert(cu, r_dest, r_src);
+ AppendLIR(cu, res);
+ return res;
+}
+
+void X86Codegen::OpRegCopyWide(CompilationUnit *cu, int dest_lo, int dest_hi,
+ int src_lo, int src_hi)
+{
+ bool dest_fp = X86_FPREG(dest_lo) && X86_FPREG(dest_hi);
+ bool src_fp = X86_FPREG(src_lo) && X86_FPREG(src_hi);
+ DCHECK_EQ(X86_FPREG(src_lo), X86_FPREG(src_hi));
+ DCHECK_EQ(X86_FPREG(dest_lo), X86_FPREG(dest_hi));
+ if (dest_fp) {
+ if (src_fp) {
+ OpRegCopy(cu, S2d(dest_lo, dest_hi), S2d(src_lo, src_hi));
+ } else {
+ // TODO: Prevent this from happening in the code. The result is often
+ // unused or could have been loaded more easily from memory.
+ NewLIR2(cu, kX86MovdxrRR, dest_lo, src_lo);
+ NewLIR2(cu, kX86MovdxrRR, dest_hi, src_hi);
+ NewLIR2(cu, kX86PsllqRI, dest_hi, 32);
+ NewLIR2(cu, kX86OrpsRR, dest_lo, dest_hi);
+ }
+ } else {
+ if (src_fp) {
+ NewLIR2(cu, kX86MovdrxRR, dest_lo, src_lo);
+ NewLIR2(cu, kX86PsrlqRI, src_lo, 32);
+ NewLIR2(cu, kX86MovdrxRR, dest_hi, src_lo);
+ } else {
+ // Handle overlap
+ if (src_hi == dest_lo) {
+ OpRegCopy(cu, dest_hi, src_hi);
+ OpRegCopy(cu, dest_lo, src_lo);
+ } else {
+ OpRegCopy(cu, dest_lo, src_lo);
+ OpRegCopy(cu, dest_hi, src_hi);
+ }
+ }
+ }
+}
+
+void X86Codegen::GenSelect(CompilationUnit* cu, BasicBlock* bb, MIR* mir)
+{
+ UNIMPLEMENTED(FATAL) << "Need codegen for GenSelect";
+}
+
+void X86Codegen::GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir) {
+ LIR* label_list = cu->block_label_list;
+ LIR* taken = &label_list[bb->taken->id];
+ RegLocation rl_src1 = GetSrcWide(cu, mir, 0);
+ RegLocation rl_src2 = GetSrcWide(cu, mir, 2);
+ FlushAllRegs(cu);
+ LockCallTemps(cu); // Prepare for explicit register usage
+ LoadValueDirectWideFixed(cu, rl_src1, r0, r1);
+ LoadValueDirectWideFixed(cu, rl_src2, r2, r3);
+ ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
+ // Swap operands and condition code to prevent use of zero flag.
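+ // The sub/sbb pair leaves correct signed flags only for < and >=; <= and >
+ // are obtained by swapping the operands and testing the inverted condition,
+ // and ==/!= by or-ing the two result halves to get a usable zero flag.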
+ if (ccode == kCondLe || ccode == kCondGt) {
+ // Compute (r3:r2) = (r3:r2) - (r1:r0)
+ OpRegReg(cu, kOpSub, r2, r0); // r2 = r2 - r0
+ OpRegReg(cu, kOpSbc, r3, r1); // r3 = r3 - r1 - CF
+ } else {
+ // Compute (r1:r0) = (r1:r0) - (r3:r2)
+ OpRegReg(cu, kOpSub, r0, r2); // r0 = r0 - r2
+ OpRegReg(cu, kOpSbc, r1, r3); // r1 = r1 - r3 - CF
+ }
+ switch (ccode) {
+ case kCondEq:
+ case kCondNe:
+ OpRegReg(cu, kOpOr, r0, r1); // r0 = r0 | r1
+ break;
+ case kCondLe:
+ ccode = kCondGe;
+ break;
+ case kCondGt:
+ ccode = kCondLt;
+ break;
+ case kCondLt:
+ case kCondGe:
+ break;
+ default:
+ LOG(FATAL) << "Unexpected ccode: " << ccode;
+ }
+ OpCondBranch(cu, ccode, taken);
+}
+
+RegLocation X86Codegen::GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg_lo,
+ int lit, bool is_div)
+{
+ LOG(FATAL) << "Unexpected use of GenDivRemLit for x86";
+ return rl_dest;
+}
+
+RegLocation X86Codegen::GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg_lo,
+ int reg_hi, bool is_div)
+{
+ LOG(FATAL) << "Unexpected use of GenDivRem for x86";
+ return rl_dest;
+}
+
+bool X86Codegen::GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min)
+{
+ DCHECK_EQ(cu->instruction_set, kX86);
+ RegLocation rl_src1 = info->args[0];
+ RegLocation rl_src2 = info->args[1];
+ rl_src1 = LoadValue(cu, rl_src1, kCoreReg);
+ rl_src2 = LoadValue(cu, rl_src2, kCoreReg);
+ RegLocation rl_dest = InlineTarget(cu, info);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ OpRegReg(cu, kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
+ DCHECK_EQ(cu->instruction_set, kX86);
+ LIR* branch = NewLIR2(cu, kX86Jcc8, 0, is_min ? kX86CondG : kX86CondL);
+ OpRegReg(cu, kOpMov, rl_result.low_reg, rl_src1.low_reg);
+ LIR* branch2 = NewLIR1(cu, kX86Jmp8, 0);
+ branch->target = NewLIR0(cu, kPseudoTargetLabel);
+ OpRegReg(cu, kOpMov, rl_result.low_reg, rl_src2.low_reg);
+ branch2->target = NewLIR0(cu, kPseudoTargetLabel);
+ StoreValue(cu, rl_dest, rl_result);
+ return true;
+}
+
+void X86Codegen::OpLea(CompilationUnit* cu, int rBase, int reg1, int reg2, int scale, int offset)
+{
+ NewLIR5(cu, kX86Lea32RA, rBase, reg1, reg2, scale, offset);
+}
+
+void X86Codegen::OpTlsCmp(CompilationUnit* cu, int offset, int val)
+{
+ NewLIR2(cu, kX86Cmp16TI8, offset, val);
+}
+
+bool X86Codegen::GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier) {
+ DCHECK_NE(cu->instruction_set, kThumb2);
+ return false;
+}
+
+LIR* X86Codegen::OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target) {
+ LOG(FATAL) << "Unexpected use of OpPcRelLoad for x86";
+ return NULL;
+}
+
+LIR* X86Codegen::OpVldm(CompilationUnit* cu, int rBase, int count)
+{
+ LOG(FATAL) << "Unexpected use of OpVldm for x86";
+ return NULL;
+}
+
+LIR* X86Codegen::OpVstm(CompilationUnit* cu, int rBase, int count)
+{
+ LOG(FATAL) << "Unexpected use of OpVstm for x86";
+ return NULL;
+}
+
+void X86Codegen::GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src,
+ RegLocation rl_result, int lit,
+ int first_bit, int second_bit)
+{
+ int t_reg = AllocTemp(cu);
+ OpRegRegImm(cu, kOpLsl, t_reg, rl_src.low_reg, second_bit - first_bit);
+ OpRegRegReg(cu, kOpAdd, rl_result.low_reg, rl_src.low_reg, t_reg);
+ FreeTemp(cu, t_reg);
+ if (first_bit != 0) {
+ OpRegRegImm(cu, kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
+ }
+}
+
+void X86Codegen::GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi)
+{
+ int t_reg = AllocTemp(cu);
+ OpRegRegReg(cu, kOpOr, t_reg, reg_lo, reg_hi);
+ GenImmedCheck(cu, kCondEq, t_reg, 0, kThrowDivZero);
+ FreeTemp(cu, t_reg);
+}
+
+// Test suspend flag, return target of taken suspend branch
+LIR* X86Codegen::OpTestSuspend(CompilationUnit* cu, LIR* target)
+{
+ OpTlsCmp(cu, Thread::ThreadFlagsOffset().Int32Value(), 0);
+ return OpCondBranch(cu, (target == NULL) ? kCondNe : kCondEq, target);
+}
+
+// Decrement register and branch on condition
+LIR* X86Codegen::OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg, LIR* target)
+{
+ OpRegImm(cu, kOpSub, reg, 1);
+ return OpCmpImmBranch(cu, c_code, reg, 0, target);
+}
+
+bool X86Codegen::SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
+ RegLocation rl_src, RegLocation rl_dest, int lit)
+{
+ LOG(FATAL) << "Unexpected use of smallLiteralDive in x86";
+ return false;
+}
+
+LIR* X86Codegen::OpIT(CompilationUnit* cu, ConditionCode cond, const char* guide)
+{
+ LOG(FATAL) << "Unexpected use of OpIT in x86";
+ return NULL;
+}
+
+void X86Codegen::GenMulLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2)
+{
+ LOG(FATAL) << "Unexpected use of GenX86Long for x86";
+}
+
+void X86Codegen::GenAddLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2)
+{
+ // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart
+ // enough.
+ FlushAllRegs(cu);
+ LockCallTemps(cu); // Prepare for explicit register usage
+ LoadValueDirectWideFixed(cu, rl_src1, r0, r1);
+ LoadValueDirectWideFixed(cu, rl_src2, r2, r3);
+  // Compute (r1:r0) = (r1:r0) + (r3:r2)
+ OpRegReg(cu, kOpAdd, r0, r2); // r0 = r0 + r2
+ OpRegReg(cu, kOpAdc, r1, r3); // r1 = r1 + r3 + CF
+ RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+ INVALID_SREG, INVALID_SREG};
+ StoreValueWide(cu, rl_dest, rl_result);
+}
+
+void X86Codegen::GenSubLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2)
+{
+ // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart
+ // enough.
+ FlushAllRegs(cu);
+ LockCallTemps(cu); // Prepare for explicit register usage
+ LoadValueDirectWideFixed(cu, rl_src1, r0, r1);
+ LoadValueDirectWideFixed(cu, rl_src2, r2, r3);
+  // Compute (r1:r0) = (r1:r0) - (r3:r2)
+ OpRegReg(cu, kOpSub, r0, r2); // r0 = r0 - r2
+ OpRegReg(cu, kOpSbc, r1, r3); // r1 = r1 - r3 - CF
+ RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+ INVALID_SREG, INVALID_SREG};
+ StoreValueWide(cu, rl_dest, rl_result);
+}
+
+void X86Codegen::GenAndLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2)
+{
+ // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart
+ // enough.
+ FlushAllRegs(cu);
+ LockCallTemps(cu); // Prepare for explicit register usage
+ LoadValueDirectWideFixed(cu, rl_src1, r0, r1);
+ LoadValueDirectWideFixed(cu, rl_src2, r2, r3);
+  // Compute (r1:r0) = (r1:r0) & (r3:r2)
+ OpRegReg(cu, kOpAnd, r0, r2); // r0 = r0 & r2
+ OpRegReg(cu, kOpAnd, r1, r3); // r1 = r1 & r3
+ RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+ INVALID_SREG, INVALID_SREG};
+ StoreValueWide(cu, rl_dest, rl_result);
+}
+
+void X86Codegen::GenOrLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
+{
+ // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart
+ // enough.
+ FlushAllRegs(cu);
+ LockCallTemps(cu); // Prepare for explicit register usage
+ LoadValueDirectWideFixed(cu, rl_src1, r0, r1);
+ LoadValueDirectWideFixed(cu, rl_src2, r2, r3);
+  // Compute (r1:r0) = (r1:r0) | (r3:r2)
+ OpRegReg(cu, kOpOr, r0, r2); // r0 = r0 | r2
+ OpRegReg(cu, kOpOr, r1, r3); // r1 = r1 | r3
+ RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+ INVALID_SREG, INVALID_SREG};
+ StoreValueWide(cu, rl_dest, rl_result);
+}
+
+void X86Codegen::GenXorLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
+{
+ // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart
+ // enough.
+ FlushAllRegs(cu);
+ LockCallTemps(cu); // Prepare for explicit register usage
+ LoadValueDirectWideFixed(cu, rl_src1, r0, r1);
+ LoadValueDirectWideFixed(cu, rl_src2, r2, r3);
+  // Compute (r1:r0) = (r1:r0) ^ (r3:r2)
+ OpRegReg(cu, kOpXor, r0, r2); // r0 = r0 ^ r2
+ OpRegReg(cu, kOpXor, r1, r3); // r1 = r1 ^ r3
+ RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+ INVALID_SREG, INVALID_SREG};
+ StoreValueWide(cu, rl_dest, rl_result);
+}
+
+void X86Codegen::GenNegLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
+{
+ FlushAllRegs(cu);
+ LockCallTemps(cu); // Prepare for explicit register usage
+ LoadValueDirectWideFixed(cu, rl_src, r0, r1);
+ // Compute (r1:r0) = -(r1:r0)
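+  // Two's-complement negate of a register pair: negate the low word, fold the carry into the
+  // high word with ADC, then negate the high word.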
+ OpRegReg(cu, kOpNeg, r0, r0); // r0 = -r0
+ OpRegImm(cu, kOpAdc, r1, 0); // r1 = r1 + CF
+ OpRegReg(cu, kOpNeg, r1, r1); // r1 = -r1
+ RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+ INVALID_SREG, INVALID_SREG};
+ StoreValueWide(cu, rl_dest, rl_result);
+}
+
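+// Thread-relative operands are addressed through the fs: segment, whose base is Thread::Current() on x86.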
+void X86Codegen::OpRegThreadMem(CompilationUnit* cu, OpKind op, int r_dest, int thread_offset) {
+ X86OpCode opcode = kX86Bkpt;
+ switch (op) {
+ case kOpCmp: opcode = kX86Cmp32RT; break;
+ case kOpMov: opcode = kX86Mov32RT; break;
+ default:
+ LOG(FATAL) << "Bad opcode: " << op;
+ break;
+ }
+ NewLIR2(cu, opcode, r_dest, thread_offset);
+}
+
+/*
+ * Generate array load
+ */
+void X86Codegen::GenArrayGet(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_dest, int scale)
+{
+ RegisterClass reg_class = oat_reg_class_by_size(size);
+ int len_offset = mirror::Array::LengthOffset().Int32Value();
+ int data_offset;
+ RegLocation rl_result;
+ rl_array = LoadValue(cu, rl_array, kCoreReg);
+ rl_index = LoadValue(cu, rl_index, kCoreReg);
+
+ if (size == kLong || size == kDouble) {
+ data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
+ } else {
+ data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
+ }
+
+ /* null object? */
+ GenNullCheck(cu, rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+
+ if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
+ /* if (rl_index >= [rl_array + len_offset]) goto kThrowArrayBounds */
+ GenRegMemCheck(cu, kCondUge, rl_index.low_reg, rl_array.low_reg,
+ len_offset, kThrowArrayBounds);
+ }
+ if ((size == kLong) || (size == kDouble)) {
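+    // Wide element: form the element address with LEA first so both halves can be loaded
+    // through a plain base + displacement addressing mode.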
+ int reg_addr = AllocTemp(cu);
+ OpLea(cu, reg_addr, rl_array.low_reg, rl_index.low_reg, scale, data_offset);
+ FreeTemp(cu, rl_array.low_reg);
+ FreeTemp(cu, rl_index.low_reg);
+ rl_result = EvalLoc(cu, rl_dest, reg_class, true);
+ LoadBaseIndexedDisp(cu, reg_addr, INVALID_REG, 0, 0, rl_result.low_reg,
+ rl_result.high_reg, size, INVALID_SREG);
+ StoreValueWide(cu, rl_dest, rl_result);
+ } else {
+ rl_result = EvalLoc(cu, rl_dest, reg_class, true);
+
+ LoadBaseIndexedDisp(cu, rl_array.low_reg, rl_index.low_reg, scale,
+ data_offset, rl_result.low_reg, INVALID_REG, size,
+ INVALID_SREG);
+
+ StoreValue(cu, rl_dest, rl_result);
+ }
+}
+
+/*
+ * Generate array store
+ */
+void X86Codegen::GenArrayPut(CompilationUnit* cu, int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_src, int scale)
+{
+ RegisterClass reg_class = oat_reg_class_by_size(size);
+ int len_offset = mirror::Array::LengthOffset().Int32Value();
+ int data_offset;
+
+ if (size == kLong || size == kDouble) {
+ data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
+ } else {
+ data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
+ }
+
+ rl_array = LoadValue(cu, rl_array, kCoreReg);
+ rl_index = LoadValue(cu, rl_index, kCoreReg);
+
+ /* null object? */
+ GenNullCheck(cu, rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+
+ if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
+ /* if (rl_index >= [rl_array + len_offset]) goto kThrowArrayBounds */
+ GenRegMemCheck(cu, kCondUge, rl_index.low_reg, rl_array.low_reg, len_offset, kThrowArrayBounds);
+ }
+ if ((size == kLong) || (size == kDouble)) {
+ rl_src = LoadValueWide(cu, rl_src, reg_class);
+ } else {
+ rl_src = LoadValue(cu, rl_src, reg_class);
+ }
+ // If the src reg can't be byte accessed, move it to a temp first.
+ if ((size == kSignedByte || size == kUnsignedByte) && rl_src.low_reg >= 4) {
+ int temp = AllocTemp(cu);
+ OpRegCopy(cu, temp, rl_src.low_reg);
+ StoreBaseIndexedDisp(cu, rl_array.low_reg, rl_index.low_reg, scale, data_offset, temp,
+ INVALID_REG, size, INVALID_SREG);
+ } else {
+ StoreBaseIndexedDisp(cu, rl_array.low_reg, rl_index.low_reg, scale, data_offset, rl_src.low_reg,
+ rl_src.high_reg, size, INVALID_SREG);
+ }
+}
+
+/*
+ * Generate array store of an object reference, with the element type check
+ * and card mark required for reference arrays.
+ */
+void X86Codegen::GenArrayObjPut(CompilationUnit* cu, int opt_flags, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_src, int scale)
+{
+ int len_offset = mirror::Array::LengthOffset().Int32Value();
+ int data_offset = mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value();
+
+ FlushAllRegs(cu); // Use explicit registers
+ LockCallTemps(cu);
+
+ int r_value = TargetReg(kArg0); // Register holding value
+ int r_array_class = TargetReg(kArg1); // Register holding array's Class
+ int r_array = TargetReg(kArg2); // Register holding array
+ int r_index = TargetReg(kArg3); // Register holding index into array
+
+ LoadValueDirectFixed(cu, rl_array, r_array); // Grab array
+ LoadValueDirectFixed(cu, rl_src, r_value); // Grab value
+ LoadValueDirectFixed(cu, rl_index, r_index); // Grab index
+
+ GenNullCheck(cu, rl_array.s_reg_low, r_array, opt_flags); // NPE?
+
+ // Store of null?
+ LIR* null_value_check = OpCmpImmBranch(cu, kCondEq, r_value, 0, NULL);
+
+ // Get the array's class.
+ LoadWordDisp(cu, r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
+ CallRuntimeHelperRegReg(cu, ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
+ r_array_class, true);
+ // Redo LoadValues in case they didn't survive the call.
+ LoadValueDirectFixed(cu, rl_array, r_array); // Reload array
+ LoadValueDirectFixed(cu, rl_index, r_index); // Reload index
+ LoadValueDirectFixed(cu, rl_src, r_value); // Reload value
+ r_array_class = INVALID_REG;
+
+ // Branch here if value to be stored == null
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+ null_value_check->target = target;
+
+ // make an extra temp available for card mark below
+ FreeTemp(cu, TargetReg(kArg1));
+ if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
+ /* if (rl_index >= [rl_array + len_offset]) goto kThrowArrayBounds */
+ GenRegMemCheck(cu, kCondUge, r_index, r_array, len_offset, kThrowArrayBounds);
+ }
+ StoreBaseIndexedDisp(cu, r_array, r_index, scale,
+ data_offset, r_value, INVALID_REG, kWord, INVALID_SREG);
+ FreeTemp(cu, r_index);
+ if (!IsConstantNullRef(cu, rl_src)) {
+ MarkGCCard(cu, r_value, r_array);
+ }
+}
+
+void X86Codegen::GenShiftImmOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_shift)
+{
+ // Default implementation is just to ignore the constant case.
+ GenShiftOpLong(cu, opcode, rl_dest, rl_src1, rl_shift);
+}
+
+void X86Codegen::GenArithImmOpLong(CompilationUnit* cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
+{
+ // Default - bail to non-const handler.
+ GenArithOpLong(cu, opcode, rl_dest, rl_src1, rl_src2);
+}
+
+} // namespace art
diff --git a/src/compiler/dex/quick/x86/target_x86.cc b/src/compiler/dex/quick/x86/target_x86.cc
new file mode 100644
index 0000000..cb41fde
--- /dev/null
+++ b/src/compiler/dex/quick/x86/target_x86.cc
@@ -0,0 +1,570 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_x86.h"
+#include "compiler/dex/quick/codegen_util.h"
+#include "compiler/dex/compiler_internals.h"
+#include "compiler/dex/quick/ralloc_util.h"
+#include "x86_lir.h"
+
+#include <string>
+
+namespace art {
+
+// FIXME: restore "static" when usage uncovered.
+/*static*/ int core_regs[] = {
+  rAX, rCX, rDX, rBX, rX86_SP, rBP, rSI, rDI,
+#ifdef TARGET_REX_SUPPORT
+  r8, r9, r10, r11, r12, r13, r14, r15
+#endif
+};
+/*static*/ int ReservedRegs[] = {rX86_SP};
+/*static*/ int core_temps[] = {rAX, rCX, rDX, rBX};
+/*static*/ int FpRegs[] = {
+ fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
+#ifdef TARGET_REX_SUPPORT
+ fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15
+#endif
+};
+/*static*/ int fp_temps[] = {
+ fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
+#ifdef TARGET_REX_SUPPORT
+ fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15
+#endif
+};
+
+RegLocation X86Codegen::LocCReturn()
+{
+ RegLocation res = X86_LOC_C_RETURN;
+ return res;
+}
+
+RegLocation X86Codegen::LocCReturnWide()
+{
+ RegLocation res = X86_LOC_C_RETURN_WIDE;
+ return res;
+}
+
+RegLocation X86Codegen::LocCReturnFloat()
+{
+ RegLocation res = X86_LOC_C_RETURN_FLOAT;
+ return res;
+}
+
+RegLocation X86Codegen::LocCReturnDouble()
+{
+ RegLocation res = X86_LOC_C_RETURN_DOUBLE;
+ return res;
+}
+
+// Return a target-dependent special register.
+int X86Codegen::TargetReg(SpecialTargetRegister reg) {
+ int res = INVALID_REG;
+ switch (reg) {
+ case kSelf: res = rX86_SELF; break;
+ case kSuspend: res = rX86_SUSPEND; break;
+ case kLr: res = rX86_LR; break;
+ case kPc: res = rX86_PC; break;
+ case kSp: res = rX86_SP; break;
+ case kArg0: res = rX86_ARG0; break;
+ case kArg1: res = rX86_ARG1; break;
+ case kArg2: res = rX86_ARG2; break;
+ case kArg3: res = rX86_ARG3; break;
+ case kFArg0: res = rX86_FARG0; break;
+ case kFArg1: res = rX86_FARG1; break;
+ case kFArg2: res = rX86_FARG2; break;
+ case kFArg3: res = rX86_FARG3; break;
+ case kRet0: res = rX86_RET0; break;
+ case kRet1: res = rX86_RET1; break;
+ case kInvokeTgt: res = rX86_INVOKE_TGT; break;
+ case kCount: res = rX86_COUNT; break;
+ }
+ return res;
+}
+
+// Create a double from a pair of singles.
+int X86Codegen::S2d(int low_reg, int high_reg)
+{
+ return X86_S2D(low_reg, high_reg);
+}
+
+// Return mask to strip off fp reg flags and bias.
+uint32_t X86Codegen::FpRegMask()
+{
+ return X86_FP_REG_MASK;
+}
+
+// True if both regs single, both core or both double.
+bool X86Codegen::SameRegType(int reg1, int reg2)
+{
+ return (X86_REGTYPE(reg1) == X86_REGTYPE(reg2));
+}
+
+/*
+ * Decode the register id.
+ */
+uint64_t X86Codegen::GetRegMaskCommon(CompilationUnit* cu, int reg)
+{
+ uint64_t seed;
+ int shift;
+ int reg_id;
+
+ reg_id = reg & 0xf;
+ /* Double registers in x86 are just a single FP register */
+ seed = 1;
+ /* FP register starts at bit position 16 */
+ shift = X86_FPREG(reg) ? kX86FPReg0 : 0;
+ /* Expand the double register id into single offset */
+ shift += reg_id;
+ return (seed << shift);
+}
+
+uint64_t X86Codegen::GetPCUseDefEncoding()
+{
+ /*
+   * FIXME: might make sense to use a virtual resource encoding bit for pc. Might be
+   * able to clean up some of the x86 vs. ARM/MIPS differences.
+ */
+ LOG(FATAL) << "Unexpected call to GetPCUseDefEncoding for x86";
+ return 0ULL;
+}
+
+void X86Codegen::SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir)
+{
+ DCHECK_EQ(cu->instruction_set, kX86);
+
+ // X86-specific resource map setup here.
+ uint64_t flags = X86Codegen::EncodingMap[lir->opcode].flags;
+
+ if (flags & REG_USE_SP) {
+ lir->use_mask |= ENCODE_X86_REG_SP;
+ }
+
+ if (flags & REG_DEF_SP) {
+ lir->def_mask |= ENCODE_X86_REG_SP;
+ }
+
+ if (flags & REG_DEFA) {
+ SetupRegMask(cu, &lir->def_mask, rAX);
+ }
+
+ if (flags & REG_DEFD) {
+ SetupRegMask(cu, &lir->def_mask, rDX);
+ }
+ if (flags & REG_USEA) {
+ SetupRegMask(cu, &lir->use_mask, rAX);
+ }
+
+ if (flags & REG_USEC) {
+ SetupRegMask(cu, &lir->use_mask, rCX);
+ }
+
+ if (flags & REG_USED) {
+ SetupRegMask(cu, &lir->use_mask, rDX);
+ }
+}
+
+/* For dumping instructions */
+static const char* x86RegName[] = {
+ "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
+};
+
+static const char* x86CondName[] = {
+ "O",
+ "NO",
+ "B/NAE/C",
+ "NB/AE/NC",
+ "Z/EQ",
+ "NZ/NE",
+ "BE/NA",
+ "NBE/A",
+ "S",
+ "NS",
+ "P/PE",
+ "NP/PO",
+ "L/NGE",
+ "NL/GE",
+ "LE/NG",
+ "NLE/G"
+};
+
+/*
+ * Interpret a format string and build a display string for the instruction.
+ * See the format key in Assemble.cc.
+ */
+std::string X86Codegen::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
+ std::string buf;
+ size_t i = 0;
+ size_t fmt_len = strlen(fmt);
+ while (i < fmt_len) {
+ if (fmt[i] != '!') {
+ buf += fmt[i];
+ i++;
+ } else {
+ i++;
+ DCHECK_LT(i, fmt_len);
+ char operand_number_ch = fmt[i];
+ i++;
+ if (operand_number_ch == '!') {
+ buf += "!";
+ } else {
+ int operand_number = operand_number_ch - '0';
+        DCHECK_LT(operand_number, 6);  // Expect up to 6 LIR operands.
+ DCHECK_LT(i, fmt_len);
+ int operand = lir->operands[operand_number];
+ switch (fmt[i]) {
+ case 'c':
+            DCHECK_LT(static_cast<size_t>(operand), sizeof(x86CondName) / sizeof(x86CondName[0]));
+ buf += x86CondName[operand];
+ break;
+ case 'd':
+ buf += StringPrintf("%d", operand);
+ break;
+ case 'p': {
+ SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(operand);
+ buf += StringPrintf("0x%08x", tab_rec->offset);
+ break;
+ }
+ case 'r':
+ if (X86_FPREG(operand) || X86_DOUBLEREG(operand)) {
+ int fp_reg = operand & X86_FP_REG_MASK;
+ buf += StringPrintf("xmm%d", fp_reg);
+ } else {
+              DCHECK_LT(static_cast<size_t>(operand), sizeof(x86RegName) / sizeof(x86RegName[0]));
+ buf += x86RegName[operand];
+ }
+ break;
+ case 't':
+ buf += StringPrintf("0x%08x (L%p)",
+ reinterpret_cast<uint32_t>(base_addr)
+ + lir->offset + operand, lir->target);
+ break;
+ default:
+ buf += StringPrintf("DecodeError '%c'", fmt[i]);
+ break;
+ }
+ i++;
+ }
+ }
+ }
+ return buf;
+}
+
+void X86Codegen::DumpResourceMask(LIR *x86LIR, uint64_t mask, const char *prefix)
+{
+ char buf[256];
+ buf[0] = 0;
+
+ if (mask == ENCODE_ALL) {
+ strcpy(buf, "all");
+ } else {
+ char num[8];
+ int i;
+
+ for (i = 0; i < kX86RegEnd; i++) {
+ if (mask & (1ULL << i)) {
+ sprintf(num, "%d ", i);
+ strcat(buf, num);
+ }
+ }
+
+ if (mask & ENCODE_CCODE) {
+ strcat(buf, "cc ");
+ }
+ /* Memory bits */
+ if (x86LIR && (mask & ENCODE_DALVIK_REG)) {
+ sprintf(buf + strlen(buf), "dr%d%s", x86LIR->alias_info & 0xffff,
+ (x86LIR->alias_info & 0x80000000) ? "(+1)" : "");
+ }
+ if (mask & ENCODE_LITERAL) {
+ strcat(buf, "lit ");
+ }
+
+ if (mask & ENCODE_HEAP_REF) {
+ strcat(buf, "heap ");
+ }
+ if (mask & ENCODE_MUST_NOT_ALIAS) {
+ strcat(buf, "noalias ");
+ }
+ }
+ if (buf[0]) {
+ LOG(INFO) << prefix << ": " << buf;
+ }
+}
+
+void X86Codegen::AdjustSpillMask(CompilationUnit* cu) {
+  // x86 has no link register; instead, reserve a spill slot for the fake return address register (rRET).
+ cu->core_spill_mask |= (1 << rRET);
+ cu->num_core_spills++;
+}
+
+/*
+ * Mark a callee-save fp register as promoted. Note that
+ * vpush/vpop uses contiguous register lists so we must
+ * include any holes in the mask. Associate holes with
+ * Dalvik register INVALID_VREG (0xFFFFU).
+ */
+void X86Codegen::MarkPreservedSingle(CompilationUnit* cu, int v_reg, int reg)
+{
+ UNIMPLEMENTED(WARNING) << "MarkPreservedSingle";
+#if 0
+ LOG(FATAL) << "No support yet for promoted FP regs";
+#endif
+}
+
+void X86Codegen::FlushRegWide(CompilationUnit* cu, int reg1, int reg2)
+{
+ RegisterInfo* info1 = GetRegInfo(cu, reg1);
+ RegisterInfo* info2 = GetRegInfo(cu, reg2);
+ DCHECK(info1 && info2 && info1->pair && info2->pair &&
+ (info1->partner == info2->reg) &&
+ (info2->partner == info1->reg));
+ if ((info1->live && info1->dirty) || (info2->live && info2->dirty)) {
+ if (!(info1->is_temp && info2->is_temp)) {
+ /* Should not happen. If it does, there's a problem in eval_loc */
+ LOG(FATAL) << "Long half-temp, half-promoted";
+ }
+
+ info1->dirty = false;
+ info2->dirty = false;
+ if (SRegToVReg(cu, info2->s_reg) < SRegToVReg(cu, info1->s_reg))
+ info1 = info2;
+ int v_reg = SRegToVReg(cu, info1->s_reg);
+ StoreBaseDispWide(cu, rX86_SP, VRegOffset(cu, v_reg), info1->reg, info1->partner);
+ }
+}
+
+void X86Codegen::FlushReg(CompilationUnit* cu, int reg)
+{
+ RegisterInfo* info = GetRegInfo(cu, reg);
+ if (info->live && info->dirty) {
+ info->dirty = false;
+ int v_reg = SRegToVReg(cu, info->s_reg);
+ StoreBaseDisp(cu, rX86_SP, VRegOffset(cu, v_reg), reg, kWord);
+ }
+}
+
+/* Give access to the target-dependent FP register encoding to common code */
+bool X86Codegen::IsFpReg(int reg) {
+ return X86_FPREG(reg);
+}
+
+/* Clobber all regs that might be used by an external C call */
+void X86Codegen::ClobberCalleeSave(CompilationUnit *cu)
+{
+ Clobber(cu, rAX);
+ Clobber(cu, rCX);
+ Clobber(cu, rDX);
+}
+
+RegLocation X86Codegen::GetReturnWideAlt(CompilationUnit* cu) {
+ RegLocation res = LocCReturnWide();
+ CHECK(res.low_reg == rAX);
+ CHECK(res.high_reg == rDX);
+ Clobber(cu, rAX);
+ Clobber(cu, rDX);
+ MarkInUse(cu, rAX);
+ MarkInUse(cu, rDX);
+ MarkPair(cu, res.low_reg, res.high_reg);
+ return res;
+}
+
+RegLocation X86Codegen::GetReturnAlt(CompilationUnit* cu)
+{
+ RegLocation res = LocCReturn();
+ res.low_reg = rDX;
+ Clobber(cu, rDX);
+ MarkInUse(cu, rDX);
+ return res;
+}
+
+RegisterInfo* X86Codegen::GetRegInfo(CompilationUnit* cu, int reg)
+{
+ return X86_FPREG(reg) ? &cu->reg_pool->FPRegs[reg & X86_FP_REG_MASK]
+ : &cu->reg_pool->core_regs[reg];
+}
+
+/* To be used when explicitly managing register use */
+void X86Codegen::LockCallTemps(CompilationUnit* cu)
+{
+ LockTemp(cu, rX86_ARG0);
+ LockTemp(cu, rX86_ARG1);
+ LockTemp(cu, rX86_ARG2);
+ LockTemp(cu, rX86_ARG3);
+}
+
+/* To be used when explicitly managing register use */
+void X86Codegen::FreeCallTemps(CompilationUnit* cu)
+{
+ FreeTemp(cu, rX86_ARG0);
+ FreeTemp(cu, rX86_ARG1);
+ FreeTemp(cu, rX86_ARG2);
+ FreeTemp(cu, rX86_ARG3);
+}
+
+void X86Codegen::GenMemBarrier(CompilationUnit *cu, MemBarrierKind barrier_kind)
+{
+#if ANDROID_SMP != 0
+ // TODO: optimize fences
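+  // mfence is the strongest x86 barrier and conservatively satisfies every MemBarrierKind.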
+ NewLIR0(cu, kX86Mfence);
+#endif
+}
+
+/*
+ * Alloc a pair of core registers, or a double. Low reg in low byte,
+ * high reg in next byte.
+ */
+int X86Codegen::AllocTypedTempPair(CompilationUnit *cu, bool fp_hint,
+ int reg_class)
+{
+ int high_reg;
+ int low_reg;
+ int res = 0;
+
+ if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
+ low_reg = AllocTempDouble(cu);
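+    // A double occupies two consecutive single FP regs; AllocTempDouble returns the low one.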
+ high_reg = low_reg + 1;
+ res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
+ return res;
+ }
+
+ low_reg = AllocTemp(cu);
+ high_reg = AllocTemp(cu);
+ res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
+ return res;
+}
+
+int X86Codegen::AllocTypedTemp(CompilationUnit *cu, bool fp_hint, int reg_class) {
+ if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
+ return AllocTempFloat(cu);
+ }
+ return AllocTemp(cu);
+}
+
+void X86Codegen::CompilerInitializeRegAlloc(CompilationUnit* cu) {
+ int num_regs = sizeof(core_regs)/sizeof(*core_regs);
+ int num_reserved = sizeof(ReservedRegs)/sizeof(*ReservedRegs);
+ int num_temps = sizeof(core_temps)/sizeof(*core_temps);
+ int num_fp_regs = sizeof(FpRegs)/sizeof(*FpRegs);
+ int num_fp_temps = sizeof(fp_temps)/sizeof(*fp_temps);
+ RegisterPool *pool =
+ static_cast<RegisterPool*>(NewMem(cu, sizeof(*pool), true, kAllocRegAlloc));
+ cu->reg_pool = pool;
+ pool->num_core_regs = num_regs;
+ pool->core_regs =
+ static_cast<RegisterInfo*>(NewMem(cu, num_regs * sizeof(*cu->reg_pool->core_regs),
+ true, kAllocRegAlloc));
+ pool->num_fp_regs = num_fp_regs;
+ pool->FPRegs =
+ static_cast<RegisterInfo *>(NewMem(cu, num_fp_regs * sizeof(*cu->reg_pool->FPRegs),
+ true, kAllocRegAlloc));
+ CompilerInitPool(pool->core_regs, core_regs, pool->num_core_regs);
+ CompilerInitPool(pool->FPRegs, FpRegs, pool->num_fp_regs);
+ // Keep special registers from being allocated
+ for (int i = 0; i < num_reserved; i++) {
+ MarkInUse(cu, ReservedRegs[i]);
+ }
+ // Mark temp regs - all others not in use can be used for promotion
+ for (int i = 0; i < num_temps; i++) {
+ MarkTemp(cu, core_temps[i]);
+ }
+ for (int i = 0; i < num_fp_temps; i++) {
+ MarkTemp(cu, fp_temps[i]);
+ }
+}
+
+void X86Codegen::FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep,
+ RegLocation rl_free)
+{
+ if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) &&
+ (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) {
+ // No overlap, free both
+ FreeTemp(cu, rl_free.low_reg);
+ FreeTemp(cu, rl_free.high_reg);
+ }
+}
+
+void X86Codegen::SpillCoreRegs(CompilationUnit* cu) {
+ if (cu->num_core_spills == 0) {
+ return;
+ }
+ // Spill mask not including fake return address register
+ uint32_t mask = cu->core_spill_mask & ~(1 << rRET);
+ int offset = cu->frame_size - (4 * cu->num_core_spills);
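+  // Walk the spill mask from bit 0 upward, storing each marked register into successive 4-byte slots.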
+ for (int reg = 0; mask; mask >>= 1, reg++) {
+ if (mask & 0x1) {
+ StoreWordDisp(cu, rX86_SP, offset, reg);
+ offset += 4;
+ }
+ }
+}
+
+void X86Codegen::UnSpillCoreRegs(CompilationUnit* cu) {
+ if (cu->num_core_spills == 0) {
+ return;
+ }
+ // Spill mask not including fake return address register
+ uint32_t mask = cu->core_spill_mask & ~(1 << rRET);
+ int offset = cu->frame_size - (4 * cu->num_core_spills);
+ for (int reg = 0; mask; mask >>= 1, reg++) {
+ if (mask & 0x1) {
+ LoadWordDisp(cu, rX86_SP, offset, reg);
+ offset += 4;
+ }
+ }
+}
+
+bool X86Codegen::IsUnconditionalBranch(LIR* lir)
+{
+ return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
+}
+
+/* Common initialization routine for an architecture family */
+bool InitX86Codegen(CompilationUnit* cu) {
+ cu->cg.reset(new X86Codegen());
+ for (int i = 0; i < kX86Last; i++) {
+ if (X86Codegen::EncodingMap[i].opcode != i) {
+ LOG(FATAL) << "Encoding order for " << X86Codegen::EncodingMap[i].name
+ << " is wrong: expecting " << i << ", seeing "
+ << static_cast<int>(X86Codegen::EncodingMap[i].opcode);
+ }
+ }
+ return true;
+}
+
+// Not used in x86
+int X86Codegen::LoadHelper(CompilationUnit* cu, int offset)
+{
+ LOG(FATAL) << "Unexpected use of LoadHelper in x86";
+ return INVALID_REG;
+}
+
+uint64_t X86Codegen::GetTargetInstFlags(int opcode)
+{
+ return X86Codegen::EncodingMap[opcode].flags;
+}
+
+const char* X86Codegen::GetTargetInstName(int opcode)
+{
+ return X86Codegen::EncodingMap[opcode].name;
+}
+
+const char* X86Codegen::GetTargetInstFmt(int opcode)
+{
+ return X86Codegen::EncodingMap[opcode].fmt;
+}
+
+} // namespace art
diff --git a/src/compiler/dex/quick/x86/utility_x86.cc b/src/compiler/dex/quick/x86/utility_x86.cc
new file mode 100644
index 0000000..fa40635
--- /dev/null
+++ b/src/compiler/dex/quick/x86/utility_x86.cc
@@ -0,0 +1,583 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_x86.h"
+#include "compiler/dex/quick/codegen_util.h"
+#include "compiler/dex/quick/ralloc_util.h"
+#include "x86_lir.h"
+
+namespace art {
+
+/* This file contains codegen for the X86 ISA */
+
+LIR* X86Codegen::OpFpRegCopy(CompilationUnit *cu, int r_dest, int r_src)
+{
+ int opcode;
+ /* must be both DOUBLE or both not DOUBLE */
+ DCHECK_EQ(X86_DOUBLEREG(r_dest), X86_DOUBLEREG(r_src));
+ if (X86_DOUBLEREG(r_dest)) {
+ opcode = kX86MovsdRR;
+ } else {
+ if (X86_SINGLEREG(r_dest)) {
+ if (X86_SINGLEREG(r_src)) {
+ opcode = kX86MovssRR;
+ } else { // Fpr <- Gpr
+ opcode = kX86MovdxrRR;
+ }
+ } else { // Gpr <- Fpr
+ DCHECK(X86_SINGLEREG(r_src));
+ opcode = kX86MovdrxRR;
+ }
+ }
+ DCHECK_NE((EncodingMap[opcode].flags & IS_BINARY_OP), 0ULL);
+ LIR* res = RawLIR(cu, cu->current_dalvik_offset, opcode, r_dest, r_src);
+ if (r_dest == r_src) {
+ res->flags.is_nop = true;
+ }
+ return res;
+}
+
+bool X86Codegen::InexpensiveConstantInt(int32_t value)
+{
+ return true;
+}
+
+bool X86Codegen::InexpensiveConstantFloat(int32_t value)
+{
+ return false;
+}
+
+bool X86Codegen::InexpensiveConstantLong(int64_t value)
+{
+ return true;
+}
+
+bool X86Codegen::InexpensiveConstantDouble(int64_t value)
+{
+ return false; // TUNING
+}
+
+/*
+ * Load an immediate using a shortcut if possible. If the target is an
+ * FP register, the constant is built in a core temp and copied over.
+ *
+ * No additional register clobbering operation performed. Use this version when
+ * 1) r_dest is freshly returned from AllocTemp or
+ * 2) The codegen is under fixed register usage
+ */
+LIR* X86Codegen::LoadConstantNoClobber(CompilationUnit *cu, int r_dest, int value)
+{
+ int r_dest_save = r_dest;
+ if (X86_FPREG(r_dest)) {
+ if (value == 0) {
+ return NewLIR2(cu, kX86XorpsRR, r_dest, r_dest);
+ }
+ DCHECK(X86_SINGLEREG(r_dest));
+ r_dest = AllocTemp(cu);
+ }
+
+ LIR *res;
+ if (value == 0) {
+ res = NewLIR2(cu, kX86Xor32RR, r_dest, r_dest);
+ } else {
+ // Note, there is no byte immediate form of a 32 bit immediate move.
+ res = NewLIR2(cu, kX86Mov32RI, r_dest, value);
+ }
+
+ if (X86_FPREG(r_dest_save)) {
+ NewLIR2(cu, kX86MovdxrRR, r_dest_save, r_dest);
+ FreeTemp(cu, r_dest);
+ }
+
+ return res;
+}
+
+LIR* X86Codegen::OpUnconditionalBranch(CompilationUnit* cu, LIR* target)
+{
+  LIR* res = NewLIR1(cu, kX86Jmp8, 0 /* offset to be patched during assembly */);
+ res->target = target;
+ return res;
+}
+
+LIR* X86Codegen::OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target)
+{
+ LIR* branch = NewLIR2(cu, kX86Jcc8, 0 /* offset to be patched */,
+ X86ConditionEncoding(cc));
+ branch->target = target;
+ return branch;
+}
+
+LIR* X86Codegen::OpReg(CompilationUnit *cu, OpKind op, int r_dest_src)
+{
+ X86OpCode opcode = kX86Bkpt;
+ switch (op) {
+ case kOpNeg: opcode = kX86Neg32R; break;
+ case kOpNot: opcode = kX86Not32R; break;
+ case kOpBlx: opcode = kX86CallR; break;
+ default:
+ LOG(FATAL) << "Bad case in OpReg " << op;
+ }
+ return NewLIR1(cu, opcode, r_dest_src);
+}
+
+LIR* X86Codegen::OpRegImm(CompilationUnit *cu, OpKind op, int r_dest_src1, int value)
+{
+ X86OpCode opcode = kX86Bkpt;
+ bool byte_imm = IS_SIMM8(value);
+ DCHECK(!X86_FPREG(r_dest_src1));
+ switch (op) {
+ case kOpLsl: opcode = kX86Sal32RI; break;
+ case kOpLsr: opcode = kX86Shr32RI; break;
+ case kOpAsr: opcode = kX86Sar32RI; break;
+ case kOpAdd: opcode = byte_imm ? kX86Add32RI8 : kX86Add32RI; break;
+ case kOpOr: opcode = byte_imm ? kX86Or32RI8 : kX86Or32RI; break;
+ case kOpAdc: opcode = byte_imm ? kX86Adc32RI8 : kX86Adc32RI; break;
+ //case kOpSbb: opcode = kX86Sbb32RI; break;
+ case kOpAnd: opcode = byte_imm ? kX86And32RI8 : kX86And32RI; break;
+ case kOpSub: opcode = byte_imm ? kX86Sub32RI8 : kX86Sub32RI; break;
+ case kOpXor: opcode = byte_imm ? kX86Xor32RI8 : kX86Xor32RI; break;
+ case kOpCmp: opcode = byte_imm ? kX86Cmp32RI8 : kX86Cmp32RI; break;
+ case kOpMov: return LoadConstantNoClobber(cu, r_dest_src1, value);
+ case kOpMul:
+ opcode = byte_imm ? kX86Imul32RRI8 : kX86Imul32RRI;
+ return NewLIR3(cu, opcode, r_dest_src1, r_dest_src1, value);
+ default:
+ LOG(FATAL) << "Bad case in OpRegImm " << op;
+ }
+ return NewLIR2(cu, opcode, r_dest_src1, value);
+}
+
+LIR* X86Codegen::OpRegReg(CompilationUnit *cu, OpKind op, int r_dest_src1, int r_src2)
+{
+ X86OpCode opcode = kX86Nop;
+ bool src2_must_be_cx = false;
+ switch (op) {
+ // X86 unary opcodes
+ case kOpMvn:
+ OpRegCopy(cu, r_dest_src1, r_src2);
+ return OpReg(cu, kOpNot, r_dest_src1);
+ case kOpNeg:
+ OpRegCopy(cu, r_dest_src1, r_src2);
+ return OpReg(cu, kOpNeg, r_dest_src1);
+ // X86 binary opcodes
+ case kOpSub: opcode = kX86Sub32RR; break;
+ case kOpSbc: opcode = kX86Sbb32RR; break;
+ case kOpLsl: opcode = kX86Sal32RC; src2_must_be_cx = true; break;
+ case kOpLsr: opcode = kX86Shr32RC; src2_must_be_cx = true; break;
+ case kOpAsr: opcode = kX86Sar32RC; src2_must_be_cx = true; break;
+ case kOpMov: opcode = kX86Mov32RR; break;
+ case kOpCmp: opcode = kX86Cmp32RR; break;
+ case kOpAdd: opcode = kX86Add32RR; break;
+ case kOpAdc: opcode = kX86Adc32RR; break;
+ case kOpAnd: opcode = kX86And32RR; break;
+ case kOpOr: opcode = kX86Or32RR; break;
+ case kOpXor: opcode = kX86Xor32RR; break;
+ case kOp2Byte:
+ // Use shifts instead of a byte operand if the source can't be byte accessed.
+ if (r_src2 >= 4) {
+ NewLIR2(cu, kX86Mov32RR, r_dest_src1, r_src2);
+ NewLIR2(cu, kX86Sal32RI, r_dest_src1, 24);
+ return NewLIR2(cu, kX86Sar32RI, r_dest_src1, 24);
+ } else {
+ opcode = kX86Movsx8RR;
+ }
+ break;
+ case kOp2Short: opcode = kX86Movsx16RR; break;
+ case kOp2Char: opcode = kX86Movzx16RR; break;
+ case kOpMul: opcode = kX86Imul32RR; break;
+ default:
+ LOG(FATAL) << "Bad case in OpRegReg " << op;
+ break;
+ }
+ CHECK(!src2_must_be_cx || r_src2 == rCX);
+ return NewLIR2(cu, opcode, r_dest_src1, r_src2);
+}
+
+LIR* X86Codegen::OpRegMem(CompilationUnit *cu, OpKind op, int r_dest, int rBase,
+ int offset)
+{
+ X86OpCode opcode = kX86Nop;
+ switch (op) {
+ // X86 binary opcodes
+ case kOpSub: opcode = kX86Sub32RM; break;
+ case kOpMov: opcode = kX86Mov32RM; break;
+ case kOpCmp: opcode = kX86Cmp32RM; break;
+ case kOpAdd: opcode = kX86Add32RM; break;
+ case kOpAnd: opcode = kX86And32RM; break;
+ case kOpOr: opcode = kX86Or32RM; break;
+ case kOpXor: opcode = kX86Xor32RM; break;
+ case kOp2Byte: opcode = kX86Movsx8RM; break;
+ case kOp2Short: opcode = kX86Movsx16RM; break;
+ case kOp2Char: opcode = kX86Movzx16RM; break;
+ case kOpMul:
+ default:
+ LOG(FATAL) << "Bad case in OpRegMem " << op;
+ break;
+ }
+ return NewLIR3(cu, opcode, r_dest, rBase, offset);
+}
+
+LIR* X86Codegen::OpRegRegReg(CompilationUnit *cu, OpKind op, int r_dest, int r_src1,
+ int r_src2)
+{
+ if (r_dest != r_src1 && r_dest != r_src2) {
+ if (op == kOpAdd) { // lea special case, except can't encode rbp as base
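+      // LEA computes base + index in a single non-destructive instruction; swap the operands when
+      // r_src1 is rBP, since rBP cannot serve as the base register here.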
+ if (r_src1 == r_src2) {
+ OpRegCopy(cu, r_dest, r_src1);
+ return OpRegImm(cu, kOpLsl, r_dest, 1);
+ } else if (r_src1 != rBP) {
+ return NewLIR5(cu, kX86Lea32RA, r_dest, r_src1 /* base */,
+ r_src2 /* index */, 0 /* scale */, 0 /* disp */);
+ } else {
+ return NewLIR5(cu, kX86Lea32RA, r_dest, r_src2 /* base */,
+ r_src1 /* index */, 0 /* scale */, 0 /* disp */);
+ }
+ } else {
+ OpRegCopy(cu, r_dest, r_src1);
+ return OpRegReg(cu, op, r_dest, r_src2);
+ }
+ } else if (r_dest == r_src1) {
+ return OpRegReg(cu, op, r_dest, r_src2);
+ } else { // r_dest == r_src2
+ switch (op) {
+ case kOpSub: // non-commutative
+ OpReg(cu, kOpNeg, r_dest);
+ op = kOpAdd;
+ break;
+ case kOpSbc:
+ case kOpLsl: case kOpLsr: case kOpAsr: case kOpRor: {
+ int t_reg = AllocTemp(cu);
+ OpRegCopy(cu, t_reg, r_src1);
+ OpRegReg(cu, op, t_reg, r_src2);
+ LIR* res = OpRegCopy(cu, r_dest, t_reg);
+ FreeTemp(cu, t_reg);
+ return res;
+ }
+ case kOpAdd: // commutative
+ case kOpOr:
+ case kOpAdc:
+ case kOpAnd:
+ case kOpXor:
+ break;
+ default:
+ LOG(FATAL) << "Bad case in OpRegRegReg " << op;
+ }
+ return OpRegReg(cu, op, r_dest, r_src1);
+ }
+}
+
+LIR* X86Codegen::OpRegRegImm(CompilationUnit *cu, OpKind op, int r_dest, int r_src,
+ int value)
+{
+ if (op == kOpMul) {
+ X86OpCode opcode = IS_SIMM8(value) ? kX86Imul32RRI8 : kX86Imul32RRI;
+ return NewLIR3(cu, opcode, r_dest, r_src, value);
+ } else if (op == kOpAnd) {
+ if (value == 0xFF && r_src < 4) {
+ return NewLIR2(cu, kX86Movzx8RR, r_dest, r_src);
+ } else if (value == 0xFFFF) {
+ return NewLIR2(cu, kX86Movzx16RR, r_dest, r_src);
+ }
+ }
+ if (r_dest != r_src) {
+ if (false && op == kOpLsl && value >= 0 && value <= 3) { // lea shift special case
+ // TODO: fix bug in LEA encoding when disp == 0
+ return NewLIR5(cu, kX86Lea32RA, r_dest, r5sib_no_base /* base */,
+ r_src /* index */, value /* scale */, 0 /* disp */);
+ } else if (op == kOpAdd) { // lea add special case
+ return NewLIR5(cu, kX86Lea32RA, r_dest, r_src /* base */,
+ r4sib_no_index /* index */, 0 /* scale */, value /* disp */);
+ }
+ OpRegCopy(cu, r_dest, r_src);
+ }
+ return OpRegImm(cu, op, r_dest, value);
+}
+
+LIR* X86Codegen::OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset)
+{
+ X86OpCode opcode = kX86Bkpt;
+ switch (op) {
+ case kOpBlx: opcode = kX86CallT; break;
+ default:
+ LOG(FATAL) << "Bad opcode: " << op;
+ break;
+ }
+ return NewLIR1(cu, opcode, thread_offset);
+}
+
+LIR* X86Codegen::OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp)
+{
+ X86OpCode opcode = kX86Bkpt;
+ switch (op) {
+ case kOpBlx: opcode = kX86CallM; break;
+ default:
+ LOG(FATAL) << "Bad opcode: " << op;
+ break;
+ }
+ return NewLIR2(cu, opcode, rBase, disp);
+}
+
+LIR* X86Codegen::LoadConstantWide(CompilationUnit *cu, int r_dest_lo, int r_dest_hi, int64_t value)
+{
+ int32_t val_lo = Low32Bits(value);
+ int32_t val_hi = High32Bits(value);
+ LIR *res;
+ if (X86_FPREG(r_dest_lo)) {
+ DCHECK(X86_FPREG(r_dest_hi)); // ignore r_dest_hi
+ if (value == 0) {
+ return NewLIR2(cu, kX86XorpsRR, r_dest_lo, r_dest_lo);
+ } else {
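+      // Build the 64-bit constant in the low xmm: load the low half, load the high half into the
+      // paired xmm, shift it up 32 bits, and OR the two together.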
+ if (val_lo == 0) {
+ res = NewLIR2(cu, kX86XorpsRR, r_dest_lo, r_dest_lo);
+ } else {
+ res = LoadConstantNoClobber(cu, r_dest_lo, val_lo);
+ }
+ if (val_hi != 0) {
+ LoadConstantNoClobber(cu, r_dest_hi, val_hi);
+ NewLIR2(cu, kX86PsllqRI, r_dest_hi, 32);
+ NewLIR2(cu, kX86OrpsRR, r_dest_lo, r_dest_hi);
+ }
+ }
+ } else {
+ res = LoadConstantNoClobber(cu, r_dest_lo, val_lo);
+ LoadConstantNoClobber(cu, r_dest_hi, val_hi);
+ }
+ return res;
+}
+
+LIR* X86Codegen::LoadBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
+ int displacement, int r_dest, int r_dest_hi, OpSize size,
+ int s_reg) {
+ LIR *load = NULL;
+ LIR *load2 = NULL;
+ bool is_array = r_index != INVALID_REG;
+ bool pair = false;
+ bool is64bit = false;
+ X86OpCode opcode = kX86Nop;
+ switch (size) {
+ case kLong:
+ case kDouble:
+ is64bit = true;
+ if (X86_FPREG(r_dest)) {
+ opcode = is_array ? kX86MovsdRA : kX86MovsdRM;
+ if (X86_SINGLEREG(r_dest)) {
+ DCHECK(X86_FPREG(r_dest_hi));
+ DCHECK_EQ(r_dest, (r_dest_hi - 1));
+ r_dest = S2d(r_dest, r_dest_hi);
+ }
+ r_dest_hi = r_dest + 1;
+ } else {
+ pair = true;
+ opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
+ }
+      // TODO: double load is from an unaligned address
+ DCHECK_EQ((displacement & 0x3), 0);
+ break;
+ case kWord:
+ case kSingle:
+ opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
+ if (X86_FPREG(r_dest)) {
+ opcode = is_array ? kX86MovssRA : kX86MovssRM;
+ DCHECK(X86_SINGLEREG(r_dest));
+ }
+ DCHECK_EQ((displacement & 0x3), 0);
+ break;
+ case kUnsignedHalf:
+ opcode = is_array ? kX86Movzx16RA : kX86Movzx16RM;
+ DCHECK_EQ((displacement & 0x1), 0);
+ break;
+ case kSignedHalf:
+ opcode = is_array ? kX86Movsx16RA : kX86Movsx16RM;
+ DCHECK_EQ((displacement & 0x1), 0);
+ break;
+ case kUnsignedByte:
+ opcode = is_array ? kX86Movzx8RA : kX86Movzx8RM;
+ break;
+ case kSignedByte:
+ opcode = is_array ? kX86Movsx8RA : kX86Movsx8RM;
+ break;
+ default:
+ LOG(FATAL) << "Bad case in LoadBaseIndexedDispBody";
+ }
+
+ if (!is_array) {
+ if (!pair) {
+ load = NewLIR3(cu, opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
+ } else {
+ if (rBase == r_dest) {
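+        // Load the high half first so the base register is not overwritten before the second load.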
+ load2 = NewLIR3(cu, opcode, r_dest_hi, rBase,
+ displacement + HIWORD_OFFSET);
+ load = NewLIR3(cu, opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
+ } else {
+ load = NewLIR3(cu, opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
+ load2 = NewLIR3(cu, opcode, r_dest_hi, rBase,
+ displacement + HIWORD_OFFSET);
+ }
+ }
+ if (rBase == rX86_SP) {
+ AnnotateDalvikRegAccess(cu, load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
+ true /* is_load */, is64bit);
+ if (pair) {
+ AnnotateDalvikRegAccess(cu, load2, (displacement + HIWORD_OFFSET) >> 2,
+ true /* is_load */, is64bit);
+ }
+ }
+ } else {
+ if (!pair) {
+ load = NewLIR5(cu, opcode, r_dest, rBase, r_index, scale,
+ displacement + LOWORD_OFFSET);
+ } else {
+ if (rBase == r_dest) {
+ load2 = NewLIR5(cu, opcode, r_dest_hi, rBase, r_index, scale,
+ displacement + HIWORD_OFFSET);
+ load = NewLIR5(cu, opcode, r_dest, rBase, r_index, scale,
+ displacement + LOWORD_OFFSET);
+ } else {
+ load = NewLIR5(cu, opcode, r_dest, rBase, r_index, scale,
+ displacement + LOWORD_OFFSET);
+ load2 = NewLIR5(cu, opcode, r_dest_hi, rBase, r_index, scale,
+ displacement + HIWORD_OFFSET);
+ }
+ }
+ }
+
+ return load;
+}
+
+/* Load value from base + scaled index. */
+LIR* X86Codegen::LoadBaseIndexed(CompilationUnit *cu, int rBase,
+ int r_index, int r_dest, int scale, OpSize size) {
+ return LoadBaseIndexedDisp(cu, rBase, r_index, scale, 0,
+ r_dest, INVALID_REG, size, INVALID_SREG);
+}
+
+LIR* X86Codegen::LoadBaseDisp(CompilationUnit *cu, int rBase, int displacement,
+ int r_dest, OpSize size, int s_reg) {
+ return LoadBaseIndexedDisp(cu, rBase, INVALID_REG, 0, displacement,
+ r_dest, INVALID_REG, size, s_reg);
+}
+
+LIR* X86Codegen::LoadBaseDispWide(CompilationUnit *cu, int rBase, int displacement,
+ int r_dest_lo, int r_dest_hi, int s_reg) {
+ return LoadBaseIndexedDisp(cu, rBase, INVALID_REG, 0, displacement,
+ r_dest_lo, r_dest_hi, kLong, s_reg);
+}
+
+LIR* X86Codegen::StoreBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
+ int displacement, int r_src, int r_src_hi, OpSize size,
+ int s_reg) {
+ LIR *store = NULL;
+ LIR *store2 = NULL;
+ bool is_array = r_index != INVALID_REG;
+ bool pair = false;
+ bool is64bit = false;
+ X86OpCode opcode = kX86Nop;
+ switch (size) {
+ case kLong:
+ case kDouble:
+ is64bit = true;
+ if (X86_FPREG(r_src)) {
+ opcode = is_array ? kX86MovsdAR : kX86MovsdMR;
+ if (X86_SINGLEREG(r_src)) {
+ DCHECK(X86_FPREG(r_src_hi));
+ DCHECK_EQ(r_src, (r_src_hi - 1));
+ r_src = S2d(r_src, r_src_hi);
+ }
+ r_src_hi = r_src + 1;
+ } else {
+ pair = true;
+ opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
+ }
+ // TODO: double store is to unaligned address
+ DCHECK_EQ((displacement & 0x3), 0);
+ break;
+ case kWord:
+ case kSingle:
+ opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
+ if (X86_FPREG(r_src)) {
+ opcode = is_array ? kX86MovssAR : kX86MovssMR;
+ DCHECK(X86_SINGLEREG(r_src));
+ }
+ DCHECK_EQ((displacement & 0x3), 0);
+ break;
+ case kUnsignedHalf:
+ case kSignedHalf:
+ opcode = is_array ? kX86Mov16AR : kX86Mov16MR;
+ DCHECK_EQ((displacement & 0x1), 0);
+ break;
+ case kUnsignedByte:
+ case kSignedByte:
+ opcode = is_array ? kX86Mov8AR : kX86Mov8MR;
+ break;
+ default:
+ LOG(FATAL) << "Bad case in LoadBaseIndexedDispBody";
+ }
+
+ if (!is_array) {
+ if (!pair) {
+ store = NewLIR3(cu, opcode, rBase, displacement + LOWORD_OFFSET, r_src);
+ } else {
+ store = NewLIR3(cu, opcode, rBase, displacement + LOWORD_OFFSET, r_src);
+ store2 = NewLIR3(cu, opcode, rBase, displacement + HIWORD_OFFSET, r_src_hi);
+ }
+ if (rBase == rX86_SP) {
+ AnnotateDalvikRegAccess(cu, store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
+ false /* is_load */, is64bit);
+ if (pair) {
+ AnnotateDalvikRegAccess(cu, store2, (displacement + HIWORD_OFFSET) >> 2,
+ false /* is_load */, is64bit);
+ }
+ }
+ } else {
+ if (!pair) {
+ store = NewLIR5(cu, opcode, rBase, r_index, scale,
+ displacement + LOWORD_OFFSET, r_src);
+ } else {
+ store = NewLIR5(cu, opcode, rBase, r_index, scale,
+ displacement + LOWORD_OFFSET, r_src);
+ store2 = NewLIR5(cu, opcode, rBase, r_index, scale,
+ displacement + HIWORD_OFFSET, r_src_hi);
+ }
+ }
+
+ return store;
+}
+
+/* Store value to base + scaled index. */
+LIR* X86Codegen::StoreBaseIndexed(CompilationUnit *cu, int rBase, int r_index, int r_src,
+ int scale, OpSize size)
+{
+ return StoreBaseIndexedDisp(cu, rBase, r_index, scale, 0,
+ r_src, INVALID_REG, size, INVALID_SREG);
+}
+
+LIR* X86Codegen::StoreBaseDisp(CompilationUnit *cu, int rBase, int displacement,
+ int r_src, OpSize size)
+{
+ return StoreBaseIndexedDisp(cu, rBase, INVALID_REG, 0,
+ displacement, r_src, INVALID_REG, size,
+ INVALID_SREG);
+}
+
+LIR* X86Codegen::StoreBaseDispWide(CompilationUnit *cu, int rBase, int displacement,
+ int r_src_lo, int r_src_hi)
+{
+ return StoreBaseIndexedDisp(cu, rBase, INVALID_REG, 0, displacement,
+ r_src_lo, r_src_hi, kLong, INVALID_SREG);
+}
+
+} // namespace art
diff --git a/src/compiler/dex/quick/x86/x86_lir.h b/src/compiler/dex/quick/x86/x86_lir.h
new file mode 100644
index 0000000..7e9e56e
--- /dev/null
+++ b/src/compiler/dex/quick/x86/x86_lir.h
@@ -0,0 +1,442 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_QUICK_X86_X86LIR_H_
+#define ART_SRC_COMPILER_DEX_QUICK_X86_X86LIR_H_
+
+#include "compiler/dex/compiler_internals.h"
+
+namespace art {
+
+/*
+ * Runtime register conventions. We consider x86, x86-64 and x32 (the ILP32 ABI of x86-64), although
+ * we currently only target x86. These ABIs have different conventions, and we hope to have a single
+ * convention to simplify code generation. Changing something that is callee save and making it
+ * caller save places a burden on up-calls to save/restore the callee save register; however, there
+ * are few registers that are callee save in the ABI. Changing something that is caller save and
+ * making it callee save places a burden on down-calls to save/restore the callee save register.
+ * For these reasons we aim to match the native conventions for caller and callee save. The first 4
+ * registers can be used for byte operations; for this reason they are preferred for temporary
+ * scratch registers.
+ *
+ * General Purpose Register:
+ * Native: x86 | x86-64 / x32 | ART
+ * r0/eax: caller save | caller save | caller, Method*, scratch, return value
+ * r1/ecx: caller save | caller save, arg4 | caller, arg1, scratch
+ * r2/edx: caller save | caller save, arg3 | caller, arg2, scratch, high half of long return
+ * r3/ebx: callEE save | callEE save | callER, arg3, scratch
+ * r4/esp: stack pointer
+ * r5/ebp: callee save | callee save | callee, available for dalvik register promotion
+ * r6/esi: callEE save | callER save, arg2 | callee, available for dalvik register promotion
+ * r7/edi: callEE save | callER save, arg1 | callee, available for dalvik register promotion
+ * --- x86-64/x32 registers
+ * Native: x86-64 / x32 | ART
+ * r8: caller save, arg5 | caller, scratch
+ * r9: caller save, arg6 | caller, scratch
+ * r10: caller save | caller, scratch
+ * r11: caller save | caller, scratch
+ * r12: callee save | callee, available for dalvik register promotion
+ * r13: callee save | callee, available for dalvik register promotion
+ * r14: callee save | callee, available for dalvik register promotion
+ * r15: callee save | callee, available for dalvik register promotion
+ *
+ * There is no rSELF, instead on x86 fs: has a base address of Thread::Current, whereas on
+ * x86-64/x32 gs: holds it.
+ *
+ * For floating point we only support CPUs with SSE2 (i.e., anything newer than the Pentium III):
+ * Native: x86 | x86-64 / x32 | ART
+ * XMM0: caller save |caller save, arg1 | caller, float/double return value (except for native x86 code)
+ * XMM1: caller save |caller save, arg2 | caller, scratch
+ * XMM2: caller save |caller save, arg3 | caller, scratch
+ * XMM3: caller save |caller save, arg4 | caller, scratch
+ * XMM4: caller save |caller save, arg5 | caller, scratch
+ * XMM5: caller save |caller save, arg6 | caller, scratch
+ * XMM6: caller save |caller save, arg7 | caller, scratch
+ * XMM7: caller save |caller save, arg8 | caller, scratch
+ * --- x86-64/x32 registers
+ * XMM8 .. 15: caller save
+ *
+ * X87 is a necessary evil outside of ART code:
+ * ST0: x86 float/double native return value, caller save
+ * ST1 .. ST7: caller save
+ *
+ * Stack frame diagram (stack grows down, higher addresses at top):
+ *
+ * +------------------------+
+ * | IN[ins-1] | {Note: resides in caller's frame}
+ * | . |
+ * | IN[0] |
+ * | caller's Method* |
+ * +========================+ {Note: start of callee's frame}
+ * | return address | {pushed by call}
+ * | spill region | {variable sized}
+ * +------------------------+
+ * | ...filler word... | {Note: used as 2nd word of V[locals-1] if long}
+ * +------------------------+
+ * | V[locals-1] |
+ * | V[locals-2] |
+ * | . |
+ * | . |
+ * | V[1] |
+ * | V[0] |
+ * +------------------------+
+ * | 0 to 3 words padding |
+ * +------------------------+
+ * | OUT[outs-1] |
+ * | OUT[outs-2] |
+ * | . |
+ * | OUT[0] |
+ * | cur_method* | <<== sp w/ 16-byte alignment
+ * +========================+
+ */
+
+// Offset to distinguish FP regs.
+#define X86_FP_REG_OFFSET 32
+// Offset to distinguish DP FP regs.
+#define X86_FP_DOUBLE (X86_FP_REG_OFFSET + 16)
+// Offset to distinguish the extra regs.
+#define X86_EXTRA_REG_OFFSET (X86_FP_DOUBLE + 16)
+// Reg types.
+#define X86_REGTYPE(x) (x & (X86_FP_REG_OFFSET | X86_FP_DOUBLE))
+#define X86_FPREG(x) ((x & X86_FP_REG_OFFSET) == X86_FP_REG_OFFSET)
+#define X86_EXTRAREG(x) ((x & X86_EXTRA_REG_OFFSET) == X86_EXTRA_REG_OFFSET)
+#define X86_DOUBLEREG(x) ((x & X86_FP_DOUBLE) == X86_FP_DOUBLE)
+#define X86_SINGLEREG(x) (X86_FPREG(x) && !X86_DOUBLEREG(x))
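+// For example, fr3 == 3 + X86_FP_REG_OFFSET, so X86_FPREG(fr3) holds while X86_DOUBLEREG(fr3) does not.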
+
+/*
+ * Note: the low register of a floating point pair is sufficient to
+ * create the name of a double, but we require both names to be passed so
+ * that asserts can verify the pair is consecutive if significant rework
+ * is done in this area. It is also a good reminder in the calling code
+ * that reg locations always describe doubles as a pair of singles.
+ */
+#define X86_S2D(x,y) ((x) | X86_FP_DOUBLE)
+/* Mask to strip off fp flags */
+#define X86_FP_REG_MASK 0xF
+
+// RegisterLocation templates return values (rAX, rAX/rDX or XMM0).
+// location, wide, defined, const, fp, core, ref, high_word, home, low_reg, high_reg, s_reg_low
+#define X86_LOC_C_RETURN {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, rAX, INVALID_REG, INVALID_SREG, INVALID_SREG}
+#define X86_LOC_C_RETURN_WIDE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, rAX, rDX, INVALID_SREG, INVALID_SREG}
+#define X86_LOC_C_RETURN_FLOAT {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1, fr0, INVALID_REG, INVALID_SREG, INVALID_SREG}
+#define X86_LOC_C_RETURN_DOUBLE {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1, fr0, fr1, INVALID_SREG, INVALID_SREG}
+
+enum X86ResourceEncodingPos {
+ kX86GPReg0 = 0,
+ kX86RegSP = 4,
+ kX86FPReg0 = 16, // xmm0 .. xmm7/xmm15.
+ kX86FPRegEnd = 32,
+ kX86RegEnd = kX86FPRegEnd,
+};
+
+#define ENCODE_X86_REG_LIST(N) (static_cast<uint64_t>(N))
+#define ENCODE_X86_REG_SP (1ULL << kX86RegSP)
+
+enum X86NativeRegisterPool {
+ r0 = 0,
+ rAX = r0,
+ r1 = 1,
+ rCX = r1,
+ r2 = 2,
+ rDX = r2,
+ r3 = 3,
+ rBX = r3,
+ r4sp = 4,
+ rX86_SP = r4sp,
+ r4sib_no_index = r4sp,
+ r5 = 5,
+ rBP = r5,
+ r5sib_no_base = r5,
+ r6 = 6,
+ rSI = r6,
+ r7 = 7,
+ rDI = r7,
+#ifndef TARGET_REX_SUPPORT
+ rRET = 8, // fake return address register for core spill mask.
+#else
+ r8 = 8,
+ r9 = 9,
+ r10 = 10,
+ r11 = 11,
+ r12 = 12,
+ r13 = 13,
+ r14 = 14,
+ r15 = 15,
+ rRET = 16, // fake return address register for core spill mask.
+#endif
+ fr0 = 0 + X86_FP_REG_OFFSET,
+ fr1 = 1 + X86_FP_REG_OFFSET,
+ fr2 = 2 + X86_FP_REG_OFFSET,
+ fr3 = 3 + X86_FP_REG_OFFSET,
+ fr4 = 4 + X86_FP_REG_OFFSET,
+ fr5 = 5 + X86_FP_REG_OFFSET,
+ fr6 = 6 + X86_FP_REG_OFFSET,
+ fr7 = 7 + X86_FP_REG_OFFSET,
+ fr8 = 8 + X86_FP_REG_OFFSET,
+ fr9 = 9 + X86_FP_REG_OFFSET,
+ fr10 = 10 + X86_FP_REG_OFFSET,
+ fr11 = 11 + X86_FP_REG_OFFSET,
+ fr12 = 12 + X86_FP_REG_OFFSET,
+ fr13 = 13 + X86_FP_REG_OFFSET,
+ fr14 = 14 + X86_FP_REG_OFFSET,
+ fr15 = 15 + X86_FP_REG_OFFSET,
+};
+
+#define rX86_ARG0 rAX
+#define rX86_ARG1 rCX
+#define rX86_ARG2 rDX
+#define rX86_ARG3 rBX
+#define rX86_FARG0 rAX
+#define rX86_FARG1 rCX
+#define rX86_FARG2 rDX
+#define rX86_FARG3 rBX
+#define rX86_RET0 rAX
+#define rX86_RET1 rDX
+#define rX86_INVOKE_TGT rAX
+#define rX86_LR INVALID_REG
+#define rX86_SUSPEND INVALID_REG
+#define rX86_SELF INVALID_REG
+#define rX86_COUNT rCX
+#define rX86_PC INVALID_REG
+
+/*
+ * The following enum defines the list of supported X86 instructions by the
+ * assembler. Their corresponding EncodingMap positions will be defined in
+ * Assemble.cc.
+ */
+enum X86OpCode {
+ kX86First = 0,
+ kX8632BitData = kX86First, // data [31..0].
+ kX86Bkpt,
+ kX86Nop,
+ // Define groups of binary operations
+ // MR - Memory Register - opcode [base + disp], reg
+ // - lir operands - 0: base, 1: disp, 2: reg
+ // AR - Array Register - opcode [base + index * scale + disp], reg
+ // - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
+ // TR - Thread Register - opcode fs:[disp], reg - where fs: is equal to Thread::Current()
+ // - lir operands - 0: disp, 1: reg
+ // RR - Register Register - opcode reg1, reg2
+ // - lir operands - 0: reg1, 1: reg2
+ // RM - Register Memory - opcode reg, [base + disp]
+ // - lir operands - 0: reg, 1: base, 2: disp
+ // RA - Register Array - opcode reg, [base + index * scale + disp]
+ // - lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp
+ // RT - Register Thread - opcode reg, fs:[disp] - where fs: is equal to Thread::Current()
+ // - lir operands - 0: reg, 1: disp
+ // RI - Register Immediate - opcode reg, #immediate
+ // - lir operands - 0: reg, 1: immediate
+ // MI - Memory Immediate - opcode [base + disp], #immediate
+ // - lir operands - 0: base, 1: disp, 2: immediate
+ // AI - Array Immediate - opcode [base + index * scale + disp], #immediate
+ // - lir operands - 0: base, 1: index, 2: scale, 3: disp 4: immediate
+  // TI - Thread Immediate - opcode fs:[disp], imm - where fs: is equal to Thread::Current()
+ // - lir operands - 0: disp, 1: imm
+#define BinaryOpCode(opcode) \
+ opcode ## 8MR, opcode ## 8AR, opcode ## 8TR, \
+ opcode ## 8RR, opcode ## 8RM, opcode ## 8RA, opcode ## 8RT, \
+ opcode ## 8RI, opcode ## 8MI, opcode ## 8AI, opcode ## 8TI, \
+ opcode ## 16MR, opcode ## 16AR, opcode ## 16TR, \
+ opcode ## 16RR, opcode ## 16RM, opcode ## 16RA, opcode ## 16RT, \
+ opcode ## 16RI, opcode ## 16MI, opcode ## 16AI, opcode ## 16TI, \
+ opcode ## 16RI8, opcode ## 16MI8, opcode ## 16AI8, opcode ## 16TI8, \
+ opcode ## 32MR, opcode ## 32AR, opcode ## 32TR, \
+ opcode ## 32RR, opcode ## 32RM, opcode ## 32RA, opcode ## 32RT, \
+ opcode ## 32RI, opcode ## 32MI, opcode ## 32AI, opcode ## 32TI, \
+ opcode ## 32RI8, opcode ## 32MI8, opcode ## 32AI8, opcode ## 32TI8
+ BinaryOpCode(kX86Add),
+ BinaryOpCode(kX86Or),
+ BinaryOpCode(kX86Adc),
+ BinaryOpCode(kX86Sbb),
+ BinaryOpCode(kX86And),
+ BinaryOpCode(kX86Sub),
+ BinaryOpCode(kX86Xor),
+ BinaryOpCode(kX86Cmp),
+#undef BinaryOpCode
+ kX86Imul16RRI, kX86Imul16RMI, kX86Imul16RAI,
+ kX86Imul32RRI, kX86Imul32RMI, kX86Imul32RAI,
+ kX86Imul32RRI8, kX86Imul32RMI8, kX86Imul32RAI8,
+ kX86Mov8MR, kX86Mov8AR, kX86Mov8TR,
+ kX86Mov8RR, kX86Mov8RM, kX86Mov8RA, kX86Mov8RT,
+ kX86Mov8RI, kX86Mov8MI, kX86Mov8AI, kX86Mov8TI,
+ kX86Mov16MR, kX86Mov16AR, kX86Mov16TR,
+ kX86Mov16RR, kX86Mov16RM, kX86Mov16RA, kX86Mov16RT,
+ kX86Mov16RI, kX86Mov16MI, kX86Mov16AI, kX86Mov16TI,
+ kX86Mov32MR, kX86Mov32AR, kX86Mov32TR,
+ kX86Mov32RR, kX86Mov32RM, kX86Mov32RA, kX86Mov32RT,
+ kX86Mov32RI, kX86Mov32MI, kX86Mov32AI, kX86Mov32TI,
+ kX86Lea32RA,
+ // RC - Register CL - opcode reg, CL
+ // - lir operands - 0: reg, 1: CL
+ // MC - Memory CL - opcode [base + disp], CL
+ // - lir operands - 0: base, 1: disp, 2: CL
+ // AC - Array CL - opcode [base + index * scale + disp], CL
+ // - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: CL
+#define BinaryShiftOpCode(opcode) \
+ opcode ## 8RI, opcode ## 8MI, opcode ## 8AI, \
+ opcode ## 8RC, opcode ## 8MC, opcode ## 8AC, \
+ opcode ## 16RI, opcode ## 16MI, opcode ## 16AI, \
+ opcode ## 16RC, opcode ## 16MC, opcode ## 16AC, \
+ opcode ## 32RI, opcode ## 32MI, opcode ## 32AI, \
+ opcode ## 32RC, opcode ## 32MC, opcode ## 32AC
+ BinaryShiftOpCode(kX86Rol),
+ BinaryShiftOpCode(kX86Ror),
+ BinaryShiftOpCode(kX86Rcl),
+ BinaryShiftOpCode(kX86Rcr),
+ BinaryShiftOpCode(kX86Sal),
+ BinaryShiftOpCode(kX86Shr),
+ BinaryShiftOpCode(kX86Sar),
+#undef BinaryShiftOpCode
+ kX86Cmc,
+#define UnaryOpcode(opcode, reg, mem, array) \
+ opcode ## 8 ## reg, opcode ## 8 ## mem, opcode ## 8 ## array, \
+ opcode ## 16 ## reg, opcode ## 16 ## mem, opcode ## 16 ## array, \
+ opcode ## 32 ## reg, opcode ## 32 ## mem, opcode ## 32 ## array
+ UnaryOpcode(kX86Test, RI, MI, AI),
+ kX86Test32RR,
+ UnaryOpcode(kX86Not, R, M, A),
+ UnaryOpcode(kX86Neg, R, M, A),
+ UnaryOpcode(kX86Mul, DaR, DaM, DaA),
+ UnaryOpcode(kX86Imul, DaR, DaM, DaA),
+ UnaryOpcode(kX86Divmod, DaR, DaM, DaA),
+ UnaryOpcode(kX86Idivmod, DaR, DaM, DaA),
+#undef UnaryOpcode
+#define Binary0fOpCode(opcode) \
+ opcode ## RR, opcode ## RM, opcode ## RA
+ Binary0fOpCode(kX86Movsd),
+ kX86MovsdMR,
+ kX86MovsdAR,
+ Binary0fOpCode(kX86Movss),
+ kX86MovssMR,
+ kX86MovssAR,
+ Binary0fOpCode(kX86Cvtsi2sd), // int to double
+ Binary0fOpCode(kX86Cvtsi2ss), // int to float
+ Binary0fOpCode(kX86Cvttsd2si), // truncating double to int
+ Binary0fOpCode(kX86Cvttss2si), // truncating float to int
+ Binary0fOpCode(kX86Cvtsd2si), // rounding double to int
+ Binary0fOpCode(kX86Cvtss2si), // rounding float to int
+ Binary0fOpCode(kX86Ucomisd), // unordered double compare
+ Binary0fOpCode(kX86Ucomiss), // unordered float compare
+ Binary0fOpCode(kX86Comisd), // double compare
+ Binary0fOpCode(kX86Comiss), // float compare
+ Binary0fOpCode(kX86Orps), // or of floating point registers
+ Binary0fOpCode(kX86Xorps), // xor of floating point registers
+ Binary0fOpCode(kX86Addsd), // double add
+ Binary0fOpCode(kX86Addss), // float add
+ Binary0fOpCode(kX86Mulsd), // double multiply
+ Binary0fOpCode(kX86Mulss), // float multiply
+ Binary0fOpCode(kX86Cvtsd2ss), // double to float
+ Binary0fOpCode(kX86Cvtss2sd), // float to double
+ Binary0fOpCode(kX86Subsd), // double subtract
+ Binary0fOpCode(kX86Subss), // float subtract
+ Binary0fOpCode(kX86Divsd), // double divide
+ Binary0fOpCode(kX86Divss), // float divide
+ kX86PsrlqRI, // right shift of floating point registers
+ kX86PsllqRI, // left shift of floating point registers
+ Binary0fOpCode(kX86Movdxr), // move into xmm from gpr
+ kX86MovdrxRR, kX86MovdrxMR, kX86MovdrxAR, // move into reg from xmm
+ kX86Set8R, kX86Set8M, kX86Set8A, // set byte depending on condition operand
+ kX86Mfence, // memory barrier
+ Binary0fOpCode(kX86Imul16), // 16bit multiply
+ Binary0fOpCode(kX86Imul32), // 32bit multiply
+ kX86CmpxchgRR, kX86CmpxchgMR, kX86CmpxchgAR, // compare and exchange
+ kX86LockCmpxchgRR, kX86LockCmpxchgMR, kX86LockCmpxchgAR, // locked compare and exchange
+ Binary0fOpCode(kX86Movzx8), // zero-extend 8-bit value
+ Binary0fOpCode(kX86Movzx16), // zero-extend 16-bit value
+ Binary0fOpCode(kX86Movsx8), // sign-extend 8-bit value
+ Binary0fOpCode(kX86Movsx16), // sign-extend 16-bit value
+#undef Binary0fOpCode
+ kX86Jcc8, kX86Jcc32, // jCC rel8/32; lir operands - 0: rel, 1: CC, target assigned
+ kX86Jmp8, kX86Jmp32, // jmp rel8/32; lir operands - 0: rel, target assigned
+ kX86JmpR, // jmp reg; lir operands - 0: reg
+ kX86CallR, // call reg; lir operands - 0: reg
+ kX86CallM, // call [base + disp]; lir operands - 0: base, 1: disp
+ kX86CallA, // call [base + index * scale + disp]
+ // lir operands - 0: base, 1: index, 2: scale, 3: disp
+ kX86CallT, // call fs:[disp]; fs: is equal to Thread::Current(); lir operands - 0: disp
+ kX86Ret, // ret; no lir operands
+ kX86StartOfMethod, // call 0; pop reg; sub reg, # - generate start of method into reg
+ // lir operands - 0: reg
+ kX86PcRelLoadRA, // mov reg, [base + index * scale + PC relative displacement]
+ // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: table
+ kX86PcRelAdr, // mov reg, PC relative displacement; lir operands - 0: reg, 1: table
+ kX86Last
+};
+
+/* Instruction assembly field_loc kind */
+enum X86EncodingKind {
+ kData, // Special case for raw data.
+ kNop, // Special case for variable length nop.
+ kNullary, // Opcode that takes no arguments.
+ kReg, kMem, kArray, // R, M and A instruction kinds.
+ kMemReg, kArrayReg, kThreadReg, // MR, AR and TR instruction kinds.
+ kRegReg, kRegMem, kRegArray, kRegThread, // RR, RM, RA and RT instruction kinds.
+ kRegRegStore, // RR following the store modrm reg-reg encoding rather than the load.
+ kRegImm, kMemImm, kArrayImm, kThreadImm, // RI, MI, AI and TI instruction kinds.
+ kRegRegImm, kRegMemImm, kRegArrayImm, // RRI, RMI and RAI instruction kinds.
+ kMovRegImm, // Shorter form move RI.
+ kShiftRegImm, kShiftMemImm, kShiftArrayImm, // Shift opcode with immediate.
+ kShiftRegCl, kShiftMemCl, kShiftArrayCl, // Shift opcode with register CL.
+ kRegRegReg, kRegRegMem, kRegRegArray, // RRR, RRM, RRA instruction kinds.
+ kRegCond, kMemCond, kArrayCond, // R, M, A instruction kinds followed by a condition.
+ kJmp, kJcc, kCall, // Branch instruction kinds.
+ kPcRel, // Operation with a PC-relative displacement.
+ kMacro, // An instruction composing multiple others.
+ kUnimplemented // Encoding used when an instruction isn't yet implemented.
+};
+
+/* Struct used to define the EncodingMap positions for each X86 opcode */
+struct X86EncodingMap {
+ X86OpCode opcode; // e.g. kX86Add32RI
+ X86EncodingKind kind; // Used to discriminate how the encoding skeleton below is interpreted.
+ uint64_t flags;
+ struct {
+ uint8_t prefix1; // non-zero => a prefix byte
+ uint8_t prefix2; // non-zero => a second prefix byte
+ uint8_t opcode; // 1 byte opcode
+ uint8_t extra_opcode1; // possible extra opcode byte
+ uint8_t extra_opcode2; // possible second extra opcode byte
+ // 3-bit opcode that gets encoded in the register bits of the modrm byte; its use is
+ // determined by the encoding kind.
+ uint8_t modrm_opcode;
+ uint8_t ax_opcode; // non-zero => shorter encoding for AX as a destination
+ uint8_t immediate_bytes; // number of bytes of immediate
+ } skeleton;
+ const char* name;
+ const char* fmt;
+};
+
+
+// FIXME: mem barrier type - what do we do for x86?
+#define kSY 0
+#define kST 0
+
+// Offsets of high and low halves of a 64bit value.
+#define LOWORD_OFFSET 0
+#define HIWORD_OFFSET 4
+
+// Segment override instruction prefix used for quick TLS access to Thread::Current().
+#define THREAD_PREFIX 0x64
+
+#define IS_SIMM8(v) ((-128 <= (v)) && ((v) <= 127))
+#define IS_SIMM16(v) ((-32768 <= (v)) && ((v) <= 32767))
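+// Editor's note: IS_SIMM8(127) and IS_SIMM8(-128) hold, while IS_SIMM8(128) does not, so a
+// value of 128 cannot be encoded as a signed 8-bit immediate.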
+
+extern X86EncodingMap EncodingMap[kX86Last];
+extern X86ConditionCode X86ConditionEncoding(ConditionCode cond);
+
+} // namespace art
+
+#endif // ART_SRC_COMPILER_DEX_QUICK_X86_X86LIR_H_
diff --git a/src/compiler/dex/ralloc.cc b/src/compiler/dex/ralloc.cc
new file mode 100644
index 0000000..9163cd9
--- /dev/null
+++ b/src/compiler/dex/ralloc.cc
@@ -0,0 +1,533 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compiler_internals.h"
+#include "dataflow.h"
+#include "quick/ralloc_util.h"
+
+namespace art {
+
+static bool SetFp(CompilationUnit* cu, int index, bool is_fp) {
+ bool change = false;
+ if (is_fp && !cu->reg_location[index].fp) {
+ cu->reg_location[index].fp = true;
+ cu->reg_location[index].defined = true;
+ change = true;
+ }
+ return change;
+}
+
+static bool SetCore(CompilationUnit* cu, int index, bool is_core) {
+ bool change = false;
+ if (is_core && !cu->reg_location[index].defined) {
+ cu->reg_location[index].core = true;
+ cu->reg_location[index].defined = true;
+ change = true;
+ }
+ return change;
+}
+
+static bool SetRef(CompilationUnit* cu, int index, bool is_ref) {
+ bool change = false;
+ if (is_ref && !cu->reg_location[index].defined) {
+ cu->reg_location[index].ref = true;
+ cu->reg_location[index].defined = true;
+ change = true;
+ }
+ return change;
+}
+
+static bool SetWide(CompilationUnit* cu, int index, bool is_wide) {
+ bool change = false;
+ if (is_wide && !cu->reg_location[index].wide) {
+ cu->reg_location[index].wide = true;
+ change = true;
+ }
+ return change;
+}
+
+static bool SetHigh(CompilationUnit* cu, int index, bool is_high) {
+ bool change = false;
+ if (is_high && !cu->reg_location[index].high_word) {
+ cu->reg_location[index].high_word = true;
+ change = true;
+ }
+ return change;
+}
+
+/*
+ * Infer types and sizes. We don't need to track changes in size,
+ * as they don't propagate. We're guaranteed at least one pass through
+ * the CFG.
+ */
+static bool InferTypeAndSize(CompilationUnit* cu, BasicBlock* bb)
+{
+ MIR *mir;
+ bool changed = false; // Did anything change?
+
+ if (bb->data_flow_info == NULL) return false;
+ if (bb->block_type != kDalvikByteCode && bb->block_type != kEntryBlock)
+ return false;
+
+ for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ SSARepresentation *ssa_rep = mir->ssa_rep;
+ if (ssa_rep) {
+ int attrs = oat_data_flow_attributes[mir->dalvikInsn.opcode];
+
+ // Handle defs
+ if (attrs & DF_DA) {
+ if (attrs & DF_CORE_A) {
+ changed |= SetCore(cu, ssa_rep->defs[0], true);
+ }
+ if (attrs & DF_REF_A) {
+ changed |= SetRef(cu, ssa_rep->defs[0], true);
+ }
+ if (attrs & DF_A_WIDE) {
+ cu->reg_location[ssa_rep->defs[0]].wide = true;
+ cu->reg_location[ssa_rep->defs[1]].wide = true;
+ cu->reg_location[ssa_rep->defs[1]].high_word = true;
+ DCHECK_EQ(SRegToVReg(cu, ssa_rep->defs[0])+1,
+ SRegToVReg(cu, ssa_rep->defs[1]));
+ }
+ }
+
+ // Handle uses
+ int next = 0;
+ if (attrs & DF_UA) {
+ if (attrs & DF_CORE_A) {
+ changed |= SetCore(cu, ssa_rep->uses[next], true);
+ }
+ if (attrs & DF_REF_A) {
+ changed |= SetRef(cu, ssa_rep->uses[next], true);
+ }
+ if (attrs & DF_A_WIDE) {
+ cu->reg_location[ssa_rep->uses[next]].wide = true;
+ cu->reg_location[ssa_rep->uses[next + 1]].wide = true;
+ cu->reg_location[ssa_rep->uses[next + 1]].high_word = true;
+ DCHECK_EQ(SRegToVReg(cu, ssa_rep->uses[next])+1,
+ SRegToVReg(cu, ssa_rep->uses[next + 1]));
+ next += 2;
+ } else {
+ next++;
+ }
+ }
+ if (attrs & DF_UB) {
+ if (attrs & DF_CORE_B) {
+ changed |= SetCore(cu, ssa_rep->uses[next], true);
+ }
+ if (attrs & DF_REF_B) {
+ changed |= SetRef(cu, ssa_rep->uses[next], true);
+ }
+ if (attrs & DF_B_WIDE) {
+ cu->reg_location[ssa_rep->uses[next]].wide = true;
+ cu->reg_location[ssa_rep->uses[next + 1]].wide = true;
+ cu->reg_location[ssa_rep->uses[next + 1]].high_word = true;
+ DCHECK_EQ(SRegToVReg(cu, ssa_rep->uses[next])+1,
+ SRegToVReg(cu, ssa_rep->uses[next + 1]));
+ next += 2;
+ } else {
+ next++;
+ }
+ }
+ if (attrs & DF_UC) {
+ if (attrs & DF_CORE_C) {
+ changed |= SetCore(cu, ssa_rep->uses[next], true);
+ }
+ if (attrs & DF_REF_C) {
+ changed |= SetRef(cu, ssa_rep->uses[next], true);
+ }
+ if (attrs & DF_C_WIDE) {
+ cu->reg_location[ssa_rep->uses[next]].wide = true;
+ cu->reg_location[ssa_rep->uses[next + 1]].wide = true;
+ cu->reg_location[ssa_rep->uses[next + 1]].high_word = true;
+ DCHECK_EQ(SRegToVReg(cu, ssa_rep->uses[next])+1,
+ SRegToVReg(cu, ssa_rep->uses[next + 1]));
+ }
+ }
+
+ // Special-case return handling
+ if ((mir->dalvikInsn.opcode == Instruction::RETURN) ||
+ (mir->dalvikInsn.opcode == Instruction::RETURN_WIDE) ||
+ (mir->dalvikInsn.opcode == Instruction::RETURN_OBJECT)) {
+ switch (cu->shorty[0]) {
+ case 'I':
+ changed |= SetCore(cu, ssa_rep->uses[0], true);
+ break;
+ case 'J':
+ changed |= SetCore(cu, ssa_rep->uses[0], true);
+ changed |= SetCore(cu, ssa_rep->uses[1], true);
+ cu->reg_location[ssa_rep->uses[0]].wide = true;
+ cu->reg_location[ssa_rep->uses[1]].wide = true;
+ cu->reg_location[ssa_rep->uses[1]].high_word = true;
+ break;
+ case 'F':
+ changed |= SetFp(cu, ssa_rep->uses[0], true);
+ break;
+ case 'D':
+ changed |= SetFp(cu, ssa_rep->uses[0], true);
+ changed |= SetFp(cu, ssa_rep->uses[1], true);
+ cu->reg_location[ssa_rep->uses[0]].wide = true;
+ cu->reg_location[ssa_rep->uses[1]].wide = true;
+ cu->reg_location[ssa_rep->uses[1]].high_word = true;
+ break;
+ case 'L':
+ changed |= SetRef(cu, ssa_rep->uses[0], true);
+ break;
+ default: break;
+ }
+ }
+
+ // Special-case handling for format 35c/3rc invokes
+ Instruction::Code opcode = mir->dalvikInsn.opcode;
+ int flags = (static_cast<int>(opcode) >= kNumPackedOpcodes)
+ ? 0 : Instruction::FlagsOf(mir->dalvikInsn.opcode);
+ if ((flags & Instruction::kInvoke) &&
+ (attrs & (DF_FORMAT_35C | DF_FORMAT_3RC))) {
+ DCHECK_EQ(next, 0);
+ int target_idx = mir->dalvikInsn.vB;
+ const char* shorty = GetShortyFromTargetIdx(cu, target_idx);
+ // Handle result type if floating point
+ if ((shorty[0] == 'F') || (shorty[0] == 'D')) {
+ MIR* move_result_mir = FindMoveResult(cu, bb, mir);
+ // Result might not be used at all, so no move-result
+ if (move_result_mir && (move_result_mir->dalvikInsn.opcode !=
+ Instruction::MOVE_RESULT_OBJECT)) {
+ SSARepresentation* tgt_rep = move_result_mir->ssa_rep;
+ DCHECK(tgt_rep != NULL);
+ tgt_rep->fp_def[0] = true;
+ changed |= SetFp(cu, tgt_rep->defs[0], true);
+ if (shorty[0] == 'D') {
+ tgt_rep->fp_def[1] = true;
+ changed |= SetFp(cu, tgt_rep->defs[1], true);
+ }
+ }
+ }
+ int num_uses = mir->dalvikInsn.vA;
+ // If this is a non-static invoke, mark implicit "this"
+ if (((mir->dalvikInsn.opcode != Instruction::INVOKE_STATIC) &&
+ (mir->dalvikInsn.opcode != Instruction::INVOKE_STATIC_RANGE))) {
+ cu->reg_location[ssa_rep->uses[next]].defined = true;
+ cu->reg_location[ssa_rep->uses[next]].ref = true;
+ next++;
+ }
+ uint32_t cpos = 1;
+ if (strlen(shorty) > 1) {
+ for (int i = next; i < num_uses;) {
+ DCHECK_LT(cpos, strlen(shorty));
+ switch (shorty[cpos++]) {
+ case 'D':
+ ssa_rep->fp_use[i] = true;
+ ssa_rep->fp_use[i+1] = true;
+ cu->reg_location[ssa_rep->uses[i]].wide = true;
+ cu->reg_location[ssa_rep->uses[i+1]].wide = true;
+ cu->reg_location[ssa_rep->uses[i+1]].high_word = true;
+ DCHECK_EQ(SRegToVReg(cu, ssa_rep->uses[i])+1,
+ SRegToVReg(cu, ssa_rep->uses[i+1]));
+ i++;
+ break;
+ case 'J':
+ cu->reg_location[ssa_rep->uses[i]].wide = true;
+ cu->reg_location[ssa_rep->uses[i+1]].wide = true;
+ cu->reg_location[ssa_rep->uses[i+1]].high_word = true;
+ DCHECK_EQ(SRegToVReg(cu, ssa_rep->uses[i])+1,
+ SRegToVReg(cu, ssa_rep->uses[i+1]));
+ changed |= SetCore(cu, ssa_rep->uses[i], true);
+ i++;
+ break;
+ case 'F':
+ ssa_rep->fp_use[i] = true;
+ break;
+ case 'L':
+ changed |= SetRef(cu, ssa_rep->uses[i], true);
+ break;
+ default:
+ changed |= SetCore(cu, ssa_rep->uses[i], true);
+ break;
+ }
+ i++;
+ }
+ }
+ }
+
+ for (int i = 0; ssa_rep->fp_use && i < ssa_rep->num_uses; i++) {
+ if (ssa_rep->fp_use[i])
+ changed |= SetFp(cu, ssa_rep->uses[i], true);
+ }
+ for (int i = 0; ssa_rep->fp_def && i < ssa_rep->num_defs; i++) {
+ if (ssa_rep->fp_def[i])
+ changed |= SetFp(cu, ssa_rep->defs[i], true);
+ }
+ // Special-case handling for moves & Phi
+ if (attrs & (DF_IS_MOVE | DF_NULL_TRANSFER_N)) {
+ /*
+ * If any of our inputs or outputs is defined, set all.
+ * Some ugliness related to Phi nodes and wide values.
+ * The Phi set will include all low words or all high
+ * words, so we have to treat them specially.
+ */
+ bool is_phi = (static_cast<int>(mir->dalvikInsn.opcode) ==
+ kMirOpPhi);
+ RegLocation rl_temp = cu->reg_location[ssa_rep->defs[0]];
+ bool defined_fp = rl_temp.defined && rl_temp.fp;
+ bool defined_core = rl_temp.defined && rl_temp.core;
+ bool defined_ref = rl_temp.defined && rl_temp.ref;
+ bool is_wide = rl_temp.wide || ((attrs & DF_A_WIDE) != 0);
+ bool is_high = is_phi && rl_temp.wide && rl_temp.high_word;
+ for (int i = 0; i < ssa_rep->num_uses; i++) {
+ rl_temp = cu->reg_location[ssa_rep->uses[i]];
+ defined_fp |= rl_temp.defined && rl_temp.fp;
+ defined_core |= rl_temp.defined && rl_temp.core;
+ defined_ref |= rl_temp.defined && rl_temp.ref;
+ is_wide |= rl_temp.wide;
+ is_high |= is_phi && rl_temp.wide && rl_temp.high_word;
+ }
+ /*
+ * TODO: cleaner fix
+ * We don't normally expect to see a Dalvik register
+ * definition used both as a floating point and core
+ * value. However, the instruction rewriting that occurs
+ * during verification can eliminate some type information,
+ * leaving us confused. The real fix here is either to
+ * add explicit type information to Dalvik byte codes,
+ * or to recognize THROW_VERIFICATION_ERROR as
+ * an unconditional branch and support dead code elimination.
+ * As a workaround we can detect this situation and
+ * disable register promotion (which is the only thing that
+ * relies on distinctions between core and fp usages).
+ */
+ if ((defined_fp && (defined_core | defined_ref)) &&
+ ((cu->disable_opt & (1 << kPromoteRegs)) == 0)) {
+ LOG(WARNING) << PrettyMethod(cu->method_idx, *cu->dex_file)
+ << " op at block " << bb->id
+ << " has both fp and core/ref uses for same def.";
+ cu->disable_opt |= (1 << kPromoteRegs);
+ }
+ changed |= SetFp(cu, ssa_rep->defs[0], defined_fp);
+ changed |= SetCore(cu, ssa_rep->defs[0], defined_core);
+ changed |= SetRef(cu, ssa_rep->defs[0], defined_ref);
+ changed |= SetWide(cu, ssa_rep->defs[0], is_wide);
+ changed |= SetHigh(cu, ssa_rep->defs[0], is_high);
+ if (attrs & DF_A_WIDE) {
+ changed |= SetWide(cu, ssa_rep->defs[1], true);
+ changed |= SetHigh(cu, ssa_rep->defs[1], true);
+ }
+ for (int i = 0; i < ssa_rep->num_uses; i++) {
+ changed |= SetFp(cu, ssa_rep->uses[i], defined_fp);
+ changed |= SetCore(cu, ssa_rep->uses[i], defined_core);
+ changed |= SetRef(cu, ssa_rep->uses[i], defined_ref);
+ changed |= SetWide(cu, ssa_rep->uses[i], is_wide);
+ changed |= SetHigh(cu, ssa_rep->uses[i], is_high);
+ }
+ if (attrs & DF_A_WIDE) {
+ DCHECK_EQ(ssa_rep->num_uses, 2);
+ changed |= SetWide(cu, ssa_rep->uses[1], true);
+ changed |= SetHigh(cu, ssa_rep->uses[1], true);
+ }
+ }
+ }
+ }
+ return changed;
+}
+
+static const char* storage_name[] = {" Frame ", "PhysReg", " Spill "};
+
+static void DumpRegLocTable(CompilationUnit* cu, RegLocation* table, int count)
+{
+ Codegen* cg = cu->cg.get();
+ for (int i = 0; i < count; i++) {
+ LOG(INFO) << StringPrintf("Loc[%02d] : %s, %c %c %c %c %c %c %c%d %c%d S%d",
+ table[i].orig_sreg, storage_name[table[i].location],
+ table[i].wide ? 'W' : 'N', table[i].defined ? 'D' : 'U',
+ table[i].fp ? 'F' : table[i].ref ? 'R' : 'C',
+ table[i].is_const ? 'c' : 'n',
+ table[i].high_word ? 'H' : 'L', table[i].home ? 'h' : 't',
+ cg->IsFpReg(table[i].low_reg) ? 's' : 'r',
+ table[i].low_reg & cg->FpRegMask(),
+ cg->IsFpReg(table[i].high_reg) ? 's' : 'r',
+ table[i].high_reg & cg->FpRegMask(), table[i].s_reg_low);
+ }
+}
+
+static const RegLocation fresh_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0,
+ INVALID_REG, INVALID_REG, INVALID_SREG,
+ INVALID_SREG};
+
+int ComputeFrameSize(CompilationUnit* cu) {
+ /* Figure out the frame size */
+ static const uint32_t kAlignMask = kStackAlignment - 1;
+ uint32_t size = (cu->num_core_spills + cu->num_fp_spills +
+ 1 /* filler word */ + cu->num_regs + cu->num_outs +
+ cu->num_compiler_temps + 1 /* cur_method* */)
+ * sizeof(uint32_t);
+ /* Align and set */
+ return (size + kAlignMask) & ~(kAlignMask);
+}
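+// Worked example (editor's illustration with hypothetical counts): 2 core spills, 1 fp spill,
+// 5 Dalvik registers, 3 outs and no compiler temps give a raw size of
+// (2 + 1 + 1 + 5 + 3 + 0 + 1) * sizeof(uint32_t) = 52 bytes; if kStackAlignment were 16,
+// the (size + kAlignMask) & ~kAlignMask step would round this up to 64.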
+
+/*
+ * Simple register allocation. Some Dalvik virtual registers may
+ * be promoted to physical registers. Most of the work for temp
+ * allocation is done on the fly. We also do some initialization and
+ * type inference here.
+ */
+void SimpleRegAlloc(CompilationUnit* cu)
+{
+ int i;
+ RegLocation* loc;
+
+ /* Allocate the location map */
+ loc = static_cast<RegLocation*>(NewMem(cu, cu->num_ssa_regs * sizeof(*loc),
+ true, kAllocRegAlloc));
+ for (i = 0; i < cu->num_ssa_regs; i++) {
+ loc[i] = fresh_loc;
+ loc[i].s_reg_low = i;
+ loc[i].is_const = IsBitSet(cu->is_constant_v, i);
+ }
+
+ /* Patch up the locations for Method* and the compiler temps */
+ loc[cu->method_sreg].location = kLocCompilerTemp;
+ loc[cu->method_sreg].defined = true;
+ for (i = 0; i < cu->num_compiler_temps; i++) {
+ CompilerTemp* ct = reinterpret_cast<CompilerTemp*>(cu->compiler_temps.elem_list[i]);
+ loc[ct->s_reg].location = kLocCompilerTemp;
+ loc[ct->s_reg].defined = true;
+ }
+
+ cu->reg_location = loc;
+
+ /* Allocate the promotion map */
+ int num_regs = cu->num_dalvik_registers;
+ cu->promotion_map = static_cast<PromotionMap*>
+ (NewMem(cu, (num_regs + cu->num_compiler_temps + 1) * sizeof(cu->promotion_map[0]),
+ true, kAllocRegAlloc));
+
+ /* Add types of incoming arguments based on signature */
+ int num_ins = cu->num_ins;
+ if (num_ins > 0) {
+ int s_reg = num_regs - num_ins;
+ if ((cu->access_flags & kAccStatic) == 0) {
+ // For non-static, skip past "this"
+ cu->reg_location[s_reg].defined = true;
+ cu->reg_location[s_reg].ref = true;
+ s_reg++;
+ }
+ const char* shorty = cu->shorty;
+ int shorty_len = strlen(shorty);
+ for (int i = 1; i < shorty_len; i++) {
+ switch (shorty[i]) {
+ case 'D':
+ cu->reg_location[s_reg].wide = true;
+ cu->reg_location[s_reg+1].high_word = true;
+ cu->reg_location[s_reg+1].fp = true;
+ DCHECK_EQ(SRegToVReg(cu, s_reg)+1, SRegToVReg(cu, s_reg+1));
+ cu->reg_location[s_reg].fp = true;
+ cu->reg_location[s_reg].defined = true;
+ s_reg++;
+ break;
+ case 'J':
+ cu->reg_location[s_reg].wide = true;
+ cu->reg_location[s_reg+1].high_word = true;
+ DCHECK_EQ(SRegToVReg(cu, s_reg)+1, SRegToVReg(cu, s_reg+1));
+ cu->reg_location[s_reg].core = true;
+ cu->reg_location[s_reg].defined = true;
+ s_reg++;
+ break;
+ case 'F':
+ cu->reg_location[s_reg].fp = true;
+ cu->reg_location[s_reg].defined = true;
+ break;
+ case 'L':
+ cu->reg_location[s_reg].ref = true;
+ cu->reg_location[s_reg].defined = true;
+ break;
+ default:
+ cu->reg_location[s_reg].core = true;
+ cu->reg_location[s_reg].defined = true;
+ break;
+ }
+ s_reg++;
+ }
+ }
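+ /*
+ * Editor's illustration (hypothetical shorty): for a virtual method with shorty "VJL"
+ * (void return, then a long and a reference argument), the loop above marks the implicit
+ * "this" as a defined reference, the long's low s_reg as a defined wide core value with the
+ * next s_reg as its high word, and the final in register as a defined reference.
+ */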
+
+ /* Do type & size inference pass */
+ DataFlowAnalysisDispatcher(cu, InferTypeAndSize,
+ kPreOrderDFSTraversal,
+ true /* is_iterative */);
+
+ /*
+ * Set the s_reg_low field to refer to the pre-SSA name of the
+ * base Dalvik virtual register. Once we add a better register
+ * allocator, remove this remapping.
+ */
+ for (i = 0; i < cu->num_ssa_regs; i++) {
+ if (cu->reg_location[i].location != kLocCompilerTemp) {
+ int orig_sreg = cu->reg_location[i].s_reg_low;
+ cu->reg_location[i].orig_sreg = orig_sreg;
+ cu->reg_location[i].s_reg_low = SRegToVReg(cu, orig_sreg);
+ }
+ }
+
+ /*
+ * Now that everything is typed and constants propagated, identify those constants
+ * that can be cheaply materialized and don't need to be flushed to a home location.
+ * The default is to not flush, and some have already been marked as must flush.
+ */
+ for (i = 0; i < cu->num_ssa_regs; i++) {
+ if (IsBitSet(cu->is_constant_v, i)) {
+ bool flush = false;
+ RegLocation loc = cu->reg_location[i];
+ if (loc.wide) {
+ int64_t value = ConstantValueWide(cu, loc);
+ if (loc.fp) {
+ flush = !cu->cg->InexpensiveConstantDouble(value);
+ } else {
+ flush = !cu->cg->InexpensiveConstantLong(value);
+ }
+ } else {
+ int32_t value = ConstantValue(cu, loc);
+ if (loc.fp) {
+ flush = !cu->cg->InexpensiveConstantFloat(value);
+ } else {
+ flush = !cu->cg->InexpensiveConstantInt(value);
+ }
+ }
+ if (flush) {
+ SetBit(cu, cu->must_flush_constant_v, i);
+ }
+ if (loc.wide) {
+ i++; // Skip the high word
+ }
+ }
+ }
+
+ cu->core_spill_mask = 0;
+ cu->fp_spill_mask = 0;
+ cu->num_core_spills = 0;
+
+ DoPromotion(cu);
+
+ /* Get easily accessible post-promotion copy of RegLocation for Method* */
+ cu->method_loc = cu->reg_location[cu->method_sreg];
+
+ if (cu->verbose && !(cu->disable_opt & (1 << kPromoteRegs))) {
+ LOG(INFO) << "After Promotion";
+ DumpRegLocTable(cu, cu->reg_location, cu->num_ssa_regs);
+ }
+
+ /* Set the frame size */
+ cu->frame_size = ComputeFrameSize(cu);
+}
+
+} // namespace art
diff --git a/src/compiler/dex/ralloc.h b/src/compiler/dex/ralloc.h
new file mode 100644
index 0000000..cd25b1c
--- /dev/null
+++ b/src/compiler/dex/ralloc.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_RALLOC_H_
+#define ART_SRC_COMPILER_DEX_RALLOC_H_
+
+#include "compiler_internals.h"
+
+namespace art {
+
+void SimpleRegAlloc(CompilationUnit* cu);
+
+} // namespace art
+
+#endif // ART_SRC_COMPILER_DEX_RALLOC_H_
diff --git a/src/compiler/dex/ssa_transformation.cc b/src/compiler/dex/ssa_transformation.cc
new file mode 100644
index 0000000..5d787c4
--- /dev/null
+++ b/src/compiler/dex/ssa_transformation.cc
@@ -0,0 +1,932 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compiler_internals.h"
+#include "dataflow.h"
+
+namespace art {
+
+// Make sure iterative dfs recording matches old recursive version
+//#define TEST_DFS
+
+static BasicBlock* NeedsVisit(BasicBlock* bb) {
+ if (bb != NULL) {
+ if (bb->visited || bb->hidden) {
+ bb = NULL;
+ }
+ }
+ return bb;
+}
+
+static BasicBlock* NextUnvisitedSuccessor(BasicBlock* bb)
+{
+ BasicBlock* res = NeedsVisit(bb->fall_through);
+ if (res == NULL) {
+ res = NeedsVisit(bb->taken);
+ if (res == NULL) {
+ if (bb->successor_block_list.block_list_type != kNotUsed) {
+ GrowableListIterator iterator;
+ GrowableListIteratorInit(&bb->successor_block_list.blocks,
+ &iterator);
+ while (true) {
+ SuccessorBlockInfo *sbi = reinterpret_cast<SuccessorBlockInfo*>
+ (GrowableListIteratorNext(&iterator));
+ if (sbi == NULL) break;
+ res = NeedsVisit(sbi->block);
+ if (res != NULL) break;
+ }
+ }
+ }
+ }
+ return res;
+}
+
+static void MarkPreOrder(CompilationUnit* cu, BasicBlock* block)
+{
+ block->visited = true;
+ /* Enqueue the pre_order block id */
+ InsertGrowableList(cu, &cu->dfs_order, block->id);
+}
+
+static void RecordDFSOrders(CompilationUnit* cu, BasicBlock* block)
+{
+ std::vector<BasicBlock*> succ;
+ MarkPreOrder(cu, block);
+ succ.push_back(block);
+ while (!succ.empty()) {
+ BasicBlock* curr = succ.back();
+ BasicBlock* next_successor = NextUnvisitedSuccessor(curr);
+ if (next_successor != NULL) {
+ MarkPreOrder(cu, next_successor);
+ succ.push_back(next_successor);
+ continue;
+ }
+ curr->dfs_id = cu->dfs_post_order.num_used;
+ InsertGrowableList(cu, &cu->dfs_post_order, curr->id);
+ succ.pop_back();
+ }
+}
+
+#if defined(TEST_DFS)
+/* Enter the node into the dfs_order list, then visit its successors */
+static void RecursiveRecordDFSOrders(CompilationUnit* cu, BasicBlock* block)
+{
+
+ if (block->visited || block->hidden) return;
+ block->visited = true;
+
+ // Can this block be reached only via previous block fallthrough?
+ if ((block->block_type == kDalvikByteCode) &&
+ (block->predecessors->num_used == 1)) {
+ DCHECK_GE(cu->dfs_order.num_used, 1U);
+ int prev_idx = cu->dfs_order.num_used - 1;
+ int prev_id = cu->dfs_order.elem_list[prev_idx];
+ BasicBlock* pred_bb = (BasicBlock*)block->predecessors->elem_list[0];
+ }
+
+ /* Enqueue the pre_order block id */
+ InsertGrowableList(cu, &cu->dfs_order, block->id);
+
+ if (block->fall_through) {
+ RecursiveRecordDFSOrders(cu, block->fall_through);
+ }
+ if (block->taken) RecursiveRecordDFSOrders(cu, block->taken);
+ if (block->successor_block_list.block_list_type != kNotUsed) {
+ GrowableListIterator iterator;
+ GrowableListIteratorInit(&block->successor_block_list.blocks,
+ &iterator);
+ while (true) {
+ SuccessorBlockInfo *successor_block_info =
+ (SuccessorBlockInfo *) GrowableListIteratorNext(&iterator);
+ if (successor_block_info == NULL) break;
+ BasicBlock* succ_bb = successor_block_info->block;
+ RecursiveRecordDFSOrders(cu, succ_bb);
+ }
+ }
+
+ /* Record postorder in basic block and enqueue normal id in dfs_post_order */
+ block->dfs_id = cu->dfs_post_order.num_used;
+ InsertGrowableList(cu, &cu->dfs_post_order, block->id);
+ return;
+}
+#endif
+
+/* Compute depth-first search pre- and post-orders over the blocks */
+static void ComputeDFSOrders(CompilationUnit* cu)
+{
+ /* Initialize or reset the DFS pre_order list */
+ if (cu->dfs_order.elem_list == NULL) {
+ CompilerInitGrowableList(cu, &cu->dfs_order, cu->num_blocks,
+ kListDfsOrder);
+ } else {
+ /* Just reset the used length on the counter */
+ cu->dfs_order.num_used = 0;
+ }
+
+ /* Initialize or reset the DFS post_order list */
+ if (cu->dfs_post_order.elem_list == NULL) {
+ CompilerInitGrowableList(cu, &cu->dfs_post_order, cu->num_blocks,
+ kListDfsPostOrder);
+ } else {
+ /* Just reset the used length on the counter */
+ cu->dfs_post_order.num_used = 0;
+ }
+
+#if defined(TEST_DFS)
+ // Reset visited flags
+ DataFlowAnalysisDispatcher(cu, ClearVisitedFlag,
+ kAllNodes, false /* is_iterative */);
+ // Record pre and post order dfs
+ RecursiveRecordDFSOrders(cu, cu->entry_block);
+ // Copy the results for later comparison and reset the lists
+ GrowableList recursive_dfs_order;
+ GrowableList recursive_dfs_post_order;
+ CompilerInitGrowableList(cu, &recursive_dfs_order, cu->dfs_order.num_used,
+ kListDfsOrder);
+ for (unsigned int i = 0; i < cu->dfs_order.num_used; i++) {
+ InsertGrowableList(cu, &recursive_dfs_order,
+ cu->dfs_order.elem_list[i]);
+ }
+ cu->dfs_order.num_used = 0;
+ CompilerInitGrowableList(cu, &recursive_dfs_post_order,
+ cu->dfs_post_order.num_used, kListDfsOrder);
+ for (unsigned int i = 0; i < cu->dfs_post_order.num_used; i++) {
+ InsertGrowableList(cu, &recursive_dfs_post_order,
+ cu->dfs_post_order.elem_list[i]);
+ }
+ cu->dfs_post_order.num_used = 0;
+#endif
+
+ // Reset visited flags from all nodes
+ DataFlowAnalysisDispatcher(cu, ClearVisitedFlag,
+ kAllNodes, false /* is_iterative */);
+ // Record dfs orders
+ RecordDFSOrders(cu, cu->entry_block);
+
+#if defined(TEST_DFS)
+ bool mismatch = false;
+ mismatch |= (cu->dfs_order.num_used != recursive_dfs_order.num_used);
+ for (unsigned int i = 0; i < cu->dfs_order.num_used; i++) {
+ mismatch |= (cu->dfs_order.elem_list[i] !=
+ recursive_dfs_order.elem_list[i]);
+ }
+ mismatch |= (cu->dfs_post_order.num_used != recursive_dfs_post_order.num_used);
+ for (unsigned int i = 0; i < cu->dfs_post_order.num_used; i++) {
+ mismatch |= (cu->dfs_post_order.elem_list[i] !=
+ recursive_dfs_post_order.elem_list[i]);
+ }
+ if (mismatch) {
+ LOG(INFO) << "Mismatch for "
+ << PrettyMethod(cu->method_idx, *cu->dex_file);
+ LOG(INFO) << "New dfs";
+ for (unsigned int i = 0; i < cu->dfs_order.num_used; i++) {
+ LOG(INFO) << i << " - " << cu->dfs_order.elem_list[i];
+ }
+ LOG(INFO) << "Recursive dfs";
+ for (unsigned int i = 0; i < recursive_dfs_order.num_used; i++) {
+ LOG(INFO) << i << " - " << recursive_dfs_order.elem_list[i];
+ }
+ LOG(INFO) << "New post dfs";
+ for (unsigned int i = 0; i < cu->dfs_post_order.num_used; i++) {
+ LOG(INFO) << i << " - " << cu->dfs_post_order.elem_list[i];
+ }
+ LOG(INFO) << "Recursive post dfs";
+ for (unsigned int i = 0; i < recursive_dfs_post_order.num_used; i++) {
+ LOG(INFO) << i << " - " << recursive_dfs_post_order.elem_list[i];
+ }
+ }
+ CHECK_EQ(cu->dfs_order.num_used, recursive_dfs_order.num_used);
+ for (unsigned int i = 0; i < cu->dfs_order.num_used; i++) {
+ CHECK_EQ(cu->dfs_order.elem_list[i], recursive_dfs_order.elem_list[i]);
+ }
+ CHECK_EQ(cu->dfs_post_order.num_used, recursive_dfs_post_order.num_used);
+ for (unsigned int i = 0; i < cu->dfs_post_order.num_used; i++) {
+ CHECK_EQ(cu->dfs_post_order.elem_list[i],
+ recursive_dfs_post_order.elem_list[i]);
+ }
+#endif
+
+ cu->num_reachable_blocks = cu->dfs_order.num_used;
+}
+
+/*
+ * Mark block bit on the per-Dalvik register vector to denote that Dalvik
+ * register idx is defined in BasicBlock bb.
+ */
+static bool FillDefBlockMatrix(CompilationUnit* cu, BasicBlock* bb)
+{
+ if (bb->data_flow_info == NULL) return false;
+
+ ArenaBitVectorIterator iterator;
+
+ BitVectorIteratorInit(bb->data_flow_info->def_v, &iterator);
+ while (true) {
+ int idx = BitVectorIteratorNext(&iterator);
+ if (idx == -1) break;
+ /* Block bb defines register idx */
+ SetBit(cu, cu->def_block_matrix[idx], bb->id);
+ }
+ return true;
+}
+
+static void ComputeDefBlockMatrix(CompilationUnit* cu)
+{
+ int num_registers = cu->num_dalvik_registers;
+ /* Allocate num_dalvik_registers bit vector pointers */
+ cu->def_block_matrix = static_cast<ArenaBitVector**>
+ (NewMem(cu, sizeof(ArenaBitVector *) * num_registers, true, kAllocDFInfo));
+ int i;
+
+ /* Initialize num_register vectors with num_blocks bits each */
+ for (i = 0; i < num_registers; i++) {
+ cu->def_block_matrix[i] = AllocBitVector(cu, cu->num_blocks,
+ false, kBitMapBMatrix);
+ }
+ DataFlowAnalysisDispatcher(cu, FindLocalLiveIn,
+ kAllNodes, false /* is_iterative */);
+ DataFlowAnalysisDispatcher(cu, FillDefBlockMatrix,
+ kAllNodes, false /* is_iterative */);
+
+ /*
+ * Also set the incoming parameters as defs in the entry block.
+ * Only need to handle the parameters for the outer method.
+ */
+ int num_regs = cu->num_dalvik_registers;
+ int in_reg = num_regs - cu->num_ins;
+ for (; in_reg < num_regs; in_reg++) {
+ SetBit(cu, cu->def_block_matrix[in_reg], cu->entry_block->id);
+ }
+}
+
+/* Compute the post-order traversal of the dominator tree */
+static void ComputeDomPostOrderTraversal(CompilationUnit* cu, BasicBlock* bb)
+{
+ ArenaBitVectorIterator bv_iterator;
+ BitVectorIteratorInit(bb->i_dominated, &bv_iterator);
+ GrowableList* block_list = &cu->block_list;
+
+ /* Iterate through the dominated blocks first */
+ while (true) {
+ //TUNING: hot call to BitVectorIteratorNext
+ int bb_idx = BitVectorIteratorNext(&bv_iterator);
+ if (bb_idx == -1) break;
+ BasicBlock* dominated_bb =
+ reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, bb_idx));
+ ComputeDomPostOrderTraversal(cu, dominated_bb);
+ }
+
+ /* Enter the current block id */
+ InsertGrowableList(cu, &cu->dom_post_order_traversal, bb->id);
+
+ /* hacky loop detection */
+ if (bb->taken && IsBitSet(bb->dominators, bb->taken->id)) {
+ cu->has_loop = true;
+ }
+}
+
+static void CheckForDominanceFrontier(CompilationUnit* cu, BasicBlock* dom_bb,
+ const BasicBlock* succ_bb)
+{
+ /*
+ * TODO - evaluate whether phi will ever need to be inserted into exit
+ * blocks.
+ */
+ if (succ_bb->i_dom != dom_bb &&
+ succ_bb->block_type == kDalvikByteCode &&
+ succ_bb->hidden == false) {
+ SetBit(cu, dom_bb->dom_frontier, succ_bb->id);
+ }
+}
+
+/* Worker function to compute the dominance frontier */
+static bool ComputeDominanceFrontier(CompilationUnit* cu, BasicBlock* bb)
+{
+ GrowableList* block_list = &cu->block_list;
+
+ /* Calculate DF_local */
+ if (bb->taken) {
+ CheckForDominanceFrontier(cu, bb, bb->taken);
+ }
+ if (bb->fall_through) {
+ CheckForDominanceFrontier(cu, bb, bb->fall_through);
+ }
+ if (bb->successor_block_list.block_list_type != kNotUsed) {
+ GrowableListIterator iterator;
+ GrowableListIteratorInit(&bb->successor_block_list.blocks,
+ &iterator);
+ while (true) {
+ SuccessorBlockInfo *successor_block_info =
+ reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
+ if (successor_block_info == NULL) break;
+ BasicBlock* succ_bb = successor_block_info->block;
+ CheckForDominanceFrontier(cu, bb, succ_bb);
+ }
+ }
+
+ /* Calculate DF_up */
+ ArenaBitVectorIterator bv_iterator;
+ BitVectorIteratorInit(bb->i_dominated, &bv_iterator);
+ while (true) {
+ //TUNING: hot call to BitVectorIteratorNext
+ int dominated_idx = BitVectorIteratorNext(&bv_iterator);
+ if (dominated_idx == -1) break;
+ BasicBlock* dominated_bb =
+ reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, dominated_idx));
+ ArenaBitVectorIterator df_iterator;
+ BitVectorIteratorInit(dominated_bb->dom_frontier, &df_iterator);
+ while (true) {
+ //TUNING: hot call to BitVectorIteratorNext
+ int df_up_idx = BitVectorIteratorNext(&df_iterator);
+ if (df_up_idx == -1) break;
+ BasicBlock* df_up_block =
+ reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, df_up_idx));
+ CheckForDominanceFrontier(cu, bb, df_up_block);
+ }
+ }
+
+ return true;
+}
+
+/* Worker function for initializing domination-related data structures */
+static bool InitializeDominationInfo(CompilationUnit* cu, BasicBlock* bb)
+{
+ int num_total_blocks = cu->block_list.num_used;
+
+ if (bb->dominators == NULL) {
+ bb->dominators = AllocBitVector(cu, num_total_blocks,
+ false /* expandable */,
+ kBitMapDominators);
+ bb->i_dominated = AllocBitVector(cu, num_total_blocks,
+ false /* expandable */,
+ kBitMapIDominated);
+ bb->dom_frontier = AllocBitVector(cu, num_total_blocks,
+ false /* expandable */,
+ kBitMapDomFrontier);
+ } else {
+ ClearAllBits(bb->dominators);
+ ClearAllBits(bb->i_dominated);
+ ClearAllBits(bb->dom_frontier);
+ }
+ /* Set all bits in the dominator vector */
+ SetInitialBits(bb->dominators, num_total_blocks);
+
+ return true;
+}
+
+/*
+ * Worker function to compute each block's dominators. This implementation
+ * is only used when kDebugVerifyDataflow is active and should compute
+ * the same dominator sets as ComputeBlockDominiators.
+ */
+static bool SlowComputeBlockDominators(CompilationUnit* cu, BasicBlock* bb)
+{
+ GrowableList* block_list = &cu->block_list;
+ int num_total_blocks = block_list->num_used;
+ ArenaBitVector* temp_block_v = cu->temp_block_v;
+ GrowableListIterator iter;
+
+ /*
+ * The dominator of the entry block has been preset to itself and we need
+ * to skip the calculation here.
+ */
+ if (bb == cu->entry_block) return false;
+
+ SetInitialBits(temp_block_v, num_total_blocks);
+
+ /* Iterate through the predecessors */
+ GrowableListIteratorInit(bb->predecessors, &iter);
+ while (true) {
+ BasicBlock* pred_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
+ if (!pred_bb) break;
+ /* temp_block_v = temp_block_v & pred_bb->dominators (set intersection) */
+ if (pred_bb->dominators != NULL) {
+ IntersectBitVectors(temp_block_v, temp_block_v, pred_bb->dominators);
+ }
+ }
+ SetBit(cu, temp_block_v, bb->id);
+ if (CompareBitVectors(temp_block_v, bb->dominators)) {
+ CopyBitVector(bb->dominators, temp_block_v);
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Worker function to compute the idom. This implementation is only
+ * used when kDebugVerifyDataflow is active and should compute the
+ * same i_dom as ComputeblockIDom.
+ */
+static bool SlowComputeBlockIDom(CompilationUnit* cu, BasicBlock* bb)
+{
+ GrowableList* block_list = &cu->block_list;
+ ArenaBitVector* temp_block_v = cu->temp_block_v;
+ ArenaBitVectorIterator bv_iterator;
+ BasicBlock* i_dom;
+
+ if (bb == cu->entry_block) return false;
+
+ CopyBitVector(temp_block_v, bb->dominators);
+ ClearBit(temp_block_v, bb->id);
+ BitVectorIteratorInit(temp_block_v, &bv_iterator);
+
+ /* Should not see any dead block */
+ DCHECK_NE(CountSetBits(temp_block_v), 0);
+ if (CountSetBits(temp_block_v) == 1) {
+ i_dom = reinterpret_cast<BasicBlock*>
+ (GrowableListGetElement(block_list, BitVectorIteratorNext(&bv_iterator)));
+ bb->i_dom = i_dom;
+ } else {
+ int i_dom_idx = BitVectorIteratorNext(&bv_iterator);
+ DCHECK_NE(i_dom_idx, -1);
+ while (true) {
+ int next_dom = BitVectorIteratorNext(&bv_iterator);
+ if (next_dom == -1) break;
+ BasicBlock* next_dom_bb =
+ reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, next_dom));
+ /* i_dom dominates next_dom - set new i_dom */
+ if (IsBitSet(next_dom_bb->dominators, i_dom_idx)) {
+ i_dom_idx = next_dom;
+ }
+
+ }
+ i_dom = reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, i_dom_idx));
+ /* Set the immediate dominator block for bb */
+ bb->i_dom = i_dom;
+ }
+ /* Add bb to the i_dominated set of the immediate dominator block */
+ SetBit(cu, i_dom->i_dominated, bb->id);
+ return true;
+}
+
+/*
+ * Walk through the ordered i_dom list until we reach a common parent.
+ * Given the ordering of i_dom_list, this common parent represents the
+ * last element of the intersection of block1 and block2 dominators.
+ */
+static int FindCommonParent(CompilationUnit *cu, int block1, int block2)
+{
+ while (block1 != block2) {
+ while (block1 < block2) {
+ block1 = cu->i_dom_list[block1];
+ DCHECK_NE(block1, NOTVISITED);
+ }
+ while (block2 < block1) {
+ block2 = cu->i_dom_list[block2];
+ DCHECK_NE(block2, NOTVISITED);
+ }
+ }
+ return block1;
+}
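+// Editor's illustration (hypothetical i_dom_list, not from any real CFG): with five reachable
+// blocks numbered 0..4 in DFS post-order (entry block = 4) and i_dom_list = {2, 2, 4, 4, 4},
+// FindCommonParent(cu, 0, 3) climbs 0 -> 2 -> 4, then 3 -> 4, and returns 4, the last common
+// dominator of blocks 0 and 3.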
+
+/* Worker function to compute each block's immediate dominator */
+static bool ComputeblockIDom(CompilationUnit* cu, BasicBlock* bb)
+{
+ GrowableListIterator iter;
+ int idom = -1;
+
+ /* Special-case entry block */
+ if (bb == cu->entry_block) {
+ return false;
+ }
+
+ /* Iterate through the predecessors */
+ GrowableListIteratorInit(bb->predecessors, &iter);
+
+ /* Find the first processed predecessor */
+ while (true) {
+ BasicBlock* pred_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
+ CHECK(pred_bb != NULL);
+ if (cu->i_dom_list[pred_bb->dfs_id] != NOTVISITED) {
+ idom = pred_bb->dfs_id;
+ break;
+ }
+ }
+
+ /* Scan the rest of the predecessors */
+ while (true) {
+ BasicBlock* pred_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
+ if (!pred_bb) break;
+ if (cu->i_dom_list[pred_bb->dfs_id] == NOTVISITED) {
+ continue;
+ } else {
+ idom = FindCommonParent(cu, pred_bb->dfs_id, idom);
+ }
+ }
+
+ DCHECK_NE(idom, NOTVISITED);
+
+ /* Did something change? */
+ if (cu->i_dom_list[bb->dfs_id] != idom) {
+ cu->i_dom_list[bb->dfs_id] = idom;
+ return true;
+ }
+ return false;
+}
+
+/* Worker function to compute each block's dominators */
+static bool ComputeBlockDominiators(CompilationUnit* cu, BasicBlock* bb)
+{
+ if (bb == cu->entry_block) {
+ ClearAllBits(bb->dominators);
+ } else {
+ CopyBitVector(bb->dominators, bb->i_dom->dominators);
+ }
+ SetBit(cu, bb->dominators, bb->id);
+ return false;
+}
+
+static bool SetDominators(CompilationUnit* cu, BasicBlock* bb)
+{
+ if (bb != cu->entry_block) {
+ int idom_dfs_idx = cu->i_dom_list[bb->dfs_id];
+ DCHECK_NE(idom_dfs_idx, NOTVISITED);
+ int i_dom_idx = cu->dfs_post_order.elem_list[idom_dfs_idx];
+ BasicBlock* i_dom =
+ reinterpret_cast<BasicBlock*>(GrowableListGetElement(&cu->block_list, i_dom_idx));
+ if (cu->enable_debug & (1 << kDebugVerifyDataflow)) {
+ DCHECK_EQ(bb->i_dom->id, i_dom->id);
+ }
+ bb->i_dom = i_dom;
+ /* Add bb to the i_dominated set of the immediate dominator block */
+ SetBit(cu, i_dom->i_dominated, bb->id);
+ }
+ return false;
+}
+
+/* Compute dominators, immediate dominator, and dominance frontier */
+static void ComputeDominators(CompilationUnit* cu)
+{
+ int num_reachable_blocks = cu->num_reachable_blocks;
+ int num_total_blocks = cu->block_list.num_used;
+
+ /* Initialize domination-related data structures */
+ DataFlowAnalysisDispatcher(cu, InitializeDominationInfo,
+ kReachableNodes, false /* is_iterative */);
+
+ /* Initialize and clear i_dom_list */
+ if (cu->i_dom_list == NULL) {
+ cu->i_dom_list = static_cast<int*>(NewMem(cu, sizeof(int) * num_reachable_blocks,
+ false, kAllocDFInfo));
+ }
+ for (int i = 0; i < num_reachable_blocks; i++) {
+ cu->i_dom_list[i] = NOTVISITED;
+ }
+
+ /* For post-order, last block is entry block. Set its i_dom to itself */
+ DCHECK_EQ(cu->entry_block->dfs_id, num_reachable_blocks-1);
+ cu->i_dom_list[cu->entry_block->dfs_id] = cu->entry_block->dfs_id;
+
+ /* Compute the immediate dominators */
+ DataFlowAnalysisDispatcher(cu, ComputeblockIDom,
+ kReversePostOrderTraversal,
+ true /* is_iterative */);
+
+ /* Set the dominator for the root node */
+ ClearAllBits(cu->entry_block->dominators);
+ SetBit(cu, cu->entry_block->dominators, cu->entry_block->id);
+
+ if (cu->temp_block_v == NULL) {
+ cu->temp_block_v = AllocBitVector(cu, num_total_blocks,
+ false /* expandable */,
+ kBitMapTmpBlockV);
+ } else {
+ ClearAllBits(cu->temp_block_v);
+ }
+ cu->entry_block->i_dom = NULL;
+
+ /* For testing, compute sets using alternate mechanism */
+ if (cu->enable_debug & (1 << kDebugVerifyDataflow)) {
+ // Use alternate mechanism to compute dominators for comparison
+ DataFlowAnalysisDispatcher(cu, SlowComputeBlockDominators,
+ kPreOrderDFSTraversal,
+ true /* is_iterative */);
+
+ DataFlowAnalysisDispatcher(cu, SlowComputeBlockIDom,
+ kReachableNodes,
+ false /* is_iterative */);
+ }
+
+ DataFlowAnalysisDispatcher(cu, SetDominators,
+ kReachableNodes,
+ false /* is_iterative */);
+
+ DataFlowAnalysisDispatcher(cu, ComputeBlockDominiators,
+ kReversePostOrderTraversal,
+ false /* is_iterative */);
+
+ /*
+ * Now go ahead and compute the post order traversal based on the
+ * i_dominated sets.
+ */
+ if (cu->dom_post_order_traversal.elem_list == NULL) {
+ CompilerInitGrowableList(cu, &cu->dom_post_order_traversal,
+ num_reachable_blocks, kListDomPostOrderTraversal);
+ } else {
+ cu->dom_post_order_traversal.num_used = 0;
+ }
+
+ ComputeDomPostOrderTraversal(cu, cu->entry_block);
+ DCHECK_EQ(cu->dom_post_order_traversal.num_used, static_cast<unsigned>(cu->num_reachable_blocks));
+
+ /* Now compute the dominance frontier for each block */
+ DataFlowAnalysisDispatcher(cu, ComputeDominanceFrontier,
+ kPostOrderDOMTraversal,
+ false /* is_iterative */);
+}
+
+/*
+ * Perform dest |= src1 & ~src2
+ * This is probably not general enough to be placed in BitVector.[ch].
+ */
+static void ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src1,
+ const ArenaBitVector* src2)
+{
+ if (dest->storage_size != src1->storage_size ||
+ dest->storage_size != src2->storage_size ||
+ dest->expandable != src1->expandable ||
+ dest->expandable != src2->expandable) {
+ LOG(FATAL) << "Incompatible set properties";
+ }
+
+ unsigned int idx;
+ for (idx = 0; idx < dest->storage_size; idx++) {
+ dest->storage[idx] |= src1->storage[idx] & ~src2->storage[idx];
+ }
+}
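+// Editor's illustration (hypothetical bit patterns): with dest = 0b0101, src1 = 0b0011 and
+// src2 = 0b0001, the word-wise update ORs in 0b0011 & ~0b0001 = 0b0010, leaving dest = 0b0111.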
+
+/*
+ * Iterate through all successor blocks and propagate up the live-in sets.
+ * The calculated result is used for phi-node pruning - where we only need to
+ * insert a phi node if the variable is live-in to the block.
+ */
+static bool ComputeBlockLiveIns(CompilationUnit* cu, BasicBlock* bb)
+{
+ ArenaBitVector* temp_dalvik_register_v = cu->temp_dalvik_register_v;
+
+ if (bb->data_flow_info == NULL) return false;
+ CopyBitVector(temp_dalvik_register_v, bb->data_flow_info->live_in_v);
+ if (bb->taken && bb->taken->data_flow_info)
+ ComputeSuccLineIn(temp_dalvik_register_v, bb->taken->data_flow_info->live_in_v,
+ bb->data_flow_info->def_v);
+ if (bb->fall_through && bb->fall_through->data_flow_info)
+ ComputeSuccLineIn(temp_dalvik_register_v,
+ bb->fall_through->data_flow_info->live_in_v,
+ bb->data_flow_info->def_v);
+ if (bb->successor_block_list.block_list_type != kNotUsed) {
+ GrowableListIterator iterator;
+ GrowableListIteratorInit(&bb->successor_block_list.blocks,
+ &iterator);
+ while (true) {
+ SuccessorBlockInfo *successor_block_info =
+ reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
+ if (successor_block_info == NULL) break;
+ BasicBlock* succ_bb = successor_block_info->block;
+ if (succ_bb->data_flow_info) {
+ ComputeSuccLineIn(temp_dalvik_register_v,
+ succ_bb->data_flow_info->live_in_v,
+ bb->data_flow_info->def_v);
+ }
+ }
+ }
+ if (CompareBitVectors(temp_dalvik_register_v, bb->data_flow_info->live_in_v)) {
+ CopyBitVector(bb->data_flow_info->live_in_v, temp_dalvik_register_v);
+ return true;
+ }
+ return false;
+}
+
+/* Insert phi nodes for each variable into the dominance frontiers */
+static void InsertPhiNodes(CompilationUnit* cu)
+{
+ int dalvik_reg;
+ const GrowableList* block_list = &cu->block_list;
+ ArenaBitVector* phi_blocks =
+ AllocBitVector(cu, cu->num_blocks, false, kBitMapPhi);
+ ArenaBitVector* tmp_blocks =
+ AllocBitVector(cu, cu->num_blocks, false, kBitMapTmpBlocks);
+ ArenaBitVector* input_blocks =
+ AllocBitVector(cu, cu->num_blocks, false, kBitMapInputBlocks);
+
+ cu->temp_dalvik_register_v =
+ AllocBitVector(cu, cu->num_dalvik_registers, false,
+ kBitMapRegisterV);
+
+ DataFlowAnalysisDispatcher(cu, ComputeBlockLiveIns,
+ kPostOrderDFSTraversal, true /* is_iterative */);
+
+ /* Iterate through each Dalvik register */
+ for (dalvik_reg = cu->num_dalvik_registers - 1; dalvik_reg >= 0; dalvik_reg--) {
+ bool change;
+ ArenaBitVectorIterator iterator;
+
+ CopyBitVector(input_blocks, cu->def_block_matrix[dalvik_reg]);
+ ClearAllBits(phi_blocks);
+
+ /* Calculate the phi blocks for each Dalvik register */
+ do {
+ change = false;
+ ClearAllBits(tmp_blocks);
+ BitVectorIteratorInit(input_blocks, &iterator);
+
+ while (true) {
+ int idx = BitVectorIteratorNext(&iterator);
+ if (idx == -1) break;
+ BasicBlock* def_bb =
+ reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, idx));
+
+ /* Merge the dominance frontier to tmp_blocks */
+ //TUNING: hot call to UnifyBitVetors
+ if (def_bb->dom_frontier != NULL) {
+ UnifyBitVetors(tmp_blocks, tmp_blocks, def_bb->dom_frontier);
+ }
+ }
+ if (CompareBitVectors(phi_blocks, tmp_blocks)) {
+ change = true;
+ CopyBitVector(phi_blocks, tmp_blocks);
+
+ /*
+ * Iterate through the original blocks plus the new ones in
+ * the dominance frontier.
+ */
+ CopyBitVector(input_blocks, phi_blocks);
+ UnifyBitVetors(input_blocks, input_blocks,
+ cu->def_block_matrix[dalvik_reg]);
+ }
+ } while (change);
+
+ /*
+ * Insert a phi node for dalvik_reg in the phi_blocks if the Dalvik
+ * register is in the live-in set.
+ */
+ BitVectorIteratorInit(phi_blocks, &iterator);
+ while (true) {
+ int idx = BitVectorIteratorNext(&iterator);
+ if (idx == -1) break;
+ BasicBlock* phi_bb =
+ reinterpret_cast<BasicBlock*>(GrowableListGetElement(block_list, idx));
+ /* Variable will be clobbered before being used - no need for phi */
+ if (!IsBitSet(phi_bb->data_flow_info->live_in_v, dalvik_reg)) continue;
+ MIR *phi = static_cast<MIR*>(NewMem(cu, sizeof(MIR), true, kAllocDFInfo));
+ phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpPhi);
+ phi->dalvikInsn.vA = dalvik_reg;
+ phi->offset = phi_bb->start_offset;
+ PrependMIR(phi_bb, phi);
+ }
+ }
+}
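+// Editor's illustration (hypothetical CFG): if Dalvik register v1 is defined in blocks {3, 5}
+// with dom_frontier(3) = {7}, dom_frontier(5) = {7}, dom_frontier(7) = {2} and dom_frontier(2)
+// empty, the loop above converges on phi_blocks = {7, 2}, i.e. the iterated dominance frontier,
+// and a phi for v1 is prepended to each of those blocks that has v1 live-in.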
+
+/*
+ * Worker function to insert phi-operands with latest SSA names from
+ * predecessor blocks
+ */
+static bool InsertPhiNodeOperands(CompilationUnit* cu, BasicBlock* bb)
+{
+ GrowableListIterator iter;
+ MIR *mir;
+ std::vector<int> uses;
+ std::vector<int> incoming_arc;
+
+ /* Phi nodes are at the beginning of each block */
+ for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ if (mir->dalvikInsn.opcode != static_cast<Instruction::Code>(kMirOpPhi))
+ return true;
+ int ssa_reg = mir->ssa_rep->defs[0];
+ DCHECK_GE(ssa_reg, 0); // Shouldn't see compiler temps here
+ int v_reg = SRegToVReg(cu, ssa_reg);
+
+ uses.clear();
+ incoming_arc.clear();
+
+ /* Iterate through the predecessors */
+ GrowableListIteratorInit(bb->predecessors, &iter);
+ while (true) {
+ BasicBlock* pred_bb =
+ reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
+ if (!pred_bb) break;
+ int ssa_reg = pred_bb->data_flow_info->vreg_to_ssa_map[v_reg];
+ uses.push_back(ssa_reg);
+ incoming_arc.push_back(pred_bb->id);
+ }
+
+ /* Count the number of SSA registers for a Dalvik register */
+ int num_uses = uses.size();
+ mir->ssa_rep->num_uses = num_uses;
+ mir->ssa_rep->uses =
+ static_cast<int*>(NewMem(cu, sizeof(int) * num_uses, false, kAllocDFInfo));
+ mir->ssa_rep->fp_use =
+ static_cast<bool*>(NewMem(cu, sizeof(bool) * num_uses, true, kAllocDFInfo));
+ int* incoming =
+ static_cast<int*>(NewMem(cu, sizeof(int) * num_uses, false, kAllocDFInfo));
+ // TODO: Ugly, rework (but don't burden each MIR/LIR for Phi-only needs)
+ mir->dalvikInsn.vB = reinterpret_cast<uintptr_t>(incoming);
+
+ /* Set the uses array for the phi node */
+ int *use_ptr = mir->ssa_rep->uses;
+ for (int i = 0; i < num_uses; i++) {
+ *use_ptr++ = uses[i];
+ *incoming++ = incoming_arc[i];
+ }
+ }
+
+ return true;
+}
+
+static void DoDFSPreOrderSSARename(CompilationUnit* cu, BasicBlock* block)
+{
+
+ if (block->visited || block->hidden) return;
+ block->visited = true;
+
+ /* Process this block */
+ DoSSAConversion(cu, block);
+ int map_size = sizeof(int) * cu->num_dalvik_registers;
+
+ /* Save SSA map snapshot */
+ int* saved_ssa_map = static_cast<int*>(NewMem(cu, map_size, false, kAllocDalvikToSSAMap));
+ memcpy(saved_ssa_map, cu->vreg_to_ssa_map, map_size);
+
+ if (block->fall_through) {
+ DoDFSPreOrderSSARename(cu, block->fall_through);
+ /* Restore SSA map snapshot */
+ memcpy(cu->vreg_to_ssa_map, saved_ssa_map, map_size);
+ }
+ if (block->taken) {
+ DoDFSPreOrderSSARename(cu, block->taken);
+ /* Restore SSA map snapshot */
+ memcpy(cu->vreg_to_ssa_map, saved_ssa_map, map_size);
+ }
+ if (block->successor_block_list.block_list_type != kNotUsed) {
+ GrowableListIterator iterator;
+ GrowableListIteratorInit(&block->successor_block_list.blocks, &iterator);
+ while (true) {
+ SuccessorBlockInfo *successor_block_info =
+ reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
+ if (successor_block_info == NULL) break;
+ BasicBlock* succ_bb = successor_block_info->block;
+ DoDFSPreOrderSSARename(cu, succ_bb);
+ /* Restore SSA map snapshot */
+ memcpy(cu->vreg_to_ssa_map, saved_ssa_map, map_size);
+ }
+ }
+ cu->vreg_to_ssa_map = saved_ssa_map;
+ return;
+}
+
+/* Perform SSA transformation for the whole method */
+void SSATransformation(CompilationUnit* cu)
+{
+ /* Compute the DFS order */
+ ComputeDFSOrders(cu);
+
+ if (!cu->disable_dataflow) {
+ /* Compute the dominator info */
+ ComputeDominators(cu);
+ }
+
+ /* Allocate data structures in preparation for SSA conversion */
+ CompilerInitializeSSAConversion(cu);
+
+ if (!cu->disable_dataflow) {
+ /* Find out the "Dalvik reg def x block" relation */
+ ComputeDefBlockMatrix(cu);
+
+ /* Insert phi nodes to dominance frontiers for all variables */
+ InsertPhiNodes(cu);
+ }
+
+ /* Rename register names by local defs and phi nodes */
+ DataFlowAnalysisDispatcher(cu, ClearVisitedFlag,
+ kAllNodes, false /* is_iterative */);
+ DoDFSPreOrderSSARename(cu, cu->entry_block);
+
+ if (!cu->disable_dataflow) {
+ /*
+ * Shared temp bit vector used by each block to count the number of defs
+ * from all the predecessor blocks.
+ */
+ cu->temp_ssa_register_v = AllocBitVector(cu, cu->num_ssa_regs,
+ false, kBitMapTempSSARegisterV);
+
+ cu->temp_ssa_block_id_v =
+ static_cast<int*>(NewMem(cu, sizeof(int) * cu->num_ssa_regs, false, kAllocDFInfo));
+
+ /* Insert phi-operands with latest SSA names from predecessor blocks */
+ DataFlowAnalysisDispatcher(cu, InsertPhiNodeOperands,
+ kReachableNodes, false /* is_iterative */);
+ }
+}
+
+} // namespace art
diff --git a/src/compiler/dex/ssa_transformation.h b/src/compiler/dex/ssa_transformation.h
new file mode 100644
index 0000000..92f7c0e
--- /dev/null
+++ b/src/compiler/dex/ssa_transformation.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_SSATRANSFORMATION_H_
+#define ART_SRC_COMPILER_DEX_SSATRANSFORMATION_H_
+
+#include "compiler_internals.h"
+
+namespace art {
+
+void SSATransformation(CompilationUnit* cu);
+
+} // namespace art
+
+#endif // ART_SRC_COMPILER_DEX_SSATRANSFORMATION_H_
diff --git a/src/compiler/dex/write_elf.cc b/src/compiler/dex/write_elf.cc
new file mode 100644
index 0000000..1fd8a9492
--- /dev/null
+++ b/src/compiler/dex/write_elf.cc
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compiler.h"
+#include "elf_writer.h"
+#include "os.h"
+
+extern "C" bool WriteElf(art::Compiler& compiler,
+ std::vector<uint8_t>& oat_contents,
+ art::File* file) {
+ return art::ElfWriter::Create(file, oat_contents, compiler);
+}
+extern "C" bool FixupElf(art::File* file, uintptr_t oat_data_begin) {
+ return art::ElfWriter::Fixup(file, oat_data_begin);
+}
+extern "C" void GetOatElfInformation(art::File* file,
+ size_t& oat_loaded_size,
+ size_t& oat_data_offset) {
+ art::ElfWriter::GetOatElfInformation(file, oat_loaded_size, oat_data_offset);
+}