Refactored SEA IR folder structure to be more logical.
Change-Id: I302c79af92c07cceb38dff36efe416bfa4869a91
diff --git a/compiler/sea_ir/ir/instruction_nodes.h b/compiler/sea_ir/ir/instruction_nodes.h
new file mode 100644
index 0000000..906a10f
--- /dev/null
+++ b/compiler/sea_ir/ir/instruction_nodes.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_SEA_IR_IR_INSTRUCTION_NODES_H_
+#define ART_COMPILER_SEA_IR_IR_INSTRUCTION_NODES_H_
+#include "dex_instruction-inl.h"
+#include "sea_ir/ir/sea_node.h"
+#include "sea_ir/ir/visitor.h"
+
+
+namespace sea_ir {
+
+enum SpecialRegisters {
+ NO_REGISTER = -1, // Usually signifies that there is no register
+ // satisfying the requested condition.
+ RETURN_REGISTER = -2, // Written by the invoke* instructions, read by move-results.
+ UNNAMED_CONST_REGISTER = -3 // Written by UnnamedConst* instructions, read by *Lit* instruction.
+};
+
+class IRVisitor;
+
+// This class represents an instruction in SEA IR.
+// As we add support for specific classes of instructions,
+// the number of InstructionNode objects should dwindle, while the
+// number of subclasses and instances of subclasses will go up.
+class InstructionNode: public SeaNode {
+ public:
+ static std::vector<sea_ir::InstructionNode*> Create(const art::Instruction* in);
+ // Returns the Dalvik instruction around which this InstructionNode is wrapped.
+ const art::Instruction* GetInstruction() const {
+ DCHECK(NULL != instruction_) << "Tried to access NULL instruction in an InstructionNode.";
+ return instruction_;
+ }
+ // Returns the register that is defined by the current instruction, or NO_REGISTER otherwise.
+ virtual int GetResultRegister() const;
+ // Returns the set of registers defined by the current instruction.
+ virtual std::vector<int> GetDefinitions() const;
+ // Returns the set of register numbers that are used by the instruction.
+ virtual std::vector<int> GetUses() const;
+ // Mark the current instruction as a downward exposed definition.
+ void MarkAsDEDef();
+ // Rename the use of @reg_no to refer to the instruction @definition,
+ // essentially creating SSA form.
+ void RenameToSSA(int reg_no, InstructionNode* definition) {
+ definition_edges_.insert(std::pair<int, InstructionNode*>(reg_no, definition));
+ definition->AddSSAUse(this);
+ }
+ // Returns the ordered set of Instructions that define the input operands of this instruction.
+ // Precondition: SeaGraph.ConvertToSSA().
+ virtual std::vector<InstructionNode*> GetSSAProducers() {
+ std::vector<int> uses = GetUses();
+ std::vector<InstructionNode*> ssa_uses;
+ for (std::vector<int>::const_iterator cit = uses.begin(); cit != uses.end(); cit++) {
+ ssa_uses.push_back((*definition_edges_.find(*cit)).second);
+ }
+ return ssa_uses;
+ }
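+ // Returns the map from used register numbers to their SSA defining instructions.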
+ std::map<int, InstructionNode* >* GetSSAProducersMap() {
+ return &definition_edges_;
+ }
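+ // Returns the instructions that use the result of this instruction (its SSA consumers).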
+ std::vector<InstructionNode*>* GetSSAConsumers() {
+ return &used_in_;
+ }
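+ // Records @use as a consumer of the result of this instruction.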
+ virtual void AddSSAUse(InstructionNode* use) {
+ used_in_.push_back(use);
+ }
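+ // Double-dispatch hook for IR visitors.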
+ void Accept(IRVisitor* v) {
+ v->Visit(this);
+ v->Traverse(this);
+ }
+ // Returns the region to which this instruction belongs.
+ Region* GetRegion() {
+ DCHECK(NULL != region_);
+ return region_;
+ }
+ // Sets the region to which this instruction belongs.
+ void SetRegion(Region* region) {
+ region_ = region;
+ }
+
+ protected:
+ explicit InstructionNode(const art::Instruction* in):
+ SeaNode(), instruction_(in), used_in_(), de_def_(false), region_(NULL) { }
+
+ protected:
+ const art::Instruction* const instruction_;
+ std::map<int, InstructionNode* > definition_edges_; // Maps used registers to their definitions.
+ // Stores pointers to instructions that use the result of the current instruction.
+ std::vector<InstructionNode*> used_in_;
+ bool de_def_;
+ Region* region_;
+};
+
+class ConstInstructionNode: public InstructionNode {
+ public:
+ explicit ConstInstructionNode(const art::Instruction* inst):
+ InstructionNode(inst) { }
+
+ void Accept(IRVisitor* v) {
+ v->Visit(this);
+ v->Traverse(this);
+ }
+
+ virtual int32_t GetConstValue() const {
+ return GetInstruction()->VRegB_11n();
+ }
+};
+
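+// Represents a constant value that has no named destination register;
+// its result is read through the virtual UNNAMED_CONST_REGISTER.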
+class UnnamedConstInstructionNode: public ConstInstructionNode {
+ public:
+ explicit UnnamedConstInstructionNode(const art::Instruction* inst, int32_t value):
+ ConstInstructionNode(inst), value_(value) { }
+
+ void Accept(IRVisitor* v) {
+ v->Visit(this);
+ v->Traverse(this);
+ }
+
+ int GetResultRegister() const {
+ return UNNAMED_CONST_REGISTER;
+ }
+
+ int32_t GetConstValue() const {
+ return value_;
+ }
+
+ private:
+ const int32_t value_;
+};
+
+class ReturnInstructionNode: public InstructionNode {
+ public:
+ explicit ReturnInstructionNode(const art::Instruction* inst): InstructionNode(inst) { }
+ void Accept(IRVisitor* v) {
+ v->Visit(this);
+ v->Traverse(this);
+ }
+};
+
+class IfNeInstructionNode: public InstructionNode {
+ public:
+ explicit IfNeInstructionNode(const art::Instruction* inst): InstructionNode(inst) {
+ DCHECK(InstructionTools::IsDefinition(inst) == false);
+ }
+ void Accept(IRVisitor* v) {
+ v->Visit(this);
+ v->Traverse(this);
+ }
+};
+
+
+
+class MoveResultInstructionNode: public InstructionNode {
+ public:
+ explicit MoveResultInstructionNode(const art::Instruction* inst): InstructionNode(inst) { }
+ std::vector<int> GetUses() const {
+ std::vector<int> uses; // Using vector<> instead of set<> because order matters.
+ uses.push_back(RETURN_REGISTER);
+ return uses;
+ }
+ void Accept(IRVisitor* v) {
+ v->Visit(this);
+ v->Traverse(this);
+ }
+};
+
+class InvokeStaticInstructionNode: public InstructionNode {
+ public:
+ explicit InvokeStaticInstructionNode(const art::Instruction* inst): InstructionNode(inst) { }
+ int GetResultRegister() const {
+ return RETURN_REGISTER;
+ }
+ void Accept(IRVisitor* v) {
+ v->Visit(this);
+ v->Traverse(this);
+ }
+};
+
+class AddIntInstructionNode: public InstructionNode {
+ public:
+ explicit AddIntInstructionNode(const art::Instruction* inst): InstructionNode(inst) { }
+ void Accept(IRVisitor* v) {
+ v->Visit(this);
+ v->Traverse(this);
+ }
+};
+
+class AddIntLitInstructionNode: public AddIntInstructionNode {
+ public:
+ explicit AddIntLitInstructionNode(const art::Instruction* inst):
+ AddIntInstructionNode(inst) { }
+
+ std::vector<int> GetUses() const {
+ std::vector<int> uses = AddIntInstructionNode::GetUses();
+ uses.push_back(UNNAMED_CONST_REGISTER);
+ return uses;
+ }
+
+ void Accept(IRVisitor* v) {
+ v->Visit(this);
+ v->Traverse(this);
+ }
+};
+
+class GotoInstructionNode: public InstructionNode {
+ public:
+ explicit GotoInstructionNode(const art::Instruction* inst): InstructionNode(inst) { }
+ void Accept(IRVisitor* v) {
+ v->Visit(this);
+ v->Traverse(this);
+ }
+};
+
+class IfEqzInstructionNode: public InstructionNode {
+ public:
+ explicit IfEqzInstructionNode(const art::Instruction* inst): InstructionNode(inst) {
+ DCHECK(InstructionTools::IsDefinition(inst) == false);
+ }
+ void Accept(IRVisitor* v) {
+ v->Visit(this);
+ v->Traverse(this);
+ }
+};
+} // namespace sea_ir
+#endif // ART_COMPILER_SEA_IR_IR_INSTRUCTION_NODES_H_
diff --git a/compiler/sea_ir/ir/instruction_tools.cc b/compiler/sea_ir/ir/instruction_tools.cc
new file mode 100644
index 0000000..143209d
--- /dev/null
+++ b/compiler/sea_ir/ir/instruction_tools.cc
@@ -0,0 +1,797 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "sea_ir/ir/instruction_tools.h"
+
+namespace sea_ir {
+
+bool InstructionTools::IsDefinition(const art::Instruction* const instruction) {
+ if (0 != (InstructionTools::instruction_attributes_[instruction->Opcode()] & (1 << kDA))) {
+ return true;
+ }
+ return false;
+}
+
+const int InstructionTools::instruction_attributes_[] = {
+ // 00 NOP
+ DF_NOP,
+
+ // 01 MOVE vA, vB
+ DF_DA | DF_UB | DF_IS_MOVE,
+
+ // 02 MOVE_FROM16 vAA, vBBBB
+ DF_DA | DF_UB | DF_IS_MOVE,
+
+ // 03 MOVE_16 vAAAA, vBBBB
+ DF_DA | DF_UB | DF_IS_MOVE,
+
+ // 04 MOVE_WIDE vA, vB
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
+
+ // 05 MOVE_WIDE_FROM16 vAA, vBBBB
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
+
+ // 06 MOVE_WIDE_16 vAAAA, vBBBB
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
+
+ // 07 MOVE_OBJECT vA, vB
+ DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
+
+ // 08 MOVE_OBJECT_FROM16 vAA, vBBBB
+ DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
+
+ // 09 MOVE_OBJECT_16 vAAAA, vBBBB
+ DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
+
+ // 0A MOVE_RESULT vAA
+ DF_DA,
+
+ // 0B MOVE_RESULT_WIDE vAA
+ DF_DA | DF_A_WIDE,
+
+ // 0C MOVE_RESULT_OBJECT vAA
+ DF_DA | DF_REF_A,
+
+ // 0D MOVE_EXCEPTION vAA
+ DF_DA | DF_REF_A | DF_NON_NULL_DST,
+
+ // 0E RETURN_VOID
+ DF_NOP,
+
+ // 0F RETURN vAA
+ DF_UA,
+
+ // 10 RETURN_WIDE vAA
+ DF_UA | DF_A_WIDE,
+
+ // 11 RETURN_OBJECT vAA
+ DF_UA | DF_REF_A,
+
+ // 12 CONST_4 vA, #+B
+ DF_DA | DF_SETS_CONST,
+
+ // 13 CONST_16 vAA, #+BBBB
+ DF_DA | DF_SETS_CONST,
+
+ // 14 CONST vAA, #+BBBBBBBB
+ DF_DA | DF_SETS_CONST,
+
+ // 15 CONST_HIGH16 vAA, #+BBBB0000
+ DF_DA | DF_SETS_CONST,
+
+ // 16 CONST_WIDE_16 vAA, #+BBBB
+ DF_DA | DF_A_WIDE | DF_SETS_CONST,
+
+ // 17 CONST_WIDE_32 vAA, #+BBBBBBBB
+ DF_DA | DF_A_WIDE | DF_SETS_CONST,
+
+ // 18 CONST_WIDE vAA, #+BBBBBBBBBBBBBBBB
+ DF_DA | DF_A_WIDE | DF_SETS_CONST,
+
+ // 19 CONST_WIDE_HIGH16 vAA, #+BBBB000000000000
+ DF_DA | DF_A_WIDE | DF_SETS_CONST,
+
+ // 1A CONST_STRING vAA, string@BBBB
+ DF_DA | DF_REF_A | DF_NON_NULL_DST,
+
+ // 1B CONST_STRING_JUMBO vAA, string@BBBBBBBB
+ DF_DA | DF_REF_A | DF_NON_NULL_DST,
+
+ // 1C CONST_CLASS vAA, type@BBBB
+ DF_DA | DF_REF_A | DF_NON_NULL_DST,
+
+ // 1D MONITOR_ENTER vAA
+ DF_UA | DF_NULL_CHK_0 | DF_REF_A,
+
+ // 1E MONITOR_EXIT vAA
+ DF_UA | DF_NULL_CHK_0 | DF_REF_A,
+
+ // 1F CHK_CAST vAA, type@BBBB
+ DF_UA | DF_REF_A | DF_UMS,
+
+ // 20 INSTANCE_OF vA, vB, type@CCCC
+ DF_DA | DF_UB | DF_CORE_A | DF_REF_B | DF_UMS,
+
+ // 21 ARRAY_LENGTH vA, vB
+ DF_DA | DF_UB | DF_NULL_CHK_0 | DF_CORE_A | DF_REF_B,
+
+ // 22 NEW_INSTANCE vAA, type@BBBB
+ DF_DA | DF_NON_NULL_DST | DF_REF_A | DF_UMS,
+
+ // 23 NEW_ARRAY vA, vB, type@CCCC
+ DF_DA | DF_UB | DF_NON_NULL_DST | DF_REF_A | DF_CORE_B | DF_UMS,
+
+ // 24 FILLED_NEW_ARRAY {vD, vE, vF, vG, vA}
+ DF_FORMAT_35C | DF_NON_NULL_RET | DF_UMS,
+
+ // 25 FILLED_NEW_ARRAY_RANGE {vCCCC .. vNNNN}, type@BBBB
+ DF_FORMAT_3RC | DF_NON_NULL_RET | DF_UMS,
+
+ // 26 FILL_ARRAY_DATA vAA, +BBBBBBBB
+ DF_UA | DF_REF_A | DF_UMS,
+
+ // 27 THROW vAA
+ DF_UA | DF_REF_A | DF_UMS,
+
+ // 28 GOTO
+ DF_NOP,
+
+ // 29 GOTO_16
+ DF_NOP,
+
+ // 2A GOTO_32
+ DF_NOP,
+
+ // 2B PACKED_SWITCH vAA, +BBBBBBBB
+ DF_UA,
+
+ // 2C SPARSE_SWITCH vAA, +BBBBBBBB
+ DF_UA,
+
+ // 2D CMPL_FLOAT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
+
+ // 2E CMPG_FLOAT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
+
+ // 2F CMPL_DOUBLE vAA, vBB, vCC
+ DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_B | DF_FP_C | DF_CORE_A,
+
+ // 30 CMPG_DOUBLE vAA, vBB, vCC
+ DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_B | DF_FP_C | DF_CORE_A,
+
+ // 31 CMP_LONG vAA, vBB, vCC
+ DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // 32 IF_EQ vA, vB, +CCCC
+ DF_UA | DF_UB,
+
+ // 33 IF_NE vA, vB, +CCCC
+ DF_UA | DF_UB,
+
+ // 34 IF_LT vA, vB, +CCCC
+ DF_UA | DF_UB,
+
+ // 35 IF_GE vA, vB, +CCCC
+ DF_UA | DF_UB,
+
+ // 36 IF_GT vA, vB, +CCCC
+ DF_UA | DF_UB,
+
+ // 37 IF_LE vA, vB, +CCCC
+ DF_UA | DF_UB,
+
+ // 38 IF_EQZ vAA, +BBBB
+ DF_UA,
+
+ // 39 IF_NEZ vAA, +BBBB
+ DF_UA,
+
+ // 3A IF_LTZ vAA, +BBBB
+ DF_UA,
+
+ // 3B IF_GEZ vAA, +BBBB
+ DF_UA,
+
+ // 3C IF_GTZ vAA, +BBBB
+ DF_UA,
+
+ // 3D IF_LEZ vAA, +BBBB
+ DF_UA,
+
+ // 3E UNUSED_3E
+ DF_NOP,
+
+ // 3F UNUSED_3F
+ DF_NOP,
+
+ // 40 UNUSED_40
+ DF_NOP,
+
+ // 41 UNUSED_41
+ DF_NOP,
+
+ // 42 UNUSED_42
+ DF_NOP,
+
+ // 43 UNUSED_43
+ DF_NOP,
+
+ // 44 AGET vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+ // 45 AGET_WIDE vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+ // 46 AGET_OBJECT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_A | DF_REF_B | DF_CORE_C,
+
+ // 47 AGET_BOOLEAN vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+ // 48 AGET_BYTE vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+ // 49 AGET_CHAR vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+ // 4A AGET_SHORT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
+
+ // 4B APUT vAA, vBB, vCC
+ DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
+
+ // 4C APUT_WIDE vAA, vBB, vCC
+ DF_UA | DF_A_WIDE | DF_UB | DF_UC | DF_NULL_CHK_2 | DF_RANGE_CHK_3 | DF_REF_B | DF_CORE_C,
+
+ // 4D APUT_OBJECT vAA, vBB, vCC
+ DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_A | DF_REF_B | DF_CORE_C,
+
+ // 4E APUT_BOOLEAN vAA, vBB, vCC
+ DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
+
+ // 4F APUT_BYTE vAA, vBB, vCC
+ DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
+
+ // 50 APUT_CHAR vAA, vBB, vCC
+ DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
+
+ // 51 APUT_SHORT vAA, vBB, vCC
+ DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
+
+ // 52 IGET vA, vB, field@CCCC
+ DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+ // 53 IGET_WIDE vA, vB, field@CCCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+ // 54 IGET_OBJECT vA, vB, field@CCCC
+ DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_A | DF_REF_B,
+
+ // 55 IGET_BOOLEAN vA, vB, field@CCCC
+ DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+ // 56 IGET_BYTE vA, vB, field@CCCC
+ DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+ // 57 IGET_CHAR vA, vB, field@CCCC
+ DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+ // 58 IGET_SHORT vA, vB, field@CCCC
+ DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+ // 59 IPUT vA, vB, field@CCCC
+ DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+ // 5A IPUT_WIDE vA, vB, field@CCCC
+ DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_2 | DF_REF_B,
+
+ // 5B IPUT_OBJECT vA, vB, field@CCCC
+ DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_A | DF_REF_B,
+
+ // 5C IPUT_BOOLEAN vA, vB, field@CCCC
+ DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+ // 5D IPUT_BYTE vA, vB, field@CCCC
+ DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+ // 5E IPUT_CHAR vA, vB, field@CCCC
+ DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+ // 5F IPUT_SHORT vA, vB, field@CCCC
+ DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+ // 60 SGET vAA, field@BBBB
+ DF_DA | DF_UMS,
+
+ // 61 SGET_WIDE vAA, field@BBBB
+ DF_DA | DF_A_WIDE | DF_UMS,
+
+ // 62 SGET_OBJECT vAA, field@BBBB
+ DF_DA | DF_REF_A | DF_UMS,
+
+ // 63 SGET_BOOLEAN vAA, field@BBBB
+ DF_DA | DF_UMS,
+
+ // 64 SGET_BYTE vAA, field@BBBB
+ DF_DA | DF_UMS,
+
+ // 65 SGET_CHAR vAA, field@BBBB
+ DF_DA | DF_UMS,
+
+ // 66 SGET_SHORT vAA, field@BBBB
+ DF_DA | DF_UMS,
+
+ // 67 SPUT vAA, field@BBBB
+ DF_UA | DF_UMS,
+
+ // 68 SPUT_WIDE vAA, field@BBBB
+ DF_UA | DF_A_WIDE | DF_UMS,
+
+ // 69 SPUT_OBJECT vAA, field@BBBB
+ DF_UA | DF_REF_A | DF_UMS,
+
+ // 6A SPUT_BOOLEAN vAA, field@BBBB
+ DF_UA | DF_UMS,
+
+ // 6B SPUT_BYTE vAA, field@BBBB
+ DF_UA | DF_UMS,
+
+ // 6C SPUT_CHAR vAA, field@BBBB
+ DF_UA | DF_UMS,
+
+ // 6D SPUT_SHORT vAA, field@BBBB
+ DF_UA | DF_UMS,
+
+ // 6E INVOKE_VIRTUAL {vD, vE, vF, vG, vA}
+ DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+ // 6F INVOKE_SUPER {vD, vE, vF, vG, vA}
+ DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+ // 70 INVOKE_DIRECT {vD, vE, vF, vG, vA}
+ DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+ // 71 INVOKE_STATIC {vD, vE, vF, vG, vA}
+ DF_FORMAT_35C | DF_UMS,
+
+ // 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
+ DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+ // 73 UNUSED_73
+ DF_NOP,
+
+ // 74 INVOKE_VIRTUAL_RANGE {vCCCC .. vNNNN}
+ DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+ // 75 INVOKE_SUPER_RANGE {vCCCC .. vNNNN}
+ DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+ // 76 INVOKE_DIRECT_RANGE {vCCCC .. vNNNN}
+ DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+ // 77 INVOKE_STATIC_RANGE {vCCCC .. vNNNN}
+ DF_FORMAT_3RC | DF_UMS,
+
+ // 78 INVOKE_INTERFACE_RANGE {vCCCC .. vNNNN}
+ DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+ // 79 UNUSED_79
+ DF_NOP,
+
+ // 7A UNUSED_7A
+ DF_NOP,
+
+ // 7B NEG_INT vA, vB
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // 7C NOT_INT vA, vB
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // 7D NEG_LONG vA, vB
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+ // 7E NOT_LONG vA, vB
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+ // 7F NEG_FLOAT vA, vB
+ DF_DA | DF_UB | DF_FP_A | DF_FP_B,
+
+ // 80 NEG_DOUBLE vA, vB
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+ // 81 INT_TO_LONG vA, vB
+ DF_DA | DF_A_WIDE | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // 82 INT_TO_FLOAT vA, vB
+ DF_DA | DF_UB | DF_FP_A | DF_CORE_B,
+
+ // 83 INT_TO_DOUBLE vA, vB
+ DF_DA | DF_A_WIDE | DF_UB | DF_FP_A | DF_CORE_B,
+
+ // 84 LONG_TO_INT vA, vB
+ DF_DA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+ // 85 LONG_TO_FLOAT vA, vB
+ DF_DA | DF_UB | DF_B_WIDE | DF_FP_A | DF_CORE_B,
+
+ // 86 LONG_TO_DOUBLE vA, vB
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_CORE_B,
+
+ // 87 FLOAT_TO_INT vA, vB
+ DF_DA | DF_UB | DF_FP_B | DF_CORE_A,
+
+ // 88 FLOAT_TO_LONG vA, vB
+ DF_DA | DF_A_WIDE | DF_UB | DF_FP_B | DF_CORE_A,
+
+ // 89 FLOAT_TO_DOUBLE vA, vB
+ DF_DA | DF_A_WIDE | DF_UB | DF_FP_A | DF_FP_B,
+
+ // 8A DOUBLE_TO_INT vA, vB
+ DF_DA | DF_UB | DF_B_WIDE | DF_FP_B | DF_CORE_A,
+
+ // 8B DOUBLE_TO_LONG vA, vB
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_B | DF_CORE_A,
+
+ // 8C DOUBLE_TO_FLOAT vA, vB
+ DF_DA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+ // 8D INT_TO_BYTE vA, vB
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // 8E INT_TO_CHAR vA, vB
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // 8F INT_TO_SHORT vA, vB
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // 90 ADD_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // 91 SUB_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // 92 MUL_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // 93 DIV_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // 94 REM_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // 95 AND_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // 96 OR_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // 97 XOR_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // 98 SHL_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // 99 SHR_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // 9A USHR_INT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // 9B ADD_LONG vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // 9C SUB_LONG vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // 9D MUL_LONG vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // 9E DIV_LONG vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // 9F REM_LONG vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // A0 AND_LONG vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // A1 OR_LONG vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // A2 XOR_LONG vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // A3 SHL_LONG vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // A4 SHR_LONG vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // A5 USHR_LONG vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
+
+ // A6 ADD_FLOAT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+
+ // A7 SUB_FLOAT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+
+ // A8 MUL_FLOAT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+
+ // A9 DIV_FLOAT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+
+ // AA REM_FLOAT vAA, vBB, vCC
+ DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
+
+ // AB ADD_DOUBLE vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+
+ // AC SUB_DOUBLE vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+
+ // AD MUL_DOUBLE vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+
+ // AE DIV_DOUBLE vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+
+ // AF REM_DOUBLE vAA, vBB, vCC
+ DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
+
+ // B0 ADD_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // B1 SUB_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // B2 MUL_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // B3 DIV_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // B4 REM_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // B5 AND_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // B6 OR_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // B7 XOR_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // B8 SHL_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // B9 SHR_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // BA USHR_INT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // BB ADD_LONG_2ADDR vA, vB
+ DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+ // BC SUB_LONG_2ADDR vA, vB
+ DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+ // BD MUL_LONG_2ADDR vA, vB
+ DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+ // BE DIV_LONG_2ADDR vA, vB
+ DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+ // BF REM_LONG_2ADDR vA, vB
+ DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+ // C0 AND_LONG_2ADDR vA, vB
+ DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+ // C1 OR_LONG_2ADDR vA, vB
+ DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+ // C2 XOR_LONG_2ADDR vA, vB
+ DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
+
+ // C3 SHL_LONG_2ADDR vA, vB
+ DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // C4 SHR_LONG_2ADDR vA, vB
+ DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // C5 USHR_LONG_2ADDR vA, vB
+ DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // C6 ADD_FLOAT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+ // C7 SUB_FLOAT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+ // C8 MUL_FLOAT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+ // C9 DIV_FLOAT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+ // CA REM_FLOAT_2ADDR vA, vB
+ DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
+
+ // CB ADD_DOUBLE_2ADDR vA, vB
+ DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+ // CC SUB_DOUBLE_2ADDR vA, vB
+ DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+ // CD MUL_DOUBLE_2ADDR vA, vB
+ DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+ // CE DIV_DOUBLE_2ADDR vA, vB
+ DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+ // CF REM_DOUBLE_2ADDR vA, vB
+ DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
+
+ // D0 ADD_INT_LIT16 vA, vB, #+CCCC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // D1 RSUB_INT vA, vB, #+CCCC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // D2 MUL_INT_LIT16 vA, vB, #+CCCC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // D3 DIV_INT_LIT16 vA, vB, #+CCCC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // D4 REM_INT_LIT16 vA, vB, #+CCCC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // D5 AND_INT_LIT16 vA, vB, #+CCCC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // D6 OR_INT_LIT16 vA, vB, #+CCCC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // D7 XOR_INT_LIT16 vA, vB, #+CCCC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // D8 ADD_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // D9 RSUB_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // DA MUL_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // DB DIV_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // DC REM_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // DD AND_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // DE OR_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // DF XOR_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // E0 SHL_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // E1 SHR_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // E2 USHR_INT_LIT8 vAA, vBB, #+CC
+ DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
+
+ // E3 IGET_VOLATILE
+ DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+ // E4 IPUT_VOLATILE
+ DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
+
+ // E5 SGET_VOLATILE
+ DF_DA | DF_UMS,
+
+ // E6 SPUT_VOLATILE
+ DF_UA | DF_UMS,
+
+ // E7 IGET_OBJECT_VOLATILE
+ DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_A | DF_REF_B,
+
+ // E8 IGET_WIDE_VOLATILE
+ DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
+
+ // E9 IPUT_WIDE_VOLATILE
+ DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_2 | DF_REF_B,
+
+ // EA SGET_WIDE_VOLATILE
+ DF_DA | DF_A_WIDE | DF_UMS,
+
+ // EB SPUT_WIDE_VOLATILE
+ DF_UA | DF_A_WIDE | DF_UMS,
+
+ // EC BREAKPOINT
+ DF_NOP,
+
+ // ED THROW_VERIFICATION_ERROR
+ DF_NOP | DF_UMS,
+
+ // EE EXECUTE_INLINE
+ DF_FORMAT_35C,
+
+ // EF EXECUTE_INLINE_RANGE
+ DF_FORMAT_3RC,
+
+ // F0 INVOKE_OBJECT_INIT_RANGE
+ DF_NOP | DF_NULL_CHK_0,
+
+ // F1 RETURN_VOID_BARRIER
+ DF_NOP,
+
+ // F2 IGET_QUICK
+ DF_DA | DF_UB | DF_NULL_CHK_0,
+
+ // F3 IGET_WIDE_QUICK
+ DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_0,
+
+ // F4 IGET_OBJECT_QUICK
+ DF_DA | DF_UB | DF_NULL_CHK_0,
+
+ // F5 IPUT_QUICK
+ DF_UA | DF_UB | DF_NULL_CHK_1,
+
+ // F6 IPUT_WIDE_QUICK
+ DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_2,
+
+ // F7 IPUT_OBJECT_QUICK
+ DF_UA | DF_UB | DF_NULL_CHK_1,
+
+ // F8 INVOKE_VIRTUAL_QUICK
+ DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+ // F9 INVOKE_VIRTUAL_QUICK_RANGE
+ DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+ // FA INVOKE_SUPER_QUICK
+ DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
+
+ // FB INVOKE_SUPER_QUICK_RANGE
+ DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+ // FC IPUT_OBJECT_VOLATILE
+ DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_A | DF_REF_B,
+
+ // FD SGET_OBJECT_VOLATILE
+ DF_DA | DF_REF_A | DF_UMS,
+
+ // FE SPUT_OBJECT_VOLATILE
+ DF_UA | DF_REF_A | DF_UMS,
+
+ // FF UNUSED_FF
+ DF_NOP
+};
+} // namespace sea_ir
diff --git a/compiler/sea_ir/ir/instruction_tools.h b/compiler/sea_ir/ir/instruction_tools.h
new file mode 100644
index 0000000..895e017
--- /dev/null
+++ b/compiler/sea_ir/ir/instruction_tools.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "sea.h"
+#include "dex_instruction.h"
+
+#ifndef ART_COMPILER_SEA_IR_IR_INSTRUCTION_TOOLS_H_
+#define ART_COMPILER_SEA_IR_IR_INSTRUCTION_TOOLS_H_
+
+
+// Note: This file has content cannibalized for SEA_IR from the MIR implementation,
+// to avoid having a dependence on MIR.
+namespace sea_ir {
+
+#define DF_NOP 0
+#define DF_UA (1 << kUA)
+#define DF_UB (1 << kUB)
+#define DF_UC (1 << kUC)
+#define DF_A_WIDE (1 << kAWide)
+#define DF_B_WIDE (1 << kBWide)
+#define DF_C_WIDE (1 << kCWide)
+#define DF_DA (1 << kDA)
+#define DF_IS_MOVE (1 << kIsMove)
+#define DF_SETS_CONST (1 << kSetsConst)
+#define DF_FORMAT_35C (1 << kFormat35c)
+#define DF_FORMAT_3RC (1 << kFormat3rc)
+#define DF_NULL_CHK_0 (1 << kNullCheckSrc0)
+#define DF_NULL_CHK_1 (1 << kNullCheckSrc1)
+#define DF_NULL_CHK_2 (1 << kNullCheckSrc2)
+#define DF_NULL_CHK_OUT0 (1 << kNullCheckOut0)
+#define DF_NON_NULL_DST (1 << kDstNonNull)
+#define DF_NON_NULL_RET (1 << kRetNonNull)
+#define DF_NULL_TRANSFER_0 (1 << kNullTransferSrc0)
+#define DF_NULL_TRANSFER_N (1 << kNullTransferSrcN)
+#define DF_RANGE_CHK_1 (1 << kRangeCheckSrc1)
+#define DF_RANGE_CHK_2 (1 << kRangeCheckSrc2)
+#define DF_RANGE_CHK_3 (1 << kRangeCheckSrc3)
+#define DF_FP_A (1 << kFPA)
+#define DF_FP_B (1 << kFPB)
+#define DF_FP_C (1 << kFPC)
+#define DF_CORE_A (1 << kCoreA)
+#define DF_CORE_B (1 << kCoreB)
+#define DF_CORE_C (1 << kCoreC)
+#define DF_REF_A (1 << kRefA)
+#define DF_REF_B (1 << kRefB)
+#define DF_REF_C (1 << kRefC)
+#define DF_UMS (1 << kUsesMethodStar)
+
+#define DF_HAS_USES (DF_UA | DF_UB | DF_UC)
+
+#define DF_HAS_DEFS (DF_DA)
+
+#define DF_HAS_NULL_CHKS (DF_NULL_CHK_0 | \
+ DF_NULL_CHK_1 | \
+ DF_NULL_CHK_2 | \
+ DF_NULL_CHK_OUT0)
+
+#define DF_HAS_RANGE_CHKS (DF_RANGE_CHK_1 | \
+ DF_RANGE_CHK_2 | \
+ DF_RANGE_CHK_3)
+
+#define DF_HAS_NR_CHKS (DF_HAS_NULL_CHKS | \
+ DF_HAS_RANGE_CHKS)
+
+#define DF_A_IS_REG (DF_UA | DF_DA)
+#define DF_B_IS_REG (DF_UB)
+#define DF_C_IS_REG (DF_UC)
+#define DF_IS_GETTER_OR_SETTER (DF_IS_GETTER | DF_IS_SETTER)
+#define DF_USES_FP (DF_FP_A | DF_FP_B | DF_FP_C)
+
+enum DataFlowAttributePos {
+ kUA = 0,
+ kUB,
+ kUC,
+ kAWide,
+ kBWide,
+ kCWide,
+ kDA,
+ kIsMove,
+ kSetsConst,
+ kFormat35c,
+ kFormat3rc,
+ kNullCheckSrc0, // Null check of uses[0].
+ kNullCheckSrc1, // Null check of uses[1].
+ kNullCheckSrc2, // Null check of uses[2].
+ kNullCheckOut0, // Null check of outgoing arg0.
+ kDstNonNull, // May assume dst is non-null.
+ kRetNonNull, // May assume retval is non-null.
+ kNullTransferSrc0, // Object copy src[0] -> dst.
+ kNullTransferSrcN, // Phi null check state transfer.
+ kRangeCheckSrc1, // Range check of uses[1].
+ kRangeCheckSrc2, // Range check of uses[2].
+ kRangeCheckSrc3, // Range check of uses[3].
+ kFPA,
+ kFPB,
+ kFPC,
+ kCoreA,
+ kCoreB,
+ kCoreC,
+ kRefA,
+ kRefB,
+ kRefC,
+ kUsesMethodStar, // Implicit use of Method*.
+};
+
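+// Static helpers for querying the data-flow attributes of Dalvik instructions.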
+class InstructionTools {
+ public:
+ static bool IsDefinition(const art::Instruction* instruction);
+ static const int instruction_attributes_[];
+};
+} // namespace sea_ir
+#endif // ART_COMPILER_SEA_IR_IR_INSTRUCTION_TOOLS_H_
diff --git a/compiler/sea_ir/ir/sea.cc b/compiler/sea_ir/ir/sea.cc
new file mode 100644
index 0000000..9f98c20
--- /dev/null
+++ b/compiler/sea_ir/ir/sea.cc
@@ -0,0 +1,672 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "base/stringprintf.h"
+#include "sea_ir/ir/instruction_tools.h"
+#include "sea_ir/ir/sea.h"
+#include "sea_ir/code_gen/code_gen.h"
+#include "sea_ir/types/type_inference.h"
+
+#define MAX_REACHING_DEF_ITERERATIONS (10)
+// TODO: When development is done, this define should not
+// be needed; it is currently used as a cutoff
+// for cases where the iterative fixed-point algorithm
+// does not reach a fixed point because of a bug.
+
+namespace sea_ir {
+
+int SeaNode::current_max_node_id_ = 0;
+
+void IRVisitor::Traverse(Region* region) {
+ std::vector<PhiInstructionNode*>* phis = region->GetPhiNodes();
+ for (std::vector<PhiInstructionNode*>::const_iterator cit = phis->begin();
+ cit != phis->end(); cit++) {
+ (*cit)->Accept(this);
+ }
+ std::vector<InstructionNode*>* instructions = region->GetInstructions();
+ for (std::vector<InstructionNode*>::const_iterator cit = instructions->begin();
+ cit != instructions->end(); cit++) {
+ (*cit)->Accept(this);
+ }
+}
+
+void IRVisitor::Traverse(SeaGraph* graph) {
+ for (std::vector<Region*>::const_iterator cit = ordered_regions_.begin();
+ cit != ordered_regions_.end(); cit++ ) {
+ (*cit)->Accept(this);
+ }
+}
+
+SeaGraph* SeaGraph::GetCurrentGraph(const art::DexFile& dex_file) {
+ return new SeaGraph(dex_file);
+}
+
+void SeaGraph::AddEdge(Region* src, Region* dst) const {
+ src->AddSuccessor(dst);
+ dst->AddPredecessor(src);
+}
+
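+// Helper for ComputeRPO(): depth-first traversal that assigns decreasing reverse post-order
+// numbers to @current_region and all regions reachable from it.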
+void SeaGraph::ComputeRPO(Region* current_region, int& current_rpo) {
+ current_region->SetRPO(VISITING);
+ std::vector<sea_ir::Region*>* succs = current_region->GetSuccessors();
+ for (std::vector<sea_ir::Region*>::iterator succ_it = succs->begin();
+ succ_it != succs->end(); ++succ_it) {
+ if (NOT_VISITED == (*succ_it)->GetRPO()) {
+ SeaGraph::ComputeRPO(*succ_it, current_rpo);
+ }
+ }
+ current_region->SetRPO(current_rpo--);
+}
+
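+// Iteratively recomputes the immediate dominator of every region, using the reverse
+// post-order numbering and Intersect(), until a fixed point is reached.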
+void SeaGraph::ComputeIDominators() {
+ bool changed = true;
+ while (changed) {
+ changed = false;
+ // Entry node has itself as IDOM.
+ std::vector<Region*>::iterator crt_it;
+ std::set<Region*> processedNodes;
+ // Find and mark the entry node(s).
+ for (crt_it = regions_.begin(); crt_it != regions_.end(); ++crt_it) {
+ if ((*crt_it)->GetPredecessors()->size() == 0) {
+ processedNodes.insert(*crt_it);
+ (*crt_it)->SetIDominator(*crt_it);
+ }
+ }
+ for (crt_it = regions_.begin(); crt_it != regions_.end(); ++crt_it) {
+ if ((*crt_it)->GetPredecessors()->size() == 0) {
+ continue;
+ }
+ // NewIDom = first (processed) predecessor of b.
+ Region* new_dom = NULL;
+ std::vector<Region*>* preds = (*crt_it)->GetPredecessors();
+ DCHECK(NULL != preds);
+ Region* root_pred = NULL;
+ for (std::vector<Region*>::iterator pred_it = preds->begin();
+ pred_it != preds->end(); ++pred_it) {
+ if (processedNodes.end() != processedNodes.find((*pred_it))) {
+ root_pred = *pred_it;
+ new_dom = root_pred;
+ break;
+ }
+ }
+ // For all other predecessors p of b, if the idom of p is already set,
+ // then NewIdom = Intersect(p, NewIdom).
+ for (std::vector<Region*>::const_iterator pred_it = preds->begin();
+ pred_it != preds->end(); ++pred_it) {
+ DCHECK(NULL != *pred_it);
+ // if IDOMS[p] != UNDEFINED
+ if ((*pred_it != root_pred) && (*pred_it)->GetIDominator() != NULL) {
+ DCHECK(NULL != new_dom);
+ new_dom = SeaGraph::Intersect(*pred_it, new_dom);
+ }
+ }
+ DCHECK(NULL != *crt_it);
+ if ((*crt_it)->GetIDominator() != new_dom) {
+ (*crt_it)->SetIDominator(new_dom);
+ changed = true;
+ }
+ processedNodes.insert(*crt_it);
+ }
+ }
+
+ // For easily ordering of regions we need edges dominator->dominated.
+ for (std::vector<Region*>::iterator region_it = regions_.begin();
+ region_it != regions_.end(); region_it++) {
+ Region* idom = (*region_it)->GetIDominator();
+ if (idom != *region_it) {
+ idom->AddToIDominatedSet(*region_it);
+ }
+ }
+}
+
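+// Returns a common dominator of @i and @j by repeatedly walking the region with the larger
+// RPO number up its immediate-dominator chain until the two walks meet.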
+Region* SeaGraph::Intersect(Region* i, Region* j) {
+ Region* finger1 = i;
+ Region* finger2 = j;
+ while (finger1 != finger2) {
+ while (finger1->GetRPO() > finger2->GetRPO()) {
+ DCHECK(NULL != finger1);
+ finger1 = finger1->GetIDominator(); // should have: finger1 != NULL
+ DCHECK(NULL != finger1);
+ }
+ while (finger1->GetRPO() < finger2->GetRPO()) {
+ DCHECK(NULL != finger2);
+ finger2 = finger2->GetIDominator(); // should have: finger2 != NULL
+ DCHECK(NULL != finger2);
+ }
+ }
+ return finger1; // finger1 should be equal to finger2 at this point.
+}
+
+void SeaGraph::ComputeDownExposedDefs() {
+ for (std::vector<Region*>::iterator region_it = regions_.begin();
+ region_it != regions_.end(); region_it++) {
+ (*region_it)->ComputeDownExposedDefs();
+ }
+}
+
+void SeaGraph::ComputeReachingDefs() {
+ // Iterate until the reaching definitions set doesn't change anymore.
+ // (See Cooper & Torczon, "Engineering a Compiler", second edition, page 487)
+ bool changed = true;
+ int iteration = 0;
+ while (changed && (iteration < MAX_REACHING_DEF_ITERERATIONS)) {
+ iteration++;
+ changed = false;
+ // TODO: optimize the ordering if this becomes performance bottleneck.
+ for (std::vector<Region*>::iterator regions_it = regions_.begin();
+ regions_it != regions_.end();
+ regions_it++) {
+ changed |= (*regions_it)->UpdateReachingDefs();
+ }
+ }
+ DCHECK(!changed) << "Reaching definitions computation did not reach a fixed point.";
+}
+
+
+void SeaGraph::BuildMethodSeaGraph(const art::DexFile::CodeItem* code_item,
+ const art::DexFile& dex_file, uint32_t class_def_idx,
+ uint32_t method_idx, uint32_t method_access_flags) {
+ code_item_ = code_item;
+ class_def_idx_ = class_def_idx;
+ method_idx_ = method_idx;
+ method_access_flags_ = method_access_flags;
+ const uint16_t* code = code_item->insns_;
+ const size_t size_in_code_units = code_item->insns_size_in_code_units_;
+ // This maps target instruction pointers to their corresponding region objects.
+ std::map<const uint16_t*, Region*> target_regions;
+ size_t i = 0;
+ // Pass: Find the start instructions of basic blocks
+ // by locating targets and flow-through instructions of branches.
+ while (i < size_in_code_units) {
+ const art::Instruction* inst = art::Instruction::At(&code[i]);
+ if (inst->IsBranch() || inst->IsUnconditional()) {
+ int32_t offset = inst->GetTargetOffset();
+ if (target_regions.end() == target_regions.find(&code[i + offset])) {
+ Region* region = GetNewRegion();
+ target_regions.insert(std::pair<const uint16_t*, Region*>(&code[i + offset], region));
+ }
+ if (inst->CanFlowThrough()
+ && (target_regions.end() == target_regions.find(&code[i + inst->SizeInCodeUnits()]))) {
+ Region* region = GetNewRegion();
+ target_regions.insert(
+ std::pair<const uint16_t*, Region*>(&code[i + inst->SizeInCodeUnits()], region));
+ }
+ }
+ i += inst->SizeInCodeUnits();
+ }
+
+
+ Region* r = GetNewRegion();
+ // Insert one SignatureNode per function argument,
+ // to serve as placeholder definitions in dataflow analysis.
+ for (unsigned int crt_offset = 0; crt_offset < code_item->ins_size_; crt_offset++) {
+ int position = crt_offset; // TODO: Is this the correct offset in the signature?
+ SignatureNode* parameter_def_node =
+ new sea_ir::SignatureNode(code_item->registers_size_ - 1 - crt_offset, position);
+ AddParameterNode(parameter_def_node);
+ r->AddChild(parameter_def_node);
+ }
+ // Pass: Assign instructions to region nodes and
+ // assign branches their control flow successors.
+ i = 0;
+ sea_ir::InstructionNode* last_node = NULL;
+ sea_ir::InstructionNode* node = NULL;
+ while (i < size_in_code_units) {
+ const art::Instruction* inst = art::Instruction::At(&code[i]);
+ std::vector<InstructionNode*> sea_instructions_for_dalvik =
+ sea_ir::InstructionNode::Create(inst);
+ for (std::vector<InstructionNode*>::const_iterator cit = sea_instructions_for_dalvik.begin();
+ sea_instructions_for_dalvik.end() != cit; ++cit) {
+ last_node = node;
+ node = *cit;
+
+ if (inst->IsBranch() || inst->IsUnconditional()) {
+ int32_t offset = inst->GetTargetOffset();
+ std::map<const uint16_t*, Region*>::iterator it = target_regions.find(&code[i + offset]);
+ DCHECK(it != target_regions.end());
+ AddEdge(r, it->second); // Add edge to branch target.
+ }
+ std::map<const uint16_t*, Region*>::iterator it = target_regions.find(&code[i]);
+ if (target_regions.end() != it) {
+ // Get the already created region because this is a branch target.
+ Region* nextRegion = it->second;
+ if (last_node->GetInstruction()->IsBranch()
+ && last_node->GetInstruction()->CanFlowThrough()) {
+ AddEdge(r, it->second); // Add flow-through edge.
+ }
+ r = nextRegion;
+ }
+ r->AddChild(node);
+ }
+ i += inst->SizeInCodeUnits();
+ }
+}
+
+void SeaGraph::ComputeRPO() {
+ int rpo_id = regions_.size() - 1;
+ for (std::vector<Region*>::const_iterator crt_it = regions_.begin(); crt_it != regions_.end();
+ ++crt_it) {
+ if ((*crt_it)->GetPredecessors()->size() == 0) {
+ ComputeRPO(*crt_it, rpo_id);
+ }
+ }
+}
+
+// Performs the renaming phase in traditional SSA transformations.
+// See: Cooper & Torczon, "Engineering a Compiler", second edition, page 505.
+void SeaGraph::RenameAsSSA() {
+ utils::ScopedHashtable<int, InstructionNode*> scoped_table;
+ scoped_table.OpenScope();
+ for (std::vector<Region*>::iterator region_it = regions_.begin(); region_it != regions_.end();
+ region_it++) {
+ if ((*region_it)->GetIDominator() == *region_it) {
+ RenameAsSSA(*region_it, &scoped_table);
+ }
+ }
+ scoped_table.CloseScope();
+}
+
+void SeaGraph::ConvertToSSA() {
+ // Pass: find global names.
+ // The map @blocks maps registers to the blocks in which they are defined.
+ std::map<int, std::set<Region*> > blocks;
+ // The set @globals records registers whose use
+ // is in a different block than the corresponding definition.
+ std::set<int> globals;
+ for (std::vector<Region*>::iterator region_it = regions_.begin(); region_it != regions_.end();
+ region_it++) {
+ std::set<int> var_kill;
+ std::vector<InstructionNode*>* instructions = (*region_it)->GetInstructions();
+ for (std::vector<InstructionNode*>::iterator inst_it = instructions->begin();
+ inst_it != instructions->end(); inst_it++) {
+ std::vector<int> used_regs = (*inst_it)->GetUses();
+ for (std::size_t i = 0; i < used_regs.size(); i++) {
+ int used_reg = used_regs[i];
+ if (var_kill.find(used_reg) == var_kill.end()) {
+ globals.insert(used_reg);
+ }
+ }
+ const int reg_def = (*inst_it)->GetResultRegister();
+ if (reg_def != NO_REGISTER) {
+ var_kill.insert(reg_def);
+ }
+
+ blocks.insert(std::pair<int, std::set<Region*> >(reg_def, std::set<Region*>()));
+ std::set<Region*>* reg_def_blocks = &(blocks.find(reg_def)->second);
+ reg_def_blocks->insert(*region_it);
+ }
+ }
+
+ // Pass: Actually add phi-nodes to regions.
+ for (std::set<int>::const_iterator globals_it = globals.begin();
+ globals_it != globals.end(); globals_it++) {
+ int global = *globals_it;
+ // Copy the set, because we will modify the worklist as we go.
+ std::set<Region*> worklist((*(blocks.find(global))).second);
+ for (std::set<Region*>::const_iterator b_it = worklist.begin();
+ b_it != worklist.end(); b_it++) {
+ std::set<Region*>* df = (*b_it)->GetDominanceFrontier();
+ for (std::set<Region*>::const_iterator df_it = df->begin(); df_it != df->end(); df_it++) {
+ if ((*df_it)->InsertPhiFor(global)) {
+ // Only add the dominance frontier element to the worklist (and restart
+ // the iteration over the worklist) if it is not already there.
+ if (worklist.find(*df_it) == worklist.end()) {
+ worklist.insert(*df_it);
+ b_it = worklist.begin();
+ break;
+ }
+ }
+ }
+ }
+ }
+ // Pass: Build edges to the definition corresponding to each use.
+ // (This corresponds to the renaming phase in traditional SSA transformations.
+ // See: Cooper & Torczon, "Engineering a Compiler", second edition, page 505.)
+ RenameAsSSA();
+}
+
+void SeaGraph::RenameAsSSA(Region* crt_region,
+ utils::ScopedHashtable<int, InstructionNode*>* scoped_table) {
+ scoped_table->OpenScope();
+ // Rename phi nodes defined in the current region.
+ std::vector<PhiInstructionNode*>* phis = crt_region->GetPhiNodes();
+ for (std::vector<PhiInstructionNode*>::iterator phi_it = phis->begin();
+ phi_it != phis->end(); phi_it++) {
+ int reg_no = (*phi_it)->GetRegisterNumber();
+ scoped_table->Add(reg_no, (*phi_it));
+ }
+ // Rename operands of instructions from the current region.
+ std::vector<InstructionNode*>* instructions = crt_region->GetInstructions();
+ for (std::vector<InstructionNode*>::const_iterator instructions_it = instructions->begin();
+ instructions_it != instructions->end(); instructions_it++) {
+ InstructionNode* current_instruction = (*instructions_it);
+ // Rename uses.
+ std::vector<int> used_regs = current_instruction->GetUses();
+ for (std::vector<int>::const_iterator reg_it = used_regs.begin();
+ reg_it != used_regs.end(); reg_it++) {
+ int current_used_reg = (*reg_it);
+ InstructionNode* definition = scoped_table->Lookup(current_used_reg);
+ current_instruction->RenameToSSA(current_used_reg, definition);
+ }
+ // Update scope table with latest definitions.
+ std::vector<int> def_regs = current_instruction->GetDefinitions();
+ for (std::vector<int>::const_iterator reg_it = def_regs.begin();
+ reg_it != def_regs.end(); reg_it++) {
+ int current_defined_reg = (*reg_it);
+ scoped_table->Add(current_defined_reg, current_instruction);
+ }
+ }
+ // Fill in uses of phi functions in CFG successor regions.
+ const std::vector<Region*>* successors = crt_region->GetSuccessors();
+ for (std::vector<Region*>::const_iterator successors_it = successors->begin();
+ successors_it != successors->end(); successors_it++) {
+ Region* successor = (*successors_it);
+ successor->SetPhiDefinitionsForUses(scoped_table, crt_region);
+ }
+
+ // Rename all successors in the dominators tree.
+ const std::set<Region*>* dominated_nodes = crt_region->GetIDominatedSet();
+ for (std::set<Region*>::const_iterator dominated_nodes_it = dominated_nodes->begin();
+ dominated_nodes_it != dominated_nodes->end(); dominated_nodes_it++) {
+ Region* dominated_node = (*dominated_nodes_it);
+ RenameAsSSA(dominated_node, scoped_table);
+ }
+ scoped_table->CloseScope();
+}
+
+void SeaGraph::GenerateLLVM() {
+ // Pass: Generate LLVM IR.
+ CodeGenPrepassVisitor code_gen_prepass_visitor;
+ std::cout << "Generating code..." << std::endl;
+ std::cout << "=== PRE VISITING ===" << std::endl;
+ Accept(&code_gen_prepass_visitor);
+ CodeGenVisitor code_gen_visitor(code_gen_prepass_visitor.GetData());
+ std::cout << "=== VISITING ===" << std::endl;
+ Accept(&code_gen_visitor);
+ std::cout << "=== POST VISITING ===" << std::endl;
+ CodeGenPostpassVisitor code_gen_postpass_visitor(code_gen_visitor.GetData());
+ Accept(&code_gen_postpass_visitor);
+ code_gen_postpass_visitor.Write(std::string("my_file.llvm"));
+}
+
+void SeaGraph::CompileMethod(const art::DexFile::CodeItem* code_item, uint32_t class_def_idx,
+ uint32_t method_idx, uint32_t method_access_flags, const art::DexFile& dex_file) {
+ // Two passes: Build the intermediate (non-SSA) structure of the SEA IR for the function.
+ BuildMethodSeaGraph(code_item, dex_file, class_def_idx, method_idx, method_access_flags);
+ // Pass: Compute reverse post-order of regions.
+ ComputeRPO();
+ // Multiple passes: compute immediate dominators.
+ ComputeIDominators();
+ // Pass: compute downward-exposed definitions.
+ ComputeDownExposedDefs();
+ // Multiple Passes (iterative fixed-point algorithm): Compute reaching definitions
+ ComputeReachingDefs();
+ // Pass (O(nlogN)): Compute the dominance frontier for region nodes.
+ ComputeDominanceFrontier();
+ // Two Passes: Phi node insertion.
+ ConvertToSSA();
+ // Pass: type inference
+ ti_->ComputeTypes(this);
+ // Pass: Generate LLVM IR.
+ GenerateLLVM();
+}
+
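+// Computes dominance frontiers: for every region with multiple predecessors, each predecessor
+// and its dominators up to (but not including) the region's immediate dominator get that
+// region added to their dominance frontier.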
+void SeaGraph::ComputeDominanceFrontier() {
+ for (std::vector<Region*>::iterator region_it = regions_.begin();
+ region_it != regions_.end(); region_it++) {
+ std::vector<Region*>* preds = (*region_it)->GetPredecessors();
+ if (preds->size() > 1) {
+ for (std::vector<Region*>::iterator pred_it = preds->begin();
+ pred_it != preds->end(); pred_it++) {
+ Region* runner = *pred_it;
+ while (runner != (*region_it)->GetIDominator()) {
+ runner->AddToDominanceFrontier(*region_it);
+ runner = runner->GetIDominator();
+ }
+ }
+ }
+ }
+}
+
+Region* SeaGraph::GetNewRegion() {
+ Region* new_region = new Region();
+ AddRegion(new_region);
+ return new_region;
+}
+
+void SeaGraph::AddRegion(Region* r) {
+ DCHECK(r) << "Tried to add NULL region to SEA graph.";
+ regions_.push_back(r);
+}
+
+SeaGraph::SeaGraph(const art::DexFile& df)
+ :ti_(new TypeInference()), class_def_idx_(0), method_idx_(0), method_access_flags_(),
+ regions_(), parameters_(), dex_file_(df), code_item_(NULL) { }
+
+void Region::AddChild(sea_ir::InstructionNode* instruction) {
+ DCHECK(instruction) << "Tried to add NULL instruction to region node.";
+ instructions_.push_back(instruction);
+ instruction->SetRegion(this);
+}
+
+SeaNode* Region::GetLastChild() const {
+ if (instructions_.size() > 0) {
+ return instructions_.back();
+ }
+ return NULL;
+}
+
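+// Computes this region's downward-exposed definitions: for each register, the last
+// instruction in the region that defines it.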
+void Region::ComputeDownExposedDefs() {
+ for (std::vector<InstructionNode*>::const_iterator inst_it = instructions_.begin();
+ inst_it != instructions_.end(); inst_it++) {
+ int reg_no = (*inst_it)->GetResultRegister();
+ if (reg_no != NO_REGISTER) {
+ std::map<int, InstructionNode*>::iterator res = de_defs_.find(reg_no);
+ if (res == de_defs_.end()) {
+ de_defs_.insert(std::pair<int, InstructionNode*>(reg_no, *inst_it));
+ } else {
+ res->second = *inst_it;
+ }
+ }
+ }
+ for (std::map<int, sea_ir::InstructionNode*>::const_iterator cit = de_defs_.begin();
+ cit != de_defs_.end(); cit++) {
+ (*cit).second->MarkAsDEDef();
+ }
+}
+
+const std::map<int, sea_ir::InstructionNode*>* Region::GetDownExposedDefs() const {
+ return &de_defs_;
+}
+
+std::map<int, std::set<sea_ir::InstructionNode*>* >* Region::GetReachingDefs() {
+ return &reaching_defs_;
+}
+
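+// Recomputes this region's reaching-definitions set from the downward-exposed and reaching
+// definitions of its predecessors; returns true if the set changed.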
+bool Region::UpdateReachingDefs() {
+ std::map<int, std::set<sea_ir::InstructionNode*>* > new_reaching;
+ for (std::vector<Region*>::const_iterator pred_it = predecessors_.begin();
+ pred_it != predecessors_.end(); pred_it++) {
+ // The reaching_defs variable will contain the reaching defs __for the current predecessor only__.
+ std::map<int, std::set<sea_ir::InstructionNode*>* > reaching_defs;
+ std::map<int, std::set<sea_ir::InstructionNode*>* >* pred_reaching =
+ (*pred_it)->GetReachingDefs();
+ const std::map<int, InstructionNode*>* de_defs = (*pred_it)->GetDownExposedDefs();
+
+ // The definitions from the reaching set of the predecessor
+ // may be shadowed by downward-exposed definitions from the predecessor;
+ // otherwise the defs from the reaching set are still good.
+ for (std::map<int, InstructionNode*>::const_iterator de_def = de_defs->begin();
+ de_def != de_defs->end(); de_def++) {
+ std::set<InstructionNode*>* solo_def;
+ solo_def = new std::set<InstructionNode*>();
+ solo_def->insert(de_def->second);
+ reaching_defs.insert(
+ std::pair<int const, std::set<InstructionNode*>*>(de_def->first, solo_def));
+ }
+ reaching_defs.insert(pred_reaching->begin(), pred_reaching->end());
+
+ // Now we combine the reaching map coming from the current predecessor (reaching_defs)
+ // with the accumulated set from all predecessors so far (from new_reaching).
+ std::map<int, std::set<sea_ir::InstructionNode*>*>::iterator reaching_it =
+ reaching_defs.begin();
+ for (; reaching_it != reaching_defs.end(); reaching_it++) {
+ std::map<int, std::set<sea_ir::InstructionNode*>*>::iterator crt_entry =
+ new_reaching.find(reaching_it->first);
+ if (new_reaching.end() != crt_entry) {
+ crt_entry->second->insert(reaching_it->second->begin(), reaching_it->second->end());
+ } else {
+ new_reaching.insert(
+ std::pair<int, std::set<sea_ir::InstructionNode*>*>(
+ reaching_it->first,
+ reaching_it->second) );
+ }
+ }
+ }
+ bool changed = false;
+ // Because the sets are monotonically increasing,
+ // we can compare sizes instead of using set comparison.
+ // TODO: Find formal proof.
+ int old_size = 0;
+ if (-1 == reaching_defs_size_) {
+ std::map<int, std::set<sea_ir::InstructionNode*>*>::iterator reaching_it =
+ reaching_defs_.begin();
+ for (; reaching_it != reaching_defs_.end(); reaching_it++) {
+ old_size += (*reaching_it).second->size();
+ }
+ } else {
+ old_size = reaching_defs_size_;
+ }
+ int new_size = 0;
+ std::map<int, std::set<sea_ir::InstructionNode*>*>::iterator reaching_it = new_reaching.begin();
+ for (; reaching_it != new_reaching.end(); reaching_it++) {
+ new_size += (*reaching_it).second->size();
+ }
+ if (old_size != new_size) {
+ changed = true;
+ }
+ if (changed) {
+ reaching_defs_ = new_reaching;
+ reaching_defs_size_ = new_size;
+ }
+ return changed;
+}
+
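+// Inserts a phi node for register @reg_no unless one is already present;
+// returns true if a new phi node was added.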
+bool Region::InsertPhiFor(int reg_no) {
+ if (!ContainsPhiFor(reg_no)) {
+ phi_set_.insert(reg_no);
+ PhiInstructionNode* new_phi = new PhiInstructionNode(reg_no);
+ new_phi->SetRegion(this);
+ phi_instructions_.push_back(new_phi);
+ return true;
+ }
+ return false;
+}
+
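+// For each phi node in this region, sets the operand flowing in from @predecessor
+// to the definition of that register currently visible in @scoped_table.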
+void Region::SetPhiDefinitionsForUses(
+ const utils::ScopedHashtable<int, InstructionNode*>* scoped_table, Region* predecessor) {
+ int predecessor_id = -1;
+ for (unsigned int crt_pred_id = 0; crt_pred_id < predecessors_.size(); crt_pred_id++) {
+ if (predecessors_.at(crt_pred_id) == predecessor) {
+ predecessor_id = crt_pred_id;
+ }
+ }
+ DCHECK_NE(-1, predecessor_id);
+ for (std::vector<PhiInstructionNode*>::iterator phi_it = phi_instructions_.begin();
+ phi_it != phi_instructions_.end(); phi_it++) {
+ PhiInstructionNode* phi = (*phi_it);
+ int reg_no = phi->GetRegisterNumber();
+ InstructionNode* definition = scoped_table->Lookup(reg_no);
+ phi->RenameToSSA(reg_no, definition, predecessor_id);
+ }
+}
+
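+// Factory method: wraps the Dalvik instruction @in in one or more IR instruction nodes,
+// using the most specific InstructionNode subclass currently supported.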
+std::vector<InstructionNode*> InstructionNode::Create(const art::Instruction* in) {
+ std::vector<InstructionNode*> sea_instructions;
+ switch (in->Opcode()) {
+ case art::Instruction::CONST_4:
+ sea_instructions.push_back(new ConstInstructionNode(in));
+ break;
+ case art::Instruction::RETURN:
+ sea_instructions.push_back(new ReturnInstructionNode(in));
+ break;
+ case art::Instruction::IF_NE:
+ sea_instructions.push_back(new IfNeInstructionNode(in));
+ break;
+ case art::Instruction::ADD_INT_LIT8:
+ sea_instructions.push_back(new UnnamedConstInstructionNode(in, in->VRegB_22b()));
+ sea_instructions.push_back(new AddIntLitInstructionNode(in));
+ break;
+ case art::Instruction::MOVE_RESULT:
+ sea_instructions.push_back(new MoveResultInstructionNode(in));
+ break;
+ case art::Instruction::INVOKE_STATIC:
+ sea_instructions.push_back(new InvokeStaticInstructionNode(in));
+ break;
+ case art::Instruction::ADD_INT:
+ sea_instructions.push_back(new AddIntInstructionNode(in));
+ break;
+ case art::Instruction::GOTO:
+ sea_instructions.push_back(new GotoInstructionNode(in));
+ break;
+ case art::Instruction::IF_EQZ:
+ sea_instructions.push_back(new IfEqzInstructionNode(in));
+ break;
+ default:
+ // Default: generic IR instruction node; this case should no longer be reached
+ // once support for all instructions has been added.
+ sea_instructions.push_back(new InstructionNode(in));
+ }
+ return sea_instructions;
+}
+
+void InstructionNode::MarkAsDEDef() {
+ de_def_ = true;
+}
+
+int InstructionNode::GetResultRegister() const {
+ if (instruction_->HasVRegA() && InstructionTools::IsDefinition(instruction_)) {
+ return instruction_->VRegA();
+ }
+ return NO_REGISTER;
+}
+
+std::vector<int> InstructionNode::GetDefinitions() const {
+ // TODO: Extend this to handle instructions that define more than one register (if any).
+ // The return value should then become a pointer to a member field; for now it is
+ // returned by value to avoid possible memory leaks from dynamic allocation.
+ std::vector<int> definitions;
+ int result = GetResultRegister();
+ if (NO_REGISTER != result) {
+ definitions.push_back(result);
+ }
+ return definitions;
+}
+
+std::vector<int> InstructionNode::GetUses() const {
+ std::vector<int> uses; // Using vector<> instead of set<> because order matters.
+ if (!InstructionTools::IsDefinition(instruction_) && (instruction_->HasVRegA())) {
+ int vA = instruction_->VRegA();
+ uses.push_back(vA);
+ }
+ if (instruction_->HasVRegB()) {
+ int vB = instruction_->VRegB();
+ uses.push_back(vB);
+ }
+ if (instruction_->HasVRegC()) {
+ int vC = instruction_->VRegC();
+ uses.push_back(vC);
+ }
+ return uses;
+}
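To make the definition/use split concrete, consider a hypothetical three-register instruction such as add-int vA, vB, vC with vA=0, vB=1, vC=2 (register numbers invented for illustration):

    // node wraps 'add-int v0, v1, v2':
    //   node->GetResultRegister()  == 0        (vA is written, so it is a definition)
    //   node->GetDefinitions()     == {0}
    //   node->GetUses()            == {1, 2}   (vB and vC, in operand order; vA is
    //                                           excluded because the instruction defines it)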
+} // namespace sea_ir
diff --git a/compiler/sea_ir/ir/sea.h b/compiler/sea_ir/ir/sea.h
new file mode 100644
index 0000000..958fc32
--- /dev/null
+++ b/compiler/sea_ir/ir/sea.h
@@ -0,0 +1,348 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef ART_COMPILER_SEA_IR_IR_SEA_H_
+#define ART_COMPILER_SEA_IR_IR_SEA_H_
+
+#include <set>
+#include <map>
+
+#include "dex_file.h"
+#include "dex_instruction.h"
+#include "sea_ir/ir/instruction_tools.h"
+#include "sea_ir/ir/instruction_nodes.h"
+#include "utils/scoped_hashtable.h"
+
+namespace sea_ir {
+
+// Reverse post-order numbering constants
+enum RegionNumbering {
+ NOT_VISITED = -1,
+ VISITING = -2
+};
+
+class TypeInference;
+
+class Region;
+class InstructionNode;
+class PhiInstructionNode;
+class SignatureNode;
+
+// A SignatureNode is a declaration of one parameter in the function signature.
+// It provides a place-holder definition that instructions can return from their
+// GetSSAUses() calls, so that uses of parameters do not end up with missing SSA edges.
+class SignatureNode: public InstructionNode {
+ public:
+ // Creates a new signature node representing the initial definition of the
+ // register @parameter_register which is the @position-th argument to the method.
+ explicit SignatureNode(unsigned int parameter_register, unsigned int position):
+ InstructionNode(NULL), parameter_register_(parameter_register), position_(position) { }
+
+ int GetResultRegister() const {
+ return parameter_register_;
+ }
+
+ unsigned int GetPositionInSignature() const {
+ return position_;
+ }
+
+ std::vector<int> GetUses() const {
+ return std::vector<int>();
+ }
+
+ void Accept(IRVisitor* v) {
+ v->Visit(this);
+ v->Traverse(this);
+ }
+
+ private:
+ const unsigned int parameter_register_;
+ const unsigned int position_; // The position of this parameter node
+ // in the function parameter list.
+};
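As an illustration, a two-argument method would get one SignatureNode per incoming argument; the register numbers below are assumed for the example rather than derived from the real Dex frame layout:

    // Place-holder definitions for parameters hypothetically held in v4 and v5.
    sea_ir::SignatureNode* param0 = new sea_ir::SignatureNode(4, 0);
    sea_ir::SignatureNode* param1 = new sea_ir::SignatureNode(5, 1);
    // param0->GetResultRegister() == 4, param0->GetPositionInSignature() == 0.
    // Uses of v4 and v5 can now be renamed to these nodes instead of lacking an SSA producer.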
+
+class PhiInstructionNode: public InstructionNode {
+ public:
+ explicit PhiInstructionNode(int register_no):
+ InstructionNode(NULL), register_no_(register_no), definition_edges_() {}
+ // Returns the number of the register that this phi-function operates on.
+ int GetRegisterNumber() const {
+ return register_no_;
+ }
+
+ // Renames the use of @reg_no to refer to the instruction @definition.
+ // Phi-functions differ from normal instructions in that they have
+ // multiple predecessor regions; this is why RenameToSSA takes the
+ // additional parameter @predecessor_id, identifying the incoming edge
+ // along which @definition reaches this phi, essentially creating SSA form.
+ void RenameToSSA(int reg_no, InstructionNode* definition, unsigned int predecessor_id) {
+ DCHECK(NULL != definition) << "Tried to rename to SSA using a NULL definition for "
+ << StringId() << " register " << reg_no;
+ if (definition_edges_.size() < predecessor_id+1) {
+ definition_edges_.resize(predecessor_id+1, NULL);
+ }
+ if (NULL == definition_edges_.at(predecessor_id)) {
+ definition_edges_[predecessor_id] = new std::vector<InstructionNode*>();
+ }
+ definition_edges_[predecessor_id]->push_back(definition);
+ definition->AddSSAUse(this);
+ }
+
+ // Returns the ordered set of Instructions that define the input operands of this instruction.
+ // Precondition: SeaGraph.ConvertToSSA().
+ std::vector<InstructionNode*> GetSSAProducers() {
+ std::vector<InstructionNode*> producers;
+ for (std::vector<std::vector<InstructionNode*>*>::const_iterator
+ cit = definition_edges_.begin(); cit != definition_edges_.end(); cit++) {
+ producers.insert(producers.end(), (*cit)->begin(), (*cit)->end());
+ }
+ return producers;
+ }
+
+ // Returns the instruction that defines the phi register from the predecessor
+ // at position @predecessor_pos. Note that the return type is vector<> only
+ // for consistency with the return value of GetSSAUses() on regular instructions;
+ // the returned vector should always have a single element because the IR is SSA.
+ std::vector<InstructionNode*>* GetSSAUses(int predecessor_pos) {
+ return definition_edges_.at(predecessor_pos);
+ }
+
+ void Accept(IRVisitor* v) {
+ v->Visit(this);
+ v->Traverse(this);
+ }
+
+ private:
+ int register_no_;
+ // This vector has one entry per predecessor; each entry holds a single
+ // element, the instruction that defines the register corresponding
+ // to this phi-function.
+ std::vector<std::vector<InstructionNode*>*> definition_edges_;
+};
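To make the shape of definition_edges_ concrete: at a join point with two predecessors that both define (hypothetical) register v1, renaming records one incoming definition per predecessor position. In the sketch below, def_from_pred0 and def_from_pred1 stand in for the InstructionNode* definitions reaching the join:

    sea_ir::PhiInstructionNode* phi = new sea_ir::PhiInstructionNode(1);
    phi->RenameToSSA(1, def_from_pred0, 0);  // definition of v1 arriving along edge 0
    phi->RenameToSSA(1, def_from_pred1, 1);  // definition of v1 arriving along edge 1
    // GetSSAUses(0) now holds only def_from_pred0; GetSSAUses(1) only def_from_pred1.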
+
+// This class corresponds to a basic block in traditional compiler IRs.
+// The dataflow analysis relies on this class both during execution and
+// for storing its results.
+class Region : public SeaNode {
+ public:
+ explicit Region():
+ SeaNode(), successors_(), predecessors_(), reaching_defs_size_(0),
+ rpo_number_(NOT_VISITED), idom_(NULL), idominated_set_(), df_(), phi_set_() {
+ string_id_ = "cluster_" + string_id_;
+ }
+ // Adds @instruction as an instruction node child in the current region.
+ void AddChild(sea_ir::InstructionNode* instruction);
+ // Returns the last instruction node child of the current region.
+ // This child has the CFG successors pointing to the new regions.
+ SeaNode* GetLastChild() const;
+ // Returns all the child instructions of this region, in program order.
+ std::vector<InstructionNode*>* GetInstructions() {
+ return &instructions_;
+ }
+
+ // Computes Downward Exposed Definitions for the current node.
+ void ComputeDownExposedDefs();
+ const std::map<int, sea_ir::InstructionNode*>* GetDownExposedDefs() const;
+ // Performs one iteration of the reaching definitions algorithm
+ // and returns true if the reaching definitions set changed.
+ bool UpdateReachingDefs();
+ // Returns the set of reaching definitions for the current region.
+ std::map<int, std::set<sea_ir::InstructionNode*>* >* GetReachingDefs();
+
+ void SetRPO(int rpo) {
+ rpo_number_ = rpo;
+ }
+
+ int GetRPO() {
+ return rpo_number_;
+ }
+
+ void SetIDominator(Region* dom) {
+ idom_ = dom;
+ }
+
+ Region* GetIDominator() const {
+ return idom_;
+ }
+
+ void AddToIDominatedSet(Region* dominated) {
+ idominated_set_.insert(dominated);
+ }
+
+ const std::set<Region*>* GetIDominatedSet() {
+ return &idominated_set_;
+ }
+ // Adds @df_reg to the dominance frontier of the current region.
+ void AddToDominanceFrontier(Region* df_reg) {
+ df_.insert(df_reg);
+ }
+ // Returns the dominance frontier of the current region.
+ // Preconditions: SeaGraph.ComputeDominanceFrontier()
+ std::set<Region*>* GetDominanceFrontier() {
+ return &df_;
+ }
+ // Returns true if the region contains a phi function for @reg_no.
+ bool ContainsPhiFor(int reg_no) {
+ return (phi_set_.end() != phi_set_.find(reg_no));
+ }
+ // Returns the phi-functions from the region.
+ std::vector<PhiInstructionNode*>* GetPhiNodes() {
+ return &phi_instructions_;
+ }
+ // Adds a phi-function for @reg_no to this region.
+ // Note: The insertion order does not matter, as phi-functions
+ // are conceptually executed at the same time.
+ bool InsertPhiFor(int reg_no);
+ // Sets the phi-function uses to be as defined in @scoped_table for predecessor @predecessor.
+ void SetPhiDefinitionsForUses(const utils::ScopedHashtable<int, InstructionNode*>* scoped_table,
+ Region* predecessor);
+
+ void Accept(IRVisitor* v) {
+ v->Visit(this);
+ v->Traverse(this);
+ }
+
+ void AddSuccessor(Region* successor) {
+ DCHECK(successor) << "Tried to add NULL successor to SEA node.";
+ successors_.push_back(successor);
+ return;
+ }
+ void AddPredecessor(Region* predecessor) {
+ DCHECK(predecessor) << "Tried to add NULL predecessor to SEA node.";
+ predecessors_.push_back(predecessor);
+ }
+
+ std::vector<sea_ir::Region*>* GetSuccessors() {
+ return &successors_;
+ }
+ std::vector<sea_ir::Region*>* GetPredecessors() {
+ return &predecessors_;
+ }
+
+ private:
+ std::vector<sea_ir::Region*> successors_; // CFG successor nodes (regions)
+ std::vector<sea_ir::Region*> predecessors_; // CFG predecessor nodes (regions)
+ std::vector<sea_ir::InstructionNode*> instructions_;
+ std::map<int, sea_ir::InstructionNode*> de_defs_;
+ std::map<int, std::set<sea_ir::InstructionNode*>* > reaching_defs_;
+ int reaching_defs_size_;
+ int rpo_number_; // reverse postorder number of the region
+ // Immediate dominator node.
+ Region* idom_;
+ // The set of nodes immediately dominated by the region.
+ std::set<Region*> idominated_set_;
+ // Records the dominance frontier.
+ std::set<Region*> df_;
+ // Records the set of register numbers that have phi nodes in this region.
+ std::set<int> phi_set_;
+ std::vector<PhiInstructionNode*> phi_instructions_;
+};
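The phi and dominance-frontier accessors above are the ingredients of the standard worklist-based phi-insertion step used in semi-pruned SSA construction. A minimal sketch, assuming a caller that already knows which regions write a given register; the helper name and the defining_regions set are illustrative, not the actual ConvertToSSA code:

    // Insert phi-functions for reg_no in the iterated dominance frontier of
    // every region that defines it.
    void InsertPhisForRegister(int reg_no, const std::set<sea_ir::Region*>& defining_regions) {
      std::vector<sea_ir::Region*> worklist(defining_regions.begin(), defining_regions.end());
      while (!worklist.empty()) {
        sea_ir::Region* region = worklist.back();
        worklist.pop_back();
        std::set<sea_ir::Region*>* df = region->GetDominanceFrontier();
        for (std::set<sea_ir::Region*>::iterator df_it = df->begin(); df_it != df->end(); df_it++) {
          // InsertPhiFor returns true only the first time a phi is added for reg_no,
          // so each region enters the worklist at most once per register.
          if ((*df_it)->InsertPhiFor(reg_no)) {
            worklist.push_back(*df_it);
          }
        }
      }
    }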
+
+// A SeaGraph instance corresponds to a source code function.
+// Its main purpose is to encapsulate the SEA IR representation of that function
+// and to act as a starting point for visitors (e.g. during code generation).
+class SeaGraph: IVisitable {
+ public:
+ static SeaGraph* GetCurrentGraph(const art::DexFile&);
+
+ void CompileMethod(const art::DexFile::CodeItem* code_item, uint32_t class_def_idx,
+ uint32_t method_idx, uint32_t method_access_flags, const art::DexFile& dex_file);
+ // Returns all regions corresponding to this SeaGraph.
+ std::vector<Region*>* GetRegions() {
+ return &regions_;
+ }
+ // Recursively computes the reverse postorder value for @crt_bb and successors.
+ static void ComputeRPO(Region* crt_bb, int& crt_rpo);
+ // Returns the "lowest common ancestor" of @i and @j in the dominator tree.
+ static Region* Intersect(Region* i, Region* j);
+ // Returns the vector of parameters of the function.
+ std::vector<SignatureNode*>* GetParameterNodes() {
+ return &parameters_;
+ }
+
+ const art::DexFile* GetDexFile() const {
+ return &dex_file_;
+ }
+
+ virtual void Accept(IRVisitor* visitor) {
+ visitor->Initialize(this);
+ visitor->Visit(this);
+ visitor->Traverse(this);
+ }
+
+ TypeInference* ti_;
+ uint32_t class_def_idx_;
+ uint32_t method_idx_;
+ uint32_t method_access_flags_;
+
+ private:
+ explicit SeaGraph(const art::DexFile& df);
+ // Registers @childReg as a region belonging to the SeaGraph instance.
+ void AddRegion(Region* childReg);
+ // Returns a new region and registers it with the SeaGraph instance.
+ Region* GetNewRegion();
+ // Adds a (formal) parameter node to the vector of parameters of the function.
+ void AddParameterNode(SignatureNode* parameterNode) {
+ parameters_.push_back(parameterNode);
+ }
+ // Adds a CFG edge from @src node to @dst node.
+ void AddEdge(Region* src, Region* dst) const;
+ // Builds the non-SSA sea-ir representation of the function @code_item from @dex_file
+ // with class id @class_def_idx and method id @method_idx.
+ void BuildMethodSeaGraph(const art::DexFile::CodeItem* code_item,
+ const art::DexFile& dex_file, uint32_t class_def_idx,
+ uint32_t method_idx, uint32_t method_access_flags);
+ // Computes immediate dominators for each region.
+ // Precondition: ComputeMethodSeaGraph()
+ void ComputeIDominators();
+ // Computes Downward Exposed Definitions for all regions in the graph.
+ void ComputeDownExposedDefs();
+ // Computes the reaching definitions set following the equations from
+ // Cooper & Torczon, "Engineering a Compiler", second edition, page 491.
+ // Precondition: ComputeDEDefs()
+ void ComputeReachingDefs();
+ // Computes the reverse-postorder numbering for the region nodes.
+ // Precondition: ComputeDEDefs()
+ void ComputeRPO();
+ // Computes the dominance frontier for all regions in the graph,
+ // following the algorithm from
+ // Cooper & Torczon, "Engineering a Compiler", second edition, page 499.
+ // Precondition: ComputeIDominators()
+ void ComputeDominanceFrontier();
+ // Converts the IR to semi-pruned SSA form.
+ void ConvertToSSA();
+ // Performs the renaming phase of the SSA transformation during ConvertToSSA() execution.
+ void RenameAsSSA();
+ // Identifies the definitions corresponding to uses for region @node
+ // by using the scoped hashtable of names @scoped_table.
+ void RenameAsSSA(Region* node, utils::ScopedHashtable<int, InstructionNode*>* scoped_table);
+
+ virtual ~SeaGraph() {}
+ // Generate LLVM IR for the method.
+ // Precondition: ConvertToSSA().
+ void GenerateLLVM();
+
+ static SeaGraph graph_;
+ std::vector<Region*> regions_;
+ std::vector<SignatureNode*> parameters_;
+ const art::DexFile& dex_file_;
+ const art::DexFile::CodeItem* code_item_;
+};
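Taken together, the preconditions noted on the private passes suggest an ordering inside CompileMethod; the sequence below is reconstructed purely from those comments and is an assumption, not a quote of the sea.cc implementation:

    // Hypothetical pass ordering implied by the stated preconditions:
    //   BuildMethodSeaGraph(...)      // build the non-SSA IR from the code item
    //   ComputeDownExposedDefs()      // per-region DE defs
    //   ComputeRPO()                  // reverse-postorder numbering (needs DE defs)
    //   ComputeReachingDefs()         // dataflow fixed point (needs DE defs)
    //   ComputeIDominators()          // immediate dominators
    //   ComputeDominanceFrontier()    // needs immediate dominators
    //   ConvertToSSA()                // phi insertion, then RenameAsSSA()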
+} // namespace sea_ir
+#endif // ART_COMPILER_SEA_IR_IR_SEA_H_
diff --git a/compiler/sea_ir/ir/sea_node.h b/compiler/sea_ir/ir/sea_node.h
new file mode 100644
index 0000000..4dab5cb
--- /dev/null
+++ b/compiler/sea_ir/ir/sea_node.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_SEA_IR_IR_SEA_NODE_H_
+#define ART_COMPILER_SEA_IR_IR_SEA_NODE_H_
+
+#include "base/stringprintf.h"
+
+namespace sea_ir {
+class Region;
+class IRVisitor;
+
+class IVisitable {
+ public:
+ virtual void Accept(IRVisitor* visitor) = 0;
+ virtual ~IVisitable() {}
+};
+
+// This abstract class provides the essential services that
+// we want each SEA IR element to have.
+// At the moment, these are:
+// - an id and corresponding string representation.
+// - a .dot graph language representation for .dot output.
+//
+// Note that SEA IR nodes can also be Regions or Projects,
+// which are not instructions.
+class SeaNode: public IVisitable {
+ public:
+ explicit SeaNode():id_(GetNewId()), string_id_() {
+ string_id_ = art::StringPrintf("%d", id_);
+ }
+
+ // Adds CFG predecessors and successors to each block.
+ void AddSuccessor(Region* successor);
+ void AddPredecessor(Region* predecessor);
+
+ // Returns the id of the current block as a string.
+ const std::string& StringId() const {
+ return string_id_;
+ }
+ // Returns the id of this node as int. The id is supposed to be unique among
+ // all instances of all subclasses of this class.
+ int Id() const {
+ return id_;
+ }
+
+ virtual ~SeaNode() { }
+
+ protected:
+ static int GetNewId() {
+ return current_max_node_id_++;
+ }
+
+ const int id_;
+ std::string string_id_;
+
+ private:
+ static int current_max_node_id_;
+ // Creating new instances of sea node objects should not be done through copy or assignment
+ // operators because that would lead to duplication of their unique ids.
+ DISALLOW_COPY_AND_ASSIGN(SeaNode);
+};
+} // namespace sea_ir
+#endif // ART_COMPILER_SEA_IR_IR_SEA_NODE_H_
diff --git a/compiler/sea_ir/ir/visitor.h b/compiler/sea_ir/ir/visitor.h
new file mode 100644
index 0000000..cc7b5d1
--- /dev/null
+++ b/compiler/sea_ir/ir/visitor.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_SEA_IR_IR_VISITOR_H_
+#define ART_COMPILER_SEA_IR_IR_VISITOR_H_
+
+#include <vector>
+
+namespace sea_ir {
+
+class SeaGraph;
+class Region;
+class InstructionNode;
+class PhiInstructionNode;
+class SignatureNode;
+class UnnamedConstInstructionNode;
+class ConstInstructionNode;
+class ReturnInstructionNode;
+class IfNeInstructionNode;
+class AddIntLit8InstructionNode;
+class MoveResultInstructionNode;
+class InvokeStaticInstructionNode;
+class AddIntInstructionNode;
+class AddIntLitInstructionNode;
+class GotoInstructionNode;
+class IfEqzInstructionNode;
+
+class IRVisitor {
+ public:
+ explicit IRVisitor(): ordered_regions_() { }
+ virtual void Initialize(SeaGraph* graph) = 0;
+ virtual void Visit(SeaGraph* graph) = 0;
+ virtual void Visit(Region* region) = 0;
+ virtual void Visit(PhiInstructionNode* phi) = 0;
+ virtual void Visit(SignatureNode* signature) = 0;
+
+ virtual void Visit(InstructionNode* instruction) = 0;
+ virtual void Visit(ConstInstructionNode* instruction) = 0;
+ virtual void Visit(UnnamedConstInstructionNode* instruction) = 0;
+ virtual void Visit(ReturnInstructionNode* instruction) = 0;
+ virtual void Visit(IfNeInstructionNode* instruction) = 0;
+ virtual void Visit(MoveResultInstructionNode* instruction) = 0;
+ virtual void Visit(InvokeStaticInstructionNode* instruction) = 0;
+ virtual void Visit(AddIntInstructionNode* instruction) = 0;
+ virtual void Visit(GotoInstructionNode* instruction) = 0;
+ virtual void Visit(IfEqzInstructionNode* instruction) = 0;
+
+ // Note: This flavor of visitor separates the traversal functions from the actual visiting part
+ // so that the Visitor subclasses don't duplicate code and can't get the traversal wrong.
+ // The disadvantage is the increased number of functions (and calls).
+ virtual void Traverse(SeaGraph* graph);
+ virtual void Traverse(Region* region);
+ // The following functions are meant to be empty and not pure virtual,
+ // because the parameter classes have no children to traverse.
+ virtual void Traverse(InstructionNode* instruction) { }
+ virtual void Traverse(ConstInstructionNode* instruction) { }
+ virtual void Traverse(ReturnInstructionNode* instruction) { }
+ virtual void Traverse(IfNeInstructionNode* instruction) { }
+ virtual void Traverse(AddIntLit8InstructionNode* instruction) { }
+ virtual void Traverse(MoveResultInstructionNode* instruction) { }
+ virtual void Traverse(InvokeStaticInstructionNode* instruction) { }
+ virtual void Traverse(AddIntInstructionNode* instruction) { }
+ virtual void Traverse(GotoInstructionNode* instruction) { }
+ virtual void Traverse(IfEqzInstructionNode* instruction) { }
+ virtual void Traverse(PhiInstructionNode* phi) { }
+ virtual void Traverse(SignatureNode* sig) { }
+ virtual ~IRVisitor() { }
+
+ protected:
+ std::vector<Region*> ordered_regions_;
+};
+} // namespace sea_ir
+#endif // ART_COMPILER_SEA_IR_IR_VISITOR_H_
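To illustrate the Visit/Traverse split, a hypothetical visitor that only counts nodes can override the pure-virtual Visit methods and inherit all traversal logic unchanged (this class is an example, not part of the patch):

    // Counts every node the default traversal reaches.
    class NodeCounterVisitor : public sea_ir::IRVisitor {
     public:
      NodeCounterVisitor() : count_(0) { }
      virtual void Initialize(sea_ir::SeaGraph* graph) { }
      virtual void Visit(sea_ir::SeaGraph* graph) { }
      virtual void Visit(sea_ir::Region* region) { }
      virtual void Visit(sea_ir::PhiInstructionNode* phi) { count_++; }
      virtual void Visit(sea_ir::SignatureNode* signature) { count_++; }
      virtual void Visit(sea_ir::InstructionNode* instruction) { count_++; }
      virtual void Visit(sea_ir::ConstInstructionNode* instruction) { count_++; }
      virtual void Visit(sea_ir::UnnamedConstInstructionNode* instruction) { count_++; }
      virtual void Visit(sea_ir::ReturnInstructionNode* instruction) { count_++; }
      virtual void Visit(sea_ir::IfNeInstructionNode* instruction) { count_++; }
      virtual void Visit(sea_ir::MoveResultInstructionNode* instruction) { count_++; }
      virtual void Visit(sea_ir::InvokeStaticInstructionNode* instruction) { count_++; }
      virtual void Visit(sea_ir::AddIntInstructionNode* instruction) { count_++; }
      virtual void Visit(sea_ir::GotoInstructionNode* instruction) { count_++; }
      virtual void Visit(sea_ir::IfEqzInstructionNode* instruction) { count_++; }
      int GetCount() const { return count_; }
     private:
      int count_;
    };
    // Usage sketch: NodeCounterVisitor counter; graph->Accept(&counter); counter.GetCount();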