Merge "AArch64: Remove unnecessary work around for sp."
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index 346fbb8..dcc67c3 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -467,8 +467,13 @@
kIsQuinOp,
kIsSextupleOp,
kIsIT,
+ kIsMoveOp,
kMemLoad,
kMemStore,
+ kMemVolatile,
+ kMemScaledx0,
+ kMemScaledx2,
+ kMemScaledx4,
kPCRelFixup, // x86 FIXME: add NEEDS_FIXUP to instruction attributes.
kRegDef0,
kRegDef1,
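
For reference: these enum entries only reserve attribute bit positions. The composite masks used throughout the encoding maps below (IS_MOVE, IS_VOLATILE, IS_LOAD_OFF, IS_LOADX, and friends) are presumably built from them in mir_to_lir.h roughly as follows (a sketch of the assumed definitions; they are not part of this hunk):

    #define IS_MOVE          (UINT64_C(1) << kIsMoveOp)
    #define IS_VOLATILE      (UINT64_C(1) << kMemVolatile)
    #define SCALED_OFFSET_X0 (UINT64_C(1) << kMemScaledx0)
    #define SCALED_OFFSET_X2 (UINT64_C(1) << kMemScaledx2)
    #define SCALED_OFFSET_X4 (UINT64_C(1) << kMemScaledx4)
    // Plain loads/stores with an immediate offset, scaled by 1/2/4:
    #define IS_LOAD_OFF   (IS_LOAD | SCALED_OFFSET_X0)
    #define IS_LOAD_OFF2  (IS_LOAD | SCALED_OFFSET_X2)
    #define IS_LOAD_OFF4  (IS_LOAD | SCALED_OFFSET_X4)
    #define IS_STORE_OFF  (IS_STORE | SCALED_OFFSET_X0)
    #define IS_STORE_OFF2 (IS_STORE | SCALED_OFFSET_X2)
    #define IS_STORE_OFF4 (IS_STORE | SCALED_OFFSET_X4)
    // Exclusive/volatile accesses, which the load/store eliminator must skip:
    #define IS_LOADX      (IS_LOAD | IS_VOLATILE)
    #define IS_STOREX     (IS_STORE | IS_VOLATILE)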
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index 11dd182..4f8c1d4 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -42,7 +42,7 @@
/* Default optimizer/debug setting for the compiler. */
static uint32_t kCompilerOptimizerDisableFlags = 0 | // Disable specific optimizations
- (1 << kLoadStoreElimination) | // TODO: this pass has been broken for awhile - fix or delete.
+ // (1 << kLoadStoreElimination) |
// (1 << kLoadHoisting) |
// (1 << kSuppressLoads) |
// (1 << kNullCheckElimination) |
@@ -96,12 +96,12 @@
~0U,
// 1 = kArm, unused (will use kThumb2).
~0U,
- // 2 = kArm64. TODO(Arm64): enable optimizations once backend is mature enough.
- (1 << kLoadStoreElimination) |
+ // 2 = kArm64.
0,
// 3 = kThumb2.
0,
// 4 = kX86.
+ (1 << kLoadStoreElimination) |
0,
// 5 = kX86_64.
(1 << kLoadStoreElimination) |
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index 5083bbc..35c3597 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -217,7 +217,7 @@
"ldmia", "!0C!!, <!1R>", 2, kFixupNone),
ENCODING_MAP(kThumbLdrRRI5, 0x6800,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF4,
"ldr", "!0C, [!1C, #!2E]", 2, kFixupNone),
ENCODING_MAP(kThumbLdrRRR, 0x5800,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
@@ -226,14 +226,14 @@
ENCODING_MAP(kThumbLdrPcRel, 0x4800,
kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0 | REG_USE_PC
- | IS_LOAD | NEEDS_FIXUP, "ldr", "!0C, [pc, #!1E]", 2, kFixupLoad),
+ | IS_LOAD_OFF4 | NEEDS_FIXUP, "ldr", "!0C, [pc, #!1E]", 2, kFixupLoad),
ENCODING_MAP(kThumbLdrSpRel, 0x9800,
kFmtBitBlt, 10, 8, kFmtSkip, -1, -1, kFmtBitBlt, 7, 0,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0 | REG_USE_SP
- | IS_LOAD, "ldr", "!0C, [sp, #!2E]", 2, kFixupNone),
+ | IS_LOAD_OFF4, "ldr", "!0C, [sp, #!2E]", 2, kFixupNone),
ENCODING_MAP(kThumbLdrbRRI5, 0x7800,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
"ldrb", "!0C, [!1C, #2d]", 2, kFixupNone),
ENCODING_MAP(kThumbLdrbRRR, 0x5c00,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
@@ -241,7 +241,7 @@
"ldrb", "!0C, [!1C, !2C]", 2, kFixupNone),
ENCODING_MAP(kThumbLdrhRRI5, 0x8800,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF2,
"ldrh", "!0C, [!1C, #!2F]", 2, kFixupNone),
ENCODING_MAP(kThumbLdrhRRR, 0x5a00,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
@@ -283,19 +283,19 @@
ENCODING_MAP(kThumbMovRR, 0x1c00,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
kFmtUnused, -1, -1,
- IS_BINARY_OP | REG_DEF0_USE1 | SETS_CCODES,
+ IS_BINARY_OP | REG_DEF0_USE1 | SETS_CCODES | IS_MOVE,
"movs", "!0C, !1C", 2, kFixupNone),
ENCODING_MAP(kThumbMovRR_H2H, 0x46c0,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
"mov", "!0C, !1C", 2, kFixupNone),
ENCODING_MAP(kThumbMovRR_H2L, 0x4640,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
"mov", "!0C, !1C", 2, kFixupNone),
ENCODING_MAP(kThumbMovRR_L2H, 0x4680,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
"mov", "!0C, !1C", 2, kFixupNone),
ENCODING_MAP(kThumbMul, 0x4340,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
@@ -354,7 +354,7 @@
"stmia", "!0C!!, <!1R>", 2, kFixupNone),
ENCODING_MAP(kThumbStrRRI5, 0x6000,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF4,
"str", "!0C, [!1C, #!2E]", 2, kFixupNone),
ENCODING_MAP(kThumbStrRRR, 0x5000,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
@@ -363,10 +363,10 @@
ENCODING_MAP(kThumbStrSpRel, 0x9000,
kFmtBitBlt, 10, 8, kFmtSkip, -1, -1, kFmtBitBlt, 7, 0,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE0 | REG_USE_SP
- | IS_STORE, "str", "!0C, [sp, #!2E]", 2, kFixupNone),
+ | IS_STORE_OFF4, "str", "!0C, [sp, #!2E]", 2, kFixupNone),
ENCODING_MAP(kThumbStrbRRI5, 0x7000,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
"strb", "!0C, [!1C, #!2d]", 2, kFixupNone),
ENCODING_MAP(kThumbStrbRRR, 0x5400,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
@@ -374,7 +374,7 @@
"strb", "!0C, [!1C, !2C]", 2, kFixupNone),
ENCODING_MAP(kThumbStrhRRI5, 0x8000,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 10, 6,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF2,
"strh", "!0C, [!1C, #!2F]", 2, kFixupNone),
ENCODING_MAP(kThumbStrhRRR, 0x5200,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtBitBlt, 8, 6,
@@ -423,11 +423,11 @@
*/
ENCODING_MAP(kThumb2Vldrs, 0xed900a00,
kFmtSfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD |
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF4 |
REG_DEF_LR | NEEDS_FIXUP, "vldr", "!0s, [!1C, #!2E]", 4, kFixupVLoad),
ENCODING_MAP(kThumb2Vldrd, 0xed900b00,
kFmtDfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD |
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF |
REG_DEF_LR | NEEDS_FIXUP, "vldr", "!0S, [!1C, #!2E]", 4, kFixupVLoad),
ENCODING_MAP(kThumb2Vmuls, 0xee200a00,
kFmtSfp, 22, 12, kFmtSfp, 7, 16, kFmtSfp, 5, 0,
@@ -440,11 +440,11 @@
"vmuld", "!0S, !1S, !2S", 4, kFixupNone),
ENCODING_MAP(kThumb2Vstrs, 0xed800a00,
kFmtSfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF4,
"vstr", "!0s, [!1C, #!2E]", 4, kFixupNone),
ENCODING_MAP(kThumb2Vstrd, 0xed800b00,
kFmtDfp, 22, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF4,
"vstr", "!0S, [!1C, #!2E]", 4, kFixupNone),
ENCODING_MAP(kThumb2Vsubs, 0xee300a40,
kFmtSfp, 22, 12, kFmtSfp, 7, 16, kFmtSfp, 5, 0,
@@ -520,19 +520,19 @@
"mov", "!0C, #!1M", 4, kFixupNone),
ENCODING_MAP(kThumb2StrRRI12, 0xf8c00000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
"str", "!0C, [!1C, #!2d]", 4, kFixupNone),
ENCODING_MAP(kThumb2LdrRRI12, 0xf8d00000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
"ldr", "!0C, [!1C, #!2d]", 4, kFixupNone),
ENCODING_MAP(kThumb2StrRRI8Predec, 0xf8400c00,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 8, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
"str", "!0C, [!1C, #-!2d]", 4, kFixupNone),
ENCODING_MAP(kThumb2LdrRRI8Predec, 0xf8500c00,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 8, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
"ldr", "!0C, [!1C, #-!2d]", 4, kFixupNone),
ENCODING_MAP(kThumb2Cbnz, 0xb900, /* Note: does not affect flags */
kFmtBitBlt, 2, 0, kFmtImm6, -1, -1, kFmtUnused, -1, -1,
@@ -549,15 +549,15 @@
"add", "!0C,!1C,#!2d", 4, kFixupNone),
ENCODING_MAP(kThumb2MovRR, 0xea4f0000, /* no setflags encoding */
kFmtBitBlt, 11, 8, kFmtBitBlt, 3, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
"mov", "!0C, !1C", 4, kFixupNone),
ENCODING_MAP(kThumb2Vmovs, 0xeeb00a40,
kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
"vmov.f32 ", " !0s, !1s", 4, kFixupNone),
ENCODING_MAP(kThumb2Vmovd, 0xeeb00b40,
kFmtDfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
"vmov.f64 ", " !0S, !1S", 4, kFixupNone),
ENCODING_MAP(kThumb2Ldmia, 0xe8900000,
kFmtBitBlt, 19, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
@@ -613,59 +613,59 @@
"sbfx", "!0C, !1C, #!2d, #!3d", 4, kFixupNone),
ENCODING_MAP(kThumb2LdrRRR, 0xf8500000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
- kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+ kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
"ldr", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
ENCODING_MAP(kThumb2LdrhRRR, 0xf8300000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
- kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+ kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
"ldrh", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
ENCODING_MAP(kThumb2LdrshRRR, 0xf9300000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
- kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+ kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
"ldrsh", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
ENCODING_MAP(kThumb2LdrbRRR, 0xf8100000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
- kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+ kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
"ldrb", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
ENCODING_MAP(kThumb2LdrsbRRR, 0xf9100000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
- kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+ kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
"ldrsb", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
ENCODING_MAP(kThumb2StrRRR, 0xf8400000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
- kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE,
+ kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE_OFF,
"str", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
ENCODING_MAP(kThumb2StrhRRR, 0xf8200000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
- kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE,
+ kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE_OFF,
"strh", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
ENCODING_MAP(kThumb2StrbRRR, 0xf8000000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
- kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE,
+ kFmtBitBlt, 5, 4, IS_QUAD_OP | REG_USE012 | IS_STORE_OFF,
"strb", "!0C, [!1C, !2C, LSL #!3d]", 4, kFixupNone),
ENCODING_MAP(kThumb2LdrhRRI12, 0xf8b00000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
"ldrh", "!0C, [!1C, #!2d]", 4, kFixupNone),
ENCODING_MAP(kThumb2LdrshRRI12, 0xf9b00000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
"ldrsh", "!0C, [!1C, #!2d]", 4, kFixupNone),
ENCODING_MAP(kThumb2LdrbRRI12, 0xf8900000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
"ldrb", "!0C, [!1C, #!2d]", 4, kFixupNone),
ENCODING_MAP(kThumb2LdrsbRRI12, 0xf9900000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
"ldrsb", "!0C, [!1C, #!2d]", 4, kFixupNone),
ENCODING_MAP(kThumb2StrhRRI12, 0xf8a00000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
"strh", "!0C, [!1C, #!2d]", 4, kFixupNone),
ENCODING_MAP(kThumb2StrbRRI12, 0xf8800000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 11, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
"strb", "!0C, [!1C, #!2d]", 4, kFixupNone),
ENCODING_MAP(kThumb2Pop, 0xe8bd0000,
kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
@@ -841,7 +841,7 @@
ENCODING_MAP(kThumb2LdrPcRel12, 0xf8df0000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 0, kFmtUnused, -1, -1,
kFmtUnused, -1, -1,
- IS_TERTIARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD | NEEDS_FIXUP,
+ IS_TERTIARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD_OFF | NEEDS_FIXUP,
"ldr", "!0C, [r15pc, #!1d]", 4, kFixupLoad),
ENCODING_MAP(kThumb2BCond, 0xf0008000,
kFmtBrOffset, -1, -1, kFmtBitBlt, 25, 22, kFmtUnused, -1, -1,
@@ -899,19 +899,19 @@
"umull", "!0C, !1C, !2C, !3C", 4, kFixupNone),
ENCODING_MAP(kThumb2Ldrex, 0xe8500f00,
kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16, kFmtBitBlt, 7, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOADX,
"ldrex", "!0C, [!1C, #!2E]", 4, kFixupNone),
ENCODING_MAP(kThumb2Ldrexd, 0xe8d0007f,
kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF01_USE2 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF01_USE2 | IS_LOADX,
"ldrexd", "!0C, !1C, [!2C]", 4, kFixupNone),
ENCODING_MAP(kThumb2Strex, 0xe8400000,
kFmtBitBlt, 11, 8, kFmtBitBlt, 15, 12, kFmtBitBlt, 19, 16,
- kFmtBitBlt, 7, 0, IS_QUAD_OP | REG_DEF0_USE12 | IS_STORE,
+ kFmtBitBlt, 7, 0, IS_QUAD_OP | REG_DEF0_USE12 | IS_STOREX,
"strex", "!0C, !1C, [!2C, #!2E]", 4, kFixupNone),
ENCODING_MAP(kThumb2Strexd, 0xe8c00070,
kFmtBitBlt, 3, 0, kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8,
- kFmtBitBlt, 19, 16, IS_QUAD_OP | REG_DEF0_USE123 | IS_STORE,
+ kFmtBitBlt, 19, 16, IS_QUAD_OP | REG_DEF0_USE123 | IS_STOREX,
"strexd", "!0C, !1C, !2C, [!3C]", 4, kFixupNone),
ENCODING_MAP(kThumb2Clrex, 0xf3bf8f2f,
kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
@@ -927,12 +927,12 @@
"bfc", "!0C,#!1d,#!2d", 4, kFixupNone),
ENCODING_MAP(kThumb2Dmb, 0xf3bf8f50,
kFmtBitBlt, 3, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_VOLATILE,
"dmb", "#!0B", 4, kFixupNone),
ENCODING_MAP(kThumb2LdrPcReln12, 0xf85f0000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 0, kFmtUnused, -1, -1,
kFmtUnused, -1, -1,
- IS_BINARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD,
+ IS_BINARY_OP | REG_DEF0 | REG_USE_PC | IS_LOAD_OFF,
"ldr", "!0C, [r15pc, -#!1d]", 4, kFixupNone),
ENCODING_MAP(kThumb2Stm, 0xe9000000,
kFmtBitBlt, 19, 16, kFmtBitBlt, 12, 0, kFmtUnused, -1, -1,
@@ -1023,17 +1023,17 @@
ENCODING_MAP(kThumb2LdrdPcRel8, 0xe9df0000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 7, 0,
kFmtUnused, -1, -1,
- IS_TERTIARY_OP | REG_DEF0 | REG_DEF1 | REG_USE_PC | IS_LOAD | NEEDS_FIXUP,
+ IS_TERTIARY_OP | REG_DEF0 | REG_DEF1 | REG_USE_PC | IS_LOAD_OFF4 | NEEDS_FIXUP,
"ldrd", "!0C, !1C, [pc, #!2E]", 4, kFixupLoad),
ENCODING_MAP(kThumb2LdrdI8, 0xe9d00000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16,
kFmtBitBlt, 7, 0,
- IS_QUAD_OP | REG_DEF0 | REG_DEF1 | REG_USE2 | IS_LOAD,
+ IS_QUAD_OP | REG_DEF0 | REG_DEF1 | REG_USE2 | IS_LOAD_OFF4,
"ldrd", "!0C, !1C, [!2C, #!3E]", 4, kFixupNone),
ENCODING_MAP(kThumb2StrdI8, 0xe9c00000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16,
kFmtBitBlt, 7, 0,
- IS_QUAD_OP | REG_USE0 | REG_USE1 | REG_USE2 | IS_STORE,
+ IS_QUAD_OP | REG_USE0 | REG_USE1 | REG_USE2 | IS_STORE_OFF4,
"strd", "!0C, !1C, [!2C, #!3E]", 4, kFixupNone),
};
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 9652192..e0b8ec6 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -198,6 +198,7 @@
}
LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
+ size_t GetInstructionOffset(LIR* lir);
private:
void GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1, int64_t val,
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index cf21da7..bba1a8c 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -1169,4 +1169,17 @@
return OpReg(op, r_tgt);
}
+size_t ArmMir2Lir::GetInstructionOffset(LIR* lir) {
+ uint64_t check_flags = GetTargetInstFlags(lir->opcode);
+ DCHECK((check_flags & IS_LOAD) || (check_flags & IS_STORE));
+ size_t offset = (check_flags & IS_TERTIARY_OP) ? lir->operands[2] : 0;
+
+ if (check_flags & SCALED_OFFSET_X2) {
+ offset = offset * 2;
+ } else if (check_flags & SCALED_OFFSET_X4) {
+ offset = offset * 4;
+ }
+ return offset;
+}
+
} // namespace art
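
A worked example of the new ArmMir2Lir::GetInstructionOffset (register choices are illustrative): Thumb imm5 offsets are stored unscaled in the LIR, and the SCALED_OFFSET_X2/X4 attribute on the encoding records the access-size scaling, so the helper can recover the byte offset:

    // "ldr r0, [r1, #12]": kThumbLdrRRI5 carries IS_LOAD_OFF4 and the raw
    // imm5 value 3 in operands[2].
    LIR* lir = NewLIR3(kThumbLdrRRI5, rs_r0.GetReg(), rs_r1.GetReg(), 3);
    DCHECK_EQ(GetInstructionOffset(lir), 12u);  // 3, scaled by 4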
diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h
index 3a8ea3f..90cb156 100644
--- a/compiler/dex/quick/arm64/arm64_lir.h
+++ b/compiler/dex/quick/arm64/arm64_lir.h
@@ -331,6 +331,7 @@
kA64Stp4ffXD, // stp [0s10110100] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
kA64Stp4rrXD, // stp [s010100100] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
kA64StpPost4rrXD, // stp [s010100010] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
+ kA64StpPre4ffXD, // stp [0s10110110] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
kA64StpPre4rrXD, // stp [s010100110] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
kA64Str3fXD, // str [1s11110100] imm_12[21-10] rn[9-5] rt[4-0].
kA64Str4fXxG, // str [1s111100001] rm[20-16] [011] S[12] [10] rn[9-5] rt[4-0].
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index 462be54..c46be53 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -214,7 +214,7 @@
"csneg", "!0r, !1r, !2r, !3c", kFixupNone),
ENCODING_MAP(kA64Dmb1B, NO_VARIANTS(0xd50330bf),
kFmtBitBlt, 11, 8, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_VOLATILE,
"dmb", "#!0B", kFixupNone),
ENCODING_MAP(WIDE(kA64Eor3Rrl), SF_VARIANTS(0x52000000),
kFmtRegROrSp, 4, 0, kFmtRegR, 9, 5, kFmtBitBlt, 22, 10,
@@ -274,7 +274,7 @@
"fmin", "!0f, !1f, !2f", kFixupNone),
ENCODING_MAP(FWIDE(kA64Fmov2ff), FLOAT_VARIANTS(0x1e204000),
kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
"fmov", "!0f, !1f", kFixupNone),
ENCODING_MAP(FWIDE(kA64Fmov2fI), FLOAT_VARIANTS(0x1e201000),
kFmtRegF, 4, 0, kFmtBitBlt, 20, 13, kFmtUnused, -1, -1,
@@ -318,7 +318,7 @@
"fsub", "!0f, !1f, !2f", kFixupNone),
ENCODING_MAP(kA64Ldrb3wXd, NO_VARIANTS(0x39400000),
kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
"ldrb", "!0w, [!1X, #!2d]", kFixupNone),
ENCODING_MAP(kA64Ldrb3wXx, NO_VARIANTS(0x38606800),
kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
@@ -326,7 +326,7 @@
"ldrb", "!0w, [!1X, !2x]", kFixupNone),
ENCODING_MAP(WIDE(kA64Ldrsb3rXd), CUSTOM_VARIANTS(0x39c00000, 0x39800000),
kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
"ldrsb", "!0r, [!1X, #!2d]", kFixupNone),
ENCODING_MAP(WIDE(kA64Ldrsb3rXx), CUSTOM_VARIANTS(0x38e06800, 0x38a06800),
kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
@@ -334,19 +334,19 @@
"ldrsb", "!0r, [!1X, !2x]", kFixupNone),
ENCODING_MAP(kA64Ldrh3wXF, NO_VARIANTS(0x79400000),
kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
"ldrh", "!0w, [!1X, #!2F]", kFixupNone),
ENCODING_MAP(kA64Ldrh4wXxd, NO_VARIANTS(0x78606800),
kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
- kFmtBitBlt, 12, 12, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+ kFmtBitBlt, 12, 12, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
"ldrh", "!0w, [!1X, !2x, lsl #!3d]", kFixupNone),
ENCODING_MAP(WIDE(kA64Ldrsh3rXF), CUSTOM_VARIANTS(0x79c00000, 0x79800000),
kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
"ldrsh", "!0r, [!1X, #!2F]", kFixupNone),
ENCODING_MAP(WIDE(kA64Ldrsh4rXxd), CUSTOM_VARIANTS(0x78e06800, 0x78906800),
kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
- kFmtBitBlt, 12, 12, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD,
+ kFmtBitBlt, 12, 12, IS_QUAD_OP | REG_DEF0_USE12 | IS_LOAD_OFF,
"ldrsh", "!0r, [!1X, !2x, lsl #!3d]", kFixupNone),
ENCODING_MAP(FWIDE(kA64Ldr2fp), SIZE_VARIANTS(0x1c000000),
kFmtRegF, 4, 0, kFmtBitBlt, 23, 5, kFmtUnused, -1, -1,
@@ -360,11 +360,11 @@
"ldr", "!0r, !1p", kFixupLoad),
ENCODING_MAP(FWIDE(kA64Ldr3fXD), SIZE_VARIANTS(0xbd400000),
kFmtRegF, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
"ldr", "!0f, [!1X, #!2D]", kFixupNone),
ENCODING_MAP(WIDE(kA64Ldr3rXD), SIZE_VARIANTS(0xb9400000),
kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | IS_LOAD_OFF,
"ldr", "!0r, [!1X, #!2D]", kFixupNone),
ENCODING_MAP(FWIDE(kA64Ldr4fXxG), SIZE_VARIANTS(0xbc606800),
kFmtRegF, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
@@ -380,11 +380,11 @@
"ldr", "!0r, [!1X], #!2d", kFixupNone),
ENCODING_MAP(WIDE(kA64Ldp4ffXD), CUSTOM_VARIANTS(0x2d400000, 0x6d400000),
kFmtRegF, 4, 0, kFmtRegF, 14, 10, kFmtRegXOrSp, 9, 5,
- kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE2 | REG_DEF01 | IS_LOAD,
+ kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE2 | REG_DEF01 | IS_LOAD_OFF,
"ldp", "!0f, !1f, [!2X, #!3D]", kFixupNone),
ENCODING_MAP(WIDE(kA64Ldp4rrXD), SF_VARIANTS(0x29400000),
kFmtRegR, 4, 0, kFmtRegR, 14, 10, kFmtRegXOrSp, 9, 5,
- kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE2 | REG_DEF01 | IS_LOAD,
+ kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE2 | REG_DEF01 | IS_LOAD_OFF,
"ldp", "!0r, !1r, [!2X, #!3D]", kFixupNone),
ENCODING_MAP(WIDE(kA64LdpPost4rrXD), CUSTOM_VARIANTS(0x28c00000, 0xa8c00000),
kFmtRegR, 4, 0, kFmtRegR, 14, 10, kFmtRegXOrSp, 9, 5,
@@ -400,11 +400,11 @@
"ldur", "!0r, [!1X, #!2d]", kFixupNone),
ENCODING_MAP(WIDE(kA64Ldxr2rX), SIZE_VARIANTS(0x885f7c00),
kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_LOADX,
"ldxr", "!0r, [!1X]", kFixupNone),
ENCODING_MAP(WIDE(kA64Ldaxr2rX), SIZE_VARIANTS(0x885ffc00),
kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_LOAD,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_LOADX,
"ldaxr", "!0r, [!1X]", kFixupNone),
ENCODING_MAP(WIDE(kA64Lsl3rrr), SF_VARIANTS(0x1ac02000),
kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
@@ -432,7 +432,7 @@
"movz", "!0r, #!1d!2M", kFixupNone),
ENCODING_MAP(WIDE(kA64Mov2rr), SF_VARIANTS(0x2a0003e0),
kFmtRegR, 4, 0, kFmtRegR, 20, 16, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1 | IS_MOVE,
"mov", "!0r, !1r", kFixupNone),
ENCODING_MAP(WIDE(kA64Mvn2rr), SF_VARIANTS(0x2a2003e0),
kFmtRegR, 4, 0, kFmtRegR, 20, 16, kFmtUnused, -1, -1,
@@ -508,23 +508,27 @@
"smulh", "!0x, !1x, !2x", kFixupNone),
ENCODING_MAP(WIDE(kA64Stp4ffXD), CUSTOM_VARIANTS(0x2d000000, 0x6d000000),
kFmtRegF, 4, 0, kFmtRegF, 14, 10, kFmtRegXOrSp, 9, 5,
- kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE012 | IS_STORE,
+ kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE012 | IS_STORE_OFF,
"stp", "!0f, !1f, [!2X, #!3D]", kFixupNone),
ENCODING_MAP(WIDE(kA64Stp4rrXD), SF_VARIANTS(0x29000000),
kFmtRegR, 4, 0, kFmtRegR, 14, 10, kFmtRegXOrSp, 9, 5,
- kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE012 | IS_STORE,
+ kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_USE012 | IS_STORE_OFF,
"stp", "!0r, !1r, [!2X, #!3D]", kFixupNone),
ENCODING_MAP(WIDE(kA64StpPost4rrXD), CUSTOM_VARIANTS(0x28800000, 0xa8800000),
kFmtRegR, 4, 0, kFmtRegR, 14, 10, kFmtRegXOrSp, 9, 5,
kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_DEF2 | REG_USE012 | IS_STORE,
"stp", "!0r, !1r, [!2X], #!3D", kFixupNone),
+ ENCODING_MAP(WIDE(kA64StpPre4ffXD), CUSTOM_VARIANTS(0x2d800000, 0x6d800000),
+ kFmtRegF, 4, 0, kFmtRegF, 14, 10, kFmtRegXOrSp, 9, 5,
+ kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_DEF2 | REG_USE012 | IS_STORE,
+ "stp", "!0r, !1f, [!2X, #!3D]!!", kFixupNone),
ENCODING_MAP(WIDE(kA64StpPre4rrXD), CUSTOM_VARIANTS(0x29800000, 0xa9800000),
kFmtRegR, 4, 0, kFmtRegR, 14, 10, kFmtRegXOrSp, 9, 5,
kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_DEF2 | REG_USE012 | IS_STORE,
"stp", "!0r, !1r, [!2X, #!3D]!!", kFixupNone),
ENCODING_MAP(FWIDE(kA64Str3fXD), CUSTOM_VARIANTS(0xbd000000, 0xfd000000),
kFmtRegF, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
"str", "!0f, [!1X, #!2D]", kFixupNone),
ENCODING_MAP(FWIDE(kA64Str4fXxG), CUSTOM_VARIANTS(0xbc206800, 0xfc206800),
kFmtRegF, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
@@ -532,7 +536,7 @@
"str", "!0f, [!1X, !2x!3G]", kFixupNone),
ENCODING_MAP(WIDE(kA64Str3rXD), SIZE_VARIANTS(0xb9000000),
kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
"str", "!0r, [!1X, #!2D]", kFixupNone),
ENCODING_MAP(WIDE(kA64Str4rXxG), SIZE_VARIANTS(0xb8206800),
kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
@@ -540,7 +544,7 @@
"str", "!0r, [!1X, !2x!3G]", kFixupNone),
ENCODING_MAP(kA64Strb3wXd, NO_VARIANTS(0x39000000),
kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
"strb", "!0w, [!1X, #!2d]", kFixupNone),
ENCODING_MAP(kA64Strb3wXx, NO_VARIANTS(0x38206800),
kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
@@ -548,7 +552,7 @@
"strb", "!0w, [!1X, !2x]", kFixupNone),
ENCODING_MAP(kA64Strh3wXF, NO_VARIANTS(0x79000000),
kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtBitBlt, 21, 10,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | IS_STORE_OFF,
"strh", "!0w, [!1X, #!2F]", kFixupNone),
ENCODING_MAP(kA64Strh4wXxd, NO_VARIANTS(0x78206800),
kFmtRegW, 4, 0, kFmtRegXOrSp, 9, 5, kFmtRegX, 20, 16,
@@ -568,11 +572,11 @@
"stur", "!0r, [!1X, #!2d]", kFixupNone),
ENCODING_MAP(WIDE(kA64Stxr3wrX), SIZE_VARIANTS(0x88007c00),
kFmtRegW, 20, 16, kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_STORE,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_STOREX,
"stxr", "!0w, !1r, [!2X]", kFixupNone),
ENCODING_MAP(WIDE(kA64Stlxr3wrX), SIZE_VARIANTS(0x8800fc00),
kFmtRegW, 20, 16, kFmtRegR, 4, 0, kFmtRegXOrSp, 9, 5,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_STORE,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | IS_STOREX,
"stlxr", "!0w, !1r, [!2X]", kFixupNone),
ENCODING_MAP(WIDE(kA64Sub4RRdT), SF_VARIANTS(0x51000000),
kFmtRegROrSp, 4, 0, kFmtRegROrSp, 9, 5, kFmtBitBlt, 21, 10,
@@ -723,6 +727,7 @@
<< " @ 0x" << std::hex << lir->dalvik_offset;
if (kFailOnSizeError) {
LOG(FATAL) << "Bad argument n. " << i << " of " << encoder->name
+ << "(" << UNWIDE(encoder->opcode) << ", " << encoder->fmt << ")"
<< ". Expected " << expected << ", got 0x" << std::hex << operand;
} else {
LOG(WARNING) << "Bad argument n. " << i << " of " << encoder->name
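
One constraint worth spelling out (assumed A64 semantics, not part of the patch): the imm_7 field of ldp/stp is a signed 7-bit value scaled by the register size, so with 8-byte registers only offsets in [-512, 504] at 8-byte granularity are encodable. This is the limit behind both the new pre-indexed spill code and the 63 * kArm64PointerSize constant in UnspillRegs (int_arm64.cc below):

    // Sketch: can a byte offset be encoded in ldp/stp imm_7 for 8-byte regs?
    static bool FitsLdpStpOffset64(int byte_offset) {
      return (byte_offset % 8 == 0) &&
             (byte_offset / 8 >= -64) && (byte_offset / 8 <= 63);
    }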
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index e584548..6fa8a4a 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -330,19 +330,14 @@
NewLIR0(kPseudoMethodEntry);
- const size_t kStackOverflowReservedUsableBytes = GetStackOverflowReservedBytes(kArm64) -
- Thread::kStackOverflowSignalReservedBytes;
- const bool large_frame = static_cast<size_t>(frame_size_) > kStackOverflowReservedUsableBytes;
const int spill_count = num_core_spills_ + num_fp_spills_;
const int spill_size = (spill_count * kArm64PointerSize + 15) & ~0xf; // SP 16 byte alignment.
const int frame_size_without_spills = frame_size_ - spill_size;
if (!skip_overflow_check) {
if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
- if (!large_frame) {
- // Load stack limit
- LoadWordDisp(rs_xSELF, Thread::StackEndOffset<8>().Int32Value(), rs_xIP1);
- }
+ // Load stack limit
+ LoadWordDisp(rs_xSELF, Thread::StackEndOffset<8>().Int32Value(), rs_xIP1);
} else {
// TODO(Arm64) Implement implicit checks.
// Implicit stack overflow check.
@@ -350,24 +345,21 @@
// redzone we will get a segmentation fault.
// Load32Disp(rs_wSP, -Thread::kStackOverflowReservedBytes, rs_wzr);
// MarkPossibleStackOverflowException();
+ //
+ // TODO: If the frame size is small enough, is it possible to make this a pre-indexed load,
+ // so that we can avoid the following "sub sp" when spilling?
LOG(FATAL) << "Implicit stack overflow checks not implemented.";
}
}
- if (frame_size_ > 0) {
- OpRegImm64(kOpSub, rs_sp, spill_size);
+ int spilled_already = 0;
+ if (spill_size > 0) {
+ spilled_already = SpillRegs(rs_sp, core_spill_mask_, fp_spill_mask_, frame_size_);
+ DCHECK(spill_size == spilled_already || frame_size_ == spilled_already);
}
- /* Need to spill any FP regs? */
- if (fp_spill_mask_) {
- int spill_offset = spill_size - kArm64PointerSize*(num_fp_spills_ + num_core_spills_);
- SpillFPRegs(rs_sp, spill_offset, fp_spill_mask_);
- }
-
- /* Spill core callee saves. */
- if (core_spill_mask_) {
- int spill_offset = spill_size - kArm64PointerSize*num_core_spills_;
- SpillCoreRegs(rs_sp, spill_offset, core_spill_mask_);
+ if (spilled_already != frame_size_) {
+ OpRegImm(kOpSub, rs_sp, frame_size_without_spills);
}
if (!skip_overflow_check) {
@@ -396,29 +388,9 @@
const size_t sp_displace_;
};
- if (large_frame) {
- // Compare Expected SP against bottom of stack.
- // Branch to throw target if there is not enough room.
- OpRegRegImm(kOpSub, rs_xIP1, rs_sp, frame_size_without_spills);
- LoadWordDisp(rs_xSELF, Thread::StackEndOffset<8>().Int32Value(), rs_xIP0);
- LIR* branch = OpCmpBranch(kCondUlt, rs_xIP1, rs_xIP0, nullptr);
- AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, spill_size));
- OpRegCopy(rs_sp, rs_xIP1); // Establish stack after checks.
- } else {
- /*
- * If the frame is small enough we are guaranteed to have enough space that remains to
- * handle signals on the user stack.
- * Establishes stack before checks.
- */
- OpRegRegImm(kOpSub, rs_sp, rs_sp, frame_size_without_spills);
- LIR* branch = OpCmpBranch(kCondUlt, rs_sp, rs_xIP1, nullptr);
- AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, frame_size_));
- }
- } else {
- OpRegImm(kOpSub, rs_sp, frame_size_without_spills);
+ LIR* branch = OpCmpBranch(kCondUlt, rs_sp, rs_xIP1, nullptr);
+ AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, frame_size_));
}
- } else {
- OpRegImm(kOpSub, rs_sp, frame_size_without_spills);
}
FlushIns(ArgLocs, rl_method);
@@ -445,57 +417,7 @@
NewLIR0(kPseudoMethodExit);
- // Restore saves and drop stack frame.
- // 2 versions:
- //
- // 1. (Original): Try to address directly, then drop the whole frame.
- // Limitation: ldp is a 7b signed immediate. There should have been a DCHECK!
- //
- // 2. (New): Drop the non-save-part. Then do similar to original, which is now guaranteed to be
- // in range. Then drop the rest.
- //
- // TODO: In methods with few spills but huge frame, it would be better to do non-immediate loads
- // in variant 1.
-
- if (frame_size_ <= 504) {
- // "Magic" constant, 63 (max signed 7b) * 8. Do variant 1.
- // Could be tighter, as the last load is below frame_size_ offset.
- if (fp_spill_mask_) {
- int spill_offset = frame_size_ - kArm64PointerSize * (num_fp_spills_ + num_core_spills_);
- UnSpillFPRegs(rs_sp, spill_offset, fp_spill_mask_);
- }
- if (core_spill_mask_) {
- int spill_offset = frame_size_ - kArm64PointerSize * num_core_spills_;
- UnSpillCoreRegs(rs_sp, spill_offset, core_spill_mask_);
- }
-
- OpRegImm64(kOpAdd, rs_sp, frame_size_);
- } else {
- // Second variant. Drop the frame part.
- int drop = 0;
- // TODO: Always use the first formula, as num_fp_spills would be zero?
- if (fp_spill_mask_) {
- drop = frame_size_ - kArm64PointerSize * (num_fp_spills_ + num_core_spills_);
- } else {
- drop = frame_size_ - kArm64PointerSize * num_core_spills_;
- }
-
- // Drop needs to be 16B aligned, so that SP keeps aligned.
- drop = RoundDown(drop, 16);
-
- OpRegImm64(kOpAdd, rs_sp, drop);
-
- if (fp_spill_mask_) {
- int offset = frame_size_ - drop - kArm64PointerSize * (num_fp_spills_ + num_core_spills_);
- UnSpillFPRegs(rs_sp, offset, fp_spill_mask_);
- }
- if (core_spill_mask_) {
- int offset = frame_size_ - drop - kArm64PointerSize * num_core_spills_;
- UnSpillCoreRegs(rs_sp, offset, core_spill_mask_);
- }
-
- OpRegImm64(kOpAdd, rs_sp, frame_size_ - drop);
- }
+ UnspillRegs(rs_sp, core_spill_mask_, fp_spill_mask_, frame_size_);
// Finally return.
NewLIR0(kA64Ret);
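
To illustrate the simplified prologue/epilogue (assumed output; register numbers and sizes are illustrative, not taken from the commit): with core spills {x20, x21}, FP spill {d8}, and frame_size_ == 96, SpillRegs takes the pre-sub path and method entry/exit reduce to roughly:

    sub  sp, sp, #96           // SpillRegsPreSub: drop the whole frame first
    str  d8, [sp, #72]         // FP spill at 96 - 3 * 8
    stp  x20, x21, [sp, #80]   // core spills at 96 - 2 * 8
    ...
    ldr  d8, [sp, #72]         // UnspillRegs, direct variant (frame <= 504)
    ldp  x20, x21, [sp, #80]
    add  sp, sp, #96
    ret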
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index 7a1d856..18f2a29 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -219,11 +219,12 @@
void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
- uint32_t GenPairWise(uint32_t reg_mask, int* reg1, int* reg2);
- void UnSpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask);
- void SpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask);
- void UnSpillFPRegs(RegStorage base, int offset, uint32_t reg_mask);
- void SpillFPRegs(RegStorage base, int offset, uint32_t reg_mask);
+ // Spill core and FP registers. Returns the SP difference: either spill size, or whole
+ // frame size.
+ int SpillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask, int frame_size);
+
+ // Unspill core and FP registers.
+ void UnspillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask, int frame_size);
// Required for target - single operation generators.
LIR* OpUnconditionalBranch(LIR* target);
@@ -298,6 +299,7 @@
bool WideFPRsAreAliases() OVERRIDE {
return true; // 64b architecture.
}
+ size_t GetInstructionOffset(LIR* lir);
LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
@@ -382,6 +384,7 @@
RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
bool is_div, bool check_zero);
RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div);
+ size_t GetLoadStoreSize(LIR* lir);
};
} // namespace art
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index f9f85f4..9403d5e 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -22,6 +22,7 @@
#include "dex/reg_storage_eq.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"
+#include "utils.h"
namespace art {
@@ -788,6 +789,7 @@
}
LIR* Arm64Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
+ ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
return RawLIR(current_dalvik_offset_, WIDE(kA64Ldr2rp), reg.GetReg(), 0, 0, 0, 0, target);
}
@@ -1237,6 +1239,14 @@
StoreValueWide(rl_dest, rl_result);
}
+static uint32_t ExtractReg(uint32_t reg_mask, int* reg) {
+ // Find first register.
+ int first_bit_set = CTZ(reg_mask) + 1;
+ *reg = *reg + first_bit_set;
+ reg_mask >>= first_bit_set;
+ return reg_mask;
+}
+
/**
* @brief Split a register list in pairs or registers.
*
@@ -1253,15 +1263,15 @@
* }
* @endcode
*/
-uint32_t Arm64Mir2Lir::GenPairWise(uint32_t reg_mask, int* reg1, int* reg2) {
+static uint32_t GenPairWise(uint32_t reg_mask, int* reg1, int* reg2) {
// Find first register.
- int first_bit_set = __builtin_ctz(reg_mask) + 1;
+ int first_bit_set = CTZ(reg_mask) + 1;
int reg = *reg1 + first_bit_set;
reg_mask >>= first_bit_set;
if (LIKELY(reg_mask)) {
// Save the first register, find the second and use the pair opcode.
- int second_bit_set = __builtin_ctz(reg_mask) + 1;
+ int second_bit_set = CTZ(reg_mask) + 1;
*reg2 = reg;
reg_mask >>= second_bit_set;
*reg1 = reg + second_bit_set;
@@ -1274,68 +1284,274 @@
return reg_mask;
}
-void Arm64Mir2Lir::UnSpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask) {
- int reg1 = -1, reg2 = -1;
- const int reg_log2_size = 3;
-
- for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
- reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
- if (UNLIKELY(reg2 < 0)) {
- NewLIR3(WIDE(kA64Ldr3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
- } else {
- DCHECK_LE(offset, 63);
- NewLIR4(WIDE(kA64Ldp4rrXD), RegStorage::Solo64(reg2).GetReg(),
- RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
- }
- }
-}
-
-void Arm64Mir2Lir::SpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask) {
+static void SpillCoreRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
int reg1 = -1, reg2 = -1;
const int reg_log2_size = 3;
for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
if (UNLIKELY(reg2 < 0)) {
- NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
+ m2l->NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
} else {
- NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
- RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
- }
- }
-}
-
-void Arm64Mir2Lir::UnSpillFPRegs(RegStorage base, int offset, uint32_t reg_mask) {
- int reg1 = -1, reg2 = -1;
- const int reg_log2_size = 3;
-
- for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
- reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
- if (UNLIKELY(reg2 < 0)) {
- NewLIR3(FWIDE(kA64Ldr3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
- } else {
- NewLIR4(WIDE(kA64Ldp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
- RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
+ m2l->NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
+ RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
}
}
}
// TODO(Arm64): consider using ld1 and st1?
-void Arm64Mir2Lir::SpillFPRegs(RegStorage base, int offset, uint32_t reg_mask) {
+static void SpillFPRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
int reg1 = -1, reg2 = -1;
const int reg_log2_size = 3;
for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
if (UNLIKELY(reg2 < 0)) {
- NewLIR3(FWIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
+ m2l->NewLIR3(FWIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
+ offset);
} else {
- NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
- RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
+ m2l->NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
+ RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
}
}
}
+static int SpillRegsPreSub(Arm64Mir2Lir* m2l, RegStorage base, uint32_t core_reg_mask,
+ uint32_t fp_reg_mask, int frame_size) {
+ m2l->OpRegRegImm(kOpSub, rs_sp, rs_sp, frame_size);
+
+ int core_count = POPCOUNT(core_reg_mask);
+
+ if (fp_reg_mask != 0) {
+ // Spill FP regs.
+ int fp_count = POPCOUNT(fp_reg_mask);
+ int spill_offset = frame_size - (core_count + fp_count) * kArm64PointerSize;
+ SpillFPRegs(m2l, rs_sp, spill_offset, fp_reg_mask);
+ }
+
+ if (core_reg_mask != 0) {
+ // Spill core regs.
+ int spill_offset = frame_size - (core_count * kArm64PointerSize);
+ SpillCoreRegs(m2l, rs_sp, spill_offset, core_reg_mask);
+ }
+
+ return frame_size;
+}
+
+static int SpillRegsPreIndexed(Arm64Mir2Lir* m2l, RegStorage base, uint32_t core_reg_mask,
+ uint32_t fp_reg_mask, int frame_size) {
+ // Otherwise, spill both core and fp regs at the same time.
+ // The very first instruction will be an stp with pre-indexed address, moving the stack pointer
+ // down. From then on, we fill upwards. This will generate overall the same number of instructions
+ // as the specialized code above in most cases (exception being odd number of core and even
+ // non-zero fp spills), but is more flexible, as the offsets are guaranteed small.
+ //
+ // Some demonstrative fill cases : (c) = core, (f) = fp
+ // cc 44 cc 44 cc 22 cc 33 fc => 1[1/2]
+ // fc => 23 fc => 23 ff => 11 ff => 22
+ // ff 11 f 11 f 11
+ //
+ int reg1 = -1, reg2 = -1;
+ int core_count = POPCOUNT(core_reg_mask);
+ int fp_count = POPCOUNT(fp_reg_mask);
+
+ int combined = fp_count + core_count;
+ int all_offset = RoundUp(combined, 2); // Needs to be 16B = 2-reg aligned.
+
+ int cur_offset = 2; // What's the starting offset after the first stp? We expect the base slot
+ // to be filled.
+
+ // First figure out whether the bottom is FP or core.
+ if (fp_count > 0) {
+ // Some FP spills.
+ //
+ // Four cases: (d0 is dummy to fill up stp)
+ // 1) Single FP, even number of core -> stp d0, fp_reg
+ // 2) Single FP, odd number of core -> stp fp_reg, d0
+ // 3) More FP, even number combined -> stp fp_reg1, fp_reg2
+ // 4) More FP, odd number combined -> stp d0, fp_reg
+ if (fp_count == 1) {
+ fp_reg_mask = ExtractReg(fp_reg_mask, &reg1);
+ DCHECK_EQ(fp_reg_mask, 0U);
+ if (core_count % 2 == 0) {
+ m2l->NewLIR4(WIDE(kA64StpPre4ffXD),
+ RegStorage::FloatSolo64(reg1).GetReg(),
+ RegStorage::FloatSolo64(reg1).GetReg(),
+ base.GetReg(), -all_offset);
+ } else {
+ m2l->NewLIR4(WIDE(kA64StpPre4ffXD),
+ RegStorage::FloatSolo64(reg1).GetReg(),
+ RegStorage::FloatSolo64(reg1).GetReg(),
+ base.GetReg(), -all_offset);
+ cur_offset = 0; // That core reg needs to go into the upper half.
+ }
+ } else {
+ if (combined % 2 == 0) {
+ fp_reg_mask = GenPairWise(fp_reg_mask, &reg1, &reg2);
+ m2l->NewLIR4(WIDE(kA64StpPre4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
+ RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), -all_offset);
+ } else {
+ fp_reg_mask = ExtractReg(fp_reg_mask, &reg1);
+ m2l->NewLIR4(WIDE(kA64StpPre4ffXD), rs_d0.GetReg(), RegStorage::FloatSolo64(reg1).GetReg(),
+ base.GetReg(), -all_offset);
+ }
+ }
+ } else {
+ // No FP spills.
+ //
+ // Two cases:
+ // 1) Even number of core -> stp core1, core2
+ // 2) Odd number of core -> stp xzr, core1
+ if (core_count % 2 == 1) {
+ core_reg_mask = ExtractReg(core_reg_mask, &reg1);
+ m2l->NewLIR4(WIDE(kA64StpPre4rrXD), rs_xzr.GetReg(),
+ RegStorage::Solo64(reg1).GetReg(), base.GetReg(), -all_offset);
+ } else {
+ core_reg_mask = GenPairWise(core_reg_mask, &reg1, &reg2);
+ m2l->NewLIR4(WIDE(kA64StpPre4rrXD), RegStorage::Solo64(reg2).GetReg(),
+ RegStorage::Solo64(reg1).GetReg(), base.GetReg(), -all_offset);
+ }
+ }
+
+ if (fp_count != 0) {
+ for (; fp_reg_mask != 0;) {
+ // Have some FP regs to do.
+ fp_reg_mask = GenPairWise(fp_reg_mask, &reg1, &reg2);
+ if (UNLIKELY(reg2 < 0)) {
+ m2l->NewLIR3(FWIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
+ cur_offset);
+ // Do not increment offset here, as the second half will be filled by a core reg.
+ } else {
+ m2l->NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
+ RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), cur_offset);
+ cur_offset += 2;
+ }
+ }
+
+ // Reset counting.
+ reg1 = -1;
+
+ // If there is an odd number of core registers, we need to store the bottom now.
+ if (core_count % 2 == 1) {
+ core_reg_mask = ExtractReg(core_reg_mask, &reg1);
+ m2l->NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(),
+ cur_offset + 1);
+ cur_offset += 2; // Half-slot filled now.
+ }
+ }
+
+ // Spill the rest of the core regs. They are guaranteed to be even.
+ DCHECK_EQ(POPCOUNT(core_reg_mask) % 2, 0);
+ for (; core_reg_mask != 0; cur_offset += 2) {
+ core_reg_mask = GenPairWise(core_reg_mask, &reg1, &reg2);
+ m2l->NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
+ RegStorage::Solo64(reg1).GetReg(), base.GetReg(), cur_offset);
+ }
+
+ DCHECK_EQ(cur_offset, all_offset);
+
+ return all_offset * 8;
+}
+
+int Arm64Mir2Lir::SpillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask,
+ int frame_size) {
+ // If the frame size is small enough that all offsets would fit into the immediates, use that
+ // setup, as it decrements sp early (kind of instruction scheduling), and is not worse
+ // instruction-count wise than the complicated code below.
+ //
+ // This case is also optimal when we have an odd number of core spills, and an even (non-zero)
+ // number of fp spills.
+ if ((RoundUp(frame_size, 8) / 8 <= 63)) {
+ return SpillRegsPreSub(this, base, core_reg_mask, fp_reg_mask, frame_size);
+ } else {
+ return SpillRegsPreIndexed(this, base, core_reg_mask, fp_reg_mask, frame_size);
+ }
+}
+
+static void UnSpillCoreRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
+ int reg1 = -1, reg2 = -1;
+ const int reg_log2_size = 3;
+
+ for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
+ reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
+ if (UNLIKELY(reg2 < 0)) {
+ m2l->NewLIR3(WIDE(kA64Ldr3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
+ } else {
+ DCHECK_LE(offset, 63);
+ m2l->NewLIR4(WIDE(kA64Ldp4rrXD), RegStorage::Solo64(reg2).GetReg(),
+ RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
+ }
+ }
+}
+
+static void UnSpillFPRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
+ int reg1 = -1, reg2 = -1;
+ const int reg_log2_size = 3;
+
+ for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
+ reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
+ if (UNLIKELY(reg2 < 0)) {
+ m2l->NewLIR3(FWIDE(kA64Ldr3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
+ offset);
+ } else {
+ m2l->NewLIR4(WIDE(kA64Ldp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
+ RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
+ }
+ }
+}
+
+void Arm64Mir2Lir::UnspillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask,
+ int frame_size) {
+ // Restore saves and drop stack frame.
+ // 2 versions:
+ //
+ // 1. (Original): Try to address directly, then drop the whole frame.
+ // Limitation: ldp is a 7b signed immediate.
+ //
+ // 2. (New): Drop the non-save-part. Then do similar to original, which is now guaranteed to be
+ // in range. Then drop the rest.
+ //
+ // TODO: In methods with few spills but huge frame, it would be better to do non-immediate loads
+ // in variant 1.
+
+ // "Magic" constant, 63 (max signed 7b) * 8.
+ static constexpr int kMaxFramesizeForOffset = 63 * kArm64PointerSize;
+
+ const int num_core_spills = POPCOUNT(core_reg_mask);
+ const int num_fp_spills = POPCOUNT(fp_reg_mask);
+
+ int early_drop = 0;
+
+ if (frame_size > kMaxFramesizeForOffset) {
+ // Second variant. Drop the frame part.
+
+ // TODO: Always use the first formula, as num_fp_spills would be zero?
+ if (fp_reg_mask != 0) {
+ early_drop = frame_size - kArm64PointerSize * (num_fp_spills + num_core_spills);
+ } else {
+ early_drop = frame_size - kArm64PointerSize * num_core_spills;
+ }
+
+ // Drop needs to be 16B aligned, so that SP keeps aligned.
+ early_drop = RoundDown(early_drop, 16);
+
+ OpRegImm64(kOpAdd, rs_sp, early_drop);
+ }
+
+ // Unspill.
+ if (fp_reg_mask != 0) {
+ int offset = frame_size - early_drop - kArm64PointerSize * (num_fp_spills + num_core_spills);
+ UnSpillFPRegs(this, rs_sp, offset, fp_reg_mask);
+ }
+ if (core_reg_mask != 0) {
+ int offset = frame_size - early_drop - kArm64PointerSize * num_core_spills;
+ UnSpillCoreRegs(this, rs_sp, offset, core_reg_mask);
+ }
+
+ // Drop the (rest of) the frame.
+ OpRegImm64(kOpAdd, rs_sp, frame_size - early_drop);
+}
+
bool Arm64Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
ArmOpcode wide = (size == k64) ? WIDE(0) : UNWIDE(0);
RegLocation rl_src_i = info->args[0];
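
An illustrative SpillRegsPreIndexed case (assumed register numbers; this path is used when the frame exceeds the 504-byte immediate range): three core spills {x20, x21, x22} and no FP spills give combined == 3, rounded up to all_offset == 4 slots, so the emitted code is roughly:

    stp  xzr, x20, [sp, #-32]!  // odd core count: xzr pads the bottom slot,
                                // pre-index allocates all four 8-byte slots
    stp  x21, x22, [sp, #16]    // remaining pairs fill upwards

SpillRegs then reports 32 bytes spilled, and GenEntrySequence subtracts the remaining frame_size_without_spills separately.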
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index f7e80c1..5131bd8 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -87,6 +87,26 @@
return (bit7 | bit6 | bit5_to_0);
}
+size_t Arm64Mir2Lir::GetLoadStoreSize(LIR* lir) {
+ bool opcode_is_wide = IS_WIDE(lir->opcode);
+ ArmOpcode opcode = UNWIDE(lir->opcode);
+ DCHECK(!IsPseudoLirOp(opcode));
+ const ArmEncodingMap *encoder = &EncodingMap[opcode];
+ uint32_t bits = opcode_is_wide ? encoder->xskeleton : encoder->wskeleton;
+ return (bits >> 30);
+}
+
+size_t Arm64Mir2Lir::GetInstructionOffset(LIR* lir) {
+ size_t offset = lir->operands[2];
+ uint64_t check_flags = GetTargetInstFlags(lir->opcode);
+ DCHECK((check_flags & IS_LOAD) || (check_flags & IS_STORE));
+ if (check_flags & SCALED_OFFSET_X0) {
+ DCHECK(check_flags & IS_TERTIARY_OP);
+ offset = offset * (1 << GetLoadStoreSize(lir));
+ }
+ return offset;
+}
+
LIR* Arm64Mir2Lir::LoadFPConstantValue(RegStorage r_dest, int32_t value) {
DCHECK(r_dest.IsSingle());
if (value == 0) {
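
A worked example of the two new arm64 helpers (hypothetical LIR): for "ldr x0, [sp, #40]", built as WIDE(kA64Ldr3rXD) with the raw scaled immediate 5, bits [31:30] of the x-variant skeleton hold the size field 0b11, i.e. an 8-byte access:

    LIR* lir = NewLIR3(WIDE(kA64Ldr3rXD), rs_x0.GetReg(), rs_sp.GetReg(), 5);
    DCHECK_EQ(GetLoadStoreSize(lir), 3u);       // log2 of the access size
    DCHECK_EQ(GetInstructionOffset(lir), 40u);  // 5 << 3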
diff --git a/compiler/dex/quick/local_optimizations.cc b/compiler/dex/quick/local_optimizations.cc
index 2893157..eec2b32 100644
--- a/compiler/dex/quick/local_optimizations.cc
+++ b/compiler/dex/quick/local_optimizations.cc
@@ -15,15 +15,43 @@
*/
#include "dex/compiler_internals.h"
+#include "dex/quick/mir_to_lir-inl.h"
namespace art {
#define DEBUG_OPT(X)
+#define LOAD_STORE_CHECK_REG_DEP(mask, check) (mask.Intersects(*check->u.m.def_mask))
+
/* Check RAW, WAR, and RAW dependency on the register operands */
#define CHECK_REG_DEP(use, def, check) (def.Intersects(*check->u.m.use_mask)) || \
(use.Union(def).Intersects(*check->u.m.def_mask))
+/* Load Store Elimination filter:
+ * - Wide Load/Store
+ * - Exclusive Load/Store
+ * - Quad operand Load/Store
+ * - List Load/Store
+ * - IT blocks
+ * - Branch
+ * - Dmb
+ */
+#define LOAD_STORE_FILTER(flags) ((flags & (IS_QUAD_OP|IS_STORE)) == (IS_QUAD_OP|IS_STORE) || \
+ (flags & (IS_QUAD_OP|IS_LOAD)) == (IS_QUAD_OP|IS_LOAD) || \
+ (flags & REG_USE012) == REG_USE012 || \
+ (flags & REG_DEF01) == REG_DEF01 || \
+ (flags & REG_DEF_LIST0) || \
+ (flags & REG_DEF_LIST1) || \
+ (flags & REG_USE_LIST0) || \
+ (flags & REG_USE_LIST1) || \
+ (flags & REG_DEF_FPCS_LIST0) || \
+ (flags & REG_DEF_FPCS_LIST2) || \
+ (flags & REG_USE_FPCS_LIST0) || \
+ (flags & REG_USE_FPCS_LIST2) || \
+ (flags & IS_VOLATILE) || \
+ (flags & IS_BRANCH) || \
+ (flags & IS_IT))
+
/* Scheduler heuristics */
#define MAX_HOIST_DISTANCE 20
#define LDLD_DISTANCE 4
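
The filter is applied both to the candidate instruction and to every instruction scanned past it (see ApplyLoadStoreElimination below). A sketch of the candidate test, mirroring that code: a surviving candidate must additionally be exactly one of load or store:

    uint64_t flags = GetTargetInstFlags(lir->opcode);
    bool is_candidate = !LOAD_STORE_FILTER(flags) &&
                        ((flags & (IS_LOAD | IS_STORE)) == IS_LOAD ||
                         (flags & (IS_LOAD | IS_STORE)) == IS_STORE);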
@@ -43,6 +71,7 @@
/* Insert a move to replace the load */
LIR* move_lir;
move_lir = OpRegCopyNoInsert(dest, src);
+ move_lir->dalvik_offset = orig_lir->dalvik_offset;
/*
* Insert the converted instruction after the original since the
* optimization is scanning in the top-down order and the new instruction
@@ -52,8 +81,53 @@
InsertLIRAfter(orig_lir, move_lir);
}
+void Mir2Lir::DumpDependentInsnPair(LIR* check_lir, LIR* this_lir, const char* type) {
+ LOG(INFO) << type;
+ LOG(INFO) << "Check LIR:";
+ DumpLIRInsn(check_lir, 0);
+ LOG(INFO) << "This LIR:";
+ DumpLIRInsn(this_lir, 0);
+}
+
+inline void Mir2Lir::EliminateLoad(LIR* lir, int reg_id) {
+ DCHECK(RegStorage::SameRegType(lir->operands[0], reg_id));
+ RegStorage dest_reg, src_reg;
+
+ /* Same Register - Nop */
+ if (lir->operands[0] == reg_id) {
+ NopLIR(lir);
+ return;
+ }
+
+ /* Different register - Move + Nop */
+ switch (reg_id & RegStorage::kShapeTypeMask) {
+ case RegStorage::k32BitSolo | RegStorage::kCoreRegister:
+ dest_reg = RegStorage::Solo32(lir->operands[0]);
+ src_reg = RegStorage::Solo32(reg_id);
+ break;
+ case RegStorage::k64BitSolo | RegStorage::kCoreRegister:
+ dest_reg = RegStorage::Solo64(lir->operands[0]);
+ src_reg = RegStorage::Solo64(reg_id);
+ break;
+ case RegStorage::k32BitSolo | RegStorage::kFloatingPoint:
+ dest_reg = RegStorage::FloatSolo32(lir->operands[0]);
+ src_reg = RegStorage::FloatSolo32(reg_id);
+ break;
+ case RegStorage::k64BitSolo | RegStorage::kFloatingPoint:
+ dest_reg = RegStorage::FloatSolo64(lir->operands[0]);
+ src_reg = RegStorage::FloatSolo64(reg_id);
+ break;
+ default:
+ LOG(INFO) << "Load Store: Unsuported register type!";
+ return;
+ }
+ ConvertMemOpIntoMove(lir, dest_reg, src_reg);
+ NopLIR(lir);
+ return;
+}
+
/*
- * Perform a pass of top-down walk, from the second-last instruction in the
+ * Perform a pass of top-down walk, from the first to the last instruction in the
* superblock, to eliminate redundant loads and stores.
*
* An earlier load can eliminate a later load iff
@@ -66,213 +140,172 @@
* 2) The native register is not clobbered in between
* 3) The memory location is not written to in between
*
- * A later store can be eliminated by an earlier store iff
+ * An earlier store can eliminate a later store iff
* 1) They are must-aliases
* 2) The memory location is not written to in between
*/
void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
- LIR* this_lir;
+ LIR* this_lir, *check_lir;
+ std::vector<int> alias_list;
if (head_lir == tail_lir) {
return;
}
- for (this_lir = PREV_LIR(tail_lir); this_lir != head_lir; this_lir = PREV_LIR(this_lir)) {
- if (IsPseudoLirOp(this_lir->opcode)) {
+ for (this_lir = head_lir; this_lir != tail_lir; this_lir = NEXT_LIR(this_lir)) {
+ if (this_lir->flags.is_nop || IsPseudoLirOp(this_lir->opcode)) {
continue;
}
- int sink_distance = 0;
-
uint64_t target_flags = GetTargetInstFlags(this_lir->opcode);
+ /* Target LIR - skip if instr is:
+ * - NOP
+ * - Branch
+ * - Load and store
+ * - Wide load
+ * - Wide store
+ * - Exclusive load/store
+ */
+ if (LOAD_STORE_FILTER(target_flags) ||
+ ((target_flags & (IS_LOAD | IS_STORE)) == (IS_LOAD | IS_STORE)) ||
+ !(target_flags & (IS_LOAD | IS_STORE))) {
+ continue;
+ }
+ int native_reg_id = this_lir->operands[0];
+ int dest_reg_id = this_lir->operands[1];
+ bool is_this_lir_load = target_flags & IS_LOAD;
+ ResourceMask this_mem_mask = kEncodeMem.Intersection(this_lir->u.m.use_mask->Union(
+ *this_lir->u.m.def_mask));
- /* Skip non-interesting instructions */
- if ((this_lir->flags.is_nop == true) ||
- (target_flags & IS_BRANCH) ||
- ((target_flags & (REG_DEF0 | REG_DEF1)) == (REG_DEF0 | REG_DEF1)) || // Skip wide loads.
- ((target_flags & (REG_USE0 | REG_USE1 | REG_USE2)) ==
- (REG_USE0 | REG_USE1 | REG_USE2)) || // Skip wide stores.
- // Skip instructions that are neither loads or stores.
- !(target_flags & (IS_LOAD | IS_STORE)) ||
- // Skip instructions that do both load and store.
- ((target_flags & (IS_STORE | IS_LOAD)) == (IS_STORE | IS_LOAD))) {
+ /* Memory region */
+ if (!this_mem_mask.Intersects(kEncodeLiteral.Union(kEncodeDalvikReg)) &&
+ (!this_mem_mask.Intersects(kEncodeLiteral.Union(kEncodeHeapRef)))) {
continue;
}
- int native_reg_id;
- if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
- // If x86, location differs depending on whether memory/reg operation.
- native_reg_id = (target_flags & IS_STORE) ? this_lir->operands[2] : this_lir->operands[0];
- } else {
- native_reg_id = this_lir->operands[0];
- }
- bool is_this_lir_load = target_flags & IS_LOAD;
- LIR* check_lir;
- /* Use the mem mask to determine the rough memory location */
- ResourceMask this_mem_mask = kEncodeMem.Intersection(
- this_lir->u.m.use_mask->Union(*this_lir->u.m.def_mask));
-
- /*
- * Currently only eliminate redundant ld/st for constant and Dalvik
- * register accesses.
- */
- if (!this_mem_mask.Intersects(kEncodeLiteral.Union(kEncodeDalvikReg))) {
+ /* Skip if a register the instruction uses is also redefined (e.g. base writeback) */
+ if (this_lir->u.m.def_mask->Intersects(*this_lir->u.m.use_mask)) {
continue;
}
ResourceMask stop_def_reg_mask = this_lir->u.m.def_mask->Without(kEncodeMem);
+ ResourceMask stop_use_reg_mask = this_lir->u.m.use_mask->Without(kEncodeMem);
- /*
- * Add pc to the resource mask to prevent this instruction
- * from sinking past branch instructions. Also take out the memory
- * region bits since stop_mask is used to check data/control
- * dependencies.
- *
- * Note: on x86(-64) and Arm64 we use the IsBranch bit, as the PC is not exposed.
- */
- ResourceMask pc_encoding = GetPCUseDefEncoding();
- if (pc_encoding == kEncodeNone) {
- // TODO: Stop the abuse of kIsBranch as a bit specification for ResourceMask.
- pc_encoding = ResourceMask::Bit(kIsBranch);
+ /* The ARM backend can load/store PC */
+ ResourceMask uses_pc = GetPCUseDefEncoding();
+ if (uses_pc.Intersects(this_lir->u.m.use_mask->Union(*this_lir->u.m.def_mask))) {
+ continue;
}
- ResourceMask stop_use_reg_mask = pc_encoding.Union(*this_lir->u.m.use_mask).
- Without(kEncodeMem);
+ /* Initialize alias list */
+ alias_list.clear();
+ ResourceMask alias_reg_list_mask = kEncodeNone;
+ if (!this_mem_mask.Intersects(kEncodeLiteral)) {
+ alias_list.push_back(dest_reg_id);
+ SetupRegMask(&alias_reg_list_mask, dest_reg_id);
+ }
+
+ /* Scan through the BB for possible elimination candidates */
for (check_lir = NEXT_LIR(this_lir); check_lir != tail_lir; check_lir = NEXT_LIR(check_lir)) {
- /*
- * Skip already dead instructions (whose dataflow information is
- * outdated and misleading).
- */
if (check_lir->flags.is_nop || IsPseudoLirOp(check_lir->opcode)) {
continue;
}
- ResourceMask check_mem_mask = kEncodeMem.Intersection(
- check_lir->u.m.use_mask->Union(*check_lir->u.m.def_mask));
- ResourceMask alias_condition = this_mem_mask.Intersection(check_mem_mask);
- bool stop_here = false;
+ if (uses_pc.Intersects(check_lir->u.m.use_mask->Union(*check_lir->u.m.def_mask))) {
+ break;
+ }
- /*
- * Potential aliases seen - check the alias relations
- */
+ ResourceMask check_mem_mask = kEncodeMem.Intersection(check_lir->u.m.use_mask->Union(
+ *check_lir->u.m.def_mask));
+ ResourceMask alias_mem_mask = this_mem_mask.Intersection(check_mem_mask);
uint64_t check_flags = GetTargetInstFlags(check_lir->opcode);
- // TUNING: Support instructions with multiple register targets.
- if ((check_flags & (REG_DEF0 | REG_DEF1)) == (REG_DEF0 | REG_DEF1)) {
+ bool stop_here = false;
+ bool pass_over = false;
+
+ /* Check LIR - skip if instr is:
+ * - Wide Load
+ * - Wide Store
+ * - Branch
+ * - Dmb
+ * - Exclusive load/store
+ * - IT blocks
+ * - Quad loads
+ */
+ if (LOAD_STORE_FILTER(check_flags)) {
stop_here = true;
- } else if (!check_mem_mask.Equals(kEncodeMem) && !alias_condition.Equals(kEncodeNone)) {
- bool is_check_lir_load = check_flags & IS_LOAD;
- if (alias_condition.Equals(kEncodeLiteral)) {
- /*
- * Should only see literal loads in the instruction
- * stream.
- */
- DCHECK(!(check_flags & IS_STORE));
- /* Same value && same register type */
- if (check_lir->flags.alias_info == this_lir->flags.alias_info &&
- RegStorage::SameRegType(check_lir->operands[0], native_reg_id)) {
- /*
- * Different destination register - insert
- * a move
- */
- if (check_lir->operands[0] != native_reg_id) {
- // TODO: update for 64-bit regs.
- ConvertMemOpIntoMove(check_lir, RegStorage::Solo32(check_lir->operands[0]),
- RegStorage::Solo32(native_reg_id));
- }
- NopLIR(check_lir);
- }
- } else if (alias_condition.Equals(kEncodeDalvikReg)) {
- /* Must alias */
- if (check_lir->flags.alias_info == this_lir->flags.alias_info) {
- /* Only optimize compatible registers */
- bool reg_compatible = RegStorage::SameRegType(check_lir->operands[0], native_reg_id);
- if ((is_this_lir_load && is_check_lir_load) ||
- (!is_this_lir_load && is_check_lir_load)) {
- /* RAR or RAW */
- if (reg_compatible) {
- /*
- * Different destination register -
- * insert a move
- */
- if (check_lir->operands[0] != native_reg_id) {
- // TODO: update for 64-bit regs.
- ConvertMemOpIntoMove(check_lir, RegStorage::Solo32(check_lir->operands[0]),
- RegStorage::Solo32(native_reg_id));
- }
- NopLIR(check_lir);
- } else {
- /*
- * Destinaions are of different types -
- * something complicated going on so
- * stop looking now.
- */
- stop_here = true;
- }
- } else if (is_this_lir_load && !is_check_lir_load) {
- /* WAR - register value is killed */
- stop_here = true;
- } else if (!is_this_lir_load && !is_check_lir_load) {
- /* WAW - nuke the earlier store */
- NopLIR(this_lir);
- stop_here = true;
- }
- /* Partial overlap */
- } else if (IsDalvikRegisterClobbered(this_lir, check_lir)) {
- /*
- * It is actually ok to continue if check_lir
- * is a read. But it is hard to make a test
- * case for this so we just stop here to be
- * conservative.
- */
- stop_here = true;
+ /* Possible alias or result of earlier pass */
+ } else if (check_flags & IS_MOVE) {
+ for (auto &reg : alias_list) {
+ if (RegStorage::RegNum(check_lir->operands[1]) == RegStorage::RegNum(reg)) {
+ pass_over = true;
+ alias_list.push_back(check_lir->operands[0]);
+ SetupRegMask(&alias_reg_list_mask, check_lir->operands[0]);
}
}
- /* Memory content may be updated. Stop looking now. */
+ /* Memory regions */
+ } else if (!alias_mem_mask.Equals(kEncodeNone)) {
+ DCHECK((check_flags & IS_LOAD) || (check_flags & IS_STORE));
+ bool is_check_lir_load = check_flags & IS_LOAD;
+ bool reg_compatible = RegStorage::SameRegType(check_lir->operands[0], native_reg_id);
+
+ if (alias_mem_mask.Equals(kEncodeLiteral)) {
+ DCHECK(check_flags & IS_LOAD);
+ /* Same value && same register type */
+ if (reg_compatible && (this_lir->target == check_lir->target)) {
+ DEBUG_OPT(DumpDependentInsnPair(check_lir, this_lir, "LITERAL"));
+ EliminateLoad(check_lir, native_reg_id);
+ }
+ } else if (((alias_mem_mask.Equals(kEncodeDalvikReg)) || (alias_mem_mask.Equals(kEncodeHeapRef))) &&
+ alias_reg_list_mask.Intersects((check_lir->u.m.use_mask)->Without(kEncodeMem))) {
+ bool same_offset = (GetInstructionOffset(this_lir) == GetInstructionOffset(check_lir));
+ if (same_offset && !is_check_lir_load) {
+ if (check_lir->operands[0] != native_reg_id) {
+ DEBUG_OPT(DumpDependentInsnPair(check_lir, this_lir, "STORE STOP"));
+ stop_here = true;
+ break;
+ }
+ }
+
+ if (reg_compatible && same_offset &&
+ ((is_this_lir_load && is_check_lir_load) /* LDR - LDR */ ||
+ (!is_this_lir_load && is_check_lir_load) /* STR - LDR */ ||
+ (!is_this_lir_load && !is_check_lir_load) /* STR - STR */)) {
+ DEBUG_OPT(DumpDependentInsnPair(check_lir, this_lir, "LOAD STORE"));
+ EliminateLoad(check_lir, native_reg_id);
+ }
+ } else {
+ /* Unsupported memory region */
+ }
+ }
+
+ if (pass_over) {
+ continue;
+ }
+
+ if (stop_here == false) {
+ bool stop_alias = LOAD_STORE_CHECK_REG_DEP(alias_reg_list_mask, check_lir);
+ if (stop_alias) {
+ /* Scan the alias list and remove any register that now has a dependency. */
+ for (auto &reg : alias_list) {
+ stop_alias = false;
+ ResourceMask alias_reg_mask = kEncodeNone;
+ SetupRegMask(&alias_reg_mask, reg);
+ stop_alias = LOAD_STORE_CHECK_REG_DEP(alias_reg_mask, check_lir);
+ if (stop_alias) {
+ ClearRegMask(&alias_reg_list_mask, reg);
+ alias_list.erase(std::remove(alias_list.begin(), alias_list.end(),
+ reg), alias_list.end());
+ }
+ }
+ }
+ ResourceMask stop_search_mask = stop_def_reg_mask.Union(stop_use_reg_mask);
+ stop_search_mask = stop_search_mask.Union(alias_reg_list_mask);
+ stop_here = LOAD_STORE_CHECK_REG_DEP(stop_search_mask, check_lir);
if (stop_here) {
break;
- /* The check_lir has been transformed - check the next one */
- } else if (check_lir->flags.is_nop) {
- continue;
}
- }
-
-
- /*
- * this and check LIRs have no memory dependency. Now check if
- * their register operands have any RAW, WAR, and WAW
- * dependencies. If so, stop looking.
- */
- if (stop_here == false) {
- stop_here = CHECK_REG_DEP(stop_use_reg_mask, stop_def_reg_mask, check_lir);
- }
-
- if (stop_here == true) {
- if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
- // Prevent stores from being sunk between ops that generate ccodes and
- // ops that use them.
- uint64_t flags = GetTargetInstFlags(check_lir->opcode);
- if (sink_distance > 0 && (flags & IS_BRANCH) && (flags & USES_CCODES)) {
- check_lir = PREV_LIR(check_lir);
- sink_distance--;
- }
- }
- DEBUG_OPT(dump_dependent_insn_pair(this_lir, check_lir, "REG CLOBBERED"));
- /* Only sink store instructions */
- if (sink_distance && !is_this_lir_load) {
- LIR* new_store_lir =
- static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocLIR));
- *new_store_lir = *this_lir;
- /*
- * Stop point found - insert *before* the check_lir
- * since the instruction list is scanned in the
- * top-down order.
- */
- InsertLIRBefore(check_lir, new_store_lir);
- NopLIR(this_lir);
- }
+ } else {
break;
- } else if (!check_lir->flags.is_nop) {
- sink_distance++;
}
}
}
@@ -385,7 +418,7 @@
/* Found a new place to put the load - move it here */
if (stop_here == true) {
- DEBUG_OPT(dump_dependent_insn_pair(check_lir, this_lir "HOIST STOP"));
+ DEBUG_OPT(DumpDependentInsnPair(check_lir, this_lir, "HOIST STOP"));
break;
}
}
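
Illustration of the rewritten pass: it walks forward from each candidate load/store (this_lir), keeping alias_list as the set of registers known to hold the same base address, and eliminates a later access at the same scaled offset. A minimal Thumb2-style sequence (registers and offset invented for illustration, not taken from this patch):

    str  r0, [r5, #16]   @ this_lir: a store; alias_list = { r5 }
    mov  r6, r5          @ IS_MOVE whose source aliases r5; alias_list = { r5, r6 }
    ldr  r1, [r6, #16]   @ check_lir: same offset, base aliases r5, so redundant

With no clobber of r0, r5, or r6 in between, EliminateLoad() rewrites the ldr into "mov r1, r0" via ConvertMemOpIntoMove() and NOPs the original memory op.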
diff --git a/compiler/dex/quick/mir_to_lir-inl.h b/compiler/dex/quick/mir_to_lir-inl.h
index 9ce5bb7..ff5a46f 100644
--- a/compiler/dex/quick/mir_to_lir-inl.h
+++ b/compiler/dex/quick/mir_to_lir-inl.h
@@ -147,6 +147,15 @@
}
/*
+ * Clear the corresponding bit(s).
+ */
+inline void Mir2Lir::ClearRegMask(ResourceMask* mask, int reg) {
+ DCHECK_EQ((reg & ~RegStorage::kRegValMask), 0);
+ DCHECK(reginfo_map_.Get(reg) != nullptr) << "No info for 0x" << reg;
+ *mask = mask->ClearBits(reginfo_map_.Get(reg)->DefUseMask());
+}
+
+/*
* Set up the proper fields in the resource mask
*/
inline void Mir2Lir::SetupResourceMasks(LIR* lir) {
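
ClearRegMask() is the inverse of the existing SetupRegMask(): it removes a register's def/use bits from a mask instead of ORing them in. A minimal sketch of how the elimination pass above pairs the two (identifiers reused from that pass; the sequence is illustrative):

    ResourceMask alias_reg_list_mask = kEncodeNone;
    SetupRegMask(&alias_reg_list_mask, dest_reg_id);  // start tracking the base reg
    // ... a later instruction clobbers that register ...
    ClearRegMask(&alias_reg_list_mask, dest_reg_id);  // stop tracking it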
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index ed7fcdd..4d8b91e 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -1311,4 +1311,9 @@
rl.ref ? RefCheck::kCheckRef : RefCheck::kCheckNotRef, FPCheck::kIgnoreFP, fail, report);
}
+size_t Mir2Lir::GetInstructionOffset(LIR* lir) {
+ UNIMPLEMENTED(FATAL) << "Unsupported GetInstructionOffset()";
+ return 0;
+}
+
} // namespace art
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index b832223..d03b859 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -51,6 +51,7 @@
#define IS_BINARY_OP (1ULL << kIsBinaryOp)
#define IS_BRANCH (1ULL << kIsBranch)
#define IS_IT (1ULL << kIsIT)
+#define IS_MOVE (1ULL << kIsMoveOp)
#define IS_LOAD (1ULL << kMemLoad)
#define IS_QUAD_OP (1ULL << kIsQuadOp)
#define IS_QUIN_OP (1ULL << kIsQuinOp)
@@ -58,6 +59,7 @@
#define IS_STORE (1ULL << kMemStore)
#define IS_TERTIARY_OP (1ULL << kIsTertiaryOp)
#define IS_UNARY_OP (1ULL << kIsUnaryOp)
+#define IS_VOLATILE (1ULL << kMemVolatile)
#define NEEDS_FIXUP (1ULL << kPCRelFixup)
#define NO_OPERAND (1ULL << kNoOperand)
#define REG_DEF0 (1ULL << kRegDef0)
@@ -94,6 +96,20 @@
#define REG_USE_HI (1ULL << kUseHi)
#define REG_DEF_LO (1ULL << kDefLo)
#define REG_DEF_HI (1ULL << kDefHi)
+#define SCALED_OFFSET_X0 (1ULL << kMemScaledx0)
+#define SCALED_OFFSET_X2 (1ULL << kMemScaledx2)
+#define SCALED_OFFSET_X4 (1ULL << kMemScaledx4)
+
+// Special load/stores
+#define IS_LOADX (IS_LOAD | IS_VOLATILE)
+#define IS_LOAD_OFF (IS_LOAD | SCALED_OFFSET_X0)
+#define IS_LOAD_OFF2 (IS_LOAD | SCALED_OFFSET_X2)
+#define IS_LOAD_OFF4 (IS_LOAD | SCALED_OFFSET_X4)
+
+#define IS_STOREX (IS_STORE | IS_VOLATILE)
+#define IS_STORE_OFF (IS_STORE | SCALED_OFFSET_X0)
+#define IS_STORE_OFF2 (IS_STORE | SCALED_OFFSET_X2)
+#define IS_STORE_OFF4 (IS_STORE | SCALED_OFFSET_X4)
// Common combo register usage patterns.
#define REG_DEF01 (REG_DEF0 | REG_DEF1)
@@ -552,6 +568,12 @@
virtual ~Mir2Lir() {}
+ /**
+ * @brief Decodes the LIR offset.
+ * @return Returns the scaled byte offset of the LIR.
+ */
+ virtual size_t GetInstructionOffset(LIR* lir);
+
int32_t s4FromSwitchData(const void* switch_data) {
return *reinterpret_cast<const int32_t*>(switch_data);
}
@@ -641,7 +663,10 @@
void SetMemRefType(LIR* lir, bool is_load, int mem_type);
void AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, bool is64bit);
void SetupRegMask(ResourceMask* mask, int reg);
+ void ClearRegMask(ResourceMask* mask, int reg);
void DumpLIRInsn(LIR* arg, unsigned char* base_addr);
+ void EliminateLoad(LIR* lir, int reg_id);
+ void DumpDependentInsnPair(LIR* check_lir, LIR* this_lir, const char* type);
void DumpPromotionMap();
void CodegenDump();
LIR* RawLIR(DexOffset dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
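
The SCALED_OFFSET_X{0,2,4} bits record how a load/store's immediate operand is scaled, which is what lets GetInstructionOffset() compare two accesses by byte offset. A hypothetical backend override, not part of this patch (the class name and the assumption that the immediate sits in operand 2 are illustrative):

    size_t ArmMir2Lir::GetInstructionOffset(LIR* lir) {
      uint64_t flags = GetTargetInstFlags(lir->opcode);
      DCHECK(flags & (IS_LOAD | IS_STORE));
      size_t offset = lir->operands[2];  // assumed: scaled immediate in operand 2
      if (flags & SCALED_OFFSET_X2) {
        offset <<= 1;                    // halfword-scaled, e.g. ldrh
      } else if (flags & SCALED_OFFSET_X4) {
        offset <<= 2;                    // word-scaled, e.g. ldr
      }
      return offset;
    }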
diff --git a/compiler/dex/reg_storage.h b/compiler/dex/reg_storage.h
index addd628..706933a 100644
--- a/compiler/dex/reg_storage.h
+++ b/compiler/dex/reg_storage.h
@@ -192,6 +192,18 @@
return (reg & (kFloatingPoint | k64BitMask)) == kFloatingPoint;
}
+ static constexpr bool Is32Bit(uint16_t reg) {
+ return ((reg & kShapeMask) == k32BitSolo);
+ }
+
+ static constexpr bool Is64Bit(uint16_t reg) {
+ return ((reg & k64BitMask) == k64Bits);
+ }
+
+ static constexpr bool Is64BitSolo(uint16_t reg) {
+ return ((reg & kShapeMask) == k64BitSolo);
+ }
+
// Used to retrieve either the low register of a pair, or the only register.
int GetReg() const {
DCHECK(!IsPair()) << "reg_ = 0x" << std::hex << reg_;
@@ -265,11 +277,11 @@
}
static constexpr bool SameRegType(RegStorage reg1, RegStorage reg2) {
- return (reg1.IsDouble() == reg2.IsDouble()) && (reg1.IsSingle() == reg2.IsSingle());
+ return ((reg1.reg_ & kShapeTypeMask) == (reg2.reg_ & kShapeTypeMask));
}
static constexpr bool SameRegType(int reg1, int reg2) {
- return (IsDouble(reg1) == IsDouble(reg2)) && (IsSingle(reg1) == IsSingle(reg2));
+ return ((reg1 & kShapeTypeMask) == (reg2 & kShapeTypeMask));
}
// Create a 32-bit solo.
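
The new SameRegType() compares the whole shape-and-type field, so it now distinguishes register shapes as well, not just FP precision. A quick sketch of the behavioral change (solo factory functions as declared in this file):

    RegStorage w = RegStorage::Solo32(3);   // 32-bit core solo
    RegStorage x = RegStorage::Solo64(3);   // 64-bit core solo
    // Old definition: neither is single- nor double-precision FP, so they
    // compared as the same type. The kShapeTypeMask comparison fixes that:
    DCHECK(!RegStorage::SameRegType(w, x));
    DCHECK(RegStorage::SameRegType(w, RegStorage::Solo32(4)));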
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 01c8f80..1b3f2a1 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -216,7 +216,7 @@
verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
bool is_range = (inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE) ||
(inst->Opcode() == Instruction::INVOKE_INTERFACE_RANGE);
- const verifier::RegType&
+ verifier::RegType&
reg_type(line->GetRegisterType(is_range ? inst->VRegC_3rc() : inst->VRegC_35c()));
if (!reg_type.HasClass()) {
@@ -284,18 +284,18 @@
const verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
bool is_safe_cast = false;
if (code == Instruction::CHECK_CAST) {
- const verifier::RegType& reg_type(line->GetRegisterType(inst->VRegA_21c()));
- const verifier::RegType& cast_type =
+ verifier::RegType& reg_type(line->GetRegisterType(inst->VRegA_21c()));
+ verifier::RegType& cast_type =
method_verifier->ResolveCheckedClass(inst->VRegB_21c());
is_safe_cast = cast_type.IsStrictlyAssignableFrom(reg_type);
} else {
- const verifier::RegType& array_type(line->GetRegisterType(inst->VRegB_23x()));
+ verifier::RegType& array_type(line->GetRegisterType(inst->VRegB_23x()));
// We only know its safe to assign to an array if the array type is precise. For example,
// an Object[] can have any type of object stored in it, but it may also be assigned a
// String[] in which case the stores need to be of Strings.
if (array_type.IsPreciseReference()) {
- const verifier::RegType& value_type(line->GetRegisterType(inst->VRegA_23x()));
- const verifier::RegType& component_type = method_verifier->GetRegTypeCache()
+ verifier::RegType& value_type(line->GetRegisterType(inst->VRegA_23x()));
+ verifier::RegType& component_type = method_verifier->GetRegTypeCache()
->GetComponentType(array_type, method_verifier->GetClassLoader());
is_safe_cast = component_type.IsStrictlyAssignableFrom(value_type);
}
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 883e1c1f..ae60b97 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -337,9 +337,11 @@
relocated_version_used = true;
} else {
image_filename = &system_filename;
+ is_system = true;
}
} else if (has_system) {
image_filename = &system_filename;
+ is_system = true;
} else {
CHECK(has_cache);
image_filename = &cache_filename;
@@ -354,8 +356,12 @@
image_lock.Init(image_filename->c_str(), &error_msg);
LOG(INFO) << "Using image file " << image_filename->c_str() << " for image location "
<< image_location;
+ // If we are in /system we can assume the image is good. We can also
+ // assume this if we are using a relocated image (i.e. image checksum
+ // matches) since this is only different by the offset. We need this to
+ // make sure that host tests continue to work.
space = ImageSpace::Init(image_filename->c_str(), image_location,
- false, &error_msg);
+ !(is_system || relocated_version_used), &error_msg);
}
if (space != nullptr) {
return space;
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index d5b90f2..43b9912 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -677,13 +677,15 @@
return soa.AddLocalReference<jclass>(c->GetSuperClass());
}
+ // Note: returns whether java_class1 can be safely cast to java_class2,
+ // not the other way around.
static jboolean IsAssignableFrom(JNIEnv* env, jclass java_class1, jclass java_class2) {
CHECK_NON_NULL_ARGUMENT_RETURN(java_class1, JNI_FALSE);
CHECK_NON_NULL_ARGUMENT_RETURN(java_class2, JNI_FALSE);
ScopedObjectAccess soa(env);
mirror::Class* c1 = soa.Decode<mirror::Class*>(java_class1);
mirror::Class* c2 = soa.Decode<mirror::Class*>(java_class2);
- return c1->IsAssignableFrom(c2) ? JNI_TRUE : JNI_FALSE;
+ return c2->IsAssignableFrom(c1) ? JNI_TRUE : JNI_FALSE;
}
static jboolean IsInstanceOf(JNIEnv* env, jobject jobj, jclass java_class) {
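
The swapped receiver matches the JNI specification, where IsAssignableFrom(env, c1, c2) asks whether c1 can safely be cast to c2, that is, the Java-level c2.isAssignableFrom(c1). A minimal sketch of the corrected behavior:

    jclass str = env->FindClass("java/lang/String");
    jclass obj = env->FindClass("java/lang/Object");
    CHECK(env->IsAssignableFrom(str, obj) == JNI_TRUE);   // String -> Object: ok
    CHECK(env->IsAssignableFrom(obj, str) == JNI_FALSE);  // Object -> String: no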
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 7c7e60c..da3080f 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -950,8 +950,28 @@
jclass string_class = env_->FindClass("java/lang/String");
ASSERT_NE(string_class, nullptr);
- ASSERT_TRUE(env_->IsAssignableFrom(object_class, string_class));
- ASSERT_FALSE(env_->IsAssignableFrom(string_class, object_class));
+ // A superclass is assignable from an instance of its
+ // subclass but not vice versa.
+ ASSERT_TRUE(env_->IsAssignableFrom(string_class, object_class));
+ ASSERT_FALSE(env_->IsAssignableFrom(object_class, string_class));
+
+ jclass charsequence_interface = env_->FindClass("java/lang/CharSequence");
+ ASSERT_NE(charsequence_interface, nullptr);
+
+ // An interface is assignable from an instance of an implementing
+ // class but not vice versa.
+ ASSERT_TRUE(env_->IsAssignableFrom(string_class, charsequence_interface));
+ ASSERT_FALSE(env_->IsAssignableFrom(charsequence_interface, string_class));
+
+ // Check that arrays are covariant.
+ jclass string_array_class = env_->FindClass("[Ljava/lang/String;");
+ ASSERT_NE(string_array_class, nullptr);
+ jclass object_array_class = env_->FindClass("[Ljava/lang/Object;");
+ ASSERT_NE(object_array_class, nullptr);
+ ASSERT_TRUE(env_->IsAssignableFrom(string_array_class, object_array_class));
+ ASSERT_FALSE(env_->IsAssignableFrom(object_array_class, string_array_class));
+
+ // Primitive types are tested in 004-JniTest.
// Null as either class should fail.
CheckJniAbortCatcher jni_abort_catcher;
diff --git a/runtime/native/java_lang_Runtime.cc b/runtime/native/java_lang_Runtime.cc
index 496a1b2..fb708a2 100644
--- a/runtime/native/java_lang_Runtime.cc
+++ b/runtime/native/java_lang_Runtime.cc
@@ -38,6 +38,7 @@
}
static void Runtime_nativeExit(JNIEnv*, jclass, jint status) {
+ LOG(INFO) << "System.exit called, status: " << status;
Runtime::Current()->CallExitHook(status);
exit(status);
}
diff --git a/runtime/verifier/method_verifier-inl.h b/runtime/verifier/method_verifier-inl.h
index 62ecf4b..d4fe106 100644
--- a/runtime/verifier/method_verifier-inl.h
+++ b/runtime/verifier/method_verifier-inl.h
@@ -66,9 +66,9 @@
return !failure_messages_.empty();
}
-inline const RegType& MethodVerifier::ResolveCheckedClass(uint32_t class_idx) {
+inline RegType& MethodVerifier::ResolveCheckedClass(uint32_t class_idx) {
DCHECK(!HasFailures());
- const RegType& result = ResolveClassAndCheckAccess(class_idx);
+ RegType& result = ResolveClassAndCheckAccess(class_idx);
DCHECK(!HasFailures());
return result;
}
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 2571cf1..18f7626 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -1175,7 +1175,7 @@
// If this is a constructor for a class other than java.lang.Object, mark the first ("this")
// argument as uninitialized. This restricts field access until the superclass constructor is
// called.
- const RegType& declaring_class = GetDeclaringClass();
+ RegType& declaring_class = GetDeclaringClass();
if (IsConstructor() && !declaring_class.IsJavaLangObject()) {
reg_line->SetRegisterType(arg_start + cur_arg,
reg_types_.UninitializedThisArgument(declaring_class));
@@ -1207,7 +1207,7 @@
// it's effectively considered initialized the instant we reach here (in the sense that we
// can return without doing anything or call virtual methods).
{
- const RegType& reg_type = ResolveClassAndCheckAccess(iterator.GetTypeIdx());
+ RegType& reg_type = ResolveClassAndCheckAccess(iterator.GetTypeIdx());
if (!reg_type.IsNonZeroReferenceTypes()) {
DCHECK(HasFailures());
return false;
@@ -1241,8 +1241,8 @@
return false;
}
- const RegType& lo_half = descriptor[0] == 'J' ? reg_types_.LongLo() : reg_types_.DoubleLo();
- const RegType& hi_half = descriptor[0] == 'J' ? reg_types_.LongHi() : reg_types_.DoubleHi();
+ RegType& lo_half = descriptor[0] == 'J' ? reg_types_.LongLo() : reg_types_.DoubleLo();
+ RegType& hi_half = descriptor[0] == 'J' ? reg_types_.LongHi() : reg_types_.DoubleHi();
reg_line->SetRegisterTypeWide(arg_start + cur_arg, lo_half, hi_half);
cur_arg++;
break;
@@ -1536,7 +1536,7 @@
* This statement can only appear as the first instruction in an exception handler. We verify
* that as part of extracting the exception type from the catch block list.
*/
- const RegType& res_type = GetCaughtExceptionType();
+ RegType& res_type = GetCaughtExceptionType();
work_line_->SetRegisterType(inst->VRegA_11x(), res_type);
break;
}
@@ -1550,7 +1550,7 @@
case Instruction::RETURN:
if (!IsConstructor() || work_line_->CheckConstructorReturn()) {
/* check the method signature */
- const RegType& return_type = GetMethodReturnType();
+ RegType& return_type = GetMethodReturnType();
if (!return_type.IsCategory1Types()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected non-category 1 return type "
<< return_type;
@@ -1558,7 +1558,7 @@
// Compilers may generate synthetic functions that write byte values into boolean fields.
// Also, it may use integer values for boolean, byte, short, and character return types.
const uint32_t vregA = inst->VRegA_11x();
- const RegType& src_type = work_line_->GetRegisterType(vregA);
+ RegType& src_type = work_line_->GetRegisterType(vregA);
bool use_src = ((return_type.IsBoolean() && src_type.IsByte()) ||
((return_type.IsBoolean() || return_type.IsByte() ||
return_type.IsShort() || return_type.IsChar()) &&
@@ -1575,7 +1575,7 @@
case Instruction::RETURN_WIDE:
if (!IsConstructor() || work_line_->CheckConstructorReturn()) {
/* check the method signature */
- const RegType& return_type = GetMethodReturnType();
+ RegType& return_type = GetMethodReturnType();
if (!return_type.IsCategory2Types()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-wide not expected";
} else {
@@ -1590,7 +1590,7 @@
break;
case Instruction::RETURN_OBJECT:
if (!IsConstructor() || work_line_->CheckConstructorReturn()) {
- const RegType& return_type = GetMethodReturnType();
+ RegType& return_type = GetMethodReturnType();
if (!return_type.IsReferenceTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-object not expected";
} else {
@@ -1598,7 +1598,7 @@
DCHECK(!return_type.IsZero());
DCHECK(!return_type.IsUninitializedReference());
const uint32_t vregA = inst->VRegA_11x();
- const RegType& reg_type = work_line_->GetRegisterType(vregA);
+ RegType& reg_type = work_line_->GetRegisterType(vregA);
// Disallow returning uninitialized values and verify that the reference in vAA is an
// instance of the "return_type"
if (reg_type.IsUninitializedTypes()) {
@@ -1645,29 +1645,29 @@
/* could be long or double; resolved upon use */
case Instruction::CONST_WIDE_16: {
int64_t val = static_cast<int16_t>(inst->VRegB_21s());
- const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
- const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+ RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
+ RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
work_line_->SetRegisterTypeWide(inst->VRegA_21s(), lo, hi);
break;
}
case Instruction::CONST_WIDE_32: {
int64_t val = static_cast<int32_t>(inst->VRegB_31i());
- const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
- const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+ RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
+ RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
work_line_->SetRegisterTypeWide(inst->VRegA_31i(), lo, hi);
break;
}
case Instruction::CONST_WIDE: {
int64_t val = inst->VRegB_51l();
- const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
- const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+ RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
+ RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
work_line_->SetRegisterTypeWide(inst->VRegA_51l(), lo, hi);
break;
}
case Instruction::CONST_WIDE_HIGH16: {
int64_t val = static_cast<uint64_t>(inst->VRegB_21h()) << 48;
- const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
- const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+ RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
+ RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
work_line_->SetRegisterTypeWide(inst->VRegA_21h(), lo, hi);
break;
}
@@ -1680,7 +1680,7 @@
case Instruction::CONST_CLASS: {
// Get type from instruction if unresolved then we need an access check
// TODO: check Compiler::CanAccessTypeWithoutChecks returns false when res_type is unresolved
- const RegType& res_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
+ RegType& res_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
// Register holds class, ie its type is class, on error it will hold Conflict.
work_line_->SetRegisterType(inst->VRegA_21c(),
res_type.IsConflict() ? res_type
@@ -1726,7 +1726,7 @@
*/
const bool is_checkcast = (inst->Opcode() == Instruction::CHECK_CAST);
const uint32_t type_idx = (is_checkcast) ? inst->VRegB_21c() : inst->VRegC_22c();
- const RegType& res_type = ResolveClassAndCheckAccess(type_idx);
+ RegType& res_type = ResolveClassAndCheckAccess(type_idx);
if (res_type.IsConflict()) {
// If this is a primitive type, fail HARD.
mirror::Class* klass = (*dex_cache_)->GetResolvedType(type_idx);
@@ -1745,7 +1745,7 @@
}
// TODO: check Compiler::CanAccessTypeWithoutChecks returns false when res_type is unresolved
uint32_t orig_type_reg = (is_checkcast) ? inst->VRegA_21c() : inst->VRegB_22c();
- const RegType& orig_type = work_line_->GetRegisterType(orig_type_reg);
+ RegType& orig_type = work_line_->GetRegisterType(orig_type_reg);
if (!res_type.IsNonZeroReferenceTypes()) {
if (is_checkcast) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "check-cast on unexpected class " << res_type;
@@ -1768,7 +1768,7 @@
break;
}
case Instruction::ARRAY_LENGTH: {
- const RegType& res_type = work_line_->GetRegisterType(inst->VRegB_12x());
+ RegType& res_type = work_line_->GetRegisterType(inst->VRegB_12x());
if (res_type.IsReferenceTypes()) {
if (!res_type.IsArrayTypes() && !res_type.IsZero()) { // ie not an array or null
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array-length on non-array " << res_type;
@@ -1781,7 +1781,7 @@
break;
}
case Instruction::NEW_INSTANCE: {
- const RegType& res_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
+ RegType& res_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
if (res_type.IsConflict()) {
DCHECK_NE(failures_.size(), 0U);
break; // bad class
@@ -1793,7 +1793,7 @@
<< "new-instance on primitive, interface or abstract class" << res_type;
// Soft failure so carry on to set register type.
}
- const RegType& uninit_type = reg_types_.Uninitialized(res_type, work_insn_idx_);
+ RegType& uninit_type = reg_types_.Uninitialized(res_type, work_insn_idx_);
// Any registers holding previous allocations from this address that have not yet been
// initialized must be marked invalid.
work_line_->MarkUninitRefsAsInvalid(uninit_type);
@@ -1846,7 +1846,7 @@
work_line_->SetRegisterType(inst->VRegA_23x(), reg_types_.Integer());
break;
case Instruction::THROW: {
- const RegType& res_type = work_line_->GetRegisterType(inst->VRegA_11x());
+ RegType& res_type = work_line_->GetRegisterType(inst->VRegA_11x());
if (!reg_types_.JavaLangThrowable(false).IsAssignableFrom(res_type)) {
Fail(res_type.IsUnresolvedTypes() ? VERIFY_ERROR_NO_CLASS : VERIFY_ERROR_BAD_CLASS_SOFT)
<< "thrown class " << res_type << " not instanceof Throwable";
@@ -1867,14 +1867,14 @@
case Instruction::FILL_ARRAY_DATA: {
/* Similar to the verification done for APUT */
- const RegType& array_type = work_line_->GetRegisterType(inst->VRegA_31t());
+ RegType& array_type = work_line_->GetRegisterType(inst->VRegA_31t());
/* array_type can be null if the reg type is Zero */
if (!array_type.IsZero()) {
if (!array_type.IsArrayTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid fill-array-data with array type "
<< array_type;
} else {
- const RegType& component_type = reg_types_.GetComponentType(array_type,
+ RegType& component_type = reg_types_.GetComponentType(array_type,
class_loader_->Get());
DCHECK(!component_type.IsConflict());
if (component_type.IsNonZeroReferenceTypes()) {
@@ -1902,8 +1902,8 @@
}
case Instruction::IF_EQ:
case Instruction::IF_NE: {
- const RegType& reg_type1 = work_line_->GetRegisterType(inst->VRegA_22t());
- const RegType& reg_type2 = work_line_->GetRegisterType(inst->VRegB_22t());
+ RegType& reg_type1 = work_line_->GetRegisterType(inst->VRegA_22t());
+ RegType& reg_type2 = work_line_->GetRegisterType(inst->VRegB_22t());
bool mismatch = false;
if (reg_type1.IsZero()) { // zero then integral or reference expected
mismatch = !reg_type2.IsReferenceTypes() && !reg_type2.IsIntegralTypes();
@@ -1922,8 +1922,8 @@
case Instruction::IF_GE:
case Instruction::IF_GT:
case Instruction::IF_LE: {
- const RegType& reg_type1 = work_line_->GetRegisterType(inst->VRegA_22t());
- const RegType& reg_type2 = work_line_->GetRegisterType(inst->VRegB_22t());
+ RegType& reg_type1 = work_line_->GetRegisterType(inst->VRegA_22t());
+ RegType& reg_type2 = work_line_->GetRegisterType(inst->VRegB_22t());
if (!reg_type1.IsIntegralTypes() || !reg_type2.IsIntegralTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "args to 'if' (" << reg_type1 << ","
<< reg_type2 << ") must be integral";
@@ -1932,7 +1932,7 @@
}
case Instruction::IF_EQZ:
case Instruction::IF_NEZ: {
- const RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
+ RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
if (!reg_type.IsReferenceTypes() && !reg_type.IsIntegralTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "type " << reg_type
<< " unexpected as arg to if-eqz/if-nez";
@@ -1978,8 +1978,8 @@
// type is assignable to the original then allow optimization. This check is performed to
// ensure that subsequent merges don't lose type information - such as becoming an
// interface from a class that would lose information relevant to field checks.
- const RegType& orig_type = work_line_->GetRegisterType(instance_of_inst->VRegB_22c());
- const RegType& cast_type = ResolveClassAndCheckAccess(instance_of_inst->VRegC_22c());
+ RegType& orig_type = work_line_->GetRegisterType(instance_of_inst->VRegB_22c());
+ RegType& cast_type = ResolveClassAndCheckAccess(instance_of_inst->VRegC_22c());
if (!orig_type.Equals(cast_type) &&
!cast_type.IsUnresolvedTypes() && !orig_type.IsUnresolvedTypes() &&
@@ -2034,7 +2034,7 @@
case Instruction::IF_GEZ:
case Instruction::IF_GTZ:
case Instruction::IF_LEZ: {
- const RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
+ RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
if (!reg_type.IsIntegralTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "type " << reg_type
<< " unexpected as arg to if-ltz/if-gez/if-gtz/if-lez";
@@ -2183,7 +2183,7 @@
inst->Opcode() == Instruction::INVOKE_SUPER_RANGE);
mirror::ArtMethod* called_method = VerifyInvocationArgs(inst, METHOD_VIRTUAL, is_range,
is_super);
- const RegType* return_type = nullptr;
+ RegType* return_type = nullptr;
if (called_method != nullptr) {
Thread* self = Thread::Current();
StackHandleScope<1> hs(self);
@@ -2239,7 +2239,7 @@
* allowing the latter only if the "this" argument is the same as the "this" argument to
* this method (which implies that we're in a constructor ourselves).
*/
- const RegType& this_type = work_line_->GetInvocationThis(inst, is_range);
+ RegType& this_type = work_line_->GetInvocationThis(inst, is_range);
if (this_type.IsConflict()) // failure.
break;
@@ -2250,7 +2250,7 @@
}
/* must be in same class or in superclass */
- // const RegType& this_super_klass = this_type.GetSuperClass(&reg_types_);
+ // RegType& this_super_klass = this_type.GetSuperClass(&reg_types_);
// TODO: re-enable constructor type verification
// if (this_super_klass.IsConflict()) {
// Unknown super class, fail so we re-check at runtime.
@@ -2271,7 +2271,7 @@
*/
work_line_->MarkRefsAsInitialized(this_type);
}
- const RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(),
+ RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(),
return_type_descriptor, false);
if (!return_type.IsLowHalf()) {
work_line_->SetResultRegisterType(return_type);
@@ -2297,7 +2297,7 @@
} else {
descriptor = called_method->GetReturnTypeDescriptor();
}
- const RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
+ RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
false);
if (!return_type.IsLowHalf()) {
work_line_->SetResultRegisterType(return_type);
@@ -2325,7 +2325,7 @@
/* Get the type of the "this" arg, which should either be a sub-interface of called
* interface or Object (see comments in RegType::JoinClass).
*/
- const RegType& this_type = work_line_->GetInvocationThis(inst, is_range);
+ RegType& this_type = work_line_->GetInvocationThis(inst, is_range);
if (this_type.IsZero()) {
/* null pointer always passes (and always fails at runtime) */
} else {
@@ -2355,7 +2355,7 @@
} else {
descriptor = abs_method->GetReturnTypeDescriptor();
}
- const RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
+ RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
false);
if (!return_type.IsLowHalf()) {
work_line_->SetResultRegisterType(return_type);
@@ -2621,7 +2621,7 @@
mirror::ArtMethod* called_method = VerifyInvokeVirtualQuickArgs(inst, is_range);
if (called_method != NULL) {
const char* descriptor = called_method->GetReturnTypeDescriptor();
- const RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
+ RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
false);
if (!return_type.IsLowHalf()) {
work_line_->SetResultRegisterType(return_type);
@@ -2905,11 +2905,11 @@
return true;
} // NOLINT(readability/fn_size)
-const RegType& MethodVerifier::ResolveClassAndCheckAccess(uint32_t class_idx) {
+RegType& MethodVerifier::ResolveClassAndCheckAccess(uint32_t class_idx) {
const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
- const RegType& referrer = GetDeclaringClass();
+ RegType& referrer = GetDeclaringClass();
mirror::Class* klass = (*dex_cache_)->GetResolvedType(class_idx);
- const RegType& result =
+ RegType& result =
klass != NULL ? reg_types_.FromClass(descriptor, klass,
klass->CannotBeAssignedFromOtherTypes())
: reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
@@ -2932,8 +2932,8 @@
return result;
}
-const RegType& MethodVerifier::GetCaughtExceptionType() {
- const RegType* common_super = NULL;
+RegType& MethodVerifier::GetCaughtExceptionType() {
+ RegType* common_super = NULL;
if (code_item_->tries_size_ != 0) {
const byte* handlers_ptr = DexFile::GetCatchHandlerData(*code_item_, 0);
uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
@@ -2944,7 +2944,7 @@
if (iterator.GetHandlerTypeIndex() == DexFile::kDexNoIndex16) {
common_super = &reg_types_.JavaLangThrowable(false);
} else {
- const RegType& exception = ResolveClassAndCheckAccess(iterator.GetHandlerTypeIndex());
+ RegType& exception = ResolveClassAndCheckAccess(iterator.GetHandlerTypeIndex());
if (!reg_types_.JavaLangThrowable(false).IsAssignableFrom(exception)) {
if (exception.IsUnresolvedTypes()) {
// We don't know enough about the type. Fail here and let runtime handle it.
@@ -2979,7 +2979,7 @@
mirror::ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess(uint32_t dex_method_idx,
MethodType method_type) {
const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx);
- const RegType& klass_type = ResolveClassAndCheckAccess(method_id.class_idx_);
+ RegType& klass_type = ResolveClassAndCheckAccess(method_id.class_idx_);
if (klass_type.IsConflict()) {
std::string append(" in attempt to access method ");
append += dex_file_->GetMethodName(method_id);
@@ -2990,7 +2990,7 @@
return NULL; // Can't resolve Class so no more to do here
}
mirror::Class* klass = klass_type.GetClass();
- const RegType& referrer = GetDeclaringClass();
+ RegType& referrer = GetDeclaringClass();
mirror::ArtMethod* res_method = (*dex_cache_)->GetResolvedMethod(dex_method_idx);
if (res_method == NULL) {
const char* name = dex_file_->GetMethodName(method_id);
@@ -3097,7 +3097,7 @@
* rigorous check here (which is okay since we have to do it at runtime).
*/
if (method_type != METHOD_STATIC) {
- const RegType& actual_arg_type = work_line_->GetInvocationThis(inst, is_range);
+ RegType& actual_arg_type = work_line_->GetInvocationThis(inst, is_range);
if (actual_arg_type.IsConflict()) { // GetInvocationThis failed.
CHECK(have_pending_hard_failure_);
return nullptr;
@@ -3118,7 +3118,7 @@
}
}
if (method_type != METHOD_INTERFACE && !actual_arg_type.IsZero()) {
- const RegType* res_method_class;
+ RegType* res_method_class;
if (res_method != nullptr) {
mirror::Class* klass = res_method->GetDeclaringClass();
res_method_class = &reg_types_.FromClass(klass->GetDescriptor().c_str(), klass,
@@ -3159,12 +3159,12 @@
return nullptr;
}
- const RegType& reg_type = reg_types_.FromDescriptor(class_loader_->Get(), param_descriptor,
+ RegType& reg_type = reg_types_.FromDescriptor(class_loader_->Get(), param_descriptor,
false);
uint32_t get_reg = is_range ? inst->VRegC_3rc() + static_cast<uint32_t>(sig_registers) :
arg[sig_registers];
if (reg_type.IsIntegralTypes()) {
- const RegType& src_type = work_line_->GetRegisterType(get_reg);
+ RegType& src_type = work_line_->GetRegisterType(get_reg);
if (!src_type.IsIntegralTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "register v" << get_reg << " has type " << src_type
<< " but expected " << reg_type;
@@ -3247,7 +3247,7 @@
// has a vtable entry for the target method.
if (is_super) {
DCHECK(method_type == METHOD_VIRTUAL);
- const RegType& super = GetDeclaringClass().GetSuperClass(®_types_);
+ RegType& super = GetDeclaringClass().GetSuperClass(®_types_);
if (super.IsUnresolvedTypes()) {
Fail(VERIFY_ERROR_NO_METHOD) << "unknown super class in invoke-super from "
<< PrettyMethod(dex_method_idx_, *dex_file_)
@@ -3275,7 +3275,7 @@
RegisterLine* reg_line, bool is_range) {
DCHECK(inst->Opcode() == Instruction::INVOKE_VIRTUAL_QUICK ||
inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK);
- const RegType& actual_arg_type = reg_line->GetInvocationThis(inst, is_range);
+ RegType& actual_arg_type = reg_line->GetInvocationThis(inst, is_range);
if (!actual_arg_type.HasClass()) {
VLOG(verifier) << "Failed to get mirror::Class* from '" << actual_arg_type << "'";
return nullptr;
@@ -3313,7 +3313,7 @@
// We use vAA as our expected arg count, rather than res_method->insSize, because we need to
// match the call to the signature. Also, we might be calling through an abstract method
// definition (which doesn't have register count values).
- const RegType& actual_arg_type = work_line_->GetInvocationThis(inst, is_range);
+ RegType& actual_arg_type = work_line_->GetInvocationThis(inst, is_range);
if (actual_arg_type.IsConflict()) { // GetInvocationThis failed.
return NULL;
}
@@ -3337,7 +3337,7 @@
}
if (!actual_arg_type.IsZero()) {
mirror::Class* klass = res_method->GetDeclaringClass();
- const RegType& res_method_class =
+ RegType& res_method_class =
reg_types_.FromClass(klass->GetDescriptor().c_str(), klass,
klass->CannotBeAssignedFromOtherTypes());
if (!res_method_class.IsAssignableFrom(actual_arg_type)) {
@@ -3373,7 +3373,7 @@
<< " missing signature component";
return NULL;
}
- const RegType& reg_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
+ RegType& reg_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
uint32_t get_reg = is_range ? inst->VRegC_3rc() + actual_args : arg[actual_args];
if (!work_line_->VerifyRegisterType(get_reg, reg_type)) {
return res_method;
@@ -3401,7 +3401,7 @@
DCHECK_EQ(inst->Opcode(), Instruction::FILLED_NEW_ARRAY_RANGE);
type_idx = inst->VRegB_3rc();
}
- const RegType& res_type = ResolveClassAndCheckAccess(type_idx);
+ RegType& res_type = ResolveClassAndCheckAccess(type_idx);
if (res_type.IsConflict()) { // bad class
DCHECK_NE(failures_.size(), 0U);
} else {
@@ -3412,12 +3412,12 @@
/* make sure "size" register is valid type */
work_line_->VerifyRegisterType(inst->VRegB_22c(), reg_types_.Integer());
/* set register type to array class */
- const RegType& precise_type = reg_types_.FromUninitialized(res_type);
+ RegType& precise_type = reg_types_.FromUninitialized(res_type);
work_line_->SetRegisterType(inst->VRegA_22c(), precise_type);
} else {
// Verify each register. If "arg_count" is bad, VerifyRegisterType() will run off the end of
// the list and fail. It's legal, if silly, for arg_count to be zero.
- const RegType& expected_type = reg_types_.GetComponentType(res_type, class_loader_->Get());
+ RegType& expected_type = reg_types_.GetComponentType(res_type, class_loader_->Get());
uint32_t arg_count = (is_range) ? inst->VRegA_3rc() : inst->VRegA_35c();
uint32_t arg[5];
if (!is_range) {
@@ -3431,19 +3431,19 @@
}
}
// filled-array result goes into "result" register
- const RegType& precise_type = reg_types_.FromUninitialized(res_type);
+ RegType& precise_type = reg_types_.FromUninitialized(res_type);
work_line_->SetResultRegisterType(precise_type);
}
}
}
void MethodVerifier::VerifyAGet(const Instruction* inst,
- const RegType& insn_type, bool is_primitive) {
- const RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
+ RegType& insn_type, bool is_primitive) {
+ RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
if (!index_type.IsArrayIndexTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Invalid reg type for array index (" << index_type << ")";
} else {
- const RegType& array_type = work_line_->GetRegisterType(inst->VRegB_23x());
+ RegType& array_type = work_line_->GetRegisterType(inst->VRegB_23x());
if (array_type.IsZero()) {
// Null array class; this code path will fail at runtime. Infer a merge-able type from the
// instruction type. TODO: have a proper notion of bottom here.
@@ -3459,7 +3459,7 @@
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "not array type " << array_type << " with aget";
} else {
/* verify the class */
- const RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_->Get());
+ RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_->Get());
if (!component_type.IsReferenceTypes() && !is_primitive) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "primitive array type " << array_type
<< " source for aget-object";
@@ -3486,12 +3486,12 @@
}
}
-void MethodVerifier::VerifyPrimitivePut(const RegType& target_type, const RegType& insn_type,
+void MethodVerifier::VerifyPrimitivePut(RegType& target_type, RegType& insn_type,
const uint32_t vregA) {
// Primitive assignability rules are weaker than regular assignability rules.
bool instruction_compatible;
bool value_compatible;
- const RegType& value_type = work_line_->GetRegisterType(vregA);
+ RegType& value_type = work_line_->GetRegisterType(vregA);
if (target_type.IsIntegralTypes()) {
instruction_compatible = target_type.Equals(insn_type);
value_compatible = value_type.IsIntegralTypes();
@@ -3500,11 +3500,11 @@
value_compatible = value_type.IsFloatTypes();
} else if (target_type.IsLong()) {
instruction_compatible = insn_type.IsLong();
- const RegType& value_type_hi = work_line_->GetRegisterType(vregA + 1);
+ RegType& value_type_hi = work_line_->GetRegisterType(vregA + 1);
value_compatible = value_type.IsLongTypes() && value_type.CheckWidePair(value_type_hi);
} else if (target_type.IsDouble()) {
instruction_compatible = insn_type.IsLong(); // no put-double, so expect put-long
- const RegType& value_type_hi = work_line_->GetRegisterType(vregA + 1);
+ RegType& value_type_hi = work_line_->GetRegisterType(vregA + 1);
value_compatible = value_type.IsDoubleTypes() && value_type.CheckWidePair(value_type_hi);
} else {
instruction_compatible = false; // reference with primitive store
@@ -3526,19 +3526,19 @@
}
void MethodVerifier::VerifyAPut(const Instruction* inst,
- const RegType& insn_type, bool is_primitive) {
- const RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
+ RegType& insn_type, bool is_primitive) {
+ RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
if (!index_type.IsArrayIndexTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Invalid reg type for array index (" << index_type << ")";
} else {
- const RegType& array_type = work_line_->GetRegisterType(inst->VRegB_23x());
+ RegType& array_type = work_line_->GetRegisterType(inst->VRegB_23x());
if (array_type.IsZero()) {
// Null array type; this code path will fail at runtime. Infer a merge-able type from the
// instruction type.
} else if (!array_type.IsArrayTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "not array type " << array_type << " with aput";
} else {
- const RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_->Get());
+ RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_->Get());
const uint32_t vregA = inst->VRegA_23x();
if (is_primitive) {
VerifyPrimitivePut(component_type, insn_type, vregA);
@@ -3560,7 +3560,7 @@
mirror::ArtField* MethodVerifier::GetStaticField(int field_idx) {
const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
// Check access to class
- const RegType& klass_type = ResolveClassAndCheckAccess(field_id.class_idx_);
+ RegType& klass_type = ResolveClassAndCheckAccess(field_id.class_idx_);
if (klass_type.IsConflict()) { // bad class
AppendToLastFailMessage(StringPrintf(" in attempt to access static field %d (%s) in %s",
field_idx, dex_file_->GetFieldName(field_id),
@@ -3592,10 +3592,10 @@
return field;
}
-mirror::ArtField* MethodVerifier::GetInstanceField(const RegType& obj_type, int field_idx) {
+mirror::ArtField* MethodVerifier::GetInstanceField(RegType& obj_type, int field_idx) {
const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
// Check access to class
- const RegType& klass_type = ResolveClassAndCheckAccess(field_id.class_idx_);
+ RegType& klass_type = ResolveClassAndCheckAccess(field_id.class_idx_);
if (klass_type.IsConflict()) {
AppendToLastFailMessage(StringPrintf(" in attempt to access instance field %d (%s) in %s",
field_idx, dex_file_->GetFieldName(field_id),
@@ -3629,7 +3629,7 @@
return field;
} else {
mirror::Class* klass = field->GetDeclaringClass();
- const RegType& field_klass =
+ RegType& field_klass =
reg_types_.FromClass(dex_file_->GetFieldDeclaringClassDescriptor(field_id),
klass, klass->CannotBeAssignedFromOtherTypes());
if (obj_type.IsUninitializedTypes() &&
@@ -3654,17 +3654,17 @@
}
}
-void MethodVerifier::VerifyISGet(const Instruction* inst, const RegType& insn_type,
+void MethodVerifier::VerifyISGet(const Instruction* inst, RegType& insn_type,
bool is_primitive, bool is_static) {
uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
mirror::ArtField* field;
if (is_static) {
field = GetStaticField(field_idx);
} else {
- const RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
+ RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
field = GetInstanceField(object_type, field_idx);
}
- const RegType* field_type = nullptr;
+ RegType* field_type = nullptr;
if (field != NULL) {
Thread* self = Thread::Current();
mirror::Class* field_type_class;
@@ -3720,17 +3720,17 @@
}
}
-void MethodVerifier::VerifyISPut(const Instruction* inst, const RegType& insn_type,
+void MethodVerifier::VerifyISPut(const Instruction* inst, RegType& insn_type,
bool is_primitive, bool is_static) {
uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
mirror::ArtField* field;
if (is_static) {
field = GetStaticField(field_idx);
} else {
- const RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
+ RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
field = GetInstanceField(object_type, field_idx);
}
- const RegType* field_type = nullptr;
+ RegType* field_type = nullptr;
if (field != NULL) {
if (field->IsFinal() && field->GetDeclaringClass() != GetDeclaringClass().GetClass()) {
Fail(VERIFY_ERROR_ACCESS_FIELD) << "cannot modify final field " << PrettyField(field)
@@ -3782,7 +3782,7 @@
inst->Opcode() == Instruction::IPUT_QUICK ||
inst->Opcode() == Instruction::IPUT_WIDE_QUICK ||
inst->Opcode() == Instruction::IPUT_OBJECT_QUICK);
- const RegType& object_type = reg_line->GetRegisterType(inst->VRegB_22c());
+ RegType& object_type = reg_line->GetRegisterType(inst->VRegB_22c());
if (!object_type.HasClass()) {
VLOG(verifier) << "Failed to get mirror::Class* from '" << object_type << "'";
return nullptr;
@@ -3797,7 +3797,7 @@
return f;
}
-void MethodVerifier::VerifyIGetQuick(const Instruction* inst, const RegType& insn_type,
+void MethodVerifier::VerifyIGetQuick(const Instruction* inst, RegType& insn_type,
bool is_primitive) {
DCHECK(Runtime::Current()->IsStarted());
mirror::ArtField* field = GetQuickFieldAccess(inst, work_line_.get());
@@ -3812,7 +3812,7 @@
FieldHelper fh(h_field);
field_type_class = fh.GetType(can_load_classes_);
}
- const RegType* field_type;
+ RegType* field_type;
if (field_type_class != nullptr) {
field_type = &reg_types_.FromClass(field->GetTypeDescriptor(), field_type_class,
field_type_class->CannotBeAssignedFromOtherTypes());
@@ -3857,7 +3857,7 @@
}
}
-void MethodVerifier::VerifyIPutQuick(const Instruction* inst, const RegType& insn_type,
+void MethodVerifier::VerifyIPutQuick(const Instruction* inst, RegType& insn_type,
bool is_primitive) {
DCHECK(Runtime::Current()->IsStarted());
mirror::ArtField* field = GetQuickFieldAccess(inst, work_line_.get());
@@ -3867,7 +3867,7 @@
}
const char* descriptor = field->GetTypeDescriptor();
mirror::ClassLoader* loader = field->GetDeclaringClass()->GetClassLoader();
- const RegType& field_type = reg_types_.FromDescriptor(loader, descriptor, false);
+ RegType& field_type = reg_types_.FromDescriptor(loader, descriptor, false);
if (field != NULL) {
if (field->IsFinal() && field->GetDeclaringClass() != GetDeclaringClass().GetClass()) {
Fail(VERIFY_ERROR_ACCESS_FIELD) << "cannot modify final field " << PrettyField(field)
@@ -3880,7 +3880,7 @@
// Primitive field assignability rules are weaker than regular assignability rules
bool instruction_compatible;
bool value_compatible;
- const RegType& value_type = work_line_->GetRegisterType(vregA);
+ RegType& value_type = work_line_->GetRegisterType(vregA);
if (field_type.IsIntegralTypes()) {
instruction_compatible = insn_type.IsIntegralTypes();
value_compatible = value_type.IsIntegralTypes();
@@ -3998,7 +3998,7 @@
return &insn_flags_[work_insn_idx_];
}
-const RegType& MethodVerifier::GetMethodReturnType() {
+RegType& MethodVerifier::GetMethodReturnType() {
if (return_type_ == nullptr) {
if (mirror_method_ != NULL) {
Thread* self = Thread::Current();
@@ -4028,7 +4028,7 @@
return *return_type_;
}
-const RegType& MethodVerifier::GetDeclaringClass() {
+RegType& MethodVerifier::GetDeclaringClass() {
if (declaring_class_ == NULL) {
const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
const char* descriptor
@@ -4049,7 +4049,7 @@
DCHECK(line != nullptr) << "No register line at DEX pc " << StringPrintf("0x%x", dex_pc);
std::vector<int32_t> result;
for (size_t i = 0; i < line->NumRegs(); ++i) {
- const RegType& type = line->GetRegisterType(i);
+ RegType& type = line->GetRegisterType(i);
if (type.IsConstant()) {
result.push_back(type.IsPreciseConstant() ? kConstant : kImpreciseConstant);
result.push_back(type.ConstantValue());
@@ -4089,7 +4089,7 @@
return result;
}
-const RegType& MethodVerifier::DetermineCat1Constant(int32_t value, bool precise) {
+RegType& MethodVerifier::DetermineCat1Constant(int32_t value, bool precise) {
if (precise) {
// Precise constant type.
return reg_types_.FromCat1Const(value, true);
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 757c419..e63a90c 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -230,7 +230,7 @@
bool HasCheckCasts() const;
bool HasVirtualOrInterfaceInvokes() const;
bool HasFailures() const;
- const RegType& ResolveCheckedClass(uint32_t class_idx)
+ RegType& ResolveCheckedClass(uint32_t class_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
@@ -471,34 +471,34 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Helper to perform verification on puts of primitive type.
- void VerifyPrimitivePut(const RegType& target_type, const RegType& insn_type,
+ void VerifyPrimitivePut(RegType& target_type, RegType& insn_type,
const uint32_t vregA) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Perform verification of an aget instruction. The destination register's type will be set to
// be that of component type of the array unless the array type is unknown, in which case a
// bottom type inferred from the type of instruction is used. is_primitive is false for an
// aget-object.
- void VerifyAGet(const Instruction* inst, const RegType& insn_type,
+ void VerifyAGet(const Instruction* inst, RegType& insn_type,
bool is_primitive) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Perform verification of an aput instruction.
- void VerifyAPut(const Instruction* inst, const RegType& insn_type,
+ void VerifyAPut(const Instruction* inst, RegType& insn_type,
bool is_primitive) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Lookup instance field and fail for resolution violations
- mirror::ArtField* GetInstanceField(const RegType& obj_type, int field_idx)
+ mirror::ArtField* GetInstanceField(RegType& obj_type, int field_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Lookup static field and fail for resolution violations
mirror::ArtField* GetStaticField(int field_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Perform verification of an iget or sget instruction.
- void VerifyISGet(const Instruction* inst, const RegType& insn_type,
+ void VerifyISGet(const Instruction* inst, RegType& insn_type,
bool is_primitive, bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Perform verification of an iput or sput instruction.
- void VerifyISPut(const Instruction* inst, const RegType& insn_type,
+ void VerifyISPut(const Instruction* inst, RegType& insn_type,
bool is_primitive, bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -508,18 +508,18 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Perform verification of an iget-quick instruction.
- void VerifyIGetQuick(const Instruction* inst, const RegType& insn_type,
+ void VerifyIGetQuick(const Instruction* inst, RegType& insn_type,
bool is_primitive)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Perform verification of an iput-quick instruction.
- void VerifyIPutQuick(const Instruction* inst, const RegType& insn_type,
+ void VerifyIPutQuick(const Instruction* inst, RegType& insn_type,
bool is_primitive)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Resolves a class based on an index and performs access checks to ensure the referrer can
// access the resolved class.
- const RegType& ResolveClassAndCheckAccess(uint32_t class_idx)
+ RegType& ResolveClassAndCheckAccess(uint32_t class_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -527,7 +527,7 @@
* address, determine the Join of all exceptions that can land here. Fails if no matching
* exception handler can be found or if the Join of exception types fails.
*/
- const RegType& GetCaughtExceptionType()
+ RegType& GetCaughtExceptionType()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -613,14 +613,14 @@
}
// Return the register type for the method.
- const RegType& GetMethodReturnType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ RegType& GetMethodReturnType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get a type representing the declaring class of the method.
- const RegType& GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ RegType& GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
InstructionFlags* CurrentInsnFlags();
- const RegType& DetermineCat1Constant(int32_t value, bool precise)
+ RegType& DetermineCat1Constant(int32_t value, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
RegTypeCache reg_types_;
@@ -641,7 +641,7 @@
// Its object representation if known.
mirror::ArtMethod* mirror_method_ GUARDED_BY(Locks::mutator_lock_);
const uint32_t method_access_flags_; // Method's access flags.
- const RegType* return_type_; // Lazily computed return type of the method.
+ RegType* return_type_; // Lazily computed return type of the method.
const DexFile* const dex_file_; // The dex file containing the method.
// The dex_cache for the declaring class of the method.
Handle<mirror::DexCache>* dex_cache_ GUARDED_BY(Locks::mutator_lock_);
@@ -649,7 +649,7 @@
Handle<mirror::ClassLoader>* class_loader_ GUARDED_BY(Locks::mutator_lock_);
const DexFile::ClassDef* const class_def_; // The class def of the declaring class of the method.
const DexFile::CodeItem* const code_item_; // The code item containing the code for the method.
- const RegType* declaring_class_; // Lazily computed reg type of the method's declaring class.
+ RegType* declaring_class_; // Lazily computed reg type of the method's declaring class.
// Instruction widths and flags, one entry per code unit.
std::unique_ptr<InstructionFlags[]> insn_flags_;
// The dex PC of a FindLocksAtDexPc request, -1 otherwise.
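The two "lazily computed" members above follow the usual resolve-once pattern; a minimal sketch, where class_loader and descriptor stand in for lookups this hunk does not show:

    RegType& MethodVerifier::GetMethodReturnType() {
      if (return_type_ == nullptr) {
        // Resolve on first use, then serve the cached (now non-const) pointer.
        // 'class_loader' and 'descriptor' are placeholders for helpers not shown here.
        return_type_ = &reg_types_.FromDescriptor(class_loader, descriptor, /*precise=*/false);
      }
      return *return_type_;
    }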
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index f0729e4..6422cdf 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -81,7 +81,7 @@
: PrimitiveType(klass, descriptor, cache_id) {
}
-std::string PreciseConstType::Dump() const {
+std::string PreciseConstType::Dump() {
std::stringstream result;
uint32_t val = ConstantValue();
if (val == 0) {
@@ -98,47 +98,47 @@
return result.str();
}
-std::string BooleanType::Dump() const {
+std::string BooleanType::Dump() {
return "Boolean";
}
-std::string ConflictType::Dump() const {
+std::string ConflictType::Dump() {
return "Conflict";
}
-std::string ByteType::Dump() const {
+std::string ByteType::Dump() {
return "Byte";
}
-std::string ShortType::Dump() const {
+std::string ShortType::Dump() {
return "Short";
}
-std::string CharType::Dump() const {
+std::string CharType::Dump() {
return "Char";
}
-std::string FloatType::Dump() const {
+std::string FloatType::Dump() {
return "Float";
}
-std::string LongLoType::Dump() const {
+std::string LongLoType::Dump() {
return "Long (Low Half)";
}
-std::string LongHiType::Dump() const {
+std::string LongHiType::Dump() {
return "Long (High Half)";
}
-std::string DoubleLoType::Dump() const {
+std::string DoubleLoType::Dump() {
return "Double (Low Half)";
}
-std::string DoubleHiType::Dump() const {
+std::string DoubleHiType::Dump() {
return "Double (High Half)";
}
-std::string IntegerType::Dump() const {
+std::string IntegerType::Dump() {
return "Integer";
}
@@ -361,7 +361,7 @@
}
}
-std::string UndefinedType::Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+std::string UndefinedType::Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return "Undefined";
}
@@ -391,7 +391,7 @@
DCHECK(klass->IsInstantiable());
}
-std::string UnresolvedMergedType::Dump() const {
+std::string UnresolvedMergedType::Dump() {
std::stringstream result;
std::set<uint16_t> types = GetMergedTypes();
result << "UnresolvedMergedReferences(";
@@ -405,59 +405,59 @@
return result.str();
}
-std::string UnresolvedSuperClass::Dump() const {
+std::string UnresolvedSuperClass::Dump() {
std::stringstream result;
uint16_t super_type_id = GetUnresolvedSuperClassChildId();
result << "UnresolvedSuperClass(" << reg_type_cache_->GetFromId(super_type_id).Dump() << ")";
return result.str();
}
-std::string UnresolvedReferenceType::Dump() const {
+std::string UnresolvedReferenceType::Dump() {
std::stringstream result;
result << "Unresolved Reference" << ": " << PrettyDescriptor(GetDescriptor());
return result.str();
}
-std::string UnresolvedUninitializedRefType::Dump() const {
+std::string UnresolvedUninitializedRefType::Dump() {
std::stringstream result;
result << "Unresolved And Uninitialized Reference" << ": " << PrettyDescriptor(GetDescriptor());
result << " Allocation PC: " << GetAllocationPc();
return result.str();
}
-std::string UnresolvedUninitializedThisRefType::Dump() const {
+std::string UnresolvedUninitializedThisRefType::Dump() {
std::stringstream result;
result << "Unresolved And Uninitialized This Reference" << PrettyDescriptor(GetDescriptor());
return result.str();
}
-std::string ReferenceType::Dump() const {
+std::string ReferenceType::Dump() {
std::stringstream result;
result << "Reference" << ": " << PrettyDescriptor(GetClass());
return result.str();
}
-std::string PreciseReferenceType::Dump() const {
+std::string PreciseReferenceType::Dump() {
std::stringstream result;
result << "Precise Reference" << ": "<< PrettyDescriptor(GetClass());
return result.str();
}
-std::string UninitializedReferenceType::Dump() const {
+std::string UninitializedReferenceType::Dump() {
std::stringstream result;
result << "Uninitialized Reference" << ": " << PrettyDescriptor(GetClass());
result << " Allocation PC: " << GetAllocationPc();
return result.str();
}
-std::string UninitializedThisReferenceType::Dump() const {
+std::string UninitializedThisReferenceType::Dump() {
std::stringstream result;
result << "Uninitialized This Reference" << ": " << PrettyDescriptor(GetClass());
result << "Allocation PC: " << GetAllocationPc();
return result.str();
}
-std::string ImpreciseConstType::Dump() const {
+std::string ImpreciseConstType::Dump() {
std::stringstream result;
uint32_t val = ConstantValue();
if (val == 0) {
@@ -472,7 +472,7 @@
}
return result.str();
}
-std::string PreciseConstLoType::Dump() const {
+std::string PreciseConstLoType::Dump() {
std::stringstream result;
int32_t val = ConstantValueLo();
@@ -486,7 +486,7 @@
return result.str();
}
-std::string ImpreciseConstLoType::Dump() const {
+std::string ImpreciseConstLoType::Dump() {
std::stringstream result;
int32_t val = ConstantValueLo();
@@ -500,7 +500,7 @@
return result.str();
}
-std::string PreciseConstHiType::Dump() const {
+std::string PreciseConstHiType::Dump() {
std::stringstream result;
int32_t val = ConstantValueHi();
result << "Precise ";
@@ -513,7 +513,7 @@
return result.str();
}
-std::string ImpreciseConstHiType::Dump() const {
+std::string ImpreciseConstHiType::Dump() {
std::stringstream result;
int32_t val = ConstantValueHi();
result << "Imprecise ";
@@ -530,7 +530,7 @@
: RegType(NULL, "", cache_id), constant_(constant) {
}
-const RegType& UndefinedType::Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
+RegType& UndefinedType::Merge(RegType& incoming_type, RegTypeCache* reg_types)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (incoming_type.IsUndefined()) {
return *this; // Undefined MERGE Undefined => Undefined
@@ -538,7 +538,7 @@
return reg_types->Conflict();
}
-const RegType& RegType::HighHalf(RegTypeCache* cache) const {
+RegType& RegType::HighHalf(RegTypeCache* cache) const {
DCHECK(IsLowHalf());
if (IsLongLo()) {
return cache->LongHi();
@@ -586,12 +586,10 @@
}
std::set<uint16_t> UnresolvedMergedType::GetMergedTypes() const {
std::pair<uint16_t, uint16_t> refs = GetTopMergedTypes();
- const RegType& _left(reg_type_cache_->GetFromId(refs.first));
- RegType& __left(const_cast<RegType&>(_left));
- UnresolvedMergedType* left = down_cast<UnresolvedMergedType*>(&__left);
+ RegType& _left(reg_type_cache_->GetFromId(refs.first));
+ UnresolvedMergedType* left = down_cast<UnresolvedMergedType*>(&_left);
- RegType& _right(
- const_cast<RegType&>(reg_type_cache_->GetFromId(refs.second)));
+ RegType& _right(reg_type_cache_->GetFromId(refs.second));
UnresolvedMergedType* right = down_cast<UnresolvedMergedType*>(&_right);
std::set<uint16_t> types;
@@ -614,7 +612,7 @@
return types;
}
-const RegType& RegType::GetSuperClass(RegTypeCache* cache) const {
+RegType& RegType::GetSuperClass(RegTypeCache* cache) {
if (!IsUnresolvedTypes()) {
mirror::Class* super_klass = GetClass()->GetSuperClass();
if (super_klass != NULL) {
@@ -635,7 +633,7 @@
}
}
-bool RegType::CanAccess(const RegType& other) const {
+bool RegType::CanAccess(RegType& other) {
if (Equals(other)) {
return true; // Trivial accessibility.
} else {
@@ -651,7 +649,7 @@
}
}
-bool RegType::CanAccessMember(mirror::Class* klass, uint32_t access_flags) const {
+bool RegType::CanAccessMember(mirror::Class* klass, uint32_t access_flags) {
if ((access_flags & kAccPublic) != 0) {
return true;
}
@@ -662,7 +660,7 @@
}
}
-bool RegType::IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+bool RegType::IsObjectArrayTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) {
// Primitive arrays will always resolve
DCHECK(descriptor_[1] == 'L' || descriptor_[1] == '[');
@@ -675,11 +673,11 @@
}
}
-bool RegType::IsJavaLangObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+bool RegType::IsJavaLangObject() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return IsReference() && GetClass()->IsObjectClass();
}
-bool RegType::IsArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+bool RegType::IsArrayTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) {
return descriptor_[0] == '[';
} else if (HasClass()) {
@@ -689,7 +687,7 @@
}
}
-bool RegType::IsJavaLangObjectArray() const {
+bool RegType::IsJavaLangObjectArray() {
if (HasClass()) {
mirror::Class* type = GetClass();
return type->IsArrayClass() && type->GetComponentType()->IsObjectClass();
@@ -697,7 +695,7 @@
return false;
}
-bool RegType::IsInstantiableTypes() const {
+bool RegType::IsInstantiableTypes() {
return IsUnresolvedTypes() || (IsNonZeroReferenceTypes() && GetClass()->IsInstantiable());
}
@@ -705,7 +703,7 @@
: ConstantType(constat, cache_id) {
}
-static bool AssignableFrom(const RegType& lhs, const RegType& rhs, bool strict)
+static bool AssignableFrom(RegType& lhs, RegType& rhs, bool strict)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (lhs.Equals(rhs)) {
return true;
@@ -753,11 +751,11 @@
}
}
-bool RegType::IsAssignableFrom(const RegType& src) const {
+bool RegType::IsAssignableFrom(RegType& src) {
return AssignableFrom(*this, src, false);
}
-bool RegType::IsStrictlyAssignableFrom(const RegType& src) const {
+bool RegType::IsStrictlyAssignableFrom(RegType& src) {
return AssignableFrom(*this, src, true);
}
@@ -775,11 +773,11 @@
}
}
-static const RegType& SelectNonConstant(const RegType& a, const RegType& b) {
+static RegType& SelectNonConstant(RegType& a, RegType& b) {
return a.IsConstantTypes() ? b : a;
}
-const RegType& RegType::Merge(const RegType& incoming_type, RegTypeCache* reg_types) const {
+RegType& RegType::Merge(RegType& incoming_type, RegTypeCache* reg_types) {
DCHECK(!Equals(incoming_type)); // Trivial equality handled by caller
if (IsConflict()) {
return *this; // Conflict MERGE * => Conflict
@@ -958,16 +956,16 @@
void RegType::CheckInvariants() const {
if (IsConstant() || IsConstantLo() || IsConstantHi()) {
CHECK(descriptor_.empty()) << *this;
- CHECK(klass_ == NULL) << *this;
+ CHECK(klass_.IsNull()) << *this;
}
- if (klass_ != NULL) {
+ if (!klass_.IsNull()) {
CHECK(!descriptor_.empty()) << *this;
}
}
void RegType::VisitRoots(RootCallback* callback, void* arg) {
- if (klass_ != nullptr) {
- callback(reinterpret_cast<mirror::Object**>(&klass_), arg, 0, kRootUnknown);
+ if (!klass_.IsNull()) {
+ klass_.VisitRoot(callback, arg, 0, kRootUnknown);
}
}
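For readers without gc_root.h at hand, a hedged sketch of the wrapper klass_ is switched to: Read(), IsNull() and VisitRoot() mirror the calls in the hunks above, and VisitRoot does what the removed raw-pointer callback did (the real implementation may differ):

    template <typename MirrorType>
    class GcRootSketch {
     public:
      explicit GcRootSketch(MirrorType* ref) : root_(ref) {}
      MirrorType* Read() const { return root_; }
      bool IsNull() const { return root_ == nullptr; }
      void VisitRoot(RootCallback* callback, void* arg, uint32_t thread_id, RootType root_type) {
        // Same effect as the deleted callback(reinterpret_cast<...>(&klass_), ...) line.
        callback(reinterpret_cast<mirror::Object**>(&root_), arg, thread_id, root_type);
      }
     private:
      MirrorType* root_;  // Updated in place when the GC moves the class.
    };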
@@ -978,36 +976,37 @@
void UnresolvedUninitializedThisRefType::CheckInvariants() const {
CHECK_EQ(GetAllocationPc(), 0U) << *this;
CHECK(!descriptor_.empty()) << *this;
- CHECK(klass_ == NULL) << *this;
+ CHECK(klass_.IsNull()) << *this;
}
void UnresolvedUninitializedRefType::CheckInvariants() const {
CHECK(!descriptor_.empty()) << *this;
- CHECK(klass_ == NULL) << *this;
+ CHECK(klass_.IsNull()) << *this;
}
void UnresolvedMergedType::CheckInvariants() const {
// Unresolved merged types: merged types should be defined.
CHECK(descriptor_.empty()) << *this;
- CHECK(klass_ == NULL) << *this;
+ CHECK(klass_.IsNull()) << *this;
CHECK_NE(merged_types_.first, 0U) << *this;
CHECK_NE(merged_types_.second, 0U) << *this;
}
void UnresolvedReferenceType::CheckInvariants() const {
CHECK(!descriptor_.empty()) << *this;
- CHECK(klass_ == NULL) << *this;
+ CHECK(klass_.IsNull()) << *this;
}
void UnresolvedSuperClass::CheckInvariants() const {
// Unresolved merged types: merged types should be defined.
CHECK(descriptor_.empty()) << *this;
- CHECK(klass_ == NULL) << *this;
+ CHECK(klass_.IsNull()) << *this;
CHECK_NE(unresolved_child_id_, 0U) << *this;
}
std::ostream& operator<<(std::ostream& os, const RegType& rhs) {
- os << rhs.Dump();
+ RegType& rhs_non_const = const_cast<RegType&>(rhs);
+ os << rhs_non_const.Dump();
return os;
}
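Streaming stays source-compatible: operator<< still takes a const reference and sheds constness at this single boundary, since Dump() is no longer const. Illustrative use:

    RegType& t = cache.JavaLangObject(true);
    LOG(INFO) << t;  // operator<< const_casts, then calls t.Dump()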
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index e985f3a..d508fb5 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -25,6 +25,7 @@
#include "jni.h"
#include "base/macros.h"
+#include "gc_root.h"
#include "globals.h"
#include "object_callbacks.h"
#include "primitive.h"
@@ -107,7 +108,7 @@
return IsLowHalf();
}
// Check this is the low half, and that type_h is its matching high-half.
- inline bool CheckWidePair(const RegType& type_h) const {
+ inline bool CheckWidePair(RegType& type_h) const {
if (IsLowHalf()) {
return ((IsPreciseConstantLo() && type_h.IsPreciseConstantHi()) ||
(IsPreciseConstantLo() && type_h.IsImpreciseConstantHi()) ||
@@ -119,7 +120,7 @@
return false;
}
// The high half that corresponds to this low half
- const RegType& HighHalf(RegTypeCache* cache) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ RegType& HighHalf(RegTypeCache* cache) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsConstantBoolean() const {
return IsConstant() && (ConstantValue() >= 0) && (ConstantValue() <= 1);
@@ -198,55 +199,54 @@
virtual bool HasClass() const {
return false;
}
- bool IsJavaLangObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsJavaLangObject() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsArrayTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsObjectArrayTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Primitive::Type GetPrimitiveType() const;
- bool IsJavaLangObjectArray() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsInstantiableTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsJavaLangObjectArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsInstantiableTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const std::string& GetDescriptor() const {
DCHECK(HasClass() || (IsUnresolvedTypes() && !IsUnresolvedMergedReference() &&
!IsUnresolvedSuperClass()));
return descriptor_;
}
- mirror::Class* GetClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Class* GetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(!IsUnresolvedReference());
- DCHECK(klass_ != NULL) << Dump();
+ DCHECK(!klass_.IsNull()) << Dump();
DCHECK(HasClass());
- return klass_;
+ return klass_.Read();
}
uint16_t GetId() const {
return cache_id_;
}
- const RegType& GetSuperClass(RegTypeCache* cache) const
+ RegType& GetSuperClass(RegTypeCache* cache)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- virtual std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ virtual std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
// Can this type access other?
- bool CanAccess(const RegType& other) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool CanAccess(RegType& other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Can this type access a member with the given properties?
- bool CanAccessMember(mirror::Class* klass, uint32_t access_flags) const
+ bool CanAccessMember(mirror::Class* klass, uint32_t access_flags)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Can this type be assigned by src?
// Note: Object and interface types may always be assigned to one another, see comment on
// ClassJoin.
- bool IsAssignableFrom(const RegType& src) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsAssignableFrom(RegType& src) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Can this type be assigned by src? Variant of IsAssignableFrom that doesn't allow assignment to
// an interface from an Object.
- bool IsStrictlyAssignableFrom(const RegType& src) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsStrictlyAssignableFrom(RegType& src) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Are these RegTypes the same?
- bool Equals(const RegType& other) const {
+ bool Equals(RegType& other) const {
return GetId() == other.GetId();
}
// Compute the merge of this register from one edge (path) with incoming_type from another.
- virtual const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
+ virtual RegType& Merge(RegType& incoming_type, RegTypeCache* reg_types)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -275,7 +275,7 @@
protected:
RegType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : descriptor_(descriptor), klass_(klass), cache_id_(cache_id) {
+ : descriptor_(descriptor), klass_(GcRoot<mirror::Class>(klass)), cache_id_(cache_id) {
if (kIsDebugBuild) {
CheckInvariants();
}
@@ -285,7 +285,7 @@
const std::string descriptor_;
- mirror::Class* klass_; // Non-const only due to moving classes.
+ GcRoot<mirror::Class> klass_;
const uint16_t cache_id_;
friend class RegTypeCache;
@@ -301,7 +301,7 @@
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get the singleton Conflict instance.
static ConflictType* GetInstance();
@@ -331,7 +331,7 @@
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get the singleton Undefined instance.
static UndefinedType* GetInstance();
@@ -350,7 +350,7 @@
: RegType(klass, descriptor, cache_id) {
}
- virtual const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
+ virtual RegType& Merge(RegType& incoming_type, RegTypeCache* reg_types)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static UndefinedType* instance_;
@@ -373,7 +373,7 @@
bool IsInteger() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static IntegerType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -392,7 +392,7 @@
bool IsBoolean() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static BooleanType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -412,7 +412,7 @@
bool IsByte() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static ByteType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -431,7 +431,7 @@
bool IsShort() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static ShortType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -450,7 +450,7 @@
bool IsChar() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static CharType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -469,7 +469,7 @@
bool IsFloat() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static FloatType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -491,7 +491,7 @@
class LongLoType : public Cat2Type {
public:
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsLongLo() const {
return true;
}
@@ -513,7 +513,7 @@
class LongHiType : public Cat2Type {
public:
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsLongHi() const {
return true;
}
@@ -532,7 +532,7 @@
class DoubleLoType : public Cat2Type {
public:
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsDoubleLo() const {
return true;
}
@@ -554,7 +554,7 @@
class DoubleHiType : public Cat2Type {
public:
- std::string Dump() const;
+ std::string Dump();
virtual bool IsDoubleHi() const {
return true;
}
@@ -621,7 +621,7 @@
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
class PreciseConstLoType : public ConstantType {
@@ -633,7 +633,7 @@
bool IsPreciseConstantLo() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
class PreciseConstHiType : public ConstantType {
@@ -645,7 +645,7 @@
bool IsPreciseConstantHi() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
class ImpreciseConstType : public ConstantType {
@@ -655,7 +655,7 @@
bool IsImpreciseConstant() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
class ImpreciseConstLoType : public ConstantType {
@@ -666,7 +666,7 @@
bool IsImpreciseConstantLo() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
class ImpreciseConstHiType : public ConstantType {
@@ -677,7 +677,7 @@
bool IsImpreciseConstantHi() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
// Common parent of all uninitialized types. Uninitialized types are created by "new" dex
@@ -718,7 +718,7 @@
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
// Similar to UnresolvedReferenceType but not yet having been passed to a constructor.
@@ -737,7 +737,7 @@
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
@@ -762,7 +762,7 @@
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -782,7 +782,7 @@
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
@@ -807,7 +807,7 @@
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
// A type of register holding a reference to an Object of type GetClass and only an object of that
@@ -829,7 +829,7 @@
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
// Common parent of unresolved types.
@@ -857,7 +857,7 @@
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
@@ -883,7 +883,7 @@
return static_cast<uint16_t>(unresolved_child_id_ & 0xFFFF);
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -918,7 +918,7 @@
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
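A hedged usage sketch for the assignability API above, using cache accessors declared later in this patch (the expectations follow the comments in this header):

    RegType& object = cache.JavaLangObject(true);
    RegType& string = cache.JavaLangString();
    CHECK(object.IsAssignableFrom(string));           // widening: String -> Object
    CHECK(!string.IsStrictlyAssignableFrom(object));  // narrowing is rejected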
diff --git a/runtime/verifier/reg_type_cache-inl.h b/runtime/verifier/reg_type_cache-inl.h
index fc9e5c9..fdf96a8 100644
--- a/runtime/verifier/reg_type_cache-inl.h
+++ b/runtime/verifier/reg_type_cache-inl.h
@@ -24,14 +24,14 @@
namespace art {
namespace verifier {
-inline const art::verifier::RegType& RegTypeCache::GetFromId(uint16_t id) const {
+inline RegType& RegTypeCache::GetFromId(uint16_t id) const {
DCHECK_LT(id, entries_.size());
RegType* result = entries_[id];
DCHECK(result != NULL);
return *result;
}
-inline const ConstantType& RegTypeCache::FromCat1Const(int32_t value, bool precise) {
+inline ConstantType& RegTypeCache::FromCat1Const(int32_t value, bool precise) {
// We only expect 0 to be a precise constant.
DCHECK(value != 0 || precise);
if (precise && (value >= kMinSmallConstant) && (value <= kMaxSmallConstant)) {
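The fast path above indexes a preallocated table of small precise constants. A sketch with assumed bounds and field names (the real kMinSmallConstant/kMaxSmallConstant values live elsewhere in the cache):

    static constexpr int32_t kMinSmallConstantSketch = -1;
    static constexpr int32_t kMaxSmallConstantSketch = 10;
    ConstantType* small_precise_constants_[kMaxSmallConstantSketch - kMinSmallConstantSketch + 1];

    ConstantType& FromSmallConst(int32_t value) {
      // Precise small constants are preallocated, so lookup is an array index.
      return *small_precise_constants_[value - kMinSmallConstantSketch];
    }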
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index d51374b..255b506 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -65,8 +65,8 @@
DCHECK_EQ(entries_.size(), primitive_count_);
}
-const RegType& RegTypeCache::FromDescriptor(mirror::ClassLoader* loader, const char* descriptor,
- bool precise) {
+RegType& RegTypeCache::FromDescriptor(mirror::ClassLoader* loader, const char* descriptor,
+ bool precise) {
DCHECK(RegTypeCache::primitive_initialized_);
if (descriptor[1] == '\0') {
switch (descriptor[0]) {
@@ -97,7 +97,7 @@
}
};
-const RegType& RegTypeCache::RegTypeFromPrimitiveType(Primitive::Type prim_type) const {
+RegType& RegTypeCache::RegTypeFromPrimitiveType(Primitive::Type prim_type) const {
CHECK(RegTypeCache::primitive_initialized_);
switch (prim_type) {
case Primitive::kPrimBoolean:
@@ -156,8 +156,8 @@
return klass;
}
-const RegType& RegTypeCache::From(mirror::ClassLoader* loader, const char* descriptor,
- bool precise) {
+RegType& RegTypeCache::From(mirror::ClassLoader* loader, const char* descriptor,
+ bool precise) {
// Try looking up the class in the cache first.
for (size_t i = primitive_count_; i < entries_.size(); i++) {
if (MatchDescriptor(i, descriptor, precise)) {
@@ -208,7 +208,7 @@
}
}
-const RegType& RegTypeCache::FromClass(const char* descriptor, mirror::Class* klass, bool precise) {
+RegType& RegTypeCache::FromClass(const char* descriptor, mirror::Class* klass, bool precise) {
DCHECK(klass != nullptr && !klass->IsErroneous());
if (klass->IsPrimitive()) {
// Note: precise isn't used for primitive classes. A char is assignable to an int. All
@@ -218,7 +218,7 @@
// Look for the reference in the list of entries to have.
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
- if (cur_entry->klass_ == klass && MatchingPrecisionForClass(cur_entry, precise)) {
+ if (cur_entry->klass_.Read() == klass && MatchingPrecisionForClass(cur_entry, precise)) {
return *cur_entry;
}
}
@@ -311,17 +311,15 @@
}
}
-const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left, const RegType& right) {
+RegType& RegTypeCache::FromUnresolvedMerge(RegType& left, RegType& right) {
std::set<uint16_t> types;
if (left.IsUnresolvedMergedReference()) {
- RegType& non_const(const_cast<RegType&>(left));
- types = (down_cast<UnresolvedMergedType*>(&non_const))->GetMergedTypes();
+ types = (down_cast<UnresolvedMergedType*>(&left))->GetMergedTypes();
} else {
types.insert(left.GetId());
}
if (right.IsUnresolvedMergedReference()) {
- RegType& non_const(const_cast<RegType&>(right));
- std::set<uint16_t> right_types = (down_cast<UnresolvedMergedType*>(&non_const))->GetMergedTypes();
+ std::set<uint16_t> right_types = (down_cast<UnresolvedMergedType*>(&right))->GetMergedTypes();
types.insert(right_types.begin(), right_types.end());
} else {
types.insert(right.GetId());
@@ -348,7 +346,7 @@
return *entry;
}
-const RegType& RegTypeCache::FromUnresolvedSuperClass(const RegType& child) {
+RegType& RegTypeCache::FromUnresolvedSuperClass(RegType& child) {
// Check if entry already exists.
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
@@ -367,7 +365,7 @@
return *entry;
}
-const UninitializedType& RegTypeCache::Uninitialized(const RegType& type, uint32_t allocation_pc) {
+UninitializedType& RegTypeCache::Uninitialized(RegType& type, uint32_t allocation_pc) {
UninitializedType* entry = NULL;
const std::string& descriptor(type.GetDescriptor());
if (type.IsUnresolvedTypes()) {
@@ -397,7 +395,7 @@
return *entry;
}
-const RegType& RegTypeCache::FromUninitialized(const RegType& uninit_type) {
+RegType& RegTypeCache::FromUninitialized(RegType& uninit_type) {
RegType* entry;
if (uninit_type.IsUnresolvedTypes()) {
@@ -439,44 +437,44 @@
return *entry;
}
-const ImpreciseConstType& RegTypeCache::ByteConstant() {
- const ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::min(), false);
+ImpreciseConstType& RegTypeCache::ByteConstant() {
+ ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::min(), false);
DCHECK(result.IsImpreciseConstant());
- return *down_cast<const ImpreciseConstType*>(&result);
+ return *down_cast<ImpreciseConstType*>(&result);
}
-const ImpreciseConstType& RegTypeCache::CharConstant() {
+ImpreciseConstType& RegTypeCache::CharConstant() {
int32_t jchar_max = static_cast<int32_t>(std::numeric_limits<jchar>::max());
- const ConstantType& result = FromCat1Const(jchar_max, false);
+ ConstantType& result = FromCat1Const(jchar_max, false);
DCHECK(result.IsImpreciseConstant());
- return *down_cast<const ImpreciseConstType*>(&result);
+ return *down_cast<ImpreciseConstType*>(&result);
}
-const ImpreciseConstType& RegTypeCache::ShortConstant() {
- const ConstantType& result = FromCat1Const(std::numeric_limits<jshort>::min(), false);
+ImpreciseConstType& RegTypeCache::ShortConstant() {
+ ConstantType& result = FromCat1Const(std::numeric_limits<jshort>::min(), false);
DCHECK(result.IsImpreciseConstant());
- return *down_cast<const ImpreciseConstType*>(&result);
+ return *down_cast<ImpreciseConstType*>(&result);
}
-const ImpreciseConstType& RegTypeCache::IntConstant() {
- const ConstantType& result = FromCat1Const(std::numeric_limits<jint>::max(), false);
+ImpreciseConstType& RegTypeCache::IntConstant() {
+ ConstantType& result = FromCat1Const(std::numeric_limits<jint>::max(), false);
DCHECK(result.IsImpreciseConstant());
- return *down_cast<const ImpreciseConstType*>(&result);
+ return *down_cast<ImpreciseConstType*>(&result);
}
-const ImpreciseConstType& RegTypeCache::PosByteConstant() {
- const ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::max(), false);
+ImpreciseConstType& RegTypeCache::PosByteConstant() {
+ ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::max(), false);
DCHECK(result.IsImpreciseConstant());
- return *down_cast<const ImpreciseConstType*>(&result);
+ return *down_cast<ImpreciseConstType*>(&result);
}
-const ImpreciseConstType& RegTypeCache::PosShortConstant() {
- const ConstantType& result = FromCat1Const(std::numeric_limits<jshort>::max(), false);
+ImpreciseConstType& RegTypeCache::PosShortConstant() {
+ ConstantType& result = FromCat1Const(std::numeric_limits<jshort>::max(), false);
DCHECK(result.IsImpreciseConstant());
- return *down_cast<const ImpreciseConstType*>(&result);
+ return *down_cast<ImpreciseConstType*>(&result);
}
-const UninitializedType& RegTypeCache::UninitializedThisArgument(const RegType& type) {
+UninitializedType& RegTypeCache::UninitializedThisArgument(RegType& type) {
UninitializedType* entry;
const std::string& descriptor(type.GetDescriptor());
if (type.IsUnresolvedTypes()) {
@@ -502,10 +500,10 @@
return *entry;
}
-const ConstantType& RegTypeCache::FromCat1NonSmallConstant(int32_t value, bool precise) {
+ConstantType& RegTypeCache::FromCat1NonSmallConstant(int32_t value, bool precise) {
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
- if (cur_entry->klass_ == NULL && cur_entry->IsConstant() &&
+ if (cur_entry->klass_.IsNull() && cur_entry->IsConstant() &&
cur_entry->IsPreciseConstant() == precise &&
(down_cast<ConstantType*>(cur_entry))->ConstantValue() == value) {
return *down_cast<ConstantType*>(cur_entry);
@@ -521,7 +519,7 @@
return *entry;
}
-const ConstantType& RegTypeCache::FromCat2ConstLo(int32_t value, bool precise) {
+ConstantType& RegTypeCache::FromCat2ConstLo(int32_t value, bool precise) {
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
if (cur_entry->IsConstantLo() && (cur_entry->IsPrecise() == precise) &&
@@ -539,7 +537,7 @@
return *entry;
}
-const ConstantType& RegTypeCache::FromCat2ConstHi(int32_t value, bool precise) {
+ConstantType& RegTypeCache::FromCat2ConstHi(int32_t value, bool precise) {
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
if (cur_entry->IsConstantHi() && (cur_entry->IsPrecise() == precise) &&
@@ -557,7 +555,7 @@
return *entry;
}
-const RegType& RegTypeCache::GetComponentType(const RegType& array, mirror::ClassLoader* loader) {
+RegType& RegTypeCache::GetComponentType(RegType& array, mirror::ClassLoader* loader) {
if (!array.IsArrayTypes()) {
return Conflict();
} else if (array.IsUnresolvedTypes()) {
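Illustrative calls (null class loader assumed; primitive arrays always resolve, per the comment in RegType::IsObjectArrayTypes):

    RegType& int_array = cache.FromDescriptor(nullptr, "[I", false);
    CHECK(cache.GetComponentType(int_array, nullptr).IsInteger());
    // Non-array input degenerates to Conflict, as the guard above shows.
    CHECK(cache.GetComponentType(cache.Integer(), nullptr).IsConflict());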
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index f42fdd1..d46cf2c 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -49,99 +49,99 @@
}
}
static void ShutDown();
- const art::verifier::RegType& GetFromId(uint16_t id) const;
- const RegType& From(mirror::ClassLoader* loader, const char* descriptor, bool precise)
+ RegType& GetFromId(uint16_t id) const;
+ RegType& From(mirror::ClassLoader* loader, const char* descriptor, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise)
+ RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ConstantType& FromCat1Const(int32_t value, bool precise)
+ ConstantType& FromCat1Const(int32_t value, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ConstantType& FromCat2ConstLo(int32_t value, bool precise)
+ ConstantType& FromCat2ConstLo(int32_t value, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ConstantType& FromCat2ConstHi(int32_t value, bool precise)
+ ConstantType& FromCat2ConstHi(int32_t value, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& FromDescriptor(mirror::ClassLoader* loader, const char* descriptor, bool precise)
+ RegType& FromDescriptor(mirror::ClassLoader* loader, const char* descriptor, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& FromUnresolvedMerge(const RegType& left, const RegType& right)
+ RegType& FromUnresolvedMerge(RegType& left, RegType& right)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& FromUnresolvedSuperClass(const RegType& child)
+ RegType& FromUnresolvedSuperClass(RegType& child)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& JavaLangString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ RegType& JavaLangString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// String is final and therefore always precise.
return From(NULL, "Ljava/lang/String;", true);
}
- const RegType& JavaLangThrowable(bool precise)
+ RegType& JavaLangThrowable(bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return From(NULL, "Ljava/lang/Throwable;", precise);
}
- const ConstantType& Zero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ConstantType& Zero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return FromCat1Const(0, true);
}
- const ConstantType& One() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ConstantType& One() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return FromCat1Const(1, true);
}
size_t GetCacheSize() {
return entries_.size();
}
- const RegType& Boolean() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ RegType& Boolean() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return *BooleanType::GetInstance();
}
- const RegType& Byte() {
+ RegType& Byte() {
return *ByteType::GetInstance();
}
- const RegType& Char() {
+ RegType& Char() {
return *CharType::GetInstance();
}
- const RegType& Short() {
+ RegType& Short() {
return *ShortType::GetInstance();
}
- const RegType& Integer() {
+ RegType& Integer() {
return *IntegerType::GetInstance();
}
- const RegType& Float() {
+ RegType& Float() {
return *FloatType::GetInstance();
}
- const RegType& LongLo() {
+ RegType& LongLo() {
return *LongLoType::GetInstance();
}
- const RegType& LongHi() {
+ RegType& LongHi() {
return *LongHiType::GetInstance();
}
- const RegType& DoubleLo() {
+ RegType& DoubleLo() {
return *DoubleLoType::GetInstance();
}
- const RegType& DoubleHi() {
+ RegType& DoubleHi() {
return *DoubleHiType::GetInstance();
}
- const RegType& Undefined() {
+ RegType& Undefined() {
return *UndefinedType::GetInstance();
}
- const RegType& Conflict() {
+ RegType& Conflict() {
return *ConflictType::GetInstance();
}
- const RegType& JavaLangClass(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ RegType& JavaLangClass(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return From(NULL, "Ljava/lang/Class;", precise);
}
- const RegType& JavaLangObject(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ RegType& JavaLangObject(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return From(NULL, "Ljava/lang/Object;", precise);
}
- const UninitializedType& Uninitialized(const RegType& type, uint32_t allocation_pc)
+ UninitializedType& Uninitialized(RegType& type, uint32_t allocation_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Create an uninitialized 'this' argument for the given type.
- const UninitializedType& UninitializedThisArgument(const RegType& type)
+ UninitializedType& UninitializedThisArgument(RegType& type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& FromUninitialized(const RegType& uninit_type)
+ RegType& FromUninitialized(RegType& uninit_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ImpreciseConstType& ByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ImpreciseConstType& CharConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ImpreciseConstType& ShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ImpreciseConstType& IntConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ImpreciseConstType& PosByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ImpreciseConstType& PosShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& GetComponentType(const RegType& array, mirror::ClassLoader* loader)
+ ImpreciseConstType& ByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ImpreciseConstType& CharConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ImpreciseConstType& ShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ImpreciseConstType& IntConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ImpreciseConstType& PosByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ImpreciseConstType& PosShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ RegType& GetComponentType(RegType& array, mirror::ClassLoader* loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& RegTypeFromPrimitiveType(Primitive::Type) const;
+ RegType& RegTypeFromPrimitiveType(Primitive::Type) const;
void VisitRoots(RootCallback* callback, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -151,7 +151,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool MatchDescriptor(size_t idx, const char* descriptor, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ConstantType& FromCat1NonSmallConstant(int32_t value, bool precise)
+ ConstantType& FromCat1NonSmallConstant(int32_t value, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void AddEntry(RegType* new_entry);
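A quick, hedged tour of the now non-const accessors (requires the usual mutator-lock context, as in the tests below):

    RegTypeCache cache(true);
    RegType& zero = cache.Zero();           // precise constant 0
    RegType& str  = cache.JavaLangString(); // final, therefore always precise
    CHECK(zero.IsConstant());
    CHECK(str.IsPreciseReference());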
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index 9dc0df1..e27558a 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -33,21 +33,21 @@
// Tests creating primitive types.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
- const RegType& ref_type_const_0 = cache.FromCat1Const(10, true);
- const RegType& ref_type_const_1 = cache.FromCat1Const(10, true);
- const RegType& ref_type_const_2 = cache.FromCat1Const(30, true);
- const RegType& ref_type_const_3 = cache.FromCat1Const(30, false);
+ RegType& ref_type_const_0 = cache.FromCat1Const(10, true);
+ RegType& ref_type_const_1 = cache.FromCat1Const(10, true);
+ RegType& ref_type_const_2 = cache.FromCat1Const(30, true);
+ RegType& ref_type_const_3 = cache.FromCat1Const(30, false);
EXPECT_TRUE(ref_type_const_0.Equals(ref_type_const_1));
EXPECT_FALSE(ref_type_const_0.Equals(ref_type_const_2));
EXPECT_FALSE(ref_type_const_0.Equals(ref_type_const_3));
- const RegType& ref_type_const_wide_0 = cache.FromCat2ConstHi(50, true);
- const RegType& ref_type_const_wide_1 = cache.FromCat2ConstHi(50, true);
+ RegType& ref_type_const_wide_0 = cache.FromCat2ConstHi(50, true);
+ RegType& ref_type_const_wide_1 = cache.FromCat2ConstHi(50, true);
EXPECT_TRUE(ref_type_const_wide_0.Equals(ref_type_const_wide_1));
- const RegType& ref_type_const_wide_2 = cache.FromCat2ConstLo(50, true);
- const RegType& ref_type_const_wide_3 = cache.FromCat2ConstLo(50, true);
- const RegType& ref_type_const_wide_4 = cache.FromCat2ConstLo(55, true);
+ RegType& ref_type_const_wide_2 = cache.FromCat2ConstLo(50, true);
+ RegType& ref_type_const_wide_3 = cache.FromCat2ConstLo(50, true);
+ RegType& ref_type_const_wide_4 = cache.FromCat2ConstLo(55, true);
EXPECT_TRUE(ref_type_const_wide_2.Equals(ref_type_const_wide_3));
EXPECT_FALSE(ref_type_const_wide_2.Equals(ref_type_const_wide_4));
}
@@ -56,11 +56,11 @@
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
int64_t val = static_cast<int32_t>(1234);
- const RegType& precise_lo = cache.FromCat2ConstLo(static_cast<int32_t>(val), true);
- const RegType& precise_hi = cache.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
- const RegType& precise_const = cache.FromCat1Const(static_cast<int32_t>(val >> 32), true);
- const RegType& long_lo = cache.LongLo();
- const RegType& long_hi = cache.LongHi();
+ RegType& precise_lo = cache.FromCat2ConstLo(static_cast<int32_t>(val), true);
+ RegType& precise_hi = cache.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+ RegType& precise_const = cache.FromCat1Const(static_cast<int32_t>(val >> 32), true);
+ RegType& long_lo = cache.LongLo();
+ RegType& long_hi = cache.LongHi();
// Check sanity of types.
EXPECT_TRUE(precise_lo.IsLowHalf());
EXPECT_FALSE(precise_hi.IsLowHalf());
@@ -80,7 +80,7 @@
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
- const RegType& bool_reg_type = cache.Boolean();
+ RegType& bool_reg_type = cache.Boolean();
EXPECT_FALSE(bool_reg_type.IsUndefined());
EXPECT_FALSE(bool_reg_type.IsConflict());
EXPECT_FALSE(bool_reg_type.IsZero());
@@ -112,7 +112,7 @@
EXPECT_TRUE(bool_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(bool_reg_type.IsNonZeroReferenceTypes());
- const RegType& byte_reg_type = cache.Byte();
+ RegType& byte_reg_type = cache.Byte();
EXPECT_FALSE(byte_reg_type.IsUndefined());
EXPECT_FALSE(byte_reg_type.IsConflict());
EXPECT_FALSE(byte_reg_type.IsZero());
@@ -144,7 +144,7 @@
EXPECT_TRUE(byte_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(byte_reg_type.IsNonZeroReferenceTypes());
- const RegType& char_reg_type = cache.Char();
+ RegType& char_reg_type = cache.Char();
EXPECT_FALSE(char_reg_type.IsUndefined());
EXPECT_FALSE(char_reg_type.IsConflict());
EXPECT_FALSE(char_reg_type.IsZero());
@@ -176,7 +176,7 @@
EXPECT_TRUE(char_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(char_reg_type.IsNonZeroReferenceTypes());
- const RegType& short_reg_type = cache.Short();
+ RegType& short_reg_type = cache.Short();
EXPECT_FALSE(short_reg_type.IsUndefined());
EXPECT_FALSE(short_reg_type.IsConflict());
EXPECT_FALSE(short_reg_type.IsZero());
@@ -208,7 +208,7 @@
EXPECT_TRUE(short_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(short_reg_type.IsNonZeroReferenceTypes());
- const RegType& int_reg_type = cache.Integer();
+ RegType& int_reg_type = cache.Integer();
EXPECT_FALSE(int_reg_type.IsUndefined());
EXPECT_FALSE(int_reg_type.IsConflict());
EXPECT_FALSE(int_reg_type.IsZero());
@@ -240,7 +240,7 @@
EXPECT_TRUE(int_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(int_reg_type.IsNonZeroReferenceTypes());
- const RegType& long_reg_type = cache.LongLo();
+ RegType& long_reg_type = cache.LongLo();
EXPECT_FALSE(long_reg_type.IsUndefined());
EXPECT_FALSE(long_reg_type.IsConflict());
EXPECT_FALSE(long_reg_type.IsZero());
@@ -272,7 +272,7 @@
EXPECT_FALSE(long_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(long_reg_type.IsNonZeroReferenceTypes());
- const RegType& float_reg_type = cache.Float();
+ RegType& float_reg_type = cache.Float();
EXPECT_FALSE(float_reg_type.IsUndefined());
EXPECT_FALSE(float_reg_type.IsConflict());
EXPECT_FALSE(float_reg_type.IsZero());
@@ -304,7 +304,7 @@
EXPECT_FALSE(float_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(float_reg_type.IsNonZeroReferenceTypes());
- const RegType& double_reg_type = cache.DoubleLo();
+ RegType& double_reg_type = cache.DoubleLo();
EXPECT_FALSE(double_reg_type.IsUndefined());
EXPECT_FALSE(double_reg_type.IsConflict());
EXPECT_FALSE(double_reg_type.IsZero());
@@ -344,9 +344,9 @@
// match the one that is imprecise.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
- const RegType& imprecise_obj = cache.JavaLangObject(false);
- const RegType& precise_obj = cache.JavaLangObject(true);
- const RegType& precise_obj_2 = cache.FromDescriptor(NULL, "Ljava/lang/Object;", true);
+ RegType& imprecise_obj = cache.JavaLangObject(false);
+ RegType& precise_obj = cache.JavaLangObject(true);
+ RegType& precise_obj_2 = cache.FromDescriptor(NULL, "Ljava/lang/Object;", true);
EXPECT_TRUE(precise_obj.Equals(precise_obj_2));
EXPECT_FALSE(imprecise_obj.Equals(precise_obj));
@@ -359,14 +359,14 @@
// a hit second time.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
- const RegType& ref_type_0 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+ RegType& ref_type_0 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
EXPECT_TRUE(ref_type_0.IsNonZeroReferenceTypes());
- const RegType& ref_type_1 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+ RegType& ref_type_1 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
EXPECT_TRUE(ref_type_0.Equals(ref_type_1));
- const RegType& unresolved_super_class = cache.FromUnresolvedSuperClass(ref_type_0);
+ RegType& unresolved_super_class = cache.FromUnresolvedSuperClass(ref_type_0);
EXPECT_TRUE(unresolved_super_class.IsUnresolvedSuperClass());
EXPECT_TRUE(unresolved_super_class.IsNonZeroReferenceTypes());
}
@@ -375,21 +375,21 @@
// Tests creating uninitialized types from unresolved types.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
- const RegType& ref_type_0 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+ RegType& ref_type_0 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
- const RegType& ref_type = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+ RegType& ref_type = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
EXPECT_TRUE(ref_type_0.Equals(ref_type));
// Create an uninitialized type of this unresolved type
- const RegType& unresolved_unintialised = cache.Uninitialized(ref_type, 1101ull);
+ RegType& unresolved_unintialised = cache.Uninitialized(ref_type, 1101ull);
EXPECT_TRUE(unresolved_unintialised.IsUnresolvedAndUninitializedReference());
EXPECT_TRUE(unresolved_unintialised.IsUninitializedTypes());
EXPECT_TRUE(unresolved_unintialised.IsNonZeroReferenceTypes());
// Create an uninitialized type of this unresolved type with different PC
- const RegType& ref_type_unresolved_unintialised_1 = cache.Uninitialized(ref_type, 1102ull);
+ RegType& ref_type_unresolved_unintialised_1 = cache.Uninitialized(ref_type, 1102ull);
EXPECT_TRUE(unresolved_unintialised.IsUnresolvedAndUninitializedReference());
EXPECT_FALSE(unresolved_unintialised.Equals(ref_type_unresolved_unintialised_1));
// Create an uninitialized type of this unresolved type with the same PC
- const RegType& unresolved_unintialised_2 = cache.Uninitialized(ref_type, 1101ull);
+ RegType& unresolved_unintialised_2 = cache.Uninitialized(ref_type, 1101ull);
EXPECT_TRUE(unresolved_unintialised.Equals(unresolved_unintialised_2));
}
@@ -397,12 +397,12 @@
// Tests types for proper Dump messages.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
- const RegType& unresolved_ref = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
- const RegType& unresolved_ref_another = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExistEither;", true);
- const RegType& resolved_ref = cache.JavaLangString();
- const RegType& resolved_unintialiesd = cache.Uninitialized(resolved_ref, 10);
- const RegType& unresolved_unintialized = cache.Uninitialized(unresolved_ref, 12);
- const RegType& unresolved_merged = cache.FromUnresolvedMerge(unresolved_ref, unresolved_ref_another);
+ RegType& unresolved_ref = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+ RegType& unresolved_ref_another = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExistEither;", true);
+ RegType& resolved_ref = cache.JavaLangString();
+ RegType& resolved_unintialiesd = cache.Uninitialized(resolved_ref, 10);
+ RegType& unresolved_unintialized = cache.Uninitialized(unresolved_ref, 12);
+ RegType& unresolved_merged = cache.FromUnresolvedMerge(unresolved_ref, unresolved_ref_another);
std::string expected = "Unresolved Reference: java.lang.DoesNotExist";
EXPECT_EQ(expected, unresolved_ref.Dump());
@@ -422,16 +422,16 @@
// The JavaLangString method instead of FromDescriptor. String class is final.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
- const RegType& ref_type = cache.JavaLangString();
- const RegType& ref_type_2 = cache.JavaLangString();
- const RegType& ref_type_3 = cache.FromDescriptor(NULL, "Ljava/lang/String;", true);
+ RegType& ref_type = cache.JavaLangString();
+ RegType& ref_type_2 = cache.JavaLangString();
+ RegType& ref_type_3 = cache.FromDescriptor(NULL, "Ljava/lang/String;", true);
EXPECT_TRUE(ref_type.Equals(ref_type_2));
EXPECT_TRUE(ref_type_2.Equals(ref_type_3));
EXPECT_TRUE(ref_type.IsPreciseReference());
// Create an uninitialized type out of this:
- const RegType& ref_type_unintialized = cache.Uninitialized(ref_type, 0110ull);
+ RegType& ref_type_unintialized = cache.Uninitialized(ref_type, 0110ull);
EXPECT_TRUE(ref_type_unintialized.IsUninitializedReference());
EXPECT_FALSE(ref_type_unintialized.IsUnresolvedAndUninitializedReference());
}
@@ -442,9 +442,9 @@
// The JavaLangObject method instead of FromDescriptor. The Object class is not final.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
- const RegType& ref_type = cache.JavaLangObject(true);
- const RegType& ref_type_2 = cache.JavaLangObject(true);
- const RegType& ref_type_3 = cache.FromDescriptor(NULL, "Ljava/lang/Object;", true);
+ RegType& ref_type = cache.JavaLangObject(true);
+ RegType& ref_type_2 = cache.JavaLangObject(true);
+ RegType& ref_type_3 = cache.FromDescriptor(NULL, "Ljava/lang/Object;", true);
EXPECT_TRUE(ref_type.Equals(ref_type_2));
EXPECT_TRUE(ref_type_3.Equals(ref_type_2));
@@ -455,20 +455,19 @@
// String and Object: the LUB is Object.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache_new(true);
- const RegType& string = cache_new.JavaLangString();
- const RegType& Object = cache_new.JavaLangObject(true);
+ RegType& string = cache_new.JavaLangString();
+ RegType& Object = cache_new.JavaLangObject(true);
EXPECT_TRUE(string.Merge(Object, &cache_new).IsJavaLangObject());
// Merge two unresolved types.
- const RegType& ref_type_0 = cache_new.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+ RegType& ref_type_0 = cache_new.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
- const RegType& ref_type_1 = cache_new.FromDescriptor(NULL, "Ljava/lang/DoesNotExistToo;", true);
+ RegType& ref_type_1 = cache_new.FromDescriptor(NULL, "Ljava/lang/DoesNotExistToo;", true);
EXPECT_FALSE(ref_type_0.Equals(ref_type_1));
- const RegType& merged = ref_type_1.Merge(ref_type_0, &cache_new);
+ RegType& merged = ref_type_1.Merge(ref_type_0, &cache_new);
EXPECT_TRUE(merged.IsUnresolvedMergedReference());
- RegType& merged_nonconst = const_cast<RegType&>(merged);
- std::set<uint16_t> merged_ids = (down_cast<UnresolvedMergedType*>(&merged_nonconst))->GetMergedTypes();
+ std::set<uint16_t> merged_ids = (down_cast<UnresolvedMergedType*>(&merged))->GetMergedTypes();
EXPECT_EQ(ref_type_0.GetId(), *(merged_ids.begin()));
EXPECT_EQ(ref_type_1.GetId(), *((++merged_ids.begin())));
}
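A follow-on case one could add inside the test above (the third descriptor is hypothetical): merging the merged type with one more unresolved reference should stay an unresolved merge, per FromUnresolvedMerge in this patch.

    RegType& ref_type_2 = cache_new.FromDescriptor(NULL, "Ljava/lang/DoesNotExistThree;", true);
    RegType& merged_again = merged.Merge(ref_type_2, &cache_new);
    EXPECT_TRUE(merged_again.IsUnresolvedMergedReference());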
@@ -479,27 +478,27 @@
RegTypeCache cache_new(true);
constexpr int32_t kTestConstantValue = 10;
- const RegType& float_type = cache_new.Float();
- const RegType& precise_cst = cache_new.FromCat1Const(kTestConstantValue, true);
- const RegType& imprecise_cst = cache_new.FromCat1Const(kTestConstantValue, false);
+ RegType& float_type = cache_new.Float();
+ RegType& precise_cst = cache_new.FromCat1Const(kTestConstantValue, true);
+ RegType& imprecise_cst = cache_new.FromCat1Const(kTestConstantValue, false);
{
// float MERGE precise cst => float.
- const RegType& merged = float_type.Merge(precise_cst, &cache_new);
+ RegType& merged = float_type.Merge(precise_cst, &cache_new);
EXPECT_TRUE(merged.IsFloat());
}
{
// precise cst MERGE float => float.
- const RegType& merged = precise_cst.Merge(float_type, &cache_new);
+ RegType& merged = precise_cst.Merge(float_type, &cache_new);
EXPECT_TRUE(merged.IsFloat());
}
{
// float MERGE imprecise cst => float.
- const RegType& merged = float_type.Merge(imprecise_cst, &cache_new);
+ RegType& merged = float_type.Merge(imprecise_cst, &cache_new);
EXPECT_TRUE(merged.IsFloat());
}
{
// imprecise cst MERGE float => float.
- const RegType& merged = imprecise_cst.Merge(float_type, &cache_new);
+ RegType& merged = imprecise_cst.Merge(float_type, &cache_new);
EXPECT_TRUE(merged.IsFloat());
}
}
@@ -510,50 +509,50 @@
RegTypeCache cache_new(true);
constexpr int32_t kTestConstantValue = 10;
- const RegType& long_lo_type = cache_new.LongLo();
- const RegType& long_hi_type = cache_new.LongHi();
- const RegType& precise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, true);
- const RegType& imprecise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, false);
- const RegType& precise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, true);
- const RegType& imprecise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, false);
+ RegType& long_lo_type = cache_new.LongLo();
+ RegType& long_hi_type = cache_new.LongHi();
+ RegType& precise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, true);
+ RegType& imprecise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, false);
+ RegType& precise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, true);
+ RegType& imprecise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, false);
{
// lo MERGE precise cst lo => lo.
- const RegType& merged = long_lo_type.Merge(precise_cst_lo, &cache_new);
+ RegType& merged = long_lo_type.Merge(precise_cst_lo, &cache_new);
EXPECT_TRUE(merged.IsLongLo());
}
{
// precise cst lo MERGE lo => lo.
- const RegType& merged = precise_cst_lo.Merge(long_lo_type, &cache_new);
+ RegType& merged = precise_cst_lo.Merge(long_lo_type, &cache_new);
EXPECT_TRUE(merged.IsLongLo());
}
{
// lo MERGE imprecise cst lo => lo.
- const RegType& merged = long_lo_type.Merge(imprecise_cst_lo, &cache_new);
+ RegType& merged = long_lo_type.Merge(imprecise_cst_lo, &cache_new);
EXPECT_TRUE(merged.IsLongLo());
}
{
// imprecise cst lo MERGE lo => lo.
- const RegType& merged = imprecise_cst_lo.Merge(long_lo_type, &cache_new);
+ RegType& merged = imprecise_cst_lo.Merge(long_lo_type, &cache_new);
EXPECT_TRUE(merged.IsLongLo());
}
{
// hi MERGE precise cst hi => hi.
- const RegType& merged = long_hi_type.Merge(precise_cst_hi, &cache_new);
+ RegType& merged = long_hi_type.Merge(precise_cst_hi, &cache_new);
EXPECT_TRUE(merged.IsLongHi());
}
{
// precise cst hi MERGE hi => hi.
- const RegType& merged = precise_cst_hi.Merge(long_hi_type, &cache_new);
+ RegType& merged = precise_cst_hi.Merge(long_hi_type, &cache_new);
EXPECT_TRUE(merged.IsLongHi());
}
{
// hi MERGE imprecise cst hi => hi.
- const RegType& merged = long_hi_type.Merge(imprecise_cst_hi, &cache_new);
+ RegType& merged = long_hi_type.Merge(imprecise_cst_hi, &cache_new);
EXPECT_TRUE(merged.IsLongHi());
}
{
// imprecise cst hi MERGE hi => hi.
- const RegType& merged = imprecise_cst_hi.Merge(long_hi_type, &cache_new);
+ RegType& merged = imprecise_cst_hi.Merge(long_hi_type, &cache_new);
EXPECT_TRUE(merged.IsLongHi());
}
}
@@ -564,50 +563,50 @@
RegTypeCache cache_new(true);
constexpr int32_t kTestConstantValue = 10;
- const RegType& double_lo_type = cache_new.DoubleLo();
- const RegType& double_hi_type = cache_new.DoubleHi();
- const RegType& precise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, true);
- const RegType& imprecise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, false);
- const RegType& precise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, true);
- const RegType& imprecise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, false);
+ RegType& double_lo_type = cache_new.DoubleLo();
+ RegType& double_hi_type = cache_new.DoubleHi();
+ RegType& precise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, true);
+ RegType& imprecise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, false);
+ RegType& precise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, true);
+ RegType& imprecise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, false);
{
// lo MERGE precise cst lo => lo.
- const RegType& merged = double_lo_type.Merge(precise_cst_lo, &cache_new);
+ RegType& merged = double_lo_type.Merge(precise_cst_lo, &cache_new);
EXPECT_TRUE(merged.IsDoubleLo());
}
{
// precise cst lo MERGE lo => lo.
- const RegType& merged = precise_cst_lo.Merge(double_lo_type, &cache_new);
+ RegType& merged = precise_cst_lo.Merge(double_lo_type, &cache_new);
EXPECT_TRUE(merged.IsDoubleLo());
}
{
// lo MERGE imprecise cst lo => lo.
- const RegType& merged = double_lo_type.Merge(imprecise_cst_lo, &cache_new);
+ RegType& merged = double_lo_type.Merge(imprecise_cst_lo, &cache_new);
EXPECT_TRUE(merged.IsDoubleLo());
}
{
// imprecise cst lo MERGE lo => lo.
- const RegType& merged = imprecise_cst_lo.Merge(double_lo_type, &cache_new);
+ RegType& merged = imprecise_cst_lo.Merge(double_lo_type, &cache_new);
EXPECT_TRUE(merged.IsDoubleLo());
}
{
// hi MERGE precise cst hi => hi.
- const RegType& merged = double_hi_type.Merge(precise_cst_hi, &cache_new);
+ RegType& merged = double_hi_type.Merge(precise_cst_hi, &cache_new);
EXPECT_TRUE(merged.IsDoubleHi());
}
{
// precise cst hi MERGE hi => hi.
- const RegType& merged = precise_cst_hi.Merge(double_hi_type, &cache_new);
+ RegType& merged = precise_cst_hi.Merge(double_hi_type, &cache_new);
EXPECT_TRUE(merged.IsDoubleHi());
}
{
// hi MERGE imprecise cst hi => hi.
- const RegType& merged = double_hi_type.Merge(imprecise_cst_hi, &cache_new);
+ RegType& merged = double_hi_type.Merge(imprecise_cst_hi, &cache_new);
EXPECT_TRUE(merged.IsDoubleHi());
}
{
// imprecise cst hi MERGE hi => hi.
- const RegType& merged = imprecise_cst_hi.Merge(double_hi_type, &cache_new);
+ RegType& merged = imprecise_cst_hi.Merge(double_hi_type, &cache_new);
EXPECT_TRUE(merged.IsDoubleHi());
}
}
@@ -616,8 +615,8 @@
// Tests creating primitive types.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache_new(true);
- const RegType& imprecise_const = cache_new.FromCat1Const(10, false);
- const RegType& precise_const = cache_new.FromCat1Const(10, true);
+ RegType& imprecise_const = cache_new.FromCat1Const(10, false);
+ RegType& precise_const = cache_new.FromCat1Const(10, true);
EXPECT_TRUE(imprecise_const.IsImpreciseConstant());
EXPECT_TRUE(precise_const.IsPreciseConstant());
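
The merge cases above all follow the same lattice rule: a category-1 constant merged with a concrete primitive (float, or a long/double half) collapses to that primitive, because the constant's raw bits can simply be reinterpreted. A minimal standalone sketch of the float case, as a simplified model rather than ART's RegType API:

    #include <cassert>

    // Simplified model: only the kinds exercised by the float tests above.
    enum class Kind { kFloat, kPreciseConst, kImpreciseConst, kConflict };

    Kind Merge(Kind a, Kind b) {
      if (a == b) return a;
      bool a_cst = (a == Kind::kPreciseConst || a == Kind::kImpreciseConst);
      bool b_cst = (b == Kind::kPreciseConst || b == Kind::kImpreciseConst);
      if ((a == Kind::kFloat && b_cst) || (b == Kind::kFloat && a_cst)) {
        return Kind::kFloat;  // constant bits reinterpreted as float
      }
      return Kind::kConflict;
    }

    int main() {
      assert(Merge(Kind::kFloat, Kind::kPreciseConst) == Kind::kFloat);
      assert(Merge(Kind::kImpreciseConst, Kind::kFloat) == Kind::kFloat);
      return 0;
    }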
diff --git a/runtime/verifier/register_line-inl.h b/runtime/verifier/register_line-inl.h
index 0989cd0..378c6d3 100644
--- a/runtime/verifier/register_line-inl.h
+++ b/runtime/verifier/register_line-inl.h
@@ -25,7 +25,7 @@
namespace art {
namespace verifier {
-inline const RegType& RegisterLine::GetRegisterType(uint32_t vsrc) const {
+inline RegType& RegisterLine::GetRegisterType(uint32_t vsrc) const {
// The register index was validated during the static pass, so we don't need to check it here.
DCHECK_LT(vsrc, num_regs_);
return verifier_->GetRegTypeCache()->GetFromId(line_[vsrc]);
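
Dropping const from the return type while keeping GetRegisterType itself a const member is legal C++: the trailing const applies only to *this, not to objects reached through stored pointers, so a const method may hand out a mutable reference to a cache entry it does not own. A small sketch of the pattern, with hypothetical names:

    // Hypothetical sketch: a const member function returning a mutable
    // reference to externally owned state.
    struct TypeCache {
      int entries[4] = {0, 1, 2, 3};
      int& GetFromId(int id) { return entries[id]; }
    };

    struct Line {
      TypeCache* cache;
      int& Get(int id) const {  // const: does not mutate the Line itself
        return cache->GetFromId(id);
      }
    };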
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index 556056c..4d67cfb 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -36,7 +36,7 @@
return true;
}
-bool RegisterLine::SetRegisterType(uint32_t vdst, const RegType& new_type) {
+bool RegisterLine::SetRegisterType(uint32_t vdst, RegType& new_type) {
DCHECK_LT(vdst, num_regs_);
if (new_type.IsLowHalf() || new_type.IsHighHalf()) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Expected category1 register type not '"
@@ -53,8 +53,8 @@
return true;
}
-bool RegisterLine::SetRegisterTypeWide(uint32_t vdst, const RegType& new_type1,
- const RegType& new_type2) {
+bool RegisterLine::SetRegisterTypeWide(uint32_t vdst, RegType& new_type1,
+ RegType& new_type2) {
DCHECK_LT(vdst + 1, num_regs_);
if (!new_type1.CheckWidePair(new_type2)) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "Invalid wide pair '"
@@ -75,21 +75,21 @@
result_[1] = result_[0];
}
-void RegisterLine::SetResultRegisterType(const RegType& new_type) {
+void RegisterLine::SetResultRegisterType(RegType& new_type) {
DCHECK(!new_type.IsLowHalf());
DCHECK(!new_type.IsHighHalf());
result_[0] = new_type.GetId();
result_[1] = verifier_->GetRegTypeCache()->Undefined().GetId();
}
-void RegisterLine::SetResultRegisterTypeWide(const RegType& new_type1,
- const RegType& new_type2) {
+void RegisterLine::SetResultRegisterTypeWide(RegType& new_type1,
+ RegType& new_type2) {
DCHECK(new_type1.CheckWidePair(new_type2));
result_[0] = new_type1.GetId();
result_[1] = new_type2.GetId();
}
-const RegType& RegisterLine::GetInvocationThis(const Instruction* inst, bool is_range) {
+RegType& RegisterLine::GetInvocationThis(const Instruction* inst, bool is_range) {
const size_t args_count = is_range ? inst->VRegA_3rc() : inst->VRegA_35c();
if (args_count < 1) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invoke lacks 'this'";
@@ -97,7 +97,7 @@
}
/* Get the register holding 'this' (the first argument, vC). */
const uint32_t this_reg = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
- const RegType& this_type = GetRegisterType(this_reg);
+ RegType& this_type = GetRegisterType(this_reg);
if (!this_type.IsReferenceTypes()) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "tried to get class from non-reference register v"
<< this_reg << " (type=" << this_type << ")";
@@ -107,9 +107,9 @@
}
bool RegisterLine::VerifyRegisterType(uint32_t vsrc,
- const RegType& check_type) {
+ RegType& check_type) {
// Verify the src register type against the check type refining the type of the register
- const RegType& src_type = GetRegisterType(vsrc);
+ RegType& src_type = GetRegisterType(vsrc);
if (!(check_type.IsAssignableFrom(src_type))) {
enum VerifyError fail_type;
if (!check_type.IsNonZeroReferenceTypes() || !src_type.IsNonZeroReferenceTypes()) {
@@ -125,7 +125,7 @@
return false;
}
if (check_type.IsLowHalf()) {
- const RegType& src_type_h = GetRegisterType(vsrc + 1);
+ RegType& src_type_h = GetRegisterType(vsrc + 1);
if (!src_type.CheckWidePair(src_type_h)) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "wide register v" << vsrc << " has type "
<< src_type << "/" << src_type_h;
@@ -139,17 +139,17 @@
return true;
}
-bool RegisterLine::VerifyRegisterTypeWide(uint32_t vsrc, const RegType& check_type1,
- const RegType& check_type2) {
+bool RegisterLine::VerifyRegisterTypeWide(uint32_t vsrc, RegType& check_type1,
+ RegType& check_type2) {
DCHECK(check_type1.CheckWidePair(check_type2));
// Verify the src register type against the check type refining the type of the register
- const RegType& src_type = GetRegisterType(vsrc);
+ RegType& src_type = GetRegisterType(vsrc);
if (!check_type1.IsAssignableFrom(src_type)) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "register v" << vsrc << " has type " << src_type
<< " but expected " << check_type1;
return false;
}
- const RegType& src_type_h = GetRegisterType(vsrc + 1);
+ RegType& src_type_h = GetRegisterType(vsrc + 1);
if (!src_type.CheckWidePair(src_type_h)) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "wide register v" << vsrc << " has type "
<< src_type << "/" << src_type_h;
@@ -162,9 +162,9 @@
return true;
}
-void RegisterLine::MarkRefsAsInitialized(const RegType& uninit_type) {
+void RegisterLine::MarkRefsAsInitialized(RegType& uninit_type) {
DCHECK(uninit_type.IsUninitializedTypes());
- const RegType& init_type = verifier_->GetRegTypeCache()->FromUninitialized(uninit_type);
+ RegType& init_type = verifier_->GetRegTypeCache()->FromUninitialized(uninit_type);
size_t changed = 0;
for (uint32_t i = 0; i < num_regs_; i++) {
if (GetRegisterType(i).Equals(uninit_type)) {
@@ -200,7 +200,7 @@
}
}
-std::string RegisterLine::Dump() const {
+std::string RegisterLine::Dump() {
std::string result;
for (size_t i = 0; i < num_regs_; i++) {
result += StringPrintf("%zd:[", i);
@@ -213,7 +213,7 @@
return result;
}
-void RegisterLine::MarkUninitRefsAsInvalid(const RegType& uninit_type) {
+void RegisterLine::MarkUninitRefsAsInvalid(RegType& uninit_type) {
for (size_t i = 0; i < num_regs_; i++) {
if (GetRegisterType(i).Equals(uninit_type)) {
line_[i] = verifier_->GetRegTypeCache()->Conflict().GetId();
@@ -224,7 +224,7 @@
void RegisterLine::CopyRegister1(uint32_t vdst, uint32_t vsrc, TypeCategory cat) {
DCHECK(cat == kTypeCategory1nr || cat == kTypeCategoryRef);
- const RegType& type = GetRegisterType(vsrc);
+ RegType& type = GetRegisterType(vsrc);
if (!SetRegisterType(vdst, type)) {
return;
}
@@ -238,8 +238,8 @@
}
void RegisterLine::CopyRegister2(uint32_t vdst, uint32_t vsrc) {
- const RegType& type_l = GetRegisterType(vsrc);
- const RegType& type_h = GetRegisterType(vsrc + 1);
+ RegType& type_l = GetRegisterType(vsrc);
+ RegType& type_h = GetRegisterType(vsrc + 1);
if (!type_l.CheckWidePair(type_h)) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "copy2 v" << vdst << "<-v" << vsrc
@@ -250,7 +250,7 @@
}
void RegisterLine::CopyResultRegister1(uint32_t vdst, bool is_reference) {
- const RegType& type = verifier_->GetRegTypeCache()->GetFromId(result_[0]);
+ RegType& type = verifier_->GetRegTypeCache()->GetFromId(result_[0]);
if ((!is_reference && !type.IsCategory1Types()) ||
(is_reference && !type.IsReferenceTypes())) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD)
@@ -267,8 +267,8 @@
* register to another register, and reset the result register.
*/
void RegisterLine::CopyResultRegister2(uint32_t vdst) {
- const RegType& type_l = verifier_->GetRegTypeCache()->GetFromId(result_[0]);
- const RegType& type_h = verifier_->GetRegTypeCache()->GetFromId(result_[1]);
+ RegType& type_l = verifier_->GetRegTypeCache()->GetFromId(result_[0]);
+ RegType& type_h = verifier_->GetRegTypeCache()->GetFromId(result_[1]);
if (!type_l.IsCategory2Types()) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD)
<< "copyRes2 v" << vdst << "<- result0" << " type=" << type_l;
@@ -281,40 +281,40 @@
}
void RegisterLine::CheckUnaryOp(const Instruction* inst,
- const RegType& dst_type,
- const RegType& src_type) {
+ RegType& dst_type,
+ RegType& src_type) {
if (VerifyRegisterType(inst->VRegB_12x(), src_type)) {
SetRegisterType(inst->VRegA_12x(), dst_type);
}
}
void RegisterLine::CheckUnaryOpWide(const Instruction* inst,
- const RegType& dst_type1, const RegType& dst_type2,
- const RegType& src_type1, const RegType& src_type2) {
+ RegType& dst_type1, RegType& dst_type2,
+ RegType& src_type1, RegType& src_type2) {
if (VerifyRegisterTypeWide(inst->VRegB_12x(), src_type1, src_type2)) {
SetRegisterTypeWide(inst->VRegA_12x(), dst_type1, dst_type2);
}
}
void RegisterLine::CheckUnaryOpToWide(const Instruction* inst,
- const RegType& dst_type1, const RegType& dst_type2,
- const RegType& src_type) {
+ RegType& dst_type1, RegType& dst_type2,
+ RegType& src_type) {
if (VerifyRegisterType(inst->VRegB_12x(), src_type)) {
SetRegisterTypeWide(inst->VRegA_12x(), dst_type1, dst_type2);
}
}
void RegisterLine::CheckUnaryOpFromWide(const Instruction* inst,
- const RegType& dst_type,
- const RegType& src_type1, const RegType& src_type2) {
+ RegType& dst_type,
+ RegType& src_type1, RegType& src_type2) {
if (VerifyRegisterTypeWide(inst->VRegB_12x(), src_type1, src_type2)) {
SetRegisterType(inst->VRegA_12x(), dst_type);
}
}
void RegisterLine::CheckBinaryOp(const Instruction* inst,
- const RegType& dst_type,
- const RegType& src_type1, const RegType& src_type2,
+ RegType& dst_type,
+ RegType& src_type1, RegType& src_type2,
bool check_boolean_op) {
const uint32_t vregB = inst->VRegB_23x();
const uint32_t vregC = inst->VRegC_23x();
@@ -333,9 +333,9 @@
}
void RegisterLine::CheckBinaryOpWide(const Instruction* inst,
- const RegType& dst_type1, const RegType& dst_type2,
- const RegType& src_type1_1, const RegType& src_type1_2,
- const RegType& src_type2_1, const RegType& src_type2_2) {
+ RegType& dst_type1, RegType& dst_type2,
+ RegType& src_type1_1, RegType& src_type1_2,
+ RegType& src_type2_1, RegType& src_type2_2) {
if (VerifyRegisterTypeWide(inst->VRegB_23x(), src_type1_1, src_type1_2) &&
VerifyRegisterTypeWide(inst->VRegC_23x(), src_type2_1, src_type2_2)) {
SetRegisterTypeWide(inst->VRegA_23x(), dst_type1, dst_type2);
@@ -343,8 +343,8 @@
}
void RegisterLine::CheckBinaryOpWideShift(const Instruction* inst,
- const RegType& long_lo_type, const RegType& long_hi_type,
- const RegType& int_type) {
+ RegType& long_lo_type, RegType& long_hi_type,
+ RegType& int_type) {
if (VerifyRegisterTypeWide(inst->VRegB_23x(), long_lo_type, long_hi_type) &&
VerifyRegisterType(inst->VRegC_23x(), int_type)) {
SetRegisterTypeWide(inst->VRegA_23x(), long_lo_type, long_hi_type);
@@ -352,8 +352,8 @@
}
void RegisterLine::CheckBinaryOp2addr(const Instruction* inst,
- const RegType& dst_type, const RegType& src_type1,
- const RegType& src_type2, bool check_boolean_op) {
+ RegType& dst_type, RegType& src_type1,
+ RegType& src_type2, bool check_boolean_op) {
const uint32_t vregA = inst->VRegA_12x();
const uint32_t vregB = inst->VRegB_12x();
if (VerifyRegisterType(vregA, src_type1) &&
@@ -371,9 +371,9 @@
}
void RegisterLine::CheckBinaryOp2addrWide(const Instruction* inst,
- const RegType& dst_type1, const RegType& dst_type2,
- const RegType& src_type1_1, const RegType& src_type1_2,
- const RegType& src_type2_1, const RegType& src_type2_2) {
+ RegType& dst_type1, RegType& dst_type2,
+ RegType& src_type1_1, RegType& src_type1_2,
+ RegType& src_type2_1, RegType& src_type2_2) {
const uint32_t vregA = inst->VRegA_12x();
const uint32_t vregB = inst->VRegB_12x();
if (VerifyRegisterTypeWide(vregA, src_type1_1, src_type1_2) &&
@@ -383,8 +383,8 @@
}
void RegisterLine::CheckBinaryOp2addrWideShift(const Instruction* inst,
- const RegType& long_lo_type, const RegType& long_hi_type,
- const RegType& int_type) {
+ RegType& long_lo_type, RegType& long_hi_type,
+ RegType& int_type) {
const uint32_t vregA = inst->VRegA_12x();
const uint32_t vregB = inst->VRegB_12x();
if (VerifyRegisterTypeWide(vregA, long_lo_type, long_hi_type) &&
@@ -394,7 +394,7 @@
}
void RegisterLine::CheckLiteralOp(const Instruction* inst,
- const RegType& dst_type, const RegType& src_type,
+ RegType& dst_type, RegType& src_type,
bool check_boolean_op, bool is_lit16) {
const uint32_t vregA = is_lit16 ? inst->VRegA_22s() : inst->VRegA_22b();
const uint32_t vregB = is_lit16 ? inst->VRegB_22s() : inst->VRegB_22b();
@@ -413,7 +413,7 @@
}
void RegisterLine::PushMonitor(uint32_t reg_idx, int32_t insn_idx) {
- const RegType& reg_type = GetRegisterType(reg_idx);
+ RegType& reg_type = GetRegisterType(reg_idx);
if (!reg_type.IsReferenceTypes()) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-enter on non-object (" << reg_type << ")";
} else if (monitors_.size() >= 32) {
@@ -425,7 +425,7 @@
}
void RegisterLine::PopMonitor(uint32_t reg_idx) {
- const RegType& reg_type = GetRegisterType(reg_idx);
+ RegType& reg_type = GetRegisterType(reg_idx);
if (!reg_type.IsReferenceTypes()) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-exit on non-object (" << reg_type << ")";
} else if (monitors_.empty()) {
@@ -460,9 +460,9 @@
DCHECK(incoming_line != nullptr);
for (size_t idx = 0; idx < num_regs_; idx++) {
if (line_[idx] != incoming_line->line_[idx]) {
- const RegType& incoming_reg_type = incoming_line->GetRegisterType(idx);
- const RegType& cur_type = GetRegisterType(idx);
- const RegType& new_type = cur_type.Merge(incoming_reg_type, verifier_->GetRegTypeCache());
+ RegType& incoming_reg_type = incoming_line->GetRegisterType(idx);
+ RegType& cur_type = GetRegisterType(idx);
+ RegType& new_type = cur_type.Merge(incoming_reg_type, verifier_->GetRegTypeCache());
changed = changed || !cur_type.Equals(new_type);
line_[idx] = new_type.GetId();
}
@@ -508,7 +508,8 @@
std::ostream& operator<<(std::ostream& os, const RegisterLine& rhs)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- os << rhs.Dump();
+ RegisterLine& rhs_non_const = const_cast<RegisterLine&>(rhs);
+ os << rhs_non_const.Dump();
return os;
}
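
The operator<< change above uses the standard const_cast shim: the stream operator must keep taking its argument by const reference, but Dump() is no longer const, so the qualifier is cast away at the call site. This is only safe because Dump() performs no actual mutation. A self-contained sketch:

    #include <iostream>
    #include <string>

    struct Line {
      std::string Dump() { return "0:[Integer]"; }  // non-const, as above
    };

    std::ostream& operator<<(std::ostream& os, const Line& rhs) {
      // Safe only because Dump() does not modify the object.
      Line& rhs_non_const = const_cast<Line&>(rhs);
      os << rhs_non_const.Dump();
      return os;
    }

    int main() {
      Line line;
      std::cout << line << std::endl;
      return 0;
    }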
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index 57c7517..b0018d2 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -81,26 +81,26 @@
// Set the type of register N, verifying that the register is valid. If "newType" is the "Lo"
// part of a 64-bit value, register N+1 will be set to "newType+1".
// The register index was validated during the static pass, so we don't need to check it here.
- bool SetRegisterType(uint32_t vdst, const RegType& new_type)
+ bool SetRegisterType(uint32_t vdst, RegType& new_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool SetRegisterTypeWide(uint32_t vdst, const RegType& new_type1, const RegType& new_type2)
+ bool SetRegisterTypeWide(uint32_t vdst, RegType& new_type1, RegType& new_type2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/* Set the type of the "result" register. */
- void SetResultRegisterType(const RegType& new_type)
+ void SetResultRegisterType(RegType& new_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetResultRegisterTypeWide(const RegType& new_type1, const RegType& new_type2)
+ void SetResultRegisterTypeWide(RegType& new_type1, RegType& new_type2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get the type of register vsrc.
- const RegType& GetRegisterType(uint32_t vsrc) const;
+ RegType& GetRegisterType(uint32_t vsrc) const;
- bool VerifyRegisterType(uint32_t vsrc, const RegType& check_type)
+ bool VerifyRegisterType(uint32_t vsrc, RegType& check_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool VerifyRegisterTypeWide(uint32_t vsrc, const RegType& check_type1, const RegType& check_type2)
+ bool VerifyRegisterTypeWide(uint32_t vsrc, RegType& check_type1, RegType& check_type2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CopyFromLine(const RegisterLine* src) {
@@ -110,7 +110,7 @@
reg_to_lock_depths_ = src->reg_to_lock_depths_;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void FillWithGarbage() {
memset(&line_, 0xf1, num_regs_ * sizeof(uint16_t));
@@ -126,7 +126,7 @@
* to prevent them from being used (otherwise, MarkRefsAsInitialized would mark the old ones and
* the new ones at the same time).
*/
- void MarkUninitRefsAsInvalid(const RegType& uninit_type)
+ void MarkUninitRefsAsInvalid(RegType& uninit_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -134,7 +134,7 @@
* reference type. This is called when an appropriate constructor is invoked -- all copies of
* the reference must be marked as initialized.
*/
- void MarkRefsAsInitialized(const RegType& uninit_type)
+ void MarkRefsAsInitialized(RegType& uninit_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -173,30 +173,30 @@
* The argument count is in vA, and the first argument is in vC, for both "simple" and "range"
* versions. We just need to make sure vA is >= 1 and then return vC.
*/
- const RegType& GetInvocationThis(const Instruction* inst, bool is_range)
+ RegType& GetInvocationThis(const Instruction* inst, bool is_range)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
* Verify types for a simple two-register instruction (e.g. "neg-int").
* "dst_type" is stored into vA, and "src_type" is verified against vB.
*/
- void CheckUnaryOp(const Instruction* inst, const RegType& dst_type,
- const RegType& src_type)
+ void CheckUnaryOp(const Instruction* inst, RegType& dst_type,
+ RegType& src_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CheckUnaryOpWide(const Instruction* inst,
- const RegType& dst_type1, const RegType& dst_type2,
- const RegType& src_type1, const RegType& src_type2)
+ RegType& dst_type1, RegType& dst_type2,
+ RegType& src_type1, RegType& src_type2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CheckUnaryOpToWide(const Instruction* inst,
- const RegType& dst_type1, const RegType& dst_type2,
- const RegType& src_type)
+ RegType& dst_type1, RegType& dst_type2,
+ RegType& src_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CheckUnaryOpFromWide(const Instruction* inst,
- const RegType& dst_type,
- const RegType& src_type1, const RegType& src_type2)
+ RegType& dst_type,
+ RegType& src_type1, RegType& src_type2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -205,19 +205,19 @@
* against vB/vC.
*/
void CheckBinaryOp(const Instruction* inst,
- const RegType& dst_type, const RegType& src_type1, const RegType& src_type2,
+ RegType& dst_type, RegType& src_type1, RegType& src_type2,
bool check_boolean_op)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CheckBinaryOpWide(const Instruction* inst,
- const RegType& dst_type1, const RegType& dst_type2,
- const RegType& src_type1_1, const RegType& src_type1_2,
- const RegType& src_type2_1, const RegType& src_type2_2)
+ RegType& dst_type1, RegType& dst_type2,
+ RegType& src_type1_1, RegType& src_type1_2,
+ RegType& src_type2_1, RegType& src_type2_2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CheckBinaryOpWideShift(const Instruction* inst,
- const RegType& long_lo_type, const RegType& long_hi_type,
- const RegType& int_type)
+ RegType& long_lo_type, RegType& long_hi_type,
+ RegType& int_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -225,20 +225,20 @@
* are verified against vA/vB, then "dst_type" is stored into vA.
*/
void CheckBinaryOp2addr(const Instruction* inst,
- const RegType& dst_type,
- const RegType& src_type1, const RegType& src_type2,
+ RegType& dst_type,
+ RegType& src_type1, RegType& src_type2,
bool check_boolean_op)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CheckBinaryOp2addrWide(const Instruction* inst,
- const RegType& dst_type1, const RegType& dst_type2,
- const RegType& src_type1_1, const RegType& src_type1_2,
- const RegType& src_type2_1, const RegType& src_type2_2)
+ RegType& dst_type1, RegType& dst_type2,
+ RegType& src_type1_1, RegType& src_type1_2,
+ RegType& src_type2_1, RegType& src_type2_2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CheckBinaryOp2addrWideShift(const Instruction* inst,
- const RegType& long_lo_type, const RegType& long_hi_type,
- const RegType& int_type)
+ RegType& long_lo_type, RegType& long_hi_type,
+ RegType& int_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -248,7 +248,7 @@
* If "check_boolean_op" is set, we use the constant value in vC.
*/
void CheckLiteralOp(const Instruction* inst,
- const RegType& dst_type, const RegType& src_type,
+ RegType& dst_type, RegType& src_type,
bool check_boolean_op, bool is_lit16)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
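
The header comments above describe the wide-value convention: a 64-bit value occupies registers vN and vN+1, and the "Hi" type id is always the "Lo" id plus one, which is the invariant CheckWidePair enforces. A minimal model of that convention, not ART's implementation:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct MiniLine {
      std::vector<uint16_t> line;
      explicit MiniLine(size_t n) : line(n) {}
      void SetWide(uint32_t vdst, uint16_t lo_id) {
        line[vdst] = lo_id;
        line[vdst + 1] = lo_id + 1;  // hi half is always lo + 1
      }
      bool HasWidePairAt(uint32_t v) const {
        return line[v + 1] == line[v] + 1;
      }
    };

    int main() {
      MiniLine regs(4);
      regs.SetWide(0, 10);  // e.g. long-lo id 10, long-hi id 11
      assert(regs.HasWidePairAt(0));
      return 0;
    }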
diff --git a/test/004-JniTest/jni_test.cc b/test/004-JniTest/jni_test.cc
index 4909a4a..554712a 100644
--- a/test/004-JniTest/jni_test.cc
+++ b/test/004-JniTest/jni_test.cc
@@ -286,3 +286,8 @@
return char_returns[c1];
}
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_nativeIsAssignableFrom(JNIEnv* env, jclass,
+ jclass from, jclass to) {
+ return env->IsAssignableFrom(from, to);
+}
diff --git a/test/004-JniTest/src/Main.java b/test/004-JniTest/src/Main.java
index 11c80f5..ae133be 100644
--- a/test/004-JniTest/src/Main.java
+++ b/test/004-JniTest/src/Main.java
@@ -29,6 +29,7 @@
testShortMethod();
testBooleanMethod();
testCharMethod();
+ testIsAssignableFromOnPrimitiveTypes();
}
private static native void testFindClassOnAttachedNativeThread();
@@ -151,4 +152,19 @@
}
}
}
+
+ // http://b/16531674
+ private static void testIsAssignableFromOnPrimitiveTypes() {
+ if (!nativeIsAssignableFrom(int.class, Integer.TYPE)) {
+ System.out.println("IsAssignableFrom(int.class, Integer.TYPE) returned false, expected true");
+ throw new AssertionError();
+ }
+
+ if (!nativeIsAssignableFrom(Integer.TYPE, int.class)) {
+ System.out.println("IsAssignableFrom(Integer.TYPE, int.class) returned false, expected true");
+ throw new AssertionError();
+ }
+ }
+
+ native static boolean nativeIsAssignableFrom(Class<?> from, Class<?> to);
}
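
Note that int.class and Integer.TYPE name the same java.lang.Class object, so both checks above are reflexivity checks on the primitive int class. The same check can be written entirely on the native side; a sketch using only standard JNIEnv calls (error handling omitted):

    #include <jni.h>

    // Fetch java.lang.Integer.TYPE (the primitive int class) and ask
    // whether it is assignable to itself, as the test above does.
    jboolean IntAssignableToItself(JNIEnv* env) {
      jclass integer_cls = env->FindClass("java/lang/Integer");
      jfieldID type_field =
          env->GetStaticFieldID(integer_cls, "TYPE", "Ljava/lang/Class;");
      jclass int_primitive = static_cast<jclass>(
          env->GetStaticObjectField(integer_cls, type_field));
      return env->IsAssignableFrom(int_primitive, int_primitive);
    }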