Rewrite use/def masks to support 128 bits.
Reduce LIR memory usage by holding the use/def masks in the LIR
by pointer rather than by value, using pre-defined const masks
for the common cases and arena-allocating only the few uncommon
ones.

Change-Id: I0f6d27ef6867acd157184c8c74f9612cebfe6c16
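
The diff below is easier to follow with the shape of the new mask
representation in mind. Here is a minimal sketch of the pattern the
message describes, assuming a two-word (128-bit) mask class; this is
not the actual ART ResourceMask, and LirUseDefMasks is a hypothetical
stand-in for the LIR's u.m field:

  // Sketch only: two 64-bit words give the 128 resource bits.
  #include <cstddef>
  #include <cstdint>

  class ResourceMask {
   public:
    constexpr ResourceMask(uint64_t mask1, uint64_t mask2)
        : masks_{mask1, mask2} {}

    // Encode a single resource bit; the unselected ternary arm is
    // never evaluated, so the shifts stay in range.
    constexpr static ResourceMask Bit(size_t bit) {
      return ResourceMask(bit >= 64u ? 0u : UINT64_C(1) << bit,
                          bit >= 64u ? UINT64_C(1) << (bit - 64u) : 0u);
    }

    bool HasBit(size_t bit) const {
      return (masks_[bit / 64u] & (UINT64_C(1) << (bit % 64u))) != 0u;
    }

   private:
    uint64_t masks_[2];
  };

  // Pre-defined const masks for the common cases, so most LIRs can
  // share them instead of owning a 16-byte mask apiece.
  constexpr ResourceMask kEncodeNone = ResourceMask(0u, 0u);
  constexpr ResourceMask kEncodeAll = ResourceMask(~UINT64_C(0), ~UINT64_C(0));

  // Hypothetical stand-in for the LIR's u.m field: two pointers
  // (8 bytes each) replace two inline masks; uncommon masks are
  // arena-allocated and shared where possible.
  struct LirUseDefMasks {
    const ResourceMask* use_mask;
    const ResourceMask* def_mask;
  };

With this layout the barrier in the first hunk can simply point at
kEncodeAll instead of storing an all-ones mask inline.
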
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 69ca715..8f6d716 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -44,7 +44,7 @@
   LIR* barrier = NewLIR0(kPseudoBarrier);
   /* Mark all resources as being clobbered */
   DCHECK(!barrier->flags.use_def_invalid);
-  barrier->u.m.def_mask = ENCODE_ALL;
+  barrier->u.m.def_mask = &kEncodeAll;
 }
 
 void Mir2Lir::GenDivZeroException() {
@@ -447,6 +447,7 @@
     for (int i = 0; i < elems; i++) {
       RegLocation loc = UpdateLoc(info->args[i]);
       if (loc.location == kLocPhysReg) {
+        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
         Store32Disp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg);
       }
     }
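
ScopedMemRefType is used here and in the next hunk to annotate the
stack accesses as Dalvik register accesses. Presumably it is a small
RAII guard that switches the code generator's current memory reference
type and restores the old one on scope exit; a sketch under that
assumption, with MemRefCodegen as a hypothetical stand-in for Mir2Lir:

  // Hypothetical stand-in for Mir2Lir, reduced to the one field the
  // guard manipulates.
  class MemRefCodegen {
   public:
    enum MemRefType { kHeapRef, kLiteral, kDalvikReg };

    // RAII guard: memory instructions emitted while it is alive are
    // annotated with new_type; the previous type returns on scope exit.
    class ScopedMemRefType {
     public:
      ScopedMemRefType(MemRefCodegen* cg, MemRefType new_type)
          : cg_(cg), old_type_(cg->mem_ref_type_) {
        cg_->mem_ref_type_ = new_type;
      }
      ~ScopedMemRefType() {
        cg_->mem_ref_type_ = old_type_;
      }

     private:
      MemRefCodegen* const cg_;
      const MemRefType old_type_;
    };

   private:
    MemRefType mem_ref_type_ = kHeapRef;  // Plain heap access by default.
  };

Scoping the guard to a brace block, as in the next hunk, limits the
annotation to exactly the instructions emitted inside it.
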
@@ -484,7 +485,12 @@
     // Generate the copy loop. Going backwards for convenience
     LIR* target = NewLIR0(kPseudoTargetLabel);
     // Copy next element
-    LoadBaseIndexed(r_src, r_idx, r_val, 2, k32);
+    {
+      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+      LoadBaseIndexed(r_src, r_idx, r_val, 2, k32);
+      // NOTE: No dalvik register annotation, local optimizations will be stopped
+      // by the loop boundaries.
+    }
     StoreBaseIndexed(r_dst, r_idx, r_val, 2, k32);
     FreeTemp(r_val);
     OpDecAndBranch(kCondGe, r_idx, target);
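
A note on the asymmetry in this last hunk: the LoadBaseIndexed reads
the arguments out of the Dalvik register area of the frame via r_src,
so it sits inside the kDalvikReg scope, while the StoreBaseIndexed
writes into the new array's backing memory via r_dst and is presumably
left with the default heap reference type. Per the NOTE, precise
per-register annotation is unnecessary inside the loop because local
optimizations do not cross its block boundaries anyway.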