Merge "Revert "ART: Fix StoreValue to use RefDisp when necessary.""
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index 414d514..b0216b5 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -864,7 +864,9 @@
(1 << kPromoteCompilerTemps));
} else if (cu.instruction_set == kX86_64) {
// TODO(X86_64): enable optimizations once backend is mature enough.
- cu.disable_opt = ~(uint32_t)0;
+ cu.disable_opt |= (
+ (1 << kLoadStoreElimination) |
+ (1 << kPromoteRegs));
} else if (cu.instruction_set == kArm64) {
// TODO(Arm64): enable optimizations once backend is mature enough.
cu.disable_opt = ~(uint32_t)0;
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index b3fac77..638c590 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -363,20 +363,27 @@
INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegLocationRegLocation, RegLocation arg0,
RegLocation arg1, bool safepoint_pc)
+// TODO: This is a hack! Reshape the two macros into functions and move them to a better place.
+#define IsSameReg(r1, r2) \
+ (GetRegInfo(r1)->Master()->GetReg().GetReg() == GetRegInfo(r2)->Master()->GetReg().GetReg())
+#define TargetArgReg(arg, is_wide) \
+ (GetRegInfo(TargetReg(arg))->FindMatchingView( \
+ (is_wide) ? RegisterInfo::k64SoloStorageMask : RegisterInfo::k32SoloStorageMask)->GetReg())
+
void Mir2Lir::CopyToArgumentRegs(RegStorage arg0, RegStorage arg1) {
- if (arg1.GetReg() == TargetReg(kArg0).GetReg()) {
- if (arg0.GetReg() == TargetReg(kArg1).GetReg()) {
+ if (IsSameReg(arg1, TargetReg(kArg0))) {
+ if (IsSameReg(arg0, TargetReg(kArg1))) {
// Swap kArg0 and kArg1 with kArg2 as temp.
- OpRegCopy(TargetReg(kArg2), arg1);
- OpRegCopy(TargetReg(kArg0), arg0);
- OpRegCopy(TargetReg(kArg1), TargetReg(kArg2));
+ OpRegCopy(TargetArgReg(kArg2, arg1.Is64Bit()), arg1);
+ OpRegCopy(TargetArgReg(kArg0, arg0.Is64Bit()), arg0);
+ OpRegCopy(TargetArgReg(kArg1, arg1.Is64Bit()), TargetReg(kArg2));
} else {
- OpRegCopy(TargetReg(kArg1), arg1);
- OpRegCopy(TargetReg(kArg0), arg0);
+ OpRegCopy(TargetArgReg(kArg1, arg1.Is64Bit()), arg1);
+ OpRegCopy(TargetArgReg(kArg0, arg0.Is64Bit()), arg0);
}
} else {
- OpRegCopy(TargetReg(kArg0), arg0);
- OpRegCopy(TargetReg(kArg1), arg1);
+ OpRegCopy(TargetArgReg(kArg0, arg0.Is64Bit()), arg0);
+ OpRegCopy(TargetArgReg(kArg1, arg1.Is64Bit()), arg1);
}
}
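
Reviewer note on the TODO above: a minimal sketch of how the two macros could be reshaped into Mir2Lir helper functions, as the comment itself suggests. It only reuses calls already visible in this hunk (GetRegInfo, Master, FindMatchingView, TargetReg, the RegisterInfo storage masks); the helper names and their placement are assumptions, not committed code.

// Hypothetical helpers (names are assumptions); would live alongside CopyToArgumentRegs.
bool Mir2Lir::RegStorageIsSameRegister(RegStorage r1, RegStorage r2) {
  // Same physical register if both views share the same master register.
  return GetRegInfo(r1)->Master()->GetReg().GetReg() ==
         GetRegInfo(r2)->Master()->GetReg().GetReg();
}

RegStorage Mir2Lir::TargetArgRegForWidth(SpecialTargetRegister arg, bool is_wide) {
  // Pick the 32-bit or 64-bit view of the target argument register.
  return GetRegInfo(TargetReg(arg))->FindMatchingView(
      is_wide ? RegisterInfo::k64SoloStorageMask : RegisterInfo::k32SoloStorageMask)->GetReg();
}
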
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index 1f05ab9..5082d60 100644
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -221,7 +221,7 @@
LoadConstant(rl_result.reg, 0x7fffffff);
NewLIR2(kX86Cvtsi2ssRR, temp_reg.GetReg(), rl_result.reg.GetReg());
NewLIR2(kX86ComissRR, rl_src.reg.GetReg(), temp_reg.GetReg());
- LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondA);
+ LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondAe);
LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
NewLIR2(kX86Cvttss2siRR, rl_result.reg.GetReg(), rl_src.reg.GetReg());
LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
@@ -242,7 +242,7 @@
LoadConstant(rl_result.reg, 0x7fffffff);
NewLIR2(kX86Cvtsi2sdRR, temp_reg.GetReg(), rl_result.reg.GetReg());
NewLIR2(kX86ComisdRR, rl_src.reg.GetReg(), temp_reg.GetReg());
- LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondA);
+ LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondAe);
LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
NewLIR2(kX86Cvttsd2siRR, rl_result.reg.GetReg(), rl_src.reg.GetReg());
LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
@@ -281,7 +281,7 @@
LoadConstantWide(rl_result.reg, 0x7fffffffffffffff);
NewLIR2(kX86Cvtsqi2ssRR, temp_reg.GetReg(), rl_result.reg.GetReg());
NewLIR2(kX86ComissRR, rl_src.reg.GetReg(), temp_reg.GetReg());
- LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondA);
+ LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondAe);
LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
NewLIR2(kX86Cvttss2sqiRR, rl_result.reg.GetReg(), rl_src.reg.GetReg());
LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
@@ -306,7 +306,7 @@
LoadConstantWide(rl_result.reg, 0x7fffffffffffffff);
NewLIR2(kX86Cvtsqi2sdRR, temp_reg.GetReg(), rl_result.reg.GetReg());
NewLIR2(kX86ComisdRR, rl_src.reg.GetReg(), temp_reg.GetReg());
- LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondA);
+ LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondAe);
LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
NewLIR2(kX86Cvttsd2sqiRR, rl_result.reg.GetReg(), rl_src.reg.GetReg());
LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
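
Reviewer note on the kX86CondA -> kX86CondAe changes above: the overflow bound is produced by converting 0x7fffffff (or 0x7fffffffffffffff) to float/double, and those integers are not exactly representable in float (nor, for the long case, in double), so the materialized bound rounds up to 2^31 (respectively 2^63). A source exactly equal to that rounded bound already overflows the truncating convert, so the positive-overflow branch has to fire on "above or equal" rather than strictly "above". A tiny standalone C++ illustration of the float case; nothing below is ART code.

#include <cstdint>
#include <cstdio>

int main() {
  float bound = static_cast<float>(INT32_MAX);   // models kX86Cvtsi2ssRR on 0x7fffffff
  std::printf("%.1f\n", bound);                  // prints 2147483648.0 -- one past INT32_MAX
  // comiss src, bound with src == bound reports "equal", so only CondAe (>=) routes this
  // case to the branch that keeps 0x7fffffff instead of letting cvttss2si overflow.
  return 0;
}
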
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index b905312..48fcd2c 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -35,14 +35,18 @@
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegReg(kOpXor, rl_result.reg, rl_result.reg); // result = 0
- OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
- NewLIR2(kX86Set8R, rl_result.reg.GetReg(), kX86CondNe); // result = (src1 != src2) ? 1 : result
+ RegStorage rl_result_wide = RegStorage::Solo64(rl_result.reg.GetRegNum());
RegStorage temp_reg = AllocTemp();
- OpRegReg(kOpNeg, temp_reg, rl_result.reg);
- OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
- // result = (src1 < src2) ? -result : result
- OpCondRegReg(kOpCmov, kCondLt, rl_result.reg, temp_reg);
+ OpRegReg(kOpXor, temp_reg, temp_reg); // temp = 0
+ OpRegRegReg(kOpSub, rl_result_wide, rl_src1.reg, rl_src2.reg);
+ NewLIR2(kX86Set8R, temp_reg.GetReg(), kX86CondG); // temp = (src1 > src2) ? 1 : temp
+
+ NewLIR2(kX86Rol64RI, rl_result_wide.GetReg(), 1);
+ OpRegImm(kOpAnd, rl_result.reg, 1);
+ OpRegReg(kOpNeg, rl_result.reg, rl_result.reg);
+ // result is now -1 if (src1 - src2) is negative, else 0 (taken from the rotated sign bit).
+ // result += temp, yielding -1, 0 or 1.
+ OpRegReg(kOpAdd, rl_result.reg, temp_reg);
+
StoreValue(rl_dest, rl_result);
FreeTemp(temp_reg);
return;
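
Reviewer note: the new GenCmpLong sequence above is branchless. Below is a C++ model of exactly the instructions being emitted, purely illustrative; it mirrors the generated code and is not ART source. (Note that, like the emitted code, the model takes the "< 0" bit from the wrapped 64-bit difference rather than from a direct signed compare.)

#include <cstdint>

// Models: xor temp,temp; sub(64) result,src1,src2; set8 temp on CondG;
//         rol64 result,1; and result,1; neg result; add result,temp.
int EmittedCmpLongSequence(int64_t src1, int64_t src2) {
  int32_t temp = (src1 > src2) ? 1 : 0;                      // set8 on CondG after the 64-bit sub
  uint64_t diff = static_cast<uint64_t>(src1) - static_cast<uint64_t>(src2);
  uint64_t rotated = (diff << 1) | (diff >> 63);             // kX86Rol64RI by 1
  int32_t result = static_cast<int32_t>(rotated & 1);        // bit 0 == sign bit of the difference
  result = -result;                                          // -1 if the difference is negative
  return result + temp;                                      // -1, 0 or 1
}
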
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index b6b5313..5f3cd92 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1076,7 +1076,7 @@
bool image = (!image_filename.empty());
if (!image && boot_image_filename.empty()) {
- boot_image_filename += GetAndroidRoot();
+ boot_image_filename += android_root;
boot_image_filename += "/framework/boot.art";
}
std::string boot_image_option;
@@ -1136,9 +1136,8 @@
}
if (compiler_filter_string == nullptr) {
- if (instruction_set == kArm64 ||
- instruction_set == kMips) {
- // TODO: implement/fix compilers for these architectures.
+ if (instruction_set == kMips) {
+ // TODO: fix compiler for Mips.
compiler_filter_string = "interpret-only";
} else if (image) {
compiler_filter_string = "speed";
diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index e6a6860..b012bc1 100644
--- a/disassembler/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
@@ -56,10 +56,16 @@
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
};
+// REX prefix modifier bits for 64-bit (x86-64) opcodes.
+constexpr uint8_t REX_W = 0b1000;
+constexpr uint8_t REX_R = 0b0100;
+constexpr uint8_t REX_X = 0b0010;
+constexpr uint8_t REX_B = 0b0001;
+
static void DumpReg0(std::ostream& os, uint8_t rex, size_t reg,
bool byte_operand, uint8_t size_override) {
DCHECK_LT(reg, (rex == 0) ? 8u : 16u);
- bool rex_w = (rex & 0b1000) != 0;
+ bool rex_w = (rex & REX_W) != 0;
if (byte_operand) {
os << ((rex == 0) ? gReg8Names[reg] : gExtReg8Names[reg]);
} else if (rex_w) {
@@ -86,14 +92,14 @@
static void DumpReg(std::ostream& os, uint8_t rex, uint8_t reg,
bool byte_operand, uint8_t size_override, RegFile reg_file) {
- bool rex_r = (rex & 0b0100) != 0;
+ bool rex_r = (rex & REX_R) != 0;
size_t reg_num = rex_r ? (reg + 8) : reg;
DumpAnyReg(os, rex, reg_num, byte_operand, size_override, reg_file);
}
static void DumpRmReg(std::ostream& os, uint8_t rex, uint8_t reg,
bool byte_operand, uint8_t size_override, RegFile reg_file) {
- bool rex_b = (rex & 0b0001) != 0;
+ bool rex_b = (rex & REX_B) != 0;
size_t reg_num = rex_b ? (reg + 8) : reg;
DumpAnyReg(os, rex, reg_num, byte_operand, size_override, reg_file);
}
@@ -107,19 +113,19 @@
}
static void DumpBaseReg(std::ostream& os, uint8_t rex, uint8_t reg) {
- bool rex_b = (rex & 0b0001) != 0;
+ bool rex_b = (rex & REX_B) != 0;
size_t reg_num = rex_b ? (reg + 8) : reg;
DumpAddrReg(os, rex, reg_num);
}
static void DumpIndexReg(std::ostream& os, uint8_t rex, uint8_t reg) {
- bool rex_x = (rex & 0b0010) != 0;
+ bool rex_x = (rex & REX_X) != 0;
uint8_t reg_num = rex_x ? (reg + 8) : reg;
DumpAddrReg(os, rex, reg_num);
}
static void DumpOpcodeReg(std::ostream& os, uint8_t rex, uint8_t reg) {
- bool rex_b = (rex & 0b0001) != 0;
+ bool rex_b = (rex & REX_B) != 0;
size_t reg_num = rex_b ? (reg + 8) : reg;
DumpReg0(os, rex, reg_num, false, 0);
}
@@ -896,6 +902,7 @@
case 0xB0: case 0xB1: case 0xB2: case 0xB3: case 0xB4: case 0xB5: case 0xB6: case 0xB7:
opcode << "mov";
immediate_bytes = 1;
+ byte_operand = true;
reg_in_opcode = true;
break;
case 0xB8: case 0xB9: case 0xBA: case 0xBB: case 0xBC: case 0xBD: case 0xBE: case 0xBF:
@@ -916,6 +923,15 @@
byte_operand = (*instr == 0xC0);
break;
case 0xC3: opcode << "ret"; break;
+ case 0xC6:
+ static const char* c6_opcodes[] = {"mov", "unknown-c6", "unknown-c6", "unknown-c6", "unknown-c6", "unknown-c6", "unknown-c6", "unknown-c6"};
+ modrm_opcodes = c6_opcodes;
+ store = true;
+ immediate_bytes = 1;
+ has_modrm = true;
+ reg_is_opcode = true;
+ byte_operand = true;
+ break;
case 0xC7:
static const char* c7_opcodes[] = {"mov", "unknown-c7", "unknown-c7", "unknown-c7", "unknown-c7", "unknown-c7", "unknown-c7", "unknown-c7"};
modrm_opcodes = c7_opcodes;
@@ -1064,6 +1080,16 @@
if (reg_is_opcode && modrm_opcodes != NULL) {
opcode << modrm_opcodes[reg_or_opcode];
}
+
+ // Add opcode suffixes to indicate size.
+ if (byte_operand) {
+ opcode << 'b';
+ } else if ((rex & REX_W) != 0) {
+ opcode << 'q';
+ } else if (prefix[2] == 0x66) {
+ opcode << 'w';
+ }
+
if (load) {
if (!reg_is_opcode) {
DumpReg(args, rex, reg_or_opcode, byte_operand, prefix[2], dst_reg_file);
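
Reviewer note: the named constants above replace the magic 0b... masks. For readers less familiar with x86-64 encoding, a small self-contained sketch of what each REX bit contributes; the REX_W/REX_R/REX_X/REX_B names come from this hunk, but the helper itself is illustrative, not disassembler code.

#include <cstdint>
#include <cstdio>

// rex holds the low nibble of a 0x40..0x4F prefix byte (0 when no REX prefix is present).
void DescribeRex(uint8_t rex) {
  bool w = (rex & 0b1000) != 0;  // REX_W: 64-bit operand size
  bool r = (rex & 0b0100) != 0;  // REX_R: extends the ModRM reg field to r8-r15
  bool x = (rex & 0b0010) != 0;  // REX_X: extends the SIB index field
  bool b = (rex & 0b0001) != 0;  // REX_B: extends ModRM r/m, SIB base, or the opcode register
  std::printf("REX.%c%c%c%c\n", w ? 'W' : '-', r ? 'R' : '-', x ? 'X' : '-', b ? 'B' : '-');
}
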
diff --git a/runtime/base/unix_file/fd_file.cc b/runtime/base/unix_file/fd_file.cc
index 6d5b59c..f29a7ec 100644
--- a/runtime/base/unix_file/fd_file.cc
+++ b/runtime/base/unix_file/fd_file.cc
@@ -122,7 +122,9 @@
char* ptr = static_cast<char*>(buffer);
while (byte_count > 0) {
ssize_t bytes_read = TEMP_FAILURE_RETRY(read(fd_, ptr, byte_count));
- if (bytes_read == -1) {
+ if (bytes_read <= 0) {
+ // 0: end of file
+ // -1: error
return false;
}
byte_count -= bytes_read; // Reduce the number of remaining bytes.
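
Reviewer note: the reason "== -1" had to become "<= 0" is that read(2) returns 0 at end of file; the old loop then subtracted zero from byte_count and never made progress on a too-short file. A minimal standalone model of the fixed loop, illustrative only and not FdFile code.

#include <unistd.h>
#include <cstddef>

bool ReadFullyModel(int fd, void* buffer, size_t byte_count) {
  char* ptr = static_cast<char*>(buffer);
  while (byte_count > 0) {
    ssize_t bytes_read = read(fd, ptr, byte_count);  // no TEMP_FAILURE_RETRY in this sketch
    if (bytes_read <= 0) {  // 0: end of file, -1: error -- fail instead of spinning
      return false;
    }
    byte_count -= bytes_read;
    ptr += bytes_read;
  }
  return true;
}
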
diff --git a/runtime/base/unix_file/fd_file_test.cc b/runtime/base/unix_file/fd_file_test.cc
index d620666..33b3d3e 100644
--- a/runtime/base/unix_file/fd_file_test.cc
+++ b/runtime/base/unix_file/fd_file_test.cc
@@ -16,6 +16,7 @@
#include "base/unix_file/fd_file.h"
#include "base/unix_file/random_access_file_test.h"
+#include "common_runtime_test.h" // For ScratchFile
#include "gtest/gtest.h"
namespace unix_file {
@@ -60,4 +61,15 @@
EXPECT_TRUE(file.IsOpened());
}
+TEST_F(FdFileTest, ReadFullyEmptyFile) {
+ // New scratch file, zero-length.
+ art::ScratchFile tmp;
+ FdFile file;
+ ASSERT_TRUE(file.Open(tmp.GetFilename(), O_RDONLY));
+ EXPECT_GE(file.Fd(), 0);
+ EXPECT_TRUE(file.IsOpened());
+ uint8_t buffer[16];
+ EXPECT_FALSE(file.ReadFully(&buffer, 4));
+}
+
} // namespace unix_file
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index edba502..b6810b0 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -145,28 +145,30 @@
};
enum VerifyFlag {
- kVerifyNone = 0x000000,
- kVerifyRegA = 0x000001,
- kVerifyRegAWide = 0x000002,
- kVerifyRegB = 0x000004,
- kVerifyRegBField = 0x000008,
- kVerifyRegBMethod = 0x000010,
- kVerifyRegBNewInstance = 0x000020,
- kVerifyRegBString = 0x000040,
- kVerifyRegBType = 0x000080,
- kVerifyRegBWide = 0x000100,
- kVerifyRegC = 0x000200,
- kVerifyRegCField = 0x000400,
- kVerifyRegCNewArray = 0x000800,
- kVerifyRegCType = 0x001000,
- kVerifyRegCWide = 0x002000,
- kVerifyArrayData = 0x004000,
- kVerifyBranchTarget = 0x008000,
- kVerifySwitchTargets = 0x010000,
- kVerifyVarArg = 0x020000,
- kVerifyVarArgRange = 0x040000,
- kVerifyRuntimeOnly = 0x080000,
- kVerifyError = 0x100000,
+ kVerifyNone = 0x000000,
+ kVerifyRegA = 0x000001,
+ kVerifyRegAWide = 0x000002,
+ kVerifyRegB = 0x000004,
+ kVerifyRegBField = 0x000008,
+ kVerifyRegBMethod = 0x000010,
+ kVerifyRegBNewInstance = 0x000020,
+ kVerifyRegBString = 0x000040,
+ kVerifyRegBType = 0x000080,
+ kVerifyRegBWide = 0x000100,
+ kVerifyRegC = 0x000200,
+ kVerifyRegCField = 0x000400,
+ kVerifyRegCNewArray = 0x000800,
+ kVerifyRegCType = 0x001000,
+ kVerifyRegCWide = 0x002000,
+ kVerifyArrayData = 0x004000,
+ kVerifyBranchTarget = 0x008000,
+ kVerifySwitchTargets = 0x010000,
+ kVerifyVarArg = 0x020000,
+ kVerifyVarArgNonZero = 0x040000,
+ kVerifyVarArgRange = 0x080000,
+ kVerifyVarArgRangeNonZero = 0x100000,
+ kVerifyRuntimeOnly = 0x200000,
+ kVerifyError = 0x400000,
};
static constexpr uint32_t kMaxVarArgRegs = 5;
@@ -506,7 +508,8 @@
int GetVerifyExtraFlags() const {
return (kInstructionVerifyFlags[Opcode()] & (kVerifyArrayData | kVerifyBranchTarget |
- kVerifySwitchTargets | kVerifyVarArg | kVerifyVarArgRange | kVerifyError));
+ kVerifySwitchTargets | kVerifyVarArg | kVerifyVarArgNonZero | kVerifyVarArgRange |
+ kVerifyVarArgRangeNonZero | kVerifyError));
}
bool GetVerifyIsRuntimeOnly() const {
diff --git a/runtime/dex_instruction_list.h b/runtime/dex_instruction_list.h
index 4cda58b..103b0d7 100644
--- a/runtime/dex_instruction_list.h
+++ b/runtime/dex_instruction_list.h
@@ -128,17 +128,17 @@
V(0x6B, SPUT_BYTE, "sput-byte", k21c, false, kFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
V(0x6C, SPUT_CHAR, "sput-char", k21c, false, kFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
V(0x6D, SPUT_SHORT, "sput-short", k21c, false, kFieldRef, kContinue | kThrow | kStore | kRegBFieldOrConstant, kVerifyRegA | kVerifyRegBField) \
- V(0x6E, INVOKE_VIRTUAL, "invoke-virtual", k35c, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArg) \
- V(0x6F, INVOKE_SUPER, "invoke-super", k35c, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArg) \
- V(0x70, INVOKE_DIRECT, "invoke-direct", k35c, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArg) \
+ V(0x6E, INVOKE_VIRTUAL, "invoke-virtual", k35c, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgNonZero) \
+ V(0x6F, INVOKE_SUPER, "invoke-super", k35c, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgNonZero) \
+ V(0x70, INVOKE_DIRECT, "invoke-direct", k35c, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgNonZero) \
V(0x71, INVOKE_STATIC, "invoke-static", k35c, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArg) \
- V(0x72, INVOKE_INTERFACE, "invoke-interface", k35c, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArg) \
+ V(0x72, INVOKE_INTERFACE, "invoke-interface", k35c, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgNonZero) \
V(0x73, RETURN_VOID_BARRIER, "return-void-barrier", k10x, false, kNone, kReturn, kVerifyNone) \
- V(0x74, INVOKE_VIRTUAL_RANGE, "invoke-virtual/range", k3rc, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRange) \
- V(0x75, INVOKE_SUPER_RANGE, "invoke-super/range", k3rc, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRange) \
- V(0x76, INVOKE_DIRECT_RANGE, "invoke-direct/range", k3rc, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRange) \
+ V(0x74, INVOKE_VIRTUAL_RANGE, "invoke-virtual/range", k3rc, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRangeNonZero) \
+ V(0x75, INVOKE_SUPER_RANGE, "invoke-super/range", k3rc, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRangeNonZero) \
+ V(0x76, INVOKE_DIRECT_RANGE, "invoke-direct/range", k3rc, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRangeNonZero) \
V(0x77, INVOKE_STATIC_RANGE, "invoke-static/range", k3rc, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRange) \
- V(0x78, INVOKE_INTERFACE_RANGE, "invoke-interface/range", k3rc, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRange) \
+ V(0x78, INVOKE_INTERFACE_RANGE, "invoke-interface/range", k3rc, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRangeNonZero) \
V(0x79, UNUSED_79, "unused-79", k10x, false, kUnknown, 0, kVerifyError) \
V(0x7A, UNUSED_7A, "unused-7a", k10x, false, kUnknown, 0, kVerifyError) \
V(0x7B, NEG_INT, "neg-int", k12x, true, kNone, kContinue, kVerifyRegA | kVerifyRegB) \
@@ -251,8 +251,8 @@
V(0xE6, IPUT_QUICK, "iput-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
V(0xE7, IPUT_WIDE_QUICK, "iput-wide-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegAWide | kVerifyRegB | kVerifyRuntimeOnly) \
V(0xE8, IPUT_OBJECT_QUICK, "iput-object-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xE9, INVOKE_VIRTUAL_QUICK, "invoke-virtual-quick", k35c, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyVarArg | kVerifyRuntimeOnly) \
- V(0xEA, INVOKE_VIRTUAL_RANGE_QUICK, "invoke-virtual/range-quick", k3rc, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyVarArgRange | kVerifyRuntimeOnly) \
+ V(0xE9, INVOKE_VIRTUAL_QUICK, "invoke-virtual-quick", k35c, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyVarArgNonZero | kVerifyRuntimeOnly) \
+ V(0xEA, INVOKE_VIRTUAL_RANGE_QUICK, "invoke-virtual/range-quick", k3rc, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyVarArgRangeNonZero | kVerifyRuntimeOnly) \
V(0xEB, UNUSED_EB, "unused-eb", k10x, false, kUnknown, 0, kVerifyError) \
V(0xEC, UNUSED_EC, "unused-ec", k10x, false, kUnknown, 0, kVerifyError) \
V(0xED, UNUSED_ED, "unused-ed", k10x, false, kUnknown, 0, kVerifyError) \
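
Reviewer note: the split into kVerifyVarArg / kVerifyVarArgNonZero (and the range variants) presumably lets the verifier reject instance invokes that encode zero arguments, since those always need at least a receiver, while invoke-static legitimately allows zero. A hedged sketch of the kind of check this enables; the helper and its placement are assumptions, and only the flag and accessor names come from the headers above.

// Hypothetical helper: reject a zero-argument instance invoke up front.
bool VarArgCountLooksValid(const art::Instruction* inst) {
  int ext = inst->GetVerifyExtraFlags();
  bool needs_nonzero = (ext & (art::Instruction::kVerifyVarArgNonZero |
                               art::Instruction::kVerifyVarArgRangeNonZero)) != 0;
  // For both 35c and 3rc formats, vA encodes the argument count.
  return !needs_nonzero || inst->VRegA() != 0;
}
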
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index a17c36b..8622fd6 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -31,20 +31,36 @@
namespace gc {
namespace collector {
+Iteration::Iteration()
+ : duration_ns_(0), timings_("GC iteration timing logger", true, VLOG_IS_ON(heap)) {
+ Reset(kGcCauseBackground, false); // Reset to some placeholder values.
+}
+
+void Iteration::Reset(GcCause gc_cause, bool clear_soft_references) {
+ timings_.Reset();
+ pause_times_.clear();
+ duration_ns_ = 0;
+ clear_soft_references_ = clear_soft_references;
+ gc_cause_ = gc_cause;
+ freed_ = ObjectBytePair();
+ freed_los_ = ObjectBytePair();
+}
+
+uint64_t Iteration::GetEstimatedThroughput() const {
+ // Add 1ms to prevent possible division by 0.
+ return (static_cast<uint64_t>(freed_.bytes) * 1000) / (NsToMs(GetDurationNs()) + 1);
+}
+
GarbageCollector::GarbageCollector(Heap* heap, const std::string& name)
: heap_(heap),
name_(name),
- gc_cause_(kGcCauseForAlloc),
- clear_soft_references_(false),
- duration_ns_(0),
- timings_(name_.c_str(), true, VLOG_IS_ON(heap)),
pause_histogram_((name_ + " paused").c_str(), kPauseBucketSize, kPauseBucketCount),
cumulative_timings_(name) {
ResetCumulativeStatistics();
}
void GarbageCollector::RegisterPause(uint64_t nano_length) {
- pause_times_.push_back(nano_length);
+ GetCurrentIteration()->pause_times_.push_back(nano_length);
}
void GarbageCollector::ResetCumulativeStatistics() {
@@ -59,32 +75,26 @@
ATRACE_BEGIN(StringPrintf("%s %s GC", PrettyCause(gc_cause), GetName()).c_str());
Thread* self = Thread::Current();
uint64_t start_time = NanoTime();
- timings_.Reset();
- pause_times_.clear();
- duration_ns_ = 0;
- clear_soft_references_ = clear_soft_references;
- gc_cause_ = gc_cause;
- // Reset stats.
- freed_bytes_ = 0;
- freed_large_object_bytes_ = 0;
- freed_objects_ = 0;
- freed_large_objects_ = 0;
+ Iteration* current_iteration = GetCurrentIteration();
+ current_iteration->Reset(gc_cause, clear_soft_references);
RunPhases(); // Run all the GC phases.
// Add the current timings to the cumulative timings.
- cumulative_timings_.AddLogger(timings_);
+ cumulative_timings_.AddLogger(*GetTimings());
// Update cumulative statistics with how many bytes the GC iteration freed.
- total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
- total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();
+ total_freed_objects_ += current_iteration->GetFreedObjects() +
+ current_iteration->GetFreedLargeObjects();
+ total_freed_bytes_ += current_iteration->GetFreedBytes() +
+ current_iteration->GetFreedLargeObjectBytes();
uint64_t end_time = NanoTime();
- duration_ns_ = end_time - start_time;
+ current_iteration->SetDurationNs(end_time - start_time);
if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
// The entire GC was paused, clear the fake pauses which might be in the pause times and add
// the whole GC duration.
- pause_times_.clear();
- RegisterPause(duration_ns_);
+ current_iteration->pause_times_.clear();
+ RegisterPause(current_iteration->GetDurationNs());
}
- total_time_ns_ += GetDurationNs();
- for (uint64_t pause_time : pause_times_) {
+ total_time_ns_ += current_iteration->GetDurationNs();
+ for (uint64_t pause_time : current_iteration->GetPauseTimes()) {
pause_histogram_.AddValue(pause_time / 1000);
}
ATRACE_END();
@@ -125,23 +135,6 @@
return (total_freed_bytes_ * 1000) / (NsToMs(GetCumulativeTimings().GetTotalNs()) + 1);
}
-uint64_t GarbageCollector::GetEstimatedLastIterationThroughput() const {
- // Add 1ms to prevent possible division by 0.
- return (static_cast<uint64_t>(freed_bytes_) * 1000) / (NsToMs(GetDurationNs()) + 1);
-}
-
-void GarbageCollector::RecordFree(uint64_t freed_objects, int64_t freed_bytes) {
- freed_objects_ += freed_objects;
- freed_bytes_ += freed_bytes;
- GetHeap()->RecordFree(freed_objects, freed_bytes);
-}
-
-void GarbageCollector::RecordFreeLargeObjects(uint64_t freed_objects, int64_t freed_bytes) {
- freed_large_objects_ += freed_objects;
- freed_large_object_bytes_ += freed_bytes;
- GetHeap()->RecordFree(freed_objects, freed_bytes);
-}
-
void GarbageCollector::ResetMeasurements() {
cumulative_timings_.Reset();
pause_histogram_.Reset();
@@ -160,6 +153,23 @@
Runtime::Current()->GetThreadList()->ResumeAll();
}
+// Returns the current GC iteration and associated info.
+Iteration* GarbageCollector::GetCurrentIteration() {
+ return heap_->GetCurrentGcIteration();
+}
+const Iteration* GarbageCollector::GetCurrentIteration() const {
+ return heap_->GetCurrentGcIteration();
+}
+
+void GarbageCollector::RecordFree(const ObjectBytePair& freed) {
+ GetCurrentIteration()->freed_.Add(freed);
+ heap_->RecordFree(freed.objects, freed.bytes);
+}
+void GarbageCollector::RecordFreeLOS(const ObjectBytePair& freed) {
+ GetCurrentIteration()->freed_los_.Add(freed);
+ heap_->RecordFree(freed.objects, freed.bytes);
+}
+
} // namespace collector
} // namespace gc
} // namespace art
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index f4f9dbb..885569e 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -33,6 +33,78 @@
namespace collector {
+struct ObjectBytePair {
+ ObjectBytePair(uint64_t num_objects = 0, int64_t num_bytes = 0)
+ : objects(num_objects), bytes(num_bytes) {}
+ void Add(const ObjectBytePair& other) {
+ objects += other.objects;
+ bytes += other.bytes;
+ }
+ // Number of objects which were freed.
+ uint64_t objects;
+ // Freed bytes are signed since the GC can free negative bytes if it promotes objects to a space
+ // which has a larger allocation size.
+ int64_t bytes;
+};
+
+// Information related to a single garbage collector iteration. Since we only ever have one GC
+// running at any given time, we can keep a single shared Iteration instance.
+class Iteration {
+ public:
+ Iteration();
+ // Returns the individual mutator pause times in nanoseconds.
+ const std::vector<uint64_t>& GetPauseTimes() const {
+ return pause_times_;
+ }
+ TimingLogger* GetTimings() {
+ return &timings_;
+ }
+ // Returns how long the GC took to complete in nanoseconds.
+ uint64_t GetDurationNs() const {
+ return duration_ns_;
+ }
+ int64_t GetFreedBytes() const {
+ return freed_.bytes;
+ }
+ int64_t GetFreedLargeObjectBytes() const {
+ return freed_los_.bytes;
+ }
+ uint64_t GetFreedObjects() const {
+ return freed_.objects;
+ }
+ uint64_t GetFreedLargeObjects() const {
+ return freed_los_.objects;
+ }
+ void Reset(GcCause gc_cause, bool clear_soft_references);
+ // Returns the estimated throughput of the iteration in bytes / second.
+ uint64_t GetEstimatedThroughput() const;
+ bool GetClearSoftReferences() const {
+ return clear_soft_references_;
+ }
+ void SetClearSoftReferences(bool clear_soft_references) {
+ clear_soft_references_ = clear_soft_references;
+ }
+ GcCause GetGcCause() const {
+ return gc_cause_;
+ }
+
+ private:
+ void SetDurationNs(uint64_t duration) {
+ duration_ns_ = duration;
+ }
+
+ GcCause gc_cause_;
+ bool clear_soft_references_;
+ uint64_t duration_ns_;
+ TimingLogger timings_;
+ ObjectBytePair freed_;
+ ObjectBytePair freed_los_;
+ std::vector<uint64_t> pause_times_;
+
+ friend class GarbageCollector;
+ DISALLOW_COPY_AND_ASSIGN(Iteration);
+};
+
class GarbageCollector {
public:
class SCOPED_LOCKABLE ScopedPause {
@@ -62,22 +134,7 @@
Heap* GetHeap() const {
return heap_;
}
-
- // Returns how long the mutators were paused in nanoseconds.
- const std::vector<uint64_t>& GetPauseTimes() const {
- return pause_times_;
- }
-
- // Returns how long the GC took to complete in nanoseconds.
- uint64_t GetDurationNs() const {
- return duration_ns_;
- }
-
void RegisterPause(uint64_t nano_length);
-
- TimingLogger& GetTimings() {
- return timings_;
- }
const CumulativeLogger& GetCumulativeTimings() const {
return cumulative_timings_;
}
@@ -87,52 +144,36 @@
// Swap the live and mark bitmaps of spaces that are active for the collector. For partial GC,
// this is the allocation space, for full GC then we swap the zygote bitmaps too.
void SwapBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
- int64_t GetFreedBytes() const {
- return freed_bytes_;
- }
-
- int64_t GetFreedLargeObjectBytes() const {
- return freed_large_object_bytes_;
- }
-
- uint64_t GetFreedObjects() const {
- return freed_objects_;
- }
-
- uint64_t GetFreedLargeObjects() const {
- return freed_large_objects_;
- }
-
uint64_t GetTotalPausedTimeNs() const {
return pause_histogram_.AdjustedSum();
}
-
int64_t GetTotalFreedBytes() const {
return total_freed_bytes_;
}
-
uint64_t GetTotalFreedObjects() const {
return total_freed_objects_;
}
-
const Histogram<uint64_t>& GetPauseHistogram() const {
return pause_histogram_;
}
-
// Reset the cumulative timings and pause histogram.
void ResetMeasurements();
-
// Returns the estimated throughput in bytes / second.
uint64_t GetEstimatedMeanThroughput() const;
-
- // Returns the estimated throughput of the last GC iteration.
- uint64_t GetEstimatedLastIterationThroughput() const;
-
// Returns how many GC iterations have been run.
- size_t GetIterations() const {
+ size_t NumberOfIterations() const {
return GetCumulativeTimings().GetIterations();
}
+ // Returns the current GC iteration and associated info.
+ Iteration* GetCurrentIteration();
+ const Iteration* GetCurrentIteration() const;
+ TimingLogger* GetTimings() {
+ return &GetCurrentIteration()->timings_;
+ }
+ // Record a free of normal objects.
+ void RecordFree(const ObjectBytePair& freed);
+ // Record a free of large objects.
+ void RecordFreeLOS(const ObjectBytePair& freed);
protected:
// Run all of the GC phases.
@@ -141,40 +182,17 @@
// Revoke all the thread-local buffers.
virtual void RevokeAllThreadLocalBuffers() = 0;
- // Record that you have freed some objects or large objects, calls Heap::RecordFree.
- // TODO: These are not thread safe, add a lock if we get parallel sweeping.
- void RecordFree(uint64_t freed_objects, int64_t freed_bytes);
- void RecordFreeLargeObjects(uint64_t freed_objects, int64_t freed_bytes);
-
static constexpr size_t kPauseBucketSize = 500;
static constexpr size_t kPauseBucketCount = 32;
Heap* const heap_;
-
std::string name_;
-
- GcCause gc_cause_;
- bool clear_soft_references_;
-
- uint64_t duration_ns_;
- TimingLogger timings_;
-
// Cumulative statistics.
Histogram<uint64_t> pause_histogram_;
uint64_t total_time_ns_;
uint64_t total_freed_objects_;
int64_t total_freed_bytes_;
-
- // Single GC statitstics, freed bytes are signed since the GC can free negative bytes if it
- // promotes objects to a space which has a larger allocation size.
- int64_t freed_bytes_;
- int64_t freed_large_object_bytes_;
- uint64_t freed_objects_;
- uint64_t freed_large_objects_;
-
CumulativeLogger cumulative_timings_;
-
- std::vector<uint64_t> pause_times_;
};
} // namespace collector
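
Reviewer note: the net effect of this header change is that all per-iteration state (cause, soft-reference policy, timings, freed counts, pause times) now lives in one heap-owned Iteration object reached through GetCurrentIteration(). A small usage sketch using only names introduced in this header; the free function, include path, and values are assumptions.

#include <cstdint>
#include "gc/collector/garbage_collector.h"  // assumed include path inside the runtime

void ExampleCollectorStep(art::gc::collector::GarbageCollector* gc) {  // hypothetical
  gc->GetTimings()->StartSplit("ExampleSweep");       // forwards to the current Iteration's logger
  gc->RecordFree(art::gc::collector::ObjectBytePair(/*num_objects=*/128, /*num_bytes=*/64 * 1024));
  gc->GetTimings()->EndSplit();
  // (freed bytes * 1000) / (duration in ms + 1), i.e. bytes per second.
  uint64_t throughput = gc->GetCurrentIteration()->GetEstimatedThroughput();
  (void)throughput;
}
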
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 595dc8f..ebd1738 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -57,7 +57,7 @@
namespace collector {
void MarkCompact::BindBitmaps() {
- timings_.StartSplit("BindBitmaps");
+ GetTimings()->StartSplit("BindBitmaps");
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
// Mark all of the spaces we never collect as immune.
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
@@ -66,7 +66,7 @@
CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
}
}
- timings_.EndSplit();
+ GetTimings()->EndSplit();
}
MarkCompact::MarkCompact(Heap* heap, const std::string& name_prefix)
@@ -120,7 +120,7 @@
};
void MarkCompact::CalculateObjectForwardingAddresses() {
- timings_.NewSplit(__FUNCTION__);
+ GetTimings()->NewSplit(__FUNCTION__);
// The bump pointer in the space where the next forwarding address will be.
bump_pointer_ = reinterpret_cast<byte*>(space_->Begin());
// Visit all the marked objects in the bitmap.
@@ -131,7 +131,7 @@
}
void MarkCompact::InitializePhase() {
- TimingLogger::ScopedSplit split("InitializePhase", &timings_);
+ TimingLogger::ScopedSplit split("InitializePhase", GetTimings());
mark_stack_ = heap_->GetMarkStack();
DCHECK(mark_stack_ != nullptr);
immune_region_.Reset();
@@ -143,11 +143,11 @@
}
void MarkCompact::ProcessReferences(Thread* self) {
- TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
+ TimingLogger::ScopedSplit split("ProcessReferences", GetTimings());
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
heap_->GetReferenceProcessor()->ProcessReferences(
- false, &timings_, clear_soft_references_, &HeapReferenceMarkedCallback, &MarkObjectCallback,
- &ProcessMarkStackCallback, this);
+ false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
+ &HeapReferenceMarkedCallback, &MarkObjectCallback, &ProcessMarkStackCallback, this);
}
class BitmapSetSlowPathVisitor {
@@ -195,18 +195,18 @@
objects_with_lockword_.reset(accounting::ContinuousSpaceBitmap::Create(
"objects with lock words", space_->Begin(), space_->Size()));
CHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
- TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
+ TimingLogger::ScopedSplit split("MarkingPhase", GetTimings());
// Assume the cleared space is already empty.
BindBitmaps();
// Process dirty cards and add dirty cards to mod-union tables.
- heap_->ProcessCards(timings_, false);
+ heap_->ProcessCards(GetTimings(), false);
// Clear the whole card table since we can not Get any additional dirty cards during the
// paused GC. This saves memory but only works for pause the world collectors.
- timings_.NewSplit("ClearCardTable");
+ GetTimings()->NewSplit("ClearCardTable");
heap_->GetCardTable()->ClearCardTable();
// Need to do this before the checkpoint since we don't want any threads to add references to
// the live stack during the recursive mark.
- timings_.NewSplit("SwapStacks");
+ GetTimings()->NewSplit("SwapStacks");
if (kUseThreadLocalAllocationStack) {
heap_->RevokeAllThreadLocalAllocationStacks(self);
}
@@ -227,11 +227,11 @@
// Revoke buffers before measuring how many objects were moved since the TLABs need to be revoked
// before they are properly counted.
RevokeAllThreadLocalBuffers();
- timings_.StartSplit("PreSweepingGcVerification");
+ GetTimings()->StartSplit("PreSweepingGcVerification");
// Disabled due to an issue where we have objects in the bump pointer space which reference dead
// objects.
// heap_->PreSweepingGcVerification(this);
- timings_.EndSplit();
+ GetTimings()->EndSplit();
}
void MarkCompact::UpdateAndMarkModUnion() {
@@ -243,8 +243,7 @@
// TODO: Improve naming.
TimingLogger::ScopedSplit split(
space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
- "UpdateAndMarkImageModUnionTable",
- &timings_);
+ "UpdateAndMarkImageModUnionTable", GetTimings());
table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
}
}
@@ -252,27 +251,28 @@
}
void MarkCompact::MarkReachableObjects() {
- timings_.StartSplit("MarkStackAsLive");
+ GetTimings()->StartSplit("MarkStackAsLive");
accounting::ObjectStack* live_stack = heap_->GetLiveStack();
heap_->MarkAllocStackAsLive(live_stack);
live_stack->Reset();
// Recursively process the mark stack.
ProcessMarkStack();
+ GetTimings()->EndSplit();
}
void MarkCompact::ReclaimPhase() {
- TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
+ TimingLogger::ScopedSplit split("ReclaimPhase", GetTimings());
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
// Reclaim unmarked objects.
Sweep(false);
// Swap the live and mark bitmaps for each space which we modified space. This is an
// optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
// bitmaps.
- timings_.StartSplit("SwapBitmapsAndUnBindBitmaps");
+ GetTimings()->StartSplit("SwapBitmapsAndUnBindBitmaps");
SwapBitmaps();
GetHeap()->UnBindBitmaps(); // Unbind the live and mark bitmaps.
Compact();
- timings_.EndSplit();
+ GetTimings()->EndSplit();
}
void MarkCompact::ResizeMarkStack(size_t new_size) {
@@ -340,7 +340,7 @@
};
void MarkCompact::UpdateReferences() {
- timings_.NewSplit(__FUNCTION__);
+ GetTimings()->NewSplit(__FUNCTION__);
Runtime* runtime = Runtime::Current();
// Update roots.
runtime->VisitRoots(UpdateRootCallback, this);
@@ -353,7 +353,7 @@
TimingLogger::ScopedSplit split(
space->IsZygoteSpace() ? "UpdateZygoteModUnionTableReferences" :
"UpdateImageModUnionTableReferences",
- &timings_);
+ GetTimings());
table->UpdateAndMarkReferences(&UpdateHeapReferenceCallback, this);
} else {
// No mod union table, so we need to scan the space using bitmap visit.
@@ -381,7 +381,7 @@
}
void MarkCompact::Compact() {
- timings_.NewSplit(__FUNCTION__);
+ GetTimings()->NewSplit(__FUNCTION__);
CalculateObjectForwardingAddresses();
UpdateReferences();
MoveObjects();
@@ -389,9 +389,9 @@
int64_t objects_freed = space_->GetObjectsAllocated() - live_objects_in_space_;
int64_t bytes_freed = reinterpret_cast<int64_t>(space_->End()) -
reinterpret_cast<int64_t>(bump_pointer_);
- timings_.NewSplit("RecordFree");
+ GetTimings()->NewSplit("RecordFree");
space_->RecordFree(objects_freed, bytes_freed);
- RecordFree(objects_freed, bytes_freed);
+ RecordFree(ObjectBytePair(objects_freed, bytes_freed));
space_->SetEnd(bump_pointer_);
// Need to zero out the memory we freed. TODO: Use madvise for pages.
memset(bump_pointer_, 0, bytes_freed);
@@ -399,7 +399,7 @@
// Marks all objects in the root set.
void MarkCompact::MarkRoots() {
- timings_.NewSplit("MarkRoots");
+ GetTimings()->NewSplit("MarkRoots");
Runtime::Current()->VisitRoots(MarkRootCallback, this);
}
@@ -483,9 +483,9 @@
}
void MarkCompact::SweepSystemWeaks() {
- timings_.StartSplit("SweepSystemWeaks");
+ GetTimings()->StartSplit("SweepSystemWeaks");
Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
- timings_.EndSplit();
+ GetTimings()->EndSplit();
}
bool MarkCompact::ShouldSweepSpace(space::ContinuousSpace* space) const {
@@ -523,7 +523,7 @@
}
void MarkCompact::MoveObjects() {
- timings_.NewSplit(__FUNCTION__);
+ GetTimings()->NewSplit(__FUNCTION__);
// Move the objects in the before forwarding bitmap.
MoveObjectVisitor visitor(this);
objects_before_forwarding_->VisitMarkedRange(reinterpret_cast<uintptr_t>(space_->Begin()),
@@ -534,7 +534,7 @@
void MarkCompact::Sweep(bool swap_bitmaps) {
DCHECK(mark_stack_->IsEmpty());
- TimingLogger::ScopedSplit split("Sweep", &timings_);
+ TimingLogger::ScopedSplit split("Sweep", GetTimings());
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
if (space->IsContinuousMemMapAllocSpace()) {
space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
@@ -542,22 +542,16 @@
continue;
}
TimingLogger::ScopedSplit split(
- alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", &timings_);
- size_t freed_objects = 0;
- size_t freed_bytes = 0;
- alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
- RecordFree(freed_objects, freed_bytes);
+ alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
+ RecordFree(alloc_space->Sweep(swap_bitmaps));
}
}
SweepLargeObjects(swap_bitmaps);
}
void MarkCompact::SweepLargeObjects(bool swap_bitmaps) {
- TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
- size_t freed_objects = 0;
- size_t freed_bytes = 0;
- heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
- RecordFreeLargeObjects(freed_objects, freed_bytes);
+ TimingLogger::ScopedSplit split("SweepLargeObjects", GetTimings());
+ RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
}
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
@@ -596,13 +590,13 @@
// Scan anything that's on the mark stack.
void MarkCompact::ProcessMarkStack() {
- timings_.StartSplit("ProcessMarkStack");
+ GetTimings()->StartSplit("ProcessMarkStack");
while (!mark_stack_->IsEmpty()) {
Object* obj = mark_stack_->PopBack();
DCHECK(obj != nullptr);
ScanObject(obj);
}
- timings_.EndSplit();
+ GetTimings()->EndSplit();
}
void MarkCompact::SetSpace(space::BumpPointerSpace* space) {
@@ -611,7 +605,7 @@
}
void MarkCompact::FinishPhase() {
- TimingLogger::ScopedSplit split("FinishPhase", &timings_);
+ TimingLogger::ScopedSplit split("FinishPhase", GetTimings());
space_ = nullptr;
CHECK(mark_stack_->IsEmpty());
mark_stack_->Reset();
@@ -624,9 +618,9 @@
}
void MarkCompact::RevokeAllThreadLocalBuffers() {
- timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
+ GetTimings()->StartSplit("(Paused)RevokeAllThreadLocalBuffers");
GetHeap()->RevokeAllThreadLocalBuffers();
- timings_.EndSplit();
+ GetTimings()->EndSplit();
}
} // namespace collector
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index fbb349e..d08796b 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -81,7 +81,7 @@
static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true;
void MarkSweep::BindBitmaps() {
- timings_.StartSplit("BindBitmaps");
+ GetTimings()->StartSplit("BindBitmaps");
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
// Mark all of the spaces we never collect as immune.
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
@@ -89,7 +89,7 @@
CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
}
}
- timings_.EndSplit();
+ GetTimings()->EndSplit();
}
MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
@@ -110,7 +110,7 @@
}
void MarkSweep::InitializePhase() {
- TimingLogger::ScopedSplit split("InitializePhase", &timings_);
+ TimingLogger::ScopedSplit split("InitializePhase", GetTimings());
mark_stack_ = heap_->GetMarkStack();
DCHECK(mark_stack_ != nullptr);
immune_region_.Reset();
@@ -132,9 +132,9 @@
ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
mark_bitmap_ = heap_->GetMarkBitmap();
}
- if (!clear_soft_references_) {
+ if (!GetCurrentIteration()->GetClearSoftReferences()) {
// Always clear soft references if a non-sticky collection.
- clear_soft_references_ = GetGcType() != collector::kGcTypeSticky;
+ GetCurrentIteration()->SetClearSoftReferences(GetGcType() != collector::kGcTypeSticky);
}
}
@@ -170,15 +170,15 @@
}
void MarkSweep::ProcessReferences(Thread* self) {
- TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
+ TimingLogger::ScopedSplit split("ProcessReferences", GetTimings());
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
GetHeap()->GetReferenceProcessor()->ProcessReferences(
- true, &timings_, clear_soft_references_, &HeapReferenceMarkedCallback, &MarkObjectCallback,
- &ProcessMarkStackCallback, this);
+ true, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
+ &HeapReferenceMarkedCallback, &MarkObjectCallback, &ProcessMarkStackCallback, this);
}
void MarkSweep::PausePhase() {
- TimingLogger::ScopedSplit split("(Paused)PausePhase", &timings_);
+ TimingLogger::ScopedSplit split("(Paused)PausePhase", GetTimings());
Thread* self = Thread::Current();
Locks::mutator_lock_->AssertExclusiveHeld(self);
if (IsConcurrent()) {
@@ -190,7 +190,7 @@
RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
}
{
- TimingLogger::ScopedSplit split("SwapStacks", &timings_);
+ TimingLogger::ScopedSplit split("SwapStacks", GetTimings());
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
heap_->SwapStacks(self);
live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
@@ -198,9 +198,9 @@
// stacks and don't want anybody to allocate into the live stack.
RevokeAllThreadLocalAllocationStacks(self);
}
- timings_.StartSplit("PreSweepingGcVerification");
+ GetTimings()->StartSplit("PreSweepingGcVerification");
heap_->PreSweepingGcVerification(this);
- timings_.EndSplit();
+ GetTimings()->EndSplit();
// Disallow new system weaks to prevent a race which occurs when someone adds a new system
// weak before we sweep them. Since this new system weak may not be marked, the GC may
// incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
@@ -217,7 +217,7 @@
Thread* self = Thread::Current();
CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
// Process dirty cards and add dirty cards to mod union tables, also ages cards.
- heap_->ProcessCards(timings_, false);
+ heap_->ProcessCards(GetTimings(), false);
// The checkpoint root marking is required to avoid a race condition which occurs if the
// following happens during a reference write:
// 1. mutator dirties the card (write barrier)
@@ -243,22 +243,19 @@
void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
if (kUseThreadLocalAllocationStack) {
- timings_.NewSplit("RevokeAllThreadLocalAllocationStacks");
+ GetTimings()->NewSplit("RevokeAllThreadLocalAllocationStacks");
Locks::mutator_lock_->AssertExclusiveHeld(self);
heap_->RevokeAllThreadLocalAllocationStacks(self);
}
}
void MarkSweep::MarkingPhase() {
- TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
+ TimingLogger::ScopedSplit split("MarkingPhase", GetTimings());
Thread* self = Thread::Current();
-
BindBitmaps();
FindDefaultSpaceBitmap();
-
// Process dirty cards and add dirty cards to mod union tables.
- heap_->ProcessCards(timings_, false);
-
+ heap_->ProcessCards(GetTimings(), false);
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
MarkRoots(self);
MarkReachableObjects();
@@ -271,7 +268,7 @@
if (immune_region_.ContainsSpace(space)) {
const char* name = space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
"UpdateAndMarkImageModUnionTable";
- TimingLogger::ScopedSplit split(name, &timings_);
+ TimingLogger::ScopedSplit split(name, GetTimings());
accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
CHECK(mod_union_table != nullptr);
mod_union_table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
@@ -286,7 +283,7 @@
}
void MarkSweep::ReclaimPhase() {
- TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
+ TimingLogger::ScopedSplit split("ReclaimPhase", GetTimings());
Thread* self = Thread::Current();
// Process the references concurrently.
ProcessReferences(self);
@@ -301,18 +298,18 @@
// Swap the live and mark bitmaps for each space which we modified space. This is an
// optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
// bitmaps.
- timings_.StartSplit("SwapBitmaps");
+ GetTimings()->StartSplit("SwapBitmaps");
SwapBitmaps();
- timings_.EndSplit();
+ GetTimings()->EndSplit();
// Unbind the live and mark bitmaps.
- TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
+ TimingLogger::ScopedSplit split("UnBindBitmaps", GetTimings());
GetHeap()->UnBindBitmaps();
}
}
void MarkSweep::FindDefaultSpaceBitmap() {
- TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
+ TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", GetTimings());
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
accounting::ContinuousSpaceBitmap* bitmap = space->GetMarkBitmap();
// We want to have the main space instead of non moving if possible.
@@ -511,9 +508,9 @@
void MarkSweep::MarkRoots(Thread* self) {
if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
// If we exclusively hold the mutator lock, all threads must be suspended.
- timings_.StartSplit("MarkRoots");
+ GetTimings()->StartSplit("MarkRoots");
Runtime::Current()->VisitRoots(MarkRootCallback, this);
- timings_.EndSplit();
+ GetTimings()->EndSplit();
RevokeAllThreadLocalAllocationStacks(self);
} else {
MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint);
@@ -525,16 +522,16 @@
}
void MarkSweep::MarkNonThreadRoots() {
- timings_.StartSplit("MarkNonThreadRoots");
+ GetTimings()->StartSplit("MarkNonThreadRoots");
Runtime::Current()->VisitNonThreadRoots(MarkRootCallback, this);
- timings_.EndSplit();
+ GetTimings()->EndSplit();
}
void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
- timings_.StartSplit("MarkConcurrentRoots");
+ GetTimings()->StartSplit("MarkConcurrentRoots");
// Visit all runtime roots and clear dirty flags.
Runtime::Current()->VisitConcurrentRoots(MarkRootCallback, this, flags);
- timings_.EndSplit();
+ GetTimings()->EndSplit();
}
class ScanObjectVisitor {
@@ -755,7 +752,7 @@
Thread* self = Thread::Current();
// Can't have a different split for each space since multiple spaces can have their cards being
// scanned at the same time.
- timings_.StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects");
+ GetTimings()->StartSplit(paused ? "(Paused)ScanGrayObjects" : "ScanGrayObjects");
// Try to take some of the mark stack since we can pass this off to the worker tasks.
Object** mark_stack_begin = mark_stack_->Begin();
Object** mark_stack_end = mark_stack_->End();
@@ -808,28 +805,28 @@
thread_pool->StartWorkers(self);
thread_pool->Wait(self, true, true);
thread_pool->StopWorkers(self);
- timings_.EndSplit();
+ GetTimings()->EndSplit();
} else {
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
if (space->GetMarkBitmap() != nullptr) {
// Image spaces are handled properly since live == marked for them.
switch (space->GetGcRetentionPolicy()) {
case space::kGcRetentionPolicyNeverCollect:
- timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
+ GetTimings()->StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
"ScanGrayImageSpaceObjects");
break;
case space::kGcRetentionPolicyFullCollect:
- timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
+ GetTimings()->StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
"ScanGrayZygoteSpaceObjects");
break;
case space::kGcRetentionPolicyAlwaysCollect:
- timings_.StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" :
+ GetTimings()->StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" :
"ScanGrayAllocSpaceObjects");
break;
}
ScanObjectVisitor visitor(this);
card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, minimum_age);
- timings_.EndSplit();
+ GetTimings()->EndSplit();
}
}
}
@@ -866,7 +863,7 @@
// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
- TimingLogger::ScopedSplit split("RecursiveMark", &timings_);
+ TimingLogger::ScopedSplit split("RecursiveMark", GetTimings());
// RecursiveMark will build the lists of known instances of the Reference classes. See
// DelayReferenceReferent for details.
if (kUseRecursiveMark) {
@@ -934,24 +931,24 @@
void MarkSweep::ReMarkRoots() {
Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
- timings_.StartSplit("(Paused)ReMarkRoots");
+ GetTimings()->StartSplit("(Paused)ReMarkRoots");
Runtime::Current()->VisitRoots(
MarkRootCallback, this, static_cast<VisitRootFlags>(kVisitRootFlagNewRoots |
kVisitRootFlagStopLoggingNewRoots |
kVisitRootFlagClearRootLog));
- timings_.EndSplit();
+ GetTimings()->EndSplit();
if (kVerifyRootsMarked) {
- timings_.StartSplit("(Paused)VerifyRoots");
+ GetTimings()->StartSplit("(Paused)VerifyRoots");
Runtime::Current()->VisitRoots(VerifyRootMarked, this);
- timings_.EndSplit();
+ GetTimings()->EndSplit();
}
}
void MarkSweep::SweepSystemWeaks(Thread* self) {
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- timings_.StartSplit("SweepSystemWeaks");
+ GetTimings()->StartSplit("SweepSystemWeaks");
Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
- timings_.EndSplit();
+ GetTimings()->EndSplit();
}
mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
@@ -1009,7 +1006,7 @@
void MarkSweep::MarkRootsCheckpoint(Thread* self,
bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
CheckpointMarkThreadRoots check_point(this, revoke_ros_alloc_thread_local_buffers_at_checkpoint);
- timings_.StartSplit("MarkRootsCheckpoint");
+ GetTimings()->StartSplit("MarkRootsCheckpoint");
ThreadList* thread_list = Runtime::Current()->GetThreadList();
// Request the check point is run on all threads returning a count of the threads that must
// run through the barrier including self.
@@ -1024,19 +1021,17 @@
}
Locks::mutator_lock_->SharedLock(self);
Locks::heap_bitmap_lock_->ExclusiveLock(self);
- timings_.EndSplit();
+ GetTimings()->EndSplit();
}
void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
- timings_.StartSplit("SweepArray");
+ GetTimings()->StartSplit("SweepArray");
Thread* self = Thread::Current();
mirror::Object** chunk_free_buffer = reinterpret_cast<mirror::Object**>(
sweep_array_free_buffer_mem_map_->BaseBegin());
size_t chunk_free_pos = 0;
- size_t freed_bytes = 0;
- size_t freed_large_object_bytes = 0;
- size_t freed_objects = 0;
- size_t freed_large_objects = 0;
+ ObjectBytePair freed;
+ ObjectBytePair freed_los;
// How many objects are left in the array, modified after each space is swept.
Object** objects = allocations->Begin();
size_t count = allocations->Size();
@@ -1077,10 +1072,10 @@
// if needed.
if (!mark_bitmap->Test(obj)) {
if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
- timings_.StartSplit("FreeList");
- freed_objects += chunk_free_pos;
- freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
- timings_.EndSplit();
+ GetTimings()->StartSplit("FreeList");
+ freed.objects += chunk_free_pos;
+ freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
+ GetTimings()->EndSplit();
chunk_free_pos = 0;
}
chunk_free_buffer[chunk_free_pos++] = obj;
@@ -1090,10 +1085,10 @@
}
}
if (chunk_free_pos > 0) {
- timings_.StartSplit("FreeList");
- freed_objects += chunk_free_pos;
- freed_bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
- timings_.EndSplit();
+ GetTimings()->StartSplit("FreeList");
+ freed.objects += chunk_free_pos;
+ freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
+ GetTimings()->EndSplit();
chunk_free_pos = 0;
}
// All of the references which space contained are no longer in the allocation stack, update
@@ -1114,23 +1109,16 @@
continue;
}
if (!large_mark_objects->Test(obj)) {
- ++freed_large_objects;
- freed_large_object_bytes += large_object_space->Free(self, obj);
+ ++freed_los.objects;
+ freed_los.bytes += large_object_space->Free(self, obj);
}
}
- timings_.EndSplit();
-
- timings_.StartSplit("RecordFree");
- VLOG(heap) << "Freed " << freed_objects << "/" << count << " objects with size "
- << PrettySize(freed_bytes);
- RecordFree(freed_objects, freed_bytes);
- RecordFreeLargeObjects(freed_large_objects, freed_large_object_bytes);
- timings_.EndSplit();
-
- timings_.StartSplit("ResetStack");
+ GetTimings()->NewSplit("RecordFree");
+ RecordFree(freed);
+ RecordFreeLOS(freed_los);
+ GetTimings()->NewSplit("ResetStack");
allocations->Reset();
- timings_.EndSplit();
-
+ GetTimings()->EndSplit();
sweep_array_free_buffer_mem_map_->MadviseDontNeedAndZero();
}
@@ -1139,33 +1127,27 @@
CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
// Mark everything allocated since the last as GC live so that we can sweep concurrently,
// knowing that new allocations won't be marked as live.
- timings_.StartSplit("MarkStackAsLive");
+ GetTimings()->StartSplit("MarkStackAsLive");
accounting::ObjectStack* live_stack = heap_->GetLiveStack();
heap_->MarkAllocStackAsLive(live_stack);
live_stack->Reset();
- timings_.EndSplit();
+ GetTimings()->EndSplit();
DCHECK(mark_stack_->IsEmpty());
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
if (space->IsContinuousMemMapAllocSpace()) {
space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
TimingLogger::ScopedSplit split(
- alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", &timings_);
- size_t freed_objects = 0;
- size_t freed_bytes = 0;
- alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
- RecordFree(freed_objects, freed_bytes);
+ alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", GetTimings());
+ RecordFree(alloc_space->Sweep(swap_bitmaps));
}
}
SweepLargeObjects(swap_bitmaps);
}
void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
- TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
- size_t freed_objects = 0;
- size_t freed_bytes = 0;
- heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
- RecordFreeLargeObjects(freed_objects, freed_bytes);
+ TimingLogger::ScopedSplit split("SweepLargeObjects", GetTimings());
+ RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
}
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
@@ -1233,7 +1215,7 @@
// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack(bool paused) {
- timings_.StartSplit(paused ? "(Paused)ProcessMarkStack" : "ProcessMarkStack");
+ GetTimings()->StartSplit(paused ? "(Paused)ProcessMarkStack" : "ProcessMarkStack");
size_t thread_count = GetThreadCount(paused);
if (kParallelProcessMarkStack && thread_count > 1 &&
mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
@@ -1266,7 +1248,7 @@
ScanObject(obj);
}
}
- timings_.EndSplit();
+ GetTimings()->EndSplit();
}
inline bool MarkSweep::IsMarked(const Object* object) const {
@@ -1280,7 +1262,7 @@
}
void MarkSweep::FinishPhase() {
- TimingLogger::ScopedSplit split("FinishPhase", &timings_);
+ TimingLogger::ScopedSplit split("FinishPhase", GetTimings());
if (kCountScannedTypes) {
VLOG(gc) << "MarkSweep scanned classes=" << class_count_.LoadRelaxed()
<< " arrays=" << array_count_.LoadRelaxed() << " other=" << other_count_.LoadRelaxed();
@@ -1317,9 +1299,9 @@
// not be in use.
GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
} else {
- timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
+ GetTimings()->StartSplit("(Paused)RevokeAllThreadLocalBuffers");
GetHeap()->RevokeAllThreadLocalBuffers();
- timings_.EndSplit();
+ GetTimings()->EndSplit();
}
}
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 54e77a7..8a3ac9d 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -59,7 +59,7 @@
static constexpr size_t kLargeObjectBytesAllocatedThreshold = 16 * MB;
void SemiSpace::BindBitmaps() {
- timings_.StartSplit("BindBitmaps");
+ GetTimings()->StartSplit("BindBitmaps");
WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
// Mark all of the spaces we never collect as immune.
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
@@ -83,7 +83,7 @@
// We won't collect the large object space if a bump pointer space only collection.
is_large_object_space_immune_ = true;
}
- timings_.EndSplit();
+ GetTimings()->EndSplit();
}
SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
@@ -131,7 +131,7 @@
}
void SemiSpace::InitializePhase() {
- TimingLogger::ScopedSplit split("InitializePhase", &timings_);
+ TimingLogger::ScopedSplit split("InitializePhase", GetTimings());
mark_stack_ = heap_->GetMarkStack();
DCHECK(mark_stack_ != nullptr);
immune_region_.Reset();
@@ -151,11 +151,11 @@
}
void SemiSpace::ProcessReferences(Thread* self) {
- TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
+ TimingLogger::ScopedSplit split("ProcessReferences", GetTimings());
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
GetHeap()->GetReferenceProcessor()->ProcessReferences(
- false, &timings_, clear_soft_references_, &HeapReferenceMarkedCallback,
- &MarkObjectCallback, &ProcessMarkStackCallback, this);
+ false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
+ &HeapReferenceMarkedCallback, &MarkObjectCallback, &ProcessMarkStackCallback, this);
}
void SemiSpace::MarkingPhase() {
@@ -176,8 +176,9 @@
// to prevent fragmentation.
RevokeAllThreadLocalBuffers();
if (generational_) {
- if (gc_cause_ == kGcCauseExplicit || gc_cause_ == kGcCauseForNativeAlloc ||
- clear_soft_references_) {
+ if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
+ GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
+ GetCurrentIteration()->GetClearSoftReferences()) {
// If an explicit, native allocation-triggered, or last attempt
// collection, collect the whole heap.
whole_heap_collection_ = true;
@@ -191,21 +192,15 @@
}
}
- if (!clear_soft_references_) {
- if (!generational_) {
- // If non-generational, always clear soft references.
- clear_soft_references_ = true;
- } else {
- // If generational, clear soft references if a whole heap collection.
- if (whole_heap_collection_) {
- clear_soft_references_ = true;
- }
- }
+ if (!generational_ || whole_heap_collection_) {
+ // If non-generational, always clear soft references.
+ // If generational, clear soft references if a whole heap collection.
+ GetCurrentIteration()->SetClearSoftReferences(true);
}
Locks::mutator_lock_->AssertExclusiveHeld(self_);
- TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
+ TimingLogger::ScopedSplit split("MarkingPhase", GetTimings());
if (generational_) {
// If last_gc_to_space_end_ is out of the bounds of the from-space
// (the to-space from last GC), then point it to the beginning of
@@ -220,14 +215,14 @@
// Assume the cleared space is already empty.
BindBitmaps();
// Process dirty cards and add dirty cards to mod-union tables.
- heap_->ProcessCards(timings_, kUseRememberedSet && generational_);
+ heap_->ProcessCards(GetTimings(), kUseRememberedSet && generational_);
// Clear the whole card table since we can not Get any additional dirty cards during the
// paused GC. This saves memory but only works for pause the world collectors.
- timings_.NewSplit("ClearCardTable");
+ GetTimings()->NewSplit("ClearCardTable");
heap_->GetCardTable()->ClearCardTable();
// Need to do this before the checkpoint since we don't want any threads to add references to
// the live stack during the recursive mark.
- timings_.NewSplit("SwapStacks");
+ GetTimings()->NewSplit("SwapStacks");
if (kUseThreadLocalAllocationStack) {
heap_->RevokeAllThreadLocalAllocationStacks(self_);
}
@@ -245,7 +240,7 @@
ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_);
SweepSystemWeaks();
}
- timings_.NewSplit("RecordFree");
+ GetTimings()->NewSplit("RecordFree");
// Revoke buffers before measuring how many objects were moved since the TLABs need to be revoked
// before they are properly counted.
RevokeAllThreadLocalBuffers();
@@ -257,14 +252,14 @@
CHECK_LE(to_objects, from_objects);
// Note: Freed bytes can be negative if we copy form a compacted space to a free-list backed
// space.
- RecordFree(from_objects - to_objects, from_bytes - to_bytes);
+ RecordFree(ObjectBytePair(from_objects - to_objects, from_bytes - to_bytes));
// Clear and protect the from space.
from_space_->Clear();
VLOG(heap) << "Protecting from_space_: " << *from_space_;
from_space_->GetMemMap()->Protect(kProtectFromSpace ? PROT_NONE : PROT_READ);
- timings_.StartSplit("PreSweepingGcVerification");
+ GetTimings()->StartSplit("PreSweepingGcVerification");
heap_->PreSweepingGcVerification(this);
- timings_.EndSplit();
+ GetTimings()->EndSplit();
if (swap_semi_spaces_) {
heap_->SwapSemiSpaces();
}
@@ -280,7 +275,7 @@
TimingLogger::ScopedSplit split(
space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
"UpdateAndMarkImageModUnionTable",
- &timings_);
+ GetTimings());
table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
} else if (heap_->FindRememberedSetFromSpace(space) != nullptr) {
DCHECK(kUseRememberedSet);
@@ -359,12 +354,12 @@
};
void SemiSpace::MarkReachableObjects() {
- timings_.StartSplit("MarkStackAsLive");
+ GetTimings()->StartSplit("MarkStackAsLive");
accounting::ObjectStack* live_stack = heap_->GetLiveStack();
heap_->MarkAllocStackAsLive(live_stack);
live_stack->Reset();
- timings_.NewSplit("UpdateAndMarkRememberedSets");
+ GetTimings()->NewSplit("UpdateAndMarkRememberedSets");
for (auto& space : heap_->GetContinuousSpaces()) {
// If the space is immune and has no mod union table (the
// non-moving space when the bump pointer space only collection is
@@ -403,7 +398,7 @@
}
if (is_large_object_space_immune_) {
- timings_.NewSplit("VisitLargeObjects");
+ GetTimings()->NewSplit("VisitLargeObjects");
DCHECK(generational_ && !whole_heap_collection_);
// Delay copying the live set to the marked set until here from
// BindBitmaps() as the large objects on the allocation stack may
@@ -421,13 +416,13 @@
reinterpret_cast<uintptr_t>(large_object_space->End()),
visitor);
}
- timings_.EndSplit();
+ GetTimings()->EndSplit();
// Recursively process the mark stack.
ProcessMarkStack();
}
void SemiSpace::ReclaimPhase() {
- TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
+ TimingLogger::ScopedSplit split("ReclaimPhase", GetTimings());
{
WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
// Reclaim unmarked objects.
@@ -435,11 +430,11 @@
// Swap the live and mark bitmaps for each space which we modified space. This is an
// optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
// bitmaps.
- timings_.StartSplit("SwapBitmaps");
+ GetTimings()->StartSplit("SwapBitmaps");
SwapBitmaps();
- timings_.EndSplit();
+ GetTimings()->EndSplit();
// Unbind the live and mark bitmaps.
- TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
+ TimingLogger::ScopedSplit split("UnBindBitmaps", GetTimings());
GetHeap()->UnBindBitmaps();
}
if (saved_bytes_ > 0) {
@@ -634,7 +629,7 @@
// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
- timings_.NewSplit("MarkRoots");
+ GetTimings()->NewSplit("MarkRoots");
// TODO: Visit up image roots as well?
Runtime::Current()->VisitRoots(MarkRootCallback, this);
}
@@ -660,9 +655,9 @@
}
void SemiSpace::SweepSystemWeaks() {
- timings_.StartSplit("SweepSystemWeaks");
+ GetTimings()->StartSplit("SweepSystemWeaks");
Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
- timings_.EndSplit();
+ GetTimings()->EndSplit();
}
bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
@@ -671,7 +666,7 @@
void SemiSpace::Sweep(bool swap_bitmaps) {
DCHECK(mark_stack_->IsEmpty());
- TimingLogger::ScopedSplit split("Sweep", &timings_);
+ TimingLogger::ScopedSplit split("Sweep", GetTimings());
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
if (space->IsContinuousMemMapAllocSpace()) {
space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
@@ -679,11 +674,8 @@
continue;
}
TimingLogger::ScopedSplit split(
- alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", &timings_);
- size_t freed_objects = 0;
- size_t freed_bytes = 0;
- alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
- RecordFree(freed_objects, freed_bytes);
+ alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
+ RecordFree(alloc_space->Sweep(swap_bitmaps));
}
}
if (!is_large_object_space_immune_) {
@@ -693,11 +685,8 @@
void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
DCHECK(!is_large_object_space_immune_);
- TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
- size_t freed_objects = 0;
- size_t freed_bytes = 0;
- heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
- RecordFreeLargeObjects(freed_objects, freed_bytes);
+ TimingLogger::ScopedSplit split("SweepLargeObjects", GetTimings());
+ RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
}
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
@@ -750,7 +739,7 @@
DCHECK(mark_bitmap != nullptr);
DCHECK_EQ(live_bitmap, mark_bitmap);
}
- timings_.StartSplit("ProcessMarkStack");
+ GetTimings()->StartSplit("ProcessMarkStack");
while (!mark_stack_->IsEmpty()) {
Object* obj = mark_stack_->PopBack();
if (generational_ && !whole_heap_collection_ && promo_dest_space->HasAddress(obj)) {
@@ -761,7 +750,7 @@
}
ScanObject(obj);
}
- timings_.EndSplit();
+ GetTimings()->EndSplit();
}
inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
@@ -792,7 +781,7 @@
}
void SemiSpace::FinishPhase() {
- TimingLogger::ScopedSplit split("FinishPhase", &timings_);
+ TimingLogger::ScopedSplit split("FinishPhase", GetTimings());
// Null the "to" and "from" spaces since compacting from one to the other isn't valid until
// further action is done by the heap.
to_space_ = nullptr;
@@ -833,9 +822,9 @@
}
void SemiSpace::RevokeAllThreadLocalBuffers() {
- timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
+ GetTimings()->StartSplit("(Paused)RevokeAllThreadLocalBuffers");
GetHeap()->RevokeAllThreadLocalBuffers();
- timings_.EndSplit();
+ GetTimings()->EndSplit();
}
} // namespace collector
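The MarkingPhase hunk above folds the nested soft-reference logic into a single condition. A tiny equivalent sketch of the policy it encodes (true means the cycle forces clearing, on top of whatever the caller already requested; a previously requested clear is never undone):

// Non-generational collections always clear soft references; generational
// collections clear them only when doing a whole-heap collection.
static bool ForcesClearSoftReferences(bool generational, bool whole_heap_collection) {
  return !generational || whole_heap_collection;
}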
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 1c94d6f..6c63e5f 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1643,8 +1643,8 @@
if (temp_space_ != nullptr) {
CHECK(temp_space_->IsEmpty());
}
- total_objects_freed_ever_ += semi_space_collector_->GetFreedObjects();
- total_bytes_freed_ever_ += semi_space_collector_->GetFreedBytes();
+ total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
+ total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
// Update the end and write out image.
non_moving_space_->SetEnd(target_space.End());
non_moving_space_->SetLimit(target_space.Limit());
@@ -1838,15 +1838,15 @@
<< "Could not find garbage collector with collector_type="
<< static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
- total_objects_freed_ever_ += collector->GetFreedObjects();
- total_bytes_freed_ever_ += collector->GetFreedBytes();
+ total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
+ total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
RequestHeapTrim();
// Enqueue cleared references.
reference_processor_.EnqueueClearedReferences(self);
// Grow the heap so that we know when to perform the next GC.
GrowForUtilization(collector);
- const size_t duration = collector->GetDurationNs();
- const std::vector<uint64_t>& pause_times = collector->GetPauseTimes();
+ const size_t duration = GetCurrentGcIteration()->GetDurationNs();
+ const std::vector<uint64_t>& pause_times = GetCurrentGcIteration()->GetPauseTimes();
// Print the GC if it is an explicit GC (e.g. Runtime.gc()) or a slow GC
// (mutator time blocked >= long_pause_log_threshold_).
bool log_gc = gc_cause == kGcCauseExplicit;
@@ -1868,14 +1868,14 @@
<< ((i != pause_times.size() - 1) ? "," : "");
}
LOG(INFO) << gc_cause << " " << collector->GetName()
- << " GC freed " << collector->GetFreedObjects() << "("
- << PrettySize(collector->GetFreedBytes()) << ") AllocSpace objects, "
- << collector->GetFreedLargeObjects() << "("
- << PrettySize(collector->GetFreedLargeObjectBytes()) << ") LOS objects, "
+ << " GC freed " << current_gc_iteration_.GetFreedObjects() << "("
+ << PrettySize(current_gc_iteration_.GetFreedBytes()) << ") AllocSpace objects, "
+ << current_gc_iteration_.GetFreedLargeObjects() << "("
+ << PrettySize(current_gc_iteration_.GetFreedLargeObjectBytes()) << ") LOS objects, "
<< percent_free << "% free, " << PrettySize(current_heap_size) << "/"
<< PrettySize(total_memory) << ", " << "paused " << pause_string.str()
<< " total " << PrettyDuration((duration / 1000) * 1000);
- VLOG(heap) << ConstDumpable<TimingLogger>(collector->GetTimings());
+ VLOG(heap) << ConstDumpable<TimingLogger>(*current_gc_iteration_.GetTimings());
}
FinishGC(self, gc_type);
// Inform DDMS that a GC completed.
@@ -2313,7 +2313,7 @@
return it->second;
}
-void Heap::ProcessCards(TimingLogger& timings, bool use_rem_sets) {
+void Heap::ProcessCards(TimingLogger* timings, bool use_rem_sets) {
// Clear cards and keep track of cards cleared in the mod-union table.
for (const auto& space : continuous_spaces_) {
accounting::ModUnionTable* table = FindModUnionTableFromSpace(space);
@@ -2321,15 +2321,15 @@
if (table != nullptr) {
const char* name = space->IsZygoteSpace() ? "ZygoteModUnionClearCards" :
"ImageModUnionClearCards";
- TimingLogger::ScopedSplit split(name, &timings);
+ TimingLogger::ScopedSplit split(name, timings);
table->ClearCards();
} else if (use_rem_sets && rem_set != nullptr) {
DCHECK(collector::SemiSpace::kUseRememberedSet && collector_type_ == kCollectorTypeGSS)
<< static_cast<int>(collector_type_);
- TimingLogger::ScopedSplit split("AllocSpaceRemSetClearCards", &timings);
+ TimingLogger::ScopedSplit split("AllocSpaceRemSetClearCards", timings);
rem_set->ClearCards();
} else if (space->GetType() != space::kSpaceTypeBumpPointerSpace) {
- TimingLogger::ScopedSplit split("AllocSpaceClearCards", &timings);
+ TimingLogger::ScopedSplit split("AllocSpaceClearCards", timings);
// No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards
// were dirty before the GC started.
// TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread)
@@ -2337,7 +2337,8 @@
// The races are we either end up with: Aged card, unaged card. Since we have the checkpoint
// roots and then we scan / update mod union tables after. We will always scan either card.
// If we end up with the non aged card, we scan it it in the pause.
- card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), VoidFunctor());
+ card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(),
+ VoidFunctor());
}
}
}
@@ -2347,7 +2348,7 @@
void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
Thread* const self = Thread::Current();
- TimingLogger* const timings = &gc->GetTimings();
+ TimingLogger* const timings = current_gc_iteration_.GetTimings();
if (verify_pre_gc_heap_) {
TimingLogger::ScopedSplit split("PreGcVerifyHeapReferences", timings);
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
@@ -2389,13 +2390,13 @@
void Heap::PrePauseRosAllocVerification(collector::GarbageCollector* gc) {
// TODO: Add a new runtime option for this?
if (verify_pre_gc_rosalloc_) {
- RosAllocVerification(&gc->GetTimings(), "PreGcRosAllocVerification");
+ RosAllocVerification(current_gc_iteration_.GetTimings(), "PreGcRosAllocVerification");
}
}
void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
Thread* const self = Thread::Current();
- TimingLogger* const timings = &gc->GetTimings();
+ TimingLogger* const timings = current_gc_iteration_.GetTimings();
// Called before sweeping occurs since we want to make sure we are not going so reclaim any
// reachable objects.
if (verify_pre_sweeping_heap_) {
@@ -2421,7 +2422,7 @@
void Heap::PostGcVerificationPaused(collector::GarbageCollector* gc) {
// Only pause if we have to do some verification.
Thread* const self = Thread::Current();
- TimingLogger* const timings = &gc->GetTimings();
+ TimingLogger* const timings = GetCurrentGcIteration()->GetTimings();
if (verify_system_weaks_) {
ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
@@ -2575,9 +2576,9 @@
// We also check that the bytes allocated aren't over the footprint limit in order to prevent a
// pathological case where dead objects which aren't reclaimed by sticky could get accumulated
// if the sticky GC throughput always remained >= the full/partial throughput.
- if (collector_ran->GetEstimatedLastIterationThroughput() * kStickyGcThroughputAdjustment >=
+ if (current_gc_iteration_.GetEstimatedThroughput() * kStickyGcThroughputAdjustment >=
non_sticky_collector->GetEstimatedMeanThroughput() &&
- non_sticky_collector->GetIterations() > 0 &&
+ non_sticky_collector->NumberOfIterations() > 0 &&
bytes_allocated <= max_allowed_footprint_) {
next_gc_type_ = collector::kGcTypeSticky;
} else {
@@ -2595,7 +2596,7 @@
if (IsGcConcurrent()) {
// Calculate when to perform the next ConcurrentGC.
// Calculate the estimated GC duration.
- const double gc_duration_seconds = NsToMs(collector_ran->GetDurationNs()) / 1000.0;
+ const double gc_duration_seconds = NsToMs(current_gc_iteration_.GetDurationNs()) / 1000.0;
// Estimate how many remaining bytes we will have when we need to start the next GC.
size_t remaining_bytes = allocation_rate_ * gc_duration_seconds;
remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
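The GrowForUtilization hunk above now takes the sticky-GC throughput from the current iteration instead of the collector. A worked instance of that comparison with hypothetical numbers (the real kStickyGcThroughputAdjustment value is defined in heap.cc):

// Hypothetical numbers; only the shape of the comparison matches the code above.
const double sticky_throughput = 180.0e6;            // estimated bytes/s for the last sticky GC
const double non_sticky_mean_throughput = 150.0e6;   // mean bytes/s for full/partial GCs
const double kStickyGcThroughputAdjustment = 1.0;    // assumed value, for illustration only
const bool keep_sticky =
    sticky_throughput * kStickyGcThroughputAdjustment >= non_sticky_mean_throughput;
// keep_sticky is true here, so next_gc_type_ stays kGcTypeSticky, provided the
// non-sticky collector has at least one recorded iteration and bytes_allocated
// is still within max_allowed_footprint_.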
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 368a20c..a34cd38 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -27,6 +27,7 @@
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table.h"
#include "gc/gc_cause.h"
+#include "gc/collector/garbage_collector.h"
#include "gc/collector/gc_type.h"
#include "gc/collector_type.h"
#include "globals.h"
@@ -317,6 +318,13 @@
return discontinuous_spaces_;
}
+ const collector::Iteration* GetCurrentGcIteration() const {
+ return &current_gc_iteration_;
+ }
+ collector::Iteration* GetCurrentGcIteration() {
+ return &current_gc_iteration_;
+ }
+
// Enable verification of object references when the runtime is sufficiently initialized.
void EnableObjectValidation() {
verify_object_mode_ = kVerifyObjectSupport;
@@ -690,7 +698,7 @@
void SwapStacks(Thread* self);
// Clear cards and update the mod union table.
- void ProcessCards(TimingLogger& timings, bool use_rem_sets);
+ void ProcessCards(TimingLogger* timings, bool use_rem_sets);
// Signal the heap trim daemon that there is something to do, either a heap transition or heap
// trim.
@@ -849,6 +857,9 @@
// Data structure GC overhead.
Atomic<size_t> gc_memory_overhead_;
+ // Info related to the current or previous GC iteration.
+ collector::Iteration current_gc_iteration_;
+
// Heap verification flags.
const bool verify_missing_card_marks_;
const bool verify_system_weaks_;
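The new current_gc_iteration_ member and its GetCurrentGcIteration() accessors centralize per-cycle GC state that used to be read off the collector. A minimal usage sketch, written as if inside namespace art::gc and using only accessors that appear elsewhere in this change (collector::Iteration itself is declared in gc/collector/garbage_collector.h):

// Assumes a Heap* heap and the appropriate locks held by the caller.
collector::Iteration* iter = heap->GetCurrentGcIteration();
auto freed_objects = iter->GetFreedObjects() + iter->GetFreedLargeObjects();
auto freed_bytes = iter->GetFreedBytes() + iter->GetFreedLargeObjectBytes();
auto duration_ns = iter->GetDurationNs();
TimingLogger* timings = iter->GetTimings();        // phase timings for this cycle
bool clear_soft = iter->GetClearSoftReferences();  // policy bits for this cycle
GcCause cause = iter->GetGcCause();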
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 54a63f0..abae8ff 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -411,28 +411,24 @@
bitmap->Clear(ptrs[i]);
}
}
- context->freed_objects += num_ptrs;
- context->freed_bytes += space->FreeList(self, num_ptrs, ptrs);
+ context->freed.objects += num_ptrs;
+ context->freed.bytes += space->FreeList(self, num_ptrs, ptrs);
}
-void LargeObjectSpace::Sweep(bool swap_bitmaps, size_t* out_freed_objects,
- size_t* out_freed_bytes) {
+collector::ObjectBytePair LargeObjectSpace::Sweep(bool swap_bitmaps) {
if (Begin() >= End()) {
- return;
+ return collector::ObjectBytePair(0, 0);
}
accounting::LargeObjectBitmap* live_bitmap = GetLiveBitmap();
accounting::LargeObjectBitmap* mark_bitmap = GetMarkBitmap();
if (swap_bitmaps) {
std::swap(live_bitmap, mark_bitmap);
}
- DCHECK(out_freed_objects != nullptr);
- DCHECK(out_freed_bytes != nullptr);
- SweepCallbackContext scc(swap_bitmaps, this);
+ AllocSpace::SweepCallbackContext scc(swap_bitmaps, this);
accounting::LargeObjectBitmap::SweepWalk(*live_bitmap, *mark_bitmap,
reinterpret_cast<uintptr_t>(Begin()),
reinterpret_cast<uintptr_t>(End()), SweepCallback, &scc);
- *out_freed_objects += scc.freed_objects;
- *out_freed_bytes += scc.freed_bytes;
+ return scc.freed;
}
} // namespace space
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index a84b43a..01982d0 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -73,7 +73,7 @@
return this;
}
- void Sweep(bool swap_bitmaps, size_t* out_freed_objects, size_t* out_freed_bytes);
+ collector::ObjectBytePair Sweep(bool swap_bitmaps);
virtual bool CanMoveObjects() const OVERRIDE {
return false;
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 57ed0bd..4d74f3c 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -242,8 +242,8 @@
// Use a bulk free, that merges consecutive objects before freeing or free per object?
// Documentation suggests better free performance with merging, but this may be at the expensive
// of allocation.
- context->freed_objects += num_ptrs;
- context->freed_bytes += space->FreeList(self, num_ptrs, ptrs);
+ context->freed.objects += num_ptrs;
+ context->freed.bytes += space->FreeList(self, num_ptrs, ptrs);
}
} // namespace space
diff --git a/runtime/gc/space/space.cc b/runtime/gc/space/space.cc
index 4e28416..bff28f6 100644
--- a/runtime/gc/space/space.cc
+++ b/runtime/gc/space/space.cc
@@ -81,14 +81,12 @@
CHECK(mark_bitmap_.get() != nullptr);
}
-void ContinuousMemMapAllocSpace::Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes) {
- DCHECK(freed_objects != nullptr);
- DCHECK(freed_bytes != nullptr);
+collector::ObjectBytePair ContinuousMemMapAllocSpace::Sweep(bool swap_bitmaps) {
accounting::ContinuousSpaceBitmap* live_bitmap = GetLiveBitmap();
accounting::ContinuousSpaceBitmap* mark_bitmap = GetMarkBitmap();
// If the bitmaps are bound then sweeping this space clearly won't do anything.
if (live_bitmap == mark_bitmap) {
- return;
+ return collector::ObjectBytePair(0, 0);
}
SweepCallbackContext scc(swap_bitmaps, this);
if (swap_bitmaps) {
@@ -98,8 +96,7 @@
accounting::ContinuousSpaceBitmap::SweepWalk(
*live_bitmap, *mark_bitmap, reinterpret_cast<uintptr_t>(Begin()),
reinterpret_cast<uintptr_t>(End()), GetSweepCallback(), reinterpret_cast<void*>(&scc));
- *freed_objects += scc.freed_objects;
- *freed_bytes += scc.freed_bytes;
+ return scc.freed;
}
// Returns the old mark bitmap.
@@ -136,9 +133,8 @@
mark_bitmap_->SetName(temp_name);
}
-Space::SweepCallbackContext::SweepCallbackContext(bool swap_bitmaps, space::Space* space)
- : swap_bitmaps(swap_bitmaps), space(space), self(Thread::Current()), freed_objects(0),
- freed_bytes(0) {
+AllocSpace::SweepCallbackContext::SweepCallbackContext(bool swap_bitmaps, space::Space* space)
+ : swap_bitmaps(swap_bitmaps), space(space), self(Thread::Current()) {
}
} // namespace space
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 8415fa1..8444a70 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -23,6 +23,7 @@
#include "base/macros.h"
#include "base/mutex.h"
#include "gc/accounting/space_bitmap.h"
+#include "gc/collector/garbage_collector.h"
#include "globals.h"
#include "image.h"
#include "mem_map.h"
@@ -172,16 +173,6 @@
std::string name_;
protected:
- struct SweepCallbackContext {
- public:
- SweepCallbackContext(bool swap_bitmaps, space::Space* space);
- const bool swap_bitmaps;
- space::Space* const space;
- Thread* const self;
- size_t freed_objects;
- size_t freed_bytes;
- };
-
// When should objects within this space be reclaimed? Not constant as we vary it in the case
// of Zygote forking.
GcRetentionPolicy gc_retention_policy_;
@@ -232,6 +223,14 @@
virtual void RevokeAllThreadLocalBuffers() = 0;
protected:
+ struct SweepCallbackContext {
+ SweepCallbackContext(bool swap_bitmaps, space::Space* space);
+ const bool swap_bitmaps;
+ space::Space* const space;
+ Thread* const self;
+ collector::ObjectBytePair freed;
+ };
+
AllocSpace() {}
virtual ~AllocSpace() {}
@@ -415,7 +414,7 @@
return mark_bitmap_.get();
}
- void Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes);
+ collector::ObjectBytePair Sweep(bool swap_bitmaps);
virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() = 0;
protected:
diff --git a/runtime/indirect_reference_table-inl.h b/runtime/indirect_reference_table-inl.h
index b787233..f561643 100644
--- a/runtime/indirect_reference_table-inl.h
+++ b/runtime/indirect_reference_table-inl.h
@@ -59,8 +59,7 @@
// Make sure that the entry at "idx" is correctly paired with "iref".
inline bool IndirectReferenceTable::CheckEntry(const char* what, IndirectRef iref, int idx) const {
- const mirror::Object* obj = table_[idx];
- IndirectRef checkRef = ToIndirectRef(obj, idx);
+ IndirectRef checkRef = ToIndirectRef(idx);
if (UNLIKELY(checkRef != iref)) {
LOG(ERROR) << "JNI ERROR (app bug): attempt to " << what
<< " stale " << kind_ << " " << iref
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 98e1d21..ad798ed 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -137,13 +137,13 @@
DCHECK_GE(pScan, table_ + prevState.parts.topIndex);
}
UpdateSlotAdd(obj, pScan - table_);
- result = ToIndirectRef(obj, pScan - table_);
+ result = ToIndirectRef(pScan - table_);
*pScan = obj;
segment_state_.parts.numHoles--;
} else {
// Add to the end.
UpdateSlotAdd(obj, topIndex);
- result = ToIndirectRef(obj, topIndex);
+ result = ToIndirectRef(topIndex);
table_[topIndex++] = obj;
segment_state_.parts.topIndex = topIndex;
}
@@ -277,9 +277,6 @@
// while the read barrier won't.
entries.push_back(obj);
} else {
- // We need a read barrier if weak globals. Since this is for
- // debugging where performance isn't top priority, we
- // unconditionally enable the read barrier, which is conservative.
obj = ReadBarrier::BarrierForRoot<mirror::Object, kWithReadBarrier>(root);
entries.push_back(obj);
}
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index 5b3ed68..b3a855d 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -27,6 +27,7 @@
#include "mem_map.h"
#include "object_callbacks.h"
#include "offsets.h"
+#include "read_barrier.h"
namespace art {
namespace mirror {
@@ -215,6 +216,7 @@
}
mirror::Object** operator*() {
+ // This does not have a read barrier as this is used to visit roots.
return &table_[i_];
}
@@ -298,6 +300,7 @@
return segment_state_.parts.topIndex;
}
+ // Note IrtIterator does not have a read barrier as it's used to visit roots.
IrtIterator begin() {
return IrtIterator(table_, 0, Capacity());
}
@@ -333,7 +336,7 @@
* The object pointer itself is subject to relocation in some GC
* implementations, so we shouldn't really be using it here.
*/
- IndirectRef ToIndirectRef(const mirror::Object* /*o*/, uint32_t tableIndex) const {
+ IndirectRef ToIndirectRef(uint32_t tableIndex) const {
DCHECK_LT(tableIndex, 65536U);
uint32_t serialChunk = slot_data_[tableIndex].serial;
uintptr_t uref = serialChunk << 20 | (tableIndex << 2) | kind_;
@@ -368,9 +371,8 @@
std::unique_ptr<MemMap> table_mem_map_;
// Mem map where we store the extended debugging info.
std::unique_ptr<MemMap> slot_mem_map_;
- // bottom of the stack. If a JNI weak global table, do not directly
- // access the object references in this as they are weak roots. Use
- // Get() that has a read barrier.
+ // bottom of the stack. Do not directly access the object references
+ // in this as they are roots. Use Get() that has a read barrier.
mirror::Object** table_;
/* bit mask, ORed into all irefs */
IndirectRefKind kind_;
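ToIndirectRef above, which now takes only the table index, packs a serial number, the index, and the reference kind into a single word. A small self-contained worked example of that packing, with hypothetical values:

#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical inputs: serial chunk 3, table index 42, kind 2.
  const uint32_t serial_chunk = 3;
  const uint32_t table_index = 42;  // stays below 65536, as the DCHECK above requires
  const uintptr_t kind = 2;
  const uintptr_t uref = serial_chunk << 20 | (table_index << 2) | kind;
  // 0x300000 | 0xa8 | 0x2 == 0x3000aa: the low two bits recover the kind and
  // the 16 bits starting at bit 2 recover the table index.
  std::printf("uref=%#zx kind=%zu index=%u\n",
              static_cast<size_t>(uref),
              static_cast<size_t>(uref & 3u),
              static_cast<uint32_t>((uref >> 2) & 0xffffu));
  return 0;
}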
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
index 1477324..325b089 100644
--- a/runtime/jdwp/jdwp.h
+++ b/runtime/jdwp/jdwp.h
@@ -294,14 +294,14 @@
ObjectId threadId)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CleanupMatchList(JdwpEvent** match_list,
- int match_count)
+ size_t match_count)
EXCLUSIVE_LOCKS_REQUIRED(event_list_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void EventFinish(ExpandBuf* pReq);
void FindMatchingEvents(JdwpEventKind eventKind,
- ModBasket* basket,
+ const ModBasket& basket,
JdwpEvent** match_list,
- int* pMatchCount)
+ size_t* pMatchCount)
EXCLUSIVE_LOCKS_REQUIRED(event_list_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void UnregisterEvent(JdwpEvent* pEvent)
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index cb2c420..86c84e8 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -397,7 +397,7 @@
* Run through the list and remove any entries with an expired "count" mod
* from the event list, then free the match list.
*/
-void JdwpState::CleanupMatchList(JdwpEvent** match_list, int match_count) {
+void JdwpState::CleanupMatchList(JdwpEvent** match_list, size_t match_count) {
JdwpEvent** ppEvent = match_list;
while (match_count--) {
@@ -405,7 +405,8 @@
for (int i = 0; i < pEvent->modCount; i++) {
if (pEvent->mods[i].modKind == MK_COUNT && pEvent->mods[i].count.count == 0) {
- VLOG(jdwp) << "##### Removing expired event";
+ VLOG(jdwp) << StringPrintf("##### Removing expired event (requestId=%#" PRIx32 ")",
+ pEvent->requestId);
UnregisterEvent(pEvent);
EventFree(pEvent);
break;
@@ -445,7 +446,7 @@
* If we find a Count mod before rejecting an event, we decrement it. We
* need to do this even if later mods cause us to ignore the event.
*/
-static bool ModsMatch(JdwpEvent* pEvent, ModBasket* basket)
+static bool ModsMatch(JdwpEvent* pEvent, const ModBasket& basket)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
JdwpEventMod* pMod = pEvent->mods;
@@ -462,53 +463,53 @@
CHECK(false); // should not be getting these
break;
case MK_THREAD_ONLY:
- if (pMod->threadOnly.threadId != basket->threadId) {
+ if (pMod->threadOnly.threadId != basket.threadId) {
return false;
}
break;
case MK_CLASS_ONLY:
- if (!Dbg::MatchType(basket->classId, pMod->classOnly.refTypeId)) {
+ if (!Dbg::MatchType(basket.classId, pMod->classOnly.refTypeId)) {
return false;
}
break;
case MK_CLASS_MATCH:
- if (!PatternMatch(pMod->classMatch.classPattern, basket->className)) {
+ if (!PatternMatch(pMod->classMatch.classPattern, basket.className)) {
return false;
}
break;
case MK_CLASS_EXCLUDE:
- if (PatternMatch(pMod->classMatch.classPattern, basket->className)) {
+ if (PatternMatch(pMod->classMatch.classPattern, basket.className)) {
return false;
}
break;
case MK_LOCATION_ONLY:
- if (pMod->locationOnly.loc != *basket->pLoc) {
+ if (pMod->locationOnly.loc != *basket.pLoc) {
return false;
}
break;
case MK_EXCEPTION_ONLY:
- if (pMod->exceptionOnly.refTypeId != 0 && !Dbg::MatchType(basket->excepClassId, pMod->exceptionOnly.refTypeId)) {
+ if (pMod->exceptionOnly.refTypeId != 0 && !Dbg::MatchType(basket.excepClassId, pMod->exceptionOnly.refTypeId)) {
return false;
}
- if ((basket->caught && !pMod->exceptionOnly.caught) || (!basket->caught && !pMod->exceptionOnly.uncaught)) {
+ if ((basket.caught && !pMod->exceptionOnly.caught) || (!basket.caught && !pMod->exceptionOnly.uncaught)) {
return false;
}
break;
case MK_FIELD_ONLY:
- if (pMod->fieldOnly.fieldId != basket->fieldId) {
+ if (pMod->fieldOnly.fieldId != basket.fieldId) {
return false;
}
- if (!Dbg::MatchType(basket->fieldTypeID, pMod->fieldOnly.refTypeId)) {
+ if (!Dbg::MatchType(basket.fieldTypeID, pMod->fieldOnly.refTypeId)) {
return false;
}
break;
case MK_STEP:
- if (pMod->step.threadId != basket->threadId) {
+ if (pMod->step.threadId != basket.threadId) {
return false;
}
break;
case MK_INSTANCE_ONLY:
- if (pMod->instanceOnly.objectId != basket->thisPtr) {
+ if (pMod->instanceOnly.objectId != basket.thisPtr) {
return false;
}
break;
@@ -530,19 +531,16 @@
* DO NOT call this multiple times for the same eventKind, as Count mods are
* decremented during the scan.
*/
-void JdwpState::FindMatchingEvents(JdwpEventKind eventKind, ModBasket* basket,
- JdwpEvent** match_list, int* pMatchCount) {
+void JdwpState::FindMatchingEvents(JdwpEventKind eventKind, const ModBasket& basket,
+ JdwpEvent** match_list, size_t* pMatchCount) {
/* start after the existing entries */
match_list += *pMatchCount;
- JdwpEvent* pEvent = event_list_;
- while (pEvent != NULL) {
+ for (JdwpEvent* pEvent = event_list_; pEvent != nullptr; pEvent = pEvent->next) {
if (pEvent->eventKind == eventKind && ModsMatch(pEvent, basket)) {
*match_list++ = pEvent;
(*pMatchCount)++;
}
-
- pEvent = pEvent->next;
}
}
@@ -774,6 +772,22 @@
return true;
}
+static void LogMatchingEventsAndThread(JdwpEvent** match_list, size_t match_count,
+ const ModBasket& basket)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ for (size_t i = 0; i < match_count; ++i) {
+ JdwpEvent* pEvent = match_list[i];
+ VLOG(jdwp) << "EVENT #" << i << ": " << pEvent->eventKind
+ << StringPrintf(" (requestId=%#" PRIx32 ")", pEvent->requestId);
+ }
+ std::string thread_name;
+ JdwpError error = Dbg::GetThreadName(basket.threadId, thread_name);
+ if (error != JDWP::ERR_NONE) {
+ thread_name = "<unknown>";
+ }
+ VLOG(jdwp) << StringPrintf(" thread=%#" PRIx64, basket.threadId) << " " << thread_name;
+}
+
/*
* A location of interest has been reached. This handles:
* Breakpoint
@@ -829,39 +843,40 @@
return false;
}
- int match_count = 0;
+ size_t match_count = 0;
ExpandBuf* pReq = NULL;
JdwpSuspendPolicy suspend_policy = SP_NONE;
{
MutexLock mu(Thread::Current(), event_list_lock_);
JdwpEvent** match_list = AllocMatchList(event_list_size_);
if ((eventFlags & Dbg::kBreakpoint) != 0) {
- FindMatchingEvents(EK_BREAKPOINT, &basket, match_list, &match_count);
+ FindMatchingEvents(EK_BREAKPOINT, basket, match_list, &match_count);
}
if ((eventFlags & Dbg::kSingleStep) != 0) {
- FindMatchingEvents(EK_SINGLE_STEP, &basket, match_list, &match_count);
+ FindMatchingEvents(EK_SINGLE_STEP, basket, match_list, &match_count);
}
if ((eventFlags & Dbg::kMethodEntry) != 0) {
- FindMatchingEvents(EK_METHOD_ENTRY, &basket, match_list, &match_count);
+ FindMatchingEvents(EK_METHOD_ENTRY, basket, match_list, &match_count);
}
if ((eventFlags & Dbg::kMethodExit) != 0) {
- FindMatchingEvents(EK_METHOD_EXIT, &basket, match_list, &match_count);
- FindMatchingEvents(EK_METHOD_EXIT_WITH_RETURN_VALUE, &basket, match_list, &match_count);
+ FindMatchingEvents(EK_METHOD_EXIT, basket, match_list, &match_count);
+ FindMatchingEvents(EK_METHOD_EXIT_WITH_RETURN_VALUE, basket, match_list, &match_count);
}
if (match_count != 0) {
- VLOG(jdwp) << "EVENT: " << match_list[0]->eventKind << "(" << match_count << " total) "
- << basket.className << "." << Dbg::GetMethodName(pLoc->method_id)
- << StringPrintf(" thread=%#" PRIx64 " dex_pc=%#" PRIx64 ")",
- basket.threadId, pLoc->dex_pc);
-
suspend_policy = scanSuspendPolicy(match_list, match_count);
- VLOG(jdwp) << " suspend_policy=" << suspend_policy;
+
+ if (VLOG_IS_ON(jdwp)) {
+ LogMatchingEventsAndThread(match_list, match_count, basket);
+ VLOG(jdwp) << " location=" << *pLoc;
+ VLOG(jdwp) << StringPrintf(" this=%#" PRIx64, basket.thisPtr);
+ VLOG(jdwp) << " suspend_policy=" << suspend_policy;
+ }
pReq = eventPrep();
expandBufAdd1(pReq, suspend_policy);
expandBufAdd4BE(pReq, match_count);
- for (int i = 0; i < match_count; i++) {
+ for (size_t i = 0; i < match_count; i++) {
expandBufAdd1(pReq, match_list[i]->eventKind);
expandBufAdd4BE(pReq, match_list[i]->requestId);
expandBufAdd8BE(pReq, basket.threadId);
@@ -892,6 +907,8 @@
basket.fieldTypeID = typeId;
basket.fieldId = fieldId;
+ DCHECK_EQ(fieldValue != nullptr, is_modification);
+
if (InvokeInProgress()) {
VLOG(jdwp) << "Not posting field event during invoke";
return false;
@@ -912,7 +929,7 @@
return false;
}
- int match_count = 0;
+ size_t match_count = 0;
ExpandBuf* pReq = NULL;
JdwpSuspendPolicy suspend_policy = SP_NONE;
{
@@ -920,24 +937,29 @@
JdwpEvent** match_list = AllocMatchList(event_list_size_);
if (is_modification) {
- FindMatchingEvents(EK_FIELD_MODIFICATION, &basket, match_list, &match_count);
+ FindMatchingEvents(EK_FIELD_MODIFICATION, basket, match_list, &match_count);
} else {
- FindMatchingEvents(EK_FIELD_ACCESS, &basket, match_list, &match_count);
+ FindMatchingEvents(EK_FIELD_ACCESS, basket, match_list, &match_count);
}
if (match_count != 0) {
- VLOG(jdwp) << "EVENT: " << match_list[0]->eventKind << "(" << match_count << " total) "
- << basket.className << "." << Dbg::GetMethodName(pLoc->method_id)
- << StringPrintf(" thread=%#" PRIx64 " dex_pc=%#" PRIx64 ")",
- basket.threadId, pLoc->dex_pc);
-
suspend_policy = scanSuspendPolicy(match_list, match_count);
- VLOG(jdwp) << " suspend_policy=" << suspend_policy;
+
+ if (VLOG_IS_ON(jdwp)) {
+ LogMatchingEventsAndThread(match_list, match_count, basket);
+ VLOG(jdwp) << " location=" << *pLoc;
+ VLOG(jdwp) << StringPrintf(" this=%#" PRIx64, basket.thisPtr);
+ VLOG(jdwp) << StringPrintf(" type=%#" PRIx64, basket.fieldTypeID) << " "
+ << Dbg::GetClassName(basket.fieldTypeID);
+ VLOG(jdwp) << StringPrintf(" field=%#" PRIx32, basket.fieldId) << " "
+ << Dbg::GetFieldName(basket.fieldId);
+ VLOG(jdwp) << " suspend_policy=" << suspend_policy;
+ }
pReq = eventPrep();
expandBufAdd1(pReq, suspend_policy);
expandBufAdd4BE(pReq, match_count);
- for (int i = 0; i < match_count; i++) {
+ for (size_t i = 0; i < match_count; i++) {
expandBufAdd1(pReq, match_list[i]->eventKind);
expandBufAdd4BE(pReq, match_list[i]->requestId);
expandBufAdd8BE(pReq, basket.threadId);
@@ -984,30 +1006,31 @@
ExpandBuf* pReq = NULL;
JdwpSuspendPolicy suspend_policy = SP_NONE;
- int match_count = 0;
+ size_t match_count = 0;
{
// Don't allow the list to be updated while we scan it.
MutexLock mu(Thread::Current(), event_list_lock_);
JdwpEvent** match_list = AllocMatchList(event_list_size_);
if (start) {
- FindMatchingEvents(EK_THREAD_START, &basket, match_list, &match_count);
+ FindMatchingEvents(EK_THREAD_START, basket, match_list, &match_count);
} else {
- FindMatchingEvents(EK_THREAD_DEATH, &basket, match_list, &match_count);
+ FindMatchingEvents(EK_THREAD_DEATH, basket, match_list, &match_count);
}
if (match_count != 0) {
- VLOG(jdwp) << "EVENT: " << match_list[0]->eventKind << "(" << match_count << " total) "
- << StringPrintf("thread=%#" PRIx64, basket.threadId) << ")";
-
suspend_policy = scanSuspendPolicy(match_list, match_count);
- VLOG(jdwp) << " suspend_policy=" << suspend_policy;
+
+ if (VLOG_IS_ON(jdwp)) {
+ LogMatchingEventsAndThread(match_list, match_count, basket);
+ VLOG(jdwp) << " suspend_policy=" << suspend_policy;
+ }
pReq = eventPrep();
expandBufAdd1(pReq, suspend_policy);
expandBufAdd4BE(pReq, match_count);
- for (int i = 0; i < match_count; i++) {
+ for (size_t i = 0; i < match_count; i++) {
expandBufAdd1(pReq, match_list[i]->eventKind);
expandBufAdd4BE(pReq, match_list[i]->requestId);
expandBufAdd8BE(pReq, basket.threadId);
@@ -1072,33 +1095,35 @@
return false;
}
- int match_count = 0;
+ size_t match_count = 0;
ExpandBuf* pReq = NULL;
JdwpSuspendPolicy suspend_policy = SP_NONE;
{
MutexLock mu(Thread::Current(), event_list_lock_);
JdwpEvent** match_list = AllocMatchList(event_list_size_);
- FindMatchingEvents(EK_EXCEPTION, &basket, match_list, &match_count);
+ FindMatchingEvents(EK_EXCEPTION, basket, match_list, &match_count);
if (match_count != 0) {
- VLOG(jdwp) << "EVENT: " << match_list[0]->eventKind << "(" << match_count << " total)"
- << StringPrintf(" thread=%#" PRIx64, basket.threadId)
- << StringPrintf(" exceptId=%#" PRIx64, exceptionId)
- << " caught=" << basket.caught << ")"
- << " throw: " << *pThrowLoc;
- if (pCatchLoc->class_id == 0) {
- VLOG(jdwp) << " catch: (not caught)";
- } else {
- VLOG(jdwp) << " catch: " << *pCatchLoc;
- }
-
suspend_policy = scanSuspendPolicy(match_list, match_count);
- VLOG(jdwp) << " suspend_policy=" << suspend_policy;
+
+ if (VLOG_IS_ON(jdwp)) {
+ LogMatchingEventsAndThread(match_list, match_count, basket);
+ VLOG(jdwp) << " throwLocation=" << *pThrowLoc;
+ if (pCatchLoc->class_id == 0) {
+ VLOG(jdwp) << " catchLocation=uncaught";
+ } else {
+ VLOG(jdwp) << " catchLocation=" << *pCatchLoc;
+ }
+ VLOG(jdwp) << StringPrintf(" this=%#" PRIx64, basket.thisPtr);
+ VLOG(jdwp) << StringPrintf(" exceptionClass=%#" PRIx64, basket.excepClassId) << " "
+ << Dbg::GetClassName(basket.excepClassId);
+ VLOG(jdwp) << " suspend_policy=" << suspend_policy;
+ }
pReq = eventPrep();
expandBufAdd1(pReq, suspend_policy);
expandBufAdd4BE(pReq, match_count);
- for (int i = 0; i < match_count; i++) {
+ for (size_t i = 0; i < match_count; i++) {
expandBufAdd1(pReq, match_list[i]->eventKind);
expandBufAdd4BE(pReq, match_list[i]->requestId);
expandBufAdd8BE(pReq, basket.threadId);
@@ -1142,17 +1167,19 @@
ExpandBuf* pReq = NULL;
JdwpSuspendPolicy suspend_policy = SP_NONE;
- int match_count = 0;
+ size_t match_count = 0;
{
MutexLock mu(Thread::Current(), event_list_lock_);
JdwpEvent** match_list = AllocMatchList(event_list_size_);
- FindMatchingEvents(EK_CLASS_PREPARE, &basket, match_list, &match_count);
+ FindMatchingEvents(EK_CLASS_PREPARE, basket, match_list, &match_count);
if (match_count != 0) {
- VLOG(jdwp) << "EVENT: " << match_list[0]->eventKind << "(" << match_count << " total) "
- << StringPrintf("thread=%#" PRIx64, basket.threadId) << ") " << signature;
-
suspend_policy = scanSuspendPolicy(match_list, match_count);
- VLOG(jdwp) << " suspend_policy=" << suspend_policy;
+
+ if (VLOG_IS_ON(jdwp)) {
+ LogMatchingEventsAndThread(match_list, match_count, basket);
+ VLOG(jdwp) << StringPrintf(" type=%#" PRIx64, basket.classId)<< " " << signature;
+ VLOG(jdwp) << " suspend_policy=" << suspend_policy;
+ }
if (basket.threadId == debug_thread_id_) {
/*
@@ -1171,7 +1198,7 @@
expandBufAdd1(pReq, suspend_policy);
expandBufAdd4BE(pReq, match_count);
- for (int i = 0; i < match_count; i++) {
+ for (size_t i = 0; i < match_count; i++) {
expandBufAdd1(pReq, match_list[i]->eventKind);
expandBufAdd4BE(pReq, match_list[i]->requestId);
expandBufAdd8BE(pReq, basket.threadId);
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 5606d47..513b409 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -359,8 +359,9 @@
jni_on_load_result_(kPending) {
}
- mirror::Object* GetClassLoader() {
- return class_loader_;
+ mirror::Object* GetClassLoader() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object** root = &class_loader_;
+ return ReadBarrier::BarrierForRoot<mirror::Object, kWithReadBarrier>(root);
}
std::string GetPath() {
@@ -3160,9 +3161,7 @@
while (UNLIKELY(!allow_new_weak_globals_)) {
weak_globals_add_condition_.WaitHoldingLocks(self);
}
- // The weak globals do need a read barrier as they are weak roots.
- mirror::Object* obj = weak_globals_.Get<kWithReadBarrier>(ref);
- return obj;
+ return weak_globals_.Get(ref);
}
void JavaVMExt::DumpReferenceTables(std::ostream& os) {
diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h
index 451d13c..ed5db4e 100644
--- a/runtime/read_barrier.h
+++ b/runtime/read_barrier.h
@@ -33,11 +33,15 @@
class ReadBarrier {
public:
+ // It's up to the implementation whether the given field gets
+ // updated whereas the return value must be an updated reference.
template <typename MirrorType, ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ALWAYS_INLINE static MirrorType* Barrier(
mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // It's up to the implementation whether the given root gets updated
+ // whereas the return value must be an updated reference.
template <typename MirrorType, ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ALWAYS_INLINE static MirrorType* BarrierForRoot(MirrorType** root)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
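The two comments added above pin down the read-barrier contract: the barrier may or may not write back through the pointer it is given, but the returned reference is always up to date. A minimal caller-side sketch of the root form, in the style of the call sites this change adds in reference_table.cc and jni_internal.cc:

// Sketch only: consume the returned pointer, and do not re-read *root afterwards
// on the assumption that it was updated.
mirror::Object* ReadRootWithBarrier(mirror::Object** root)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* ref = ReadBarrier::BarrierForRoot<mirror::Object, kWithReadBarrier>(root);
  return ref;  // 'ref' is guaranteed current; whether *root was updated is implementation-defined.
}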
diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc
index 11527fa..cd35863 100644
--- a/runtime/reference_table.cc
+++ b/runtime/reference_table.cc
@@ -24,6 +24,7 @@
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/string-inl.h"
+#include "read_barrier.h"
#include "thread.h"
#include "utils.h"
@@ -51,7 +52,9 @@
void ReferenceTable::Remove(mirror::Object* obj) {
// We iterate backwards on the assumption that references are LIFO.
for (int i = entries_.size() - 1; i >= 0; --i) {
- if (entries_[i] == obj) {
+ mirror::Object* entry =
+ ReadBarrier::BarrierForRoot<mirror::Object, kWithReadBarrier>(&entries_[i]);
+ if (entry == obj) {
entries_.erase(entries_.begin() + i);
return;
}
@@ -140,12 +143,12 @@
return entries_.size();
}
-void ReferenceTable::Dump(std::ostream& os) const {
+void ReferenceTable::Dump(std::ostream& os) {
os << name_ << " reference table dump:\n";
Dump(os, entries_);
}
-void ReferenceTable::Dump(std::ostream& os, const Table& entries) {
+void ReferenceTable::Dump(std::ostream& os, Table& entries) {
if (entries.empty()) {
os << " (empty)\n";
return;
@@ -160,7 +163,8 @@
}
os << " Last " << (count - first) << " entries (of " << count << "):\n";
for (int idx = count - 1; idx >= first; --idx) {
- mirror::Object* ref = entries[idx];
+ mirror::Object* ref =
+ ReadBarrier::BarrierForRoot<mirror::Object, kWithReadBarrier>(&entries[idx]);
if (ref == NULL) {
continue;
}
@@ -194,7 +198,12 @@
}
// Make a copy of the table and sort it.
- Table sorted_entries(entries.begin(), entries.end());
+ Table sorted_entries;
+ for (size_t i = 0; i < entries.size(); ++i) {
+ mirror::Object* entry =
+ ReadBarrier::BarrierForRoot<mirror::Object, kWithReadBarrier>(&entries[i]);
+ sorted_entries.push_back(entry);
+ }
std::sort(sorted_entries.begin(), sorted_entries.end(), ObjectComparator());
// Remove any uninteresting stuff from the list. The sort moved them all to the end.
diff --git a/runtime/reference_table.h b/runtime/reference_table.h
index 45309c9..1cd0999 100644
--- a/runtime/reference_table.h
+++ b/runtime/reference_table.h
@@ -39,19 +39,19 @@
ReferenceTable(const char* name, size_t initial_size, size_t max_size);
~ReferenceTable();
- void Add(mirror::Object* obj);
+ void Add(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void Remove(mirror::Object* obj);
+ void Remove(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
size_t Size() const;
- void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void VisitRoots(RootCallback* visitor, void* arg, uint32_t tid, RootType root_type);
private:
typedef std::vector<mirror::Object*> Table;
- static void Dump(std::ostream& os, const Table& entries)
+ static void Dump(std::ostream& os, Table& entries)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
friend class IndirectReferenceTable; // For Dump.
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index 7ce68c6..d691623 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -146,7 +146,8 @@
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
CHECK(!kMovingFields);
- return reinterpret_cast<mirror::ArtField*>(fid);
+ mirror::ArtField* field = reinterpret_cast<mirror::ArtField*>(fid);
+ return ReadBarrier::BarrierForRoot<mirror::ArtField, kWithReadBarrier>(&field);
}
jfieldID EncodeField(mirror::ArtField* field) const
@@ -162,7 +163,8 @@
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
CHECK(!kMovingMethods);
- return reinterpret_cast<mirror::ArtMethod*>(mid);
+ mirror::ArtMethod* method = reinterpret_cast<mirror::ArtMethod*>(mid);
+ return ReadBarrier::BarrierForRoot<mirror::ArtMethod, kWithReadBarrier>(&method);
}
jmethodID EncodeMethod(mirror::ArtMethod* method) const
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 3f8f4a3..e5ae6d0 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1298,9 +1298,7 @@
}
} else if (kind == kGlobal) {
JavaVMExt* const vm = Runtime::Current()->GetJavaVM();
- // Strong global references do not need a read barrier.
- result = vm->globals.SynchronizedGet<kWithoutReadBarrier>(
- const_cast<Thread*>(this), &vm->globals_lock, ref);
+ result = vm->globals.SynchronizedGet(const_cast<Thread*>(this), &vm->globals_lock, ref);
} else {
DCHECK_EQ(kind, kWeakGlobal);
result = Runtime::Current()->GetJavaVM()->DecodeWeakGlobal(const_cast<Thread*>(this), ref);
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index f8e75ea..89cfcdd 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -717,13 +717,28 @@
case Instruction::kVerifySwitchTargets:
result = result && CheckSwitchTargets(code_offset);
break;
+ case Instruction::kVerifyVarArgNonZero:
+ // Fall-through.
case Instruction::kVerifyVarArg: {
+ if (inst->GetVerifyExtraFlags() == Instruction::kVerifyVarArgNonZero && inst->VRegA() <= 0) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid arg count (" << inst->VRegA() << ") in "
+ "non-range invoke";
+ return false;
+ }
uint32_t args[Instruction::kMaxVarArgRegs];
inst->GetVarArgs(args);
result = result && CheckVarArgRegs(inst->VRegA(), args);
break;
}
+ case Instruction::kVerifyVarArgRangeNonZero:
+ // Fall-through.
case Instruction::kVerifyVarArgRange:
+ if (inst->GetVerifyExtraFlags() == Instruction::kVerifyVarArgRangeNonZero &&
+ inst->VRegA() <= 0) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid arg count (" << inst->VRegA() << ") in "
+ "range invoke";
+ return false;
+ }
result = result && CheckVarArgRangeRegs(inst->VRegA(), inst->VRegC());
break;
case Instruction::kVerifyError:
diff --git a/test/302-float-conversion/expected.txt b/test/302-float-conversion/expected.txt
index 7d5c1eb..0423076 100644
--- a/test/302-float-conversion/expected.txt
+++ b/test/302-float-conversion/expected.txt
@@ -1,2 +1,3 @@
Iteration Result is as expected
inter4:2.0
+max_long:9223372036854775807
diff --git a/test/302-float-conversion/src/Main.java b/test/302-float-conversion/src/Main.java
index afc5e97..2733135 100644
--- a/test/302-float-conversion/src/Main.java
+++ b/test/302-float-conversion/src/Main.java
@@ -21,6 +21,7 @@
public static void main(String args[]) {
test1();
test2();
+ test3();
}
public static void test1() {
@@ -55,4 +56,9 @@
System.out.println("inter4:" + inter4);
}
+ public static void test3() {
+ double d = Long.MAX_VALUE;
+ System.out.println("max_long:" + (long)d);
+ }
+
}
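test3 exercises the narrowing conversion at the top of the long range: (double)Long.MAX_VALUE rounds up to 2^63, which is out of range for a long, so the Java cast must saturate to Long.MAX_VALUE, matching the new max_long line in expected.txt. A self-contained sketch of the same boundary arithmetic (in C++, where the clamp has to be explicit because an out-of-range cast would be undefined):

#include <cstdint>
#include <cstdio>

int main() {
  // INT64_MAX (2^63 - 1) is not exactly representable as a double; it rounds up to 2^63.
  const double d = static_cast<double>(INT64_MAX);
  const double two_pow_63 = 9223372036854775808.0;  // 2^63, exactly representable
  std::printf("rounds_to_2^63=%d\n", d == two_pow_63);  // prints 1
  // Since d >= 2^63, a saturating conversion clamps instead of converting:
  const int64_t result = (d >= two_pow_63) ? INT64_MAX : static_cast<int64_t>(d);
  std::printf("max_long:%lld\n", static_cast<long long>(result));  // 9223372036854775807
  return 0;
}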