-rw-r--r-- compiler/linker/arm/relative_patcher_arm_base.h | 27
-rw-r--r-- compiler/linker/arm64/relative_patcher_arm64.cc | 60
-rw-r--r-- compiler/linker/arm64/relative_patcher_arm64.h | 13
-rw-r--r-- compiler/linker/arm64/relative_patcher_arm64_test.cc | 144
-rw-r--r-- compiler/optimizing/code_generator_arm64.cc | 90
-rw-r--r-- compiler/optimizing/instruction_simplifier_shared.cc | 1
-rw-r--r-- compiler/optimizing/nodes.h | 14
-rw-r--r-- dexlayout/dex_ir.cc | 6
-rw-r--r-- dexlayout/dexlayout.cc | 102
-rw-r--r-- dexlayout/dexlayout.h | 3
-rw-r--r-- dexlayout/dexlayout_test.cc | 26
-rw-r--r-- runtime/arch/context-inl.h | 55
-rw-r--r-- runtime/arch/context.cc | 35
-rw-r--r-- runtime/debugger.cc | 17
-rw-r--r-- runtime/debugger.h | 3
-rw-r--r-- runtime/dex_file_annotations.cc | 34
-rw-r--r-- runtime/dex_file_annotations.h | 2
-rw-r--r-- runtime/hprof/hprof.cc | 123
-rw-r--r-- runtime/jdwp/jdwp_handler.cc | 15
-rw-r--r-- runtime/thread.cc | 6
-rw-r--r-- test/911-get-stack-trace/src/art/PrintThread.java | 5
-rw-r--r-- test/knownfailures.json | 7
22 files changed, 617 insertions, 171 deletions
diff --git a/compiler/linker/arm/relative_patcher_arm_base.h b/compiler/linker/arm/relative_patcher_arm_base.h
index 2cb1b6c535..47f840fd65 100644
--- a/compiler/linker/arm/relative_patcher_arm_base.h
+++ b/compiler/linker/arm/relative_patcher_arm_base.h
@@ -43,10 +43,11 @@ class ArmBaseRelativePatcher : public RelativePatcher {
enum class ThunkType {
kMethodCall, // Method call thunk.
kBakerReadBarrierField, // Baker read barrier, load field or array element at known offset.
+ kBakerReadBarrierArray, // Baker read barrier, array load with index in register.
kBakerReadBarrierRoot, // Baker read barrier, GC root load.
};
- struct BakerReadBarrierOffsetParams {
+ struct BakerReadBarrierFieldParams {
uint32_t holder_reg; // Holder object for reading lock word.
uint32_t base_reg; // Base register, different from holder for large offset.
// If base differs from holder, it should be a pre-defined
@@ -54,9 +55,16 @@ class ArmBaseRelativePatcher : public RelativePatcher {
// The offset is retrieved using introspection.
};
+ struct BakerReadBarrierArrayParams {
+ uint32_t base_reg; // Reference to the start of the data.
+ uint32_t dummy; // Dummy field.
+ // The index register is retrieved using introspection
+ // to limit the number of thunks we need to emit.
+ };
+
struct BakerReadBarrierRootParams {
uint32_t root_reg; // The register holding the GC root.
- uint32_t dummy;
+ uint32_t dummy; // Dummy field.
};
struct RawThunkParams {
@@ -66,8 +74,12 @@ class ArmBaseRelativePatcher : public RelativePatcher {
union ThunkParams {
RawThunkParams raw_params;
- BakerReadBarrierOffsetParams offset_params;
+ BakerReadBarrierFieldParams field_params;
+ BakerReadBarrierArrayParams array_params;
BakerReadBarrierRootParams root_params;
+ static_assert(sizeof(raw_params) == sizeof(field_params), "field_params size check");
+ static_assert(sizeof(raw_params) == sizeof(array_params), "array_params size check");
+ static_assert(sizeof(raw_params) == sizeof(root_params), "root_params size check");
};
class ThunkKey {
@@ -78,9 +90,14 @@ class ArmBaseRelativePatcher : public RelativePatcher {
return type_;
}
- BakerReadBarrierOffsetParams GetOffsetParams() const {
+ BakerReadBarrierFieldParams GetFieldParams() const {
DCHECK(type_ == ThunkType::kBakerReadBarrierField);
- return params_.offset_params;
+ return params_.field_params;
+ }
+
+ BakerReadBarrierArrayParams GetArrayParams() const {
+ DCHECK(type_ == ThunkType::kBakerReadBarrierArray);
+ return params_.array_params;
}
BakerReadBarrierRootParams GetRootParams() const {
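Note on the size checks above: they exist, presumably, so that the base patcher can compare and hash ThunkKeys through the raw view without knowing which variant is active, which is only sound if every params struct occupies exactly the same bytes. A minimal self-contained C++ sketch of the pattern (illustrative names, not the actual ART declarations):

    #include <cstdint>

    enum class ThunkType { kMethodCall, kField, kArray, kRoot };

    struct RawThunkParams { uint32_t first; uint32_t second; };
    struct FieldParams { uint32_t holder_reg; uint32_t base_reg; };
    struct ArrayParams { uint32_t base_reg; uint32_t dummy; };

    union ThunkParams {
      RawThunkParams raw_params;
      FieldParams field_params;
      ArrayParams array_params;
      // Mirrors the static_asserts in the patch: all variants overlap exactly,
      // so inspecting raw_params is well-defined whichever one was written
      // (common-initial-sequence rule for standard-layout structs in a union).
      static_assert(sizeof(RawThunkParams) == sizeof(FieldParams), "size check");
      static_assert(sizeof(RawThunkParams) == sizeof(ArrayParams), "size check");
    };

    // Key comparison can then ignore the active variant entirely.
    bool KeysEqual(ThunkType lt, ThunkParams lp, ThunkType rt, ThunkParams rp) {
      return lt == rt &&
             lp.raw_params.first == rp.raw_params.first &&
             lp.raw_params.second == rp.raw_params.second;
    }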
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc
index 551c73b2a4..5c6fb504cf 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64.cc
@@ -29,6 +29,7 @@
#include "mirror/array-inl.h"
#include "oat.h"
#include "oat_quick_method_header.h"
+#include "read_barrier.h"
#include "utils/arm64/assembler_arm64.h"
namespace art {
@@ -313,7 +314,17 @@ void Arm64RelativePatcher::PatchBakerReadBarrierBranch(std::vector<uint8_t>* cod
uint32_t next_insn = GetInsn(code, literal_offset + 4u);
// LDR (immediate) with correct base_reg.
CheckValidReg(next_insn & 0x1fu); // Check destination register.
- CHECK_EQ(next_insn & 0xffc003e0u, 0xb9400000u | (key.GetOffsetParams().base_reg << 5));
+ CHECK_EQ(next_insn & 0xffc003e0u, 0xb9400000u | (key.GetFieldParams().base_reg << 5));
+ break;
+ }
+ case ThunkType::kBakerReadBarrierArray: {
+ DCHECK_GE(code->size() - literal_offset, 8u);
+ uint32_t next_insn = GetInsn(code, literal_offset + 4u);
+ // LDR (register) with the correct base_reg, size=10 (32-bit), option=011 (extend = LSL),
+ // and S=1 (shift amount = 2 for 32-bit version), i.e. LDR Wt, [Xn, Xm, LSL #2].
+ CheckValidReg(next_insn & 0x1fu); // Check destination register.
+ CHECK_EQ(next_insn & 0xffe0ffe0u, 0xb8607800u | (key.GetArrayParams().base_reg << 5));
+ CheckValidReg((next_insn >> 16) & 0x1fu); // Check index register.
break;
}
case ThunkType::kBakerReadBarrierRoot: {
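To make the mask in the new kBakerReadBarrierArray check concrete: 0xffe0ffe0u clears only the Rm field (bits 20:16) and the Rt field (bits 4:0) of the A64 LDR (register) encoding, so the CHECK_EQ pins the opcode bits (size=10, option=011, S=1) plus the base register Rn. A hedged, self-contained sketch:

    #include <cassert>
    #include <cstdint>

    int main() {
      // LDR w2, [x1, x0, LSL #2]: base opcode 0xb8607800 plus
      // Rm=0 (bits 20:16), Rn=1 (bits 9:5), Rt=2 (bits 4:0).
      uint32_t insn = 0xb8607800u | (0u << 16) | (1u << 5) | 2u;
      uint32_t base_reg = 1u;
      // Masking out Rm and Rt leaves exactly what the CHECK_EQ compares.
      assert((insn & 0xffe0ffe0u) == (0xb8607800u | (base_reg << 5)));
      assert(((insn >> 16) & 0x1fu) == 0u);  // index register (Rm)
      assert((insn & 0x1fu) == 2u);          // destination register (Rt)
      return 0;
    }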
@@ -344,10 +355,16 @@ ArmBaseRelativePatcher::ThunkKey Arm64RelativePatcher::GetBakerReadBarrierKey(
ThunkParams params;
switch (type) {
case BakerReadBarrierKind::kField:
- params.offset_params.base_reg = BakerReadBarrierFirstRegField::Decode(value);
- CheckValidReg(params.offset_params.base_reg);
- params.offset_params.holder_reg = BakerReadBarrierSecondRegField::Decode(value);
- CheckValidReg(params.offset_params.holder_reg);
+ params.field_params.base_reg = BakerReadBarrierFirstRegField::Decode(value);
+ CheckValidReg(params.field_params.base_reg);
+ params.field_params.holder_reg = BakerReadBarrierSecondRegField::Decode(value);
+ CheckValidReg(params.field_params.holder_reg);
+ break;
+ case BakerReadBarrierKind::kArray:
+ params.array_params.base_reg = BakerReadBarrierFirstRegField::Decode(value);
+ CheckValidReg(params.array_params.base_reg);
+ params.array_params.dummy = 0u;
+ DCHECK_EQ(BakerReadBarrierSecondRegField::Decode(value), kInvalidEncodedReg);
break;
case BakerReadBarrierKind::kGcRoot:
params.root_params.root_reg = BakerReadBarrierFirstRegField::Decode(value);
@@ -363,6 +380,9 @@ ArmBaseRelativePatcher::ThunkKey Arm64RelativePatcher::GetBakerReadBarrierKey(
static_assert(static_cast<uint32_t>(BakerReadBarrierKind::kField) + kTypeTranslationOffset ==
static_cast<uint32_t>(ThunkType::kBakerReadBarrierField),
"Thunk type translation check.");
+ static_assert(static_cast<uint32_t>(BakerReadBarrierKind::kArray) + kTypeTranslationOffset ==
+ static_cast<uint32_t>(ThunkType::kBakerReadBarrierArray),
+ "Thunk type translation check.");
static_assert(static_cast<uint32_t>(BakerReadBarrierKind::kGcRoot) + kTypeTranslationOffset ==
static_cast<uint32_t>(ThunkType::kBakerReadBarrierRoot),
"Thunk type translation check.");
@@ -394,7 +414,7 @@ static void EmitGrayCheckAndFastPath(arm64::Arm64Assembler& assembler,
// Introduce a dependency on the lock_word including rb_state,
// to prevent load-load reordering, and without using
// a memory barrier (which would be more expensive).
- __ Add(base_reg, base_reg, Operand(vixl::aarch64::ip0, LSR, 32));
+ __ Add(base_reg, base_reg, Operand(ip0, LSR, 32));
__ Br(lr); // And return back to the function.
// Note: The fake dependency is unnecessary for the slow path.
}
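The fake dependency is worth spelling out: the 32-bit lock word was loaded zero-extended into ip0, so `ip0 LSR 32` is always zero and the ADD leaves base_reg unchanged, yet the result now carries an address dependency on the lock-word load, which orders it before the reference load without a DMB. A self-contained sketch of the arithmetic:

    #include <cstdint>

    // Returns base_reg unchanged (the top 32 bits of a zero-extended 32-bit
    // load are 0), but the result depends on the lock word, which forbids
    // the CPU from reordering the subsequent reference load ahead of it.
    uint64_t AddFakeDependency(uint64_t base_reg, uint64_t ip0_lock_word) {
      return base_reg + (ip0_lock_word >> 32);
    }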
@@ -419,8 +439,8 @@ std::vector<uint8_t> Arm64RelativePatcher::CompileThunk(const ThunkKey& key) {
// and return to the LDR instruction to load the reference. Otherwise, use introspection
// to load the reference and call the entrypoint (in IP1) that performs further checks
// on the reference and marks it if needed.
- auto holder_reg = Register::GetXRegFromCode(key.GetOffsetParams().holder_reg);
- auto base_reg = Register::GetXRegFromCode(key.GetOffsetParams().base_reg);
+ auto holder_reg = Register::GetXRegFromCode(key.GetFieldParams().holder_reg);
+ auto base_reg = Register::GetXRegFromCode(key.GetFieldParams().base_reg);
UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
temps.Exclude(ip0, ip1);
// If base_reg differs from holder_reg, the offset was too large and we must have
@@ -444,11 +464,31 @@ std::vector<uint8_t> Arm64RelativePatcher::CompileThunk(const ThunkKey& key) {
// Add null check slow path. The stack map is at the address pointed to by LR.
__ Bind(&throw_npe);
int32_t offset = GetThreadOffset<kArm64PointerSize>(kQuickThrowNullPointer).Int32Value();
- __ Ldr(ip0, MemOperand(vixl::aarch64::x19, offset));
+ __ Ldr(ip0, MemOperand(/* Thread* */ vixl::aarch64::x19, offset));
__ Br(ip0);
}
break;
}
+ case ThunkType::kBakerReadBarrierArray: {
+ auto base_reg = Register::GetXRegFromCode(key.GetArrayParams().base_reg);
+ UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
+ temps.Exclude(ip0, ip1);
+ vixl::aarch64::Label slow_path;
+ int32_t data_offset =
+ mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimNot)).Int32Value();
+ MemOperand lock_word(base_reg, mirror::Object::MonitorOffset().Int32Value() - data_offset);
+ DCHECK_LT(lock_word.GetOffset(), 0);
+ EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path);
+ __ Bind(&slow_path);
+ MemOperand ldr_address(lr, BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET);
+ __ Ldr(ip0.W(), ldr_address); // Load the LDR (register) unsigned offset.
+ __ Ubfx(ip0, ip0, 16, 6); // Extract the index register, plus 32 (bit 21 is set).
+ __ Bfi(ip1, ip0, 3, 6); // Insert ip0 to the entrypoint address to create
+ // a switch case target based on the index register.
+ __ Mov(ip0, base_reg); // Move the base register to ip0.
+ __ Br(ip1); // Jump to the entrypoint's array switch case.
+ break;
+ }
case ThunkType::kBakerReadBarrierRoot: {
// Check if the reference needs to be marked and if so (i.e. not null, not marked yet
// and it does not have a forwarding address), call the correct introspection entrypoint;
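The UBFX/BFI pair in the array slow path above is the clever bit: bit 21 of an LDR (register) encoding is always set, so extracting 6 bits starting at bit 16 yields index_reg + 32, and inserting that value at bit 3 of the entrypoint address selects one of the 8-byte switch cases. A hedged arithmetic sketch (the entrypoint value is hypothetical; its bits 3..8 are assumed clear, matching how the introspection entrypoint is expected to be laid out):

    #include <cassert>
    #include <cstdint>

    int main() {
      // LDR w2, [x1, x5, LSL #2] -- the index register is x5.
      uint32_t ldr_insn = 0xb8607800u | (5u << 16) | (1u << 5) | 2u;
      uint64_t entrypoint = 0x7000u;              // hypothetical ip1 value
      uint64_t field = (ldr_insn >> 16) & 0x3fu;  // UBFX ip0, ip0, #16, #6
      assert(field == 5u + 32u);                  // index register plus bit 21
      uint64_t target =                           // BFI ip1, ip0, #3, #6
          (entrypoint & ~(uint64_t{0x3f} << 3)) | (field << 3);
      assert(target == entrypoint + (5u + 32u) * 8u);  // 8-byte switch case
      return 0;
    }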
@@ -494,6 +534,7 @@ uint32_t Arm64RelativePatcher::MaxPositiveDisplacement(ThunkType type) {
case ThunkType::kMethodCall:
return kMaxMethodCallPositiveDisplacement;
case ThunkType::kBakerReadBarrierField:
+ case ThunkType::kBakerReadBarrierArray:
case ThunkType::kBakerReadBarrierRoot:
return kMaxBcondPositiveDisplacement;
}
@@ -504,6 +545,7 @@ uint32_t Arm64RelativePatcher::MaxNegativeDisplacement(ThunkType type) {
case ThunkType::kMethodCall:
return kMaxMethodCallNegativeDisplacement;
case ThunkType::kBakerReadBarrierField:
+ case ThunkType::kBakerReadBarrierArray:
case ThunkType::kBakerReadBarrierRoot:
return kMaxBcondNegativeDisplacement;
}
diff --git a/compiler/linker/arm64/relative_patcher_arm64.h b/compiler/linker/arm64/relative_patcher_arm64.h
index 7887cea5e6..71ab70eda9 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.h
+++ b/compiler/linker/arm64/relative_patcher_arm64.h
@@ -19,6 +19,7 @@
#include "base/array_ref.h"
#include "base/bit_field.h"
+#include "base/bit_utils.h"
#include "linker/arm/relative_patcher_arm_base.h"
namespace art {
@@ -28,6 +29,7 @@ class Arm64RelativePatcher FINAL : public ArmBaseRelativePatcher {
public:
enum class BakerReadBarrierKind : uint8_t {
kField, // Field get or array get with constant offset (i.e. constant index).
+ kArray, // Array get with index in register.
kGcRoot, // GC root load.
kLast
};
@@ -40,6 +42,13 @@ class Arm64RelativePatcher FINAL : public ArmBaseRelativePatcher {
BakerReadBarrierSecondRegField::Encode(holder_reg);
}
+ static uint32_t EncodeBakerReadBarrierArrayData(uint32_t base_reg) {
+ CheckValidReg(base_reg);
+ return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kArray) |
+ BakerReadBarrierFirstRegField::Encode(base_reg) |
+ BakerReadBarrierSecondRegField::Encode(kInvalidEncodedReg);
+ }
+
static uint32_t EncodeBakerReadBarrierGcRootData(uint32_t root_reg) {
CheckValidReg(root_reg);
return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kGcRoot) |
@@ -68,14 +77,14 @@ class Arm64RelativePatcher FINAL : public ArmBaseRelativePatcher {
uint32_t patch_offset) OVERRIDE;
protected:
- static constexpr uint32_t kInvalidEncodedReg = /* sp/zr is invalid */ 31u;
-
ThunkKey GetBakerReadBarrierKey(const LinkerPatch& patch) OVERRIDE;
std::vector<uint8_t> CompileThunk(const ThunkKey& key) OVERRIDE;
uint32_t MaxPositiveDisplacement(ThunkType type) OVERRIDE;
uint32_t MaxNegativeDisplacement(ThunkType type) OVERRIDE;
private:
+ static constexpr uint32_t kInvalidEncodedReg = /* sp/zr is invalid */ 31u;
+
static constexpr size_t kBitsForBakerReadBarrierKind =
MinimumBitsToStore(static_cast<size_t>(BakerReadBarrierKind::kLast));
static constexpr size_t kBitsForRegister = 5u;
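For reference, the custom_data word built by these encoders is a plain bit-field pack: the kind in the low bits (two bits suffice for kField/kArray/kGcRoot/kLast), followed by two 5-bit register fields. A hedged sketch of the layout, with bit positions assumed from the BitField constants quoted above:

    #include <cstdint>

    enum class Kind : uint32_t { kField = 0, kArray = 1, kGcRoot = 2 };
    constexpr uint32_t kInvalidEncodedReg = 31u;  // sp/zr is invalid

    constexpr uint32_t Encode(Kind kind, uint32_t first_reg, uint32_t second_reg) {
      return static_cast<uint32_t>(kind) | (first_reg << 2) | (second_reg << 7);
    }
    constexpr uint32_t DecodeFirstReg(uint32_t data) { return (data >> 2) & 0x1fu; }
    constexpr uint32_t DecodeSecondReg(uint32_t data) { return (data >> 7) & 0x1fu; }

    // An array patch encodes only the base register; the second field holds
    // the invalid marker, which GetBakerReadBarrierKey DCHECKs.
    static_assert(DecodeFirstReg(Encode(Kind::kArray, 13u, kInvalidEncodedReg)) == 13u, "");
    static_assert(DecodeSecondReg(Encode(Kind::kArray, 13u, kInvalidEncodedReg)) == 31u, "");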
diff --git a/compiler/linker/arm64/relative_patcher_arm64_test.cc b/compiler/linker/arm64/relative_patcher_arm64_test.cc
index b4d35ab2a7..57ea886586 100644
--- a/compiler/linker/arm64/relative_patcher_arm64_test.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64_test.cc
@@ -18,6 +18,7 @@
#include "linker/relative_patcher_test.h"
#include "linker/arm64/relative_patcher_arm64.h"
#include "lock_word.h"
+#include "mirror/array-inl.h"
#include "mirror/object.h"
#include "oat_quick_method_header.h"
@@ -46,9 +47,15 @@ class Arm64RelativePatcherTest : public RelativePatcherTest {
static constexpr uint32_t kBlPlusMax = 0x95ffffffu;
static constexpr uint32_t kBlMinusMax = 0x96000000u;
- // LDR immediate, unsigned offset.
+ // LDR immediate, 32-bit, unsigned offset.
static constexpr uint32_t kLdrWInsn = 0xb9400000u;
+ // LDR register, 32-bit, LSL #2.
+ static constexpr uint32_t kLdrWLsl2Insn = 0xb8607800u;
+
+ // LDUR, 32-bit.
+ static constexpr uint32_t kLdurWInsn = 0xb8400000u;
+
// ADD/ADDS/SUB/SUBS immediate, 64-bit.
static constexpr uint32_t kAddXInsn = 0x91000000u;
static constexpr uint32_t kAddsXInsn = 0xb1000000u;
@@ -68,7 +75,7 @@ class Arm64RelativePatcherTest : public RelativePatcherTest {
static constexpr uint32_t kLdrXSpRelInsn = 0xf94003edu;
// CBNZ x17, +0. Bits 5-23 are a placeholder for target offset from PC in units of 4-bytes.
- static constexpr uint32_t kCbnzIP1Plus0Insn = 0xb5000011;
+ static constexpr uint32_t kCbnzIP1Plus0Insn = 0xb5000011u;
void InsertInsn(std::vector<uint8_t>* code, size_t pos, uint32_t insn) {
CHECK_LE(pos, code->size());
@@ -188,7 +195,7 @@ class Arm64RelativePatcherTest : public RelativePatcherTest {
std::vector<uint8_t> GenNops(size_t num_nops) {
std::vector<uint8_t> result;
- result.reserve(num_nops * 4u + 4u);
+ result.reserve(num_nops * 4u);
for (size_t i = 0; i != num_nops; ++i) {
PushBackInsn(&result, kNopInsn);
}
@@ -228,7 +235,7 @@ class Arm64RelativePatcherTest : public RelativePatcherTest {
} else {
LOG(FATAL) << "Unexpected instruction: 0x" << std::hex << use_insn;
}
- uint32_t adrp = 0x90000000 | // ADRP x0, +SignExtend(immhi:immlo:Zeros(12), 64)
+ uint32_t adrp = 0x90000000u | // ADRP x0, +SignExtend(immhi:immlo:Zeros(12), 64)
((disp & 0x3000u) << (29 - 12)) | // immlo = ((disp & 0x3000u) >> 12) is at bit 29,
((disp & 0xffffc000) >> (14 - 5)) | // immhi = (disp >> 14) is at bit 5,
// We take the sign bit from the disp, limiting disp to +- 2GiB.
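A worked example of the ADRP packing in this test helper, for disp = 0x11000: immlo = (disp >> 12) & 3 = 1 lands in bits 30:29, immhi = disp >> 14 = 4 lands in bits 23:5, and decoding recovers the page offset disp >> 12. Self-contained check:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t disp = 0x11000u;
      uint32_t adrp = 0x90000000u |
                      ((disp & 0x3000u) << (29 - 12)) |    // immlo at bits 30:29
                      ((disp & 0xffffc000u) >> (14 - 5));  // immhi at bits 23:5
      uint32_t immlo = (adrp >> 29) & 3u;
      uint32_t immhi = (adrp >> 5) & 0x7ffffu;
      assert(((immhi << 2) | immlo) == (disp >> 12));  // page offset round-trips
      return 0;
    }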
@@ -471,6 +478,14 @@ class Arm64RelativePatcherTest : public RelativePatcherTest {
return patcher->CompileThunk(key);
}
+ std::vector<uint8_t> CompileBakerArrayThunk(uint32_t base_reg) {
+ LinkerPatch patch = LinkerPatch::BakerReadBarrierBranchPatch(
+ 0u, Arm64RelativePatcher::EncodeBakerReadBarrierArrayData(base_reg));
+ auto* patcher = down_cast<Arm64RelativePatcher*>(patcher_.get());
+ ArmBaseRelativePatcher::ThunkKey key = patcher->GetBakerReadBarrierKey(patch);
+ return patcher->CompileThunk(key);
+ }
+
std::vector<uint8_t> CompileBakerGcRootThunk(uint32_t root_reg) {
LinkerPatch patch = LinkerPatch::BakerReadBarrierBranchPatch(
0u, Arm64RelativePatcher::EncodeBakerReadBarrierGcRootData(root_reg));
@@ -488,7 +503,7 @@ class Arm64RelativePatcherTest : public RelativePatcherTest {
(static_cast<uint32_t>(output_[offset + 3]) << 24);
}
- void TestBakerField(uint32_t offset, uint32_t root_reg);
+ void TestBakerField(uint32_t offset, uint32_t ref_reg);
};
const uint8_t Arm64RelativePatcherTest::kCallRawCode[] = {
@@ -885,7 +900,7 @@ TEST_FOR_OFFSETS(LDRW_SPREL_ADD_TEST, 0, 4)
TEST_FOR_OFFSETS(LDRX_SPREL_ADD_TEST, 0, 8)
-void Arm64RelativePatcherTest::TestBakerField(uint32_t offset, uint32_t root_reg) {
+void Arm64RelativePatcherTest::TestBakerField(uint32_t offset, uint32_t ref_reg) {
uint32_t valid_regs[] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 18, 19, // IP0 and IP1 are reserved.
@@ -899,7 +914,7 @@ void Arm64RelativePatcherTest::TestBakerField(uint32_t offset, uint32_t root_reg
uint32_t method_idx = 0u;
for (uint32_t base_reg : valid_regs) {
for (uint32_t holder_reg : valid_regs) {
- uint32_t ldr = kLdrWInsn | (offset << (10 - 2)) | (base_reg << 5) | root_reg;
+ uint32_t ldr = kLdrWInsn | (offset << (10 - 2)) | (base_reg << 5) | ref_reg;
const std::vector<uint8_t> raw_code = RawCode({kCbnzIP1Plus0Insn, ldr});
ASSERT_EQ(kMethodCodeSize, raw_code.size());
ArrayRef<const uint8_t> code(raw_code);
@@ -922,7 +937,7 @@ void Arm64RelativePatcherTest::TestBakerField(uint32_t offset, uint32_t root_reg
++method_idx;
uint32_t cbnz_offset = thunk_offset - (GetMethodOffset(method_idx) + kLiteralOffset);
uint32_t cbnz = kCbnzIP1Plus0Insn | (cbnz_offset << (5 - 2));
- uint32_t ldr = kLdrWInsn | (offset << (10 - 2)) | (base_reg << 5) | root_reg;
+ uint32_t ldr = kLdrWInsn | (offset << (10 - 2)) | (base_reg << 5) | ref_reg;
const std::vector<uint8_t> expected_code = RawCode({cbnz, ldr});
ASSERT_EQ(kMethodCodeSize, expected_code.size());
ASSERT_TRUE(
@@ -942,7 +957,7 @@ void Arm64RelativePatcherTest::TestBakerField(uint32_t offset, uint32_t root_reg
if (holder_reg == base_reg) {
// Verify that the null-check CBZ uses the correct register, i.e. holder_reg.
ASSERT_GE(output_.size() - gray_check_offset, 4u);
- ASSERT_EQ(0x34000000 | holder_reg, GetOutputInsn(thunk_offset) & 0xff00001f);
+ ASSERT_EQ(0x34000000u | holder_reg, GetOutputInsn(thunk_offset) & 0xff00001fu);
gray_check_offset += 4u;
}
// Verify that the lock word for gray bit check is loaded from the holder address.
@@ -955,12 +970,12 @@ void Arm64RelativePatcherTest::TestBakerField(uint32_t offset, uint32_t root_reg
/* ip0 */ 16;
EXPECT_EQ(load_lock_word, GetOutputInsn(gray_check_offset));
// Verify the gray bit check.
- const uint32_t check_gray_bit_witout_offset =
- 0x37000000 | (LockWord::kReadBarrierStateShift << 19) | /* ip0 */ 16;
- EXPECT_EQ(check_gray_bit_witout_offset, GetOutputInsn(gray_check_offset + 4u) & 0xfff8001f);
+ const uint32_t check_gray_bit_without_offset =
+ 0x37000000u | (LockWord::kReadBarrierStateShift << 19) | /* ip0 */ 16;
+ EXPECT_EQ(check_gray_bit_without_offset, GetOutputInsn(gray_check_offset + 4u) & 0xfff8001fu);
// Verify the fake dependency.
const uint32_t fake_dependency =
- 0x8b408000 | // ADD Xd, Xn, Xm, LSR 32
+ 0x8b408000u | // ADD Xd, Xn, Xm, LSR 32
(/* ip0 */ 16 << 16) | // Xm = ip0
(base_reg << 5) | // Xn = base_reg
base_reg; // Xd = base_reg
@@ -973,19 +988,19 @@ void Arm64RelativePatcherTest::TestBakerField(uint32_t offset, uint32_t root_reg
}
}
-#define TEST_BAKER_FIELD(offset, root_reg) \
+#define TEST_BAKER_FIELD(offset, ref_reg) \
TEST_F(Arm64RelativePatcherTestDefault, \
- BakerOffset##offset##_##root_reg) { \
- TestBakerField(offset, root_reg); \
+ BakerOffset##offset##_##ref_reg) { \
+ TestBakerField(offset, ref_reg); \
}
-TEST_BAKER_FIELD(/* offset */ 0, /* root_reg */ 0)
-TEST_BAKER_FIELD(/* offset */ 8, /* root_reg */ 15)
-TEST_BAKER_FIELD(/* offset */ 0x3ffc, /* root_reg */ 29)
+TEST_BAKER_FIELD(/* offset */ 0, /* ref_reg */ 0)
+TEST_BAKER_FIELD(/* offset */ 8, /* ref_reg */ 15)
+TEST_BAKER_FIELD(/* offset */ 0x3ffc, /* ref_reg */ 29)
TEST_F(Arm64RelativePatcherTestDefault, BakerOffsetThunkInTheMiddle) {
// One thunk in the middle with maximum distance branches to it from both sides.
- // Use offset = 0, base_reg = 0, root_reg = 0, the LDR is simply `kLdrWInsn`.
+ // Use offset = 0, base_reg = 0, ref_reg = 0, the LDR is simply `kLdrWInsn`.
constexpr uint32_t kLiteralOffset1 = 4;
const std::vector<uint8_t> raw_code1 = RawCode({kNopInsn, kCbnzIP1Plus0Insn, kLdrWInsn});
ArrayRef<const uint8_t> code1(raw_code1);
@@ -1046,7 +1061,7 @@ TEST_F(Arm64RelativePatcherTestDefault, BakerOffsetThunkInTheMiddle) {
TEST_F(Arm64RelativePatcherTestDefault, BakerOffsetThunkBeforeFiller) {
// Based on the first part of BakerOffsetThunkInTheMiddle but the CBNZ is one instruction
// earlier, so the thunk is emitted before the filler.
- // Use offset = 0, base_reg = 0, root_reg = 0, the LDR is simply `kLdrWInsn`.
+ // Use offset = 0, base_reg = 0, ref_reg = 0, the LDR is simply `kLdrWInsn`.
constexpr uint32_t kLiteralOffset1 = 0;
const std::vector<uint8_t> raw_code1 = RawCode({kCbnzIP1Plus0Insn, kLdrWInsn, kNopInsn});
ArrayRef<const uint8_t> code1(raw_code1);
@@ -1076,7 +1091,7 @@ TEST_F(Arm64RelativePatcherTestDefault, BakerOffsetThunkBeforeFiller) {
TEST_F(Arm64RelativePatcherTestDefault, BakerOffsetThunkInTheMiddleUnreachableFromLast) {
// Based on the BakerOffsetThunkInTheMiddle but the CBNZ in the last method is preceded
// by NOP and cannot reach the thunk in the middle, so we emit an extra thunk at the end.
- // Use offset = 0, base_reg = 0, root_reg = 0, the LDR is simply `kLdrWInsn`.
+ // Use offset = 0, base_reg = 0, ref_reg = 0, the LDR is simply `kLdrWInsn`.
constexpr uint32_t kLiteralOffset1 = 4;
const std::vector<uint8_t> raw_code1 = RawCode({kNopInsn, kCbnzIP1Plus0Insn, kLdrWInsn});
ArrayRef<const uint8_t> code1(raw_code1);
@@ -1132,7 +1147,88 @@ TEST_F(Arm64RelativePatcherTestDefault, BakerOffsetThunkInTheMiddleUnreachableFr
ASSERT_TRUE(CheckLinkedMethod(MethodRef(5), ArrayRef<const uint8_t>(expected_code2)));
}
-TEST_F(Arm64RelativePatcherTestDefault, BakerRootGcRoot) {
+TEST_F(Arm64RelativePatcherTestDefault, BakerArray) {
+ uint32_t valid_regs[] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 18, 19, // IP0 and IP1 are reserved.
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ // LR and SP/ZR are reserved.
+ };
+ auto ldr = [](uint32_t base_reg) {
+ uint32_t index_reg = (base_reg == 0u) ? 1u : 0u;
+ uint32_t ref_reg = (base_reg == 2u) ? 3u : 2u;
+ return kLdrWLsl2Insn | (index_reg << 16) | (base_reg << 5) | ref_reg;
+ };
+ constexpr size_t kMethodCodeSize = 8u;
+ constexpr size_t kLiteralOffset = 0u;
+ uint32_t method_idx = 0u;
+ for (uint32_t base_reg : valid_regs) {
+ ++method_idx;
+ const std::vector<uint8_t> raw_code = RawCode({kCbnzIP1Plus0Insn, ldr(base_reg)});
+ ASSERT_EQ(kMethodCodeSize, raw_code.size());
+ ArrayRef<const uint8_t> code(raw_code);
+ const LinkerPatch patches[] = {
+ LinkerPatch::BakerReadBarrierBranchPatch(
+ kLiteralOffset, Arm64RelativePatcher::EncodeBakerReadBarrierArrayData(base_reg)),
+ };
+ AddCompiledMethod(MethodRef(method_idx), code, ArrayRef<const LinkerPatch>(patches));
+ }
+ Link();
+
+ // All thunks are at the end.
+ uint32_t thunk_offset = GetMethodOffset(method_idx) + RoundUp(kMethodCodeSize, kArm64Alignment);
+ method_idx = 0u;
+ for (uint32_t base_reg : valid_regs) {
+ ++method_idx;
+ uint32_t cbnz_offset = thunk_offset - (GetMethodOffset(method_idx) + kLiteralOffset);
+ uint32_t cbnz = kCbnzIP1Plus0Insn | (cbnz_offset << (5 - 2));
+ const std::vector<uint8_t> expected_code = RawCode({cbnz, ldr(base_reg)});
+ ASSERT_EQ(kMethodCodeSize, expected_code.size());
+ EXPECT_TRUE(CheckLinkedMethod(MethodRef(method_idx), ArrayRef<const uint8_t>(expected_code)));
+
+ std::vector<uint8_t> expected_thunk = CompileBakerArrayThunk(base_reg);
+ ASSERT_GT(output_.size(), thunk_offset);
+ ASSERT_GE(output_.size() - thunk_offset, expected_thunk.size());
+ ArrayRef<const uint8_t> compiled_thunk(output_.data() + thunk_offset,
+ expected_thunk.size());
+ if (ArrayRef<const uint8_t>(expected_thunk) != compiled_thunk) {
+ DumpDiff(ArrayRef<const uint8_t>(expected_thunk), compiled_thunk);
+ ASSERT_TRUE(false);
+ }
+
+ // Verify that the lock word for gray bit check is loaded from the correct address
+ // before the base_reg which points to the array data.
+ static constexpr size_t kGrayCheckInsns = 5;
+ ASSERT_GE(output_.size() - thunk_offset, 4u * kGrayCheckInsns);
+ int32_t data_offset =
+ mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimNot)).Int32Value();
+ int32_t offset = mirror::Object::MonitorOffset().Int32Value() - data_offset;
+ ASSERT_LT(offset, 0);
+ const uint32_t load_lock_word =
+ kLdurWInsn |
+ ((offset & 0x1ffu) << 12) |
+ (base_reg << 5) |
+ /* ip0 */ 16;
+ EXPECT_EQ(load_lock_word, GetOutputInsn(thunk_offset));
+ // Verify the gray bit check.
+ const uint32_t check_gray_bit_without_offset =
+ 0x37000000u | (LockWord::kReadBarrierStateShift << 19) | /* ip0 */ 16;
+ EXPECT_EQ(check_gray_bit_without_offset, GetOutputInsn(thunk_offset + 4u) & 0xfff8001fu);
+ // Verify the fake dependency.
+ const uint32_t fake_dependency =
+ 0x8b408000u | // ADD Xd, Xn, Xm, LSR 32
+ (/* ip0 */ 16 << 16) | // Xm = ip0
+ (base_reg << 5) | // Xn = base_reg
+ base_reg; // Xd = base_reg
+ EXPECT_EQ(fake_dependency, GetOutputInsn(thunk_offset + 12u));
+ // Do not check the rest of the implementation.
+
+ // The next thunk follows on the next aligned offset.
+ thunk_offset += RoundUp(expected_thunk.size(), kArm64Alignment);
+ }
+}
+
+TEST_F(Arm64RelativePatcherTestDefault, BakerGcRoot) {
uint32_t valid_regs[] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 18, 19, // IP0 and IP1 are reserved.
@@ -1180,7 +1276,7 @@ TEST_F(Arm64RelativePatcherTestDefault, BakerRootGcRoot) {
// Verify that the fast-path null-check CBZ uses the correct register, i.e. root_reg.
ASSERT_GE(output_.size() - thunk_offset, 4u);
- ASSERT_EQ(0x34000000 | root_reg, GetOutputInsn(thunk_offset) & 0xff00001f);
+ ASSERT_EQ(0x34000000u | root_reg, GetOutputInsn(thunk_offset) & 0xff00001fu);
// Do not check the rest of the implementation.
// The next thunk follows on the next aligned offset.
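The LDUR check in the new BakerArray test also rewards a worked example. Assuming the usual ART object layout (monitor word at offset 4 in the header, 32-bit-reference array data at offset 12), the lock-word offset relative to the data pointer is 4 - 12 = -8, which must fit the 9-bit signed immediate in bits 20:12:

    #include <cassert>
    #include <cstdint>

    int main() {
      constexpr uint32_t kLdurWInsn = 0xb8400000u;
      int32_t offset = 4 - 12;  // monitor offset minus array data offset (assumed)
      uint32_t base_reg = 5u;
      // LDUR w16, [x5, #-8]: imm9 in bits 20:12, Rn in bits 9:5, Rt in bits 4:0.
      uint32_t insn = kLdurWInsn | ((offset & 0x1ffu) << 12) | (base_reg << 5) | 16u;
      uint32_t imm9 = (insn >> 12) & 0x1ffu;
      int32_t decoded = static_cast<int32_t>(imm9) - ((imm9 & 0x100u) ? 0x200 : 0);
      assert(decoded == -8);  // the negative offset round-trips
      return 0;
    }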
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 4629c54a17..eee832a732 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -91,6 +91,7 @@ constexpr uint32_t kReferenceLoadMinFarOffset = 16 * KB;
// Flags controlling the use of link-time generated thunks for Baker read barriers.
constexpr bool kBakerReadBarrierLinkTimeThunksEnableForFields = true;
+constexpr bool kBakerReadBarrierLinkTimeThunksEnableForArrays = true;
constexpr bool kBakerReadBarrierLinkTimeThunksEnableForGcRoots = true;
// Some instructions have special requirements for a temporary, for example
@@ -2759,6 +2760,7 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
// Object ArrayGet with Baker's read barrier case.
// Note that a potential implicit null check is handled in the
// CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier call.
+ DCHECK(!instruction->CanDoImplicitNullCheckOn(instruction->InputAt(0)));
if (index.IsConstant()) {
// Array load with a constant index can be treated as a field load.
offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(type);
@@ -2769,12 +2771,12 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
obj.W(),
offset,
maybe_temp,
- /* needs_null_check */ true,
+ /* needs_null_check */ false,
/* use_load_acquire */ false);
} else {
Register temp = WRegisterFrom(locations->GetTemp(0));
codegen_->GenerateArrayLoadWithBakerReadBarrier(
- instruction, out, obj.W(), offset, index, temp, /* needs_null_check */ true);
+ instruction, out, obj.W(), offset, index, temp, /* needs_null_check */ false);
}
} else {
// General case.
@@ -5928,9 +5930,9 @@ void InstructionCodeGeneratorARM64::GenerateGcRootFieldLoad(
!Runtime::Current()->UseJitCompilation()) {
// Note that we do not actually check the value of `GetIsGcMarking()`
// to decide whether to mark the loaded GC root or not. Instead, we
- // load into `temp` the read barrier mark introspection entrypoint.
- // If `temp` is null, it means that `GetIsGcMarking()` is false, and
- // vice versa.
+ // load into `temp` (actually IP1) the read barrier mark introspection
+ // entrypoint. If `temp` is null, it means that `GetIsGcMarking()` is
+ // false, and vice versa.
//
// We use link-time generated thunks for the slow path. That thunk
// checks the reference and jumps to the entrypoint if needed.
@@ -6054,24 +6056,24 @@ void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* ins
!use_load_acquire &&
!Runtime::Current()->UseJitCompilation()) {
// Note that we do not actually check the value of `GetIsGcMarking()`
- // to decide whether to mark the loaded GC root or not. Instead, we
- // load into `temp` the read barrier mark introspection entrypoint.
- // If `temp` is null, it means that `GetIsGcMarking()` is false, and
- // vice versa.
+ // to decide whether to mark the loaded reference or not. Instead, we
+ // load into `temp` (actually IP1) the read barrier mark introspection
+ // entrypoint. If `temp` is null, it means that `GetIsGcMarking()` is
+ // false, and vice versa.
//
// We use link-time generated thunks for the slow path. That thunk checks
// the holder and jumps to the entrypoint if needed. If the holder is not
// gray, it creates a fake dependency and returns to the LDR instruction.
//
// temp = Thread::Current()->pReadBarrierMarkIntrospection
- // lr = &return_address;
+ // lr = &gray_return_address;
// if (temp != nullptr) {
// goto field_thunk<holder_reg, base_reg>(lr)
// }
// not_gray_return_address:
// // Original reference load. If the offset is too large to fit
// // into LDR, we use an adjusted base register here.
- // GcRoot<mirror::Object> root = *(obj+offset);
+ // GcRoot<mirror::Object> reference = *(obj+offset);
// gray_return_address:
DCHECK_ALIGNED(offset, sizeof(mirror::HeapReference<mirror::Object>));
@@ -6141,16 +6143,74 @@ void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(HInstruction* ins
DCHECK(kEmitCompilerReadBarrier);
DCHECK(kUseBakerReadBarrier);
+ static_assert(
+ sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
+ "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
+ size_t scale_factor = Primitive::ComponentSizeShift(Primitive::kPrimNot);
+
+ if (kBakerReadBarrierLinkTimeThunksEnableForArrays &&
+ !Runtime::Current()->UseJitCompilation()) {
+ // Note that we do not actually check the value of `GetIsGcMarking()`
+ // to decide whether to mark the loaded reference or not. Instead, we
+ // load into `temp` (actually IP1) the read barrier mark introspection
+ // entrypoint. If `temp` is null, it means that `GetIsGcMarking()` is
+ // false, and vice versa.
+ //
+ // We use link-time generated thunks for the slow path. That thunk checks
+ // the holder and jumps to the entrypoint if needed. If the holder is not
+ // gray, it creates a fake dependency and returns to the LDR instruction.
+ //
+ // temp = Thread::Current()->pReadBarrierMarkIntrospection
+ // lr = &gray_return_address;
+ // if (temp != nullptr) {
+ // goto array_thunk<base_reg>(lr)
+ // }
+ // not_gray_return_address:
+ // // Original reference load. The base register (`temp`) already
+ // // points to the array data, adjusted by `data_offset`.
+ // GcRoot<mirror::Object> reference = data[index];
+ // gray_return_address:
+
+ DCHECK(index.IsValid());
+ Register index_reg = RegisterFrom(index, Primitive::kPrimInt);
+ Register ref_reg = RegisterFrom(ref, Primitive::kPrimNot);
+
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ DCHECK(temps.IsAvailable(ip0));
+ DCHECK(temps.IsAvailable(ip1));
+ temps.Exclude(ip0, ip1);
+ uint32_t custom_data =
+ linker::Arm64RelativePatcher::EncodeBakerReadBarrierArrayData(temp.GetCode());
+ vixl::aarch64::Label* cbnz_label = NewBakerReadBarrierPatch(custom_data);
+
+ // ip1 = Thread::Current()->pReadBarrierMarkReg16, i.e. pReadBarrierMarkIntrospection.
+ DCHECK_EQ(ip0.GetCode(), 16u);
+ const int32_t entry_point_offset =
+ CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArm64PointerSize>(ip0.GetCode());
+ __ Ldr(ip1, MemOperand(tr, entry_point_offset));
+ __ Add(temp.X(), obj.X(), Operand(data_offset));
+ EmissionCheckScope guard(GetVIXLAssembler(),
+ (kPoisonHeapReferences ? 4u : 3u) * vixl::aarch64::kInstructionSize);
+ vixl::aarch64::Label return_address;
+ __ adr(lr, &return_address);
+ __ Bind(cbnz_label);
+ __ cbnz(ip1, static_cast<int64_t>(0)); // Placeholder, patched at link-time.
+ static_assert(BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET == (kPoisonHeapReferences ? -8 : -4),
+ "Array LDR must be 1 instruction (4B) before the return address label; "
+ "2 instructions (8B) for heap poisoning.");
+ __ ldr(ref_reg, MemOperand(temp.X(), index_reg.X(), LSL, scale_factor));
+ DCHECK(!needs_null_check); // The thunk cannot handle the null check.
+ GetAssembler()->MaybeUnpoisonHeapReference(ref_reg);
+ __ Bind(&return_address);
+ return;
+ }
+
// Array cells are never volatile variables, therefore array loads
// never use Load-Acquire instructions on ARM64.
const bool use_load_acquire = false;
- static_assert(
- sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
- "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
// /* HeapReference<Object> */ ref =
// *(obj + data_offset + index * sizeof(HeapReference<Object>))
- size_t scale_factor = Primitive::ComponentSizeShift(Primitive::kPrimNot);
GenerateReferenceLoadWithBakerReadBarrier(instruction,
ref,
obj,
diff --git a/compiler/optimizing/instruction_simplifier_shared.cc b/compiler/optimizing/instruction_simplifier_shared.cc
index 7d1f146587..c39e5f4d3b 100644
--- a/compiler/optimizing/instruction_simplifier_shared.cc
+++ b/compiler/optimizing/instruction_simplifier_shared.cc
@@ -247,6 +247,7 @@ bool TryExtractArrayAccessAddress(HInstruction* access,
access->GetType() == Primitive::kPrimNot) {
// For object arrays, the read barrier instrumentation requires
// the original array pointer.
+ // TODO: This can be relaxed for Baker CC.
return false;
}
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 8368026e92..36c7df70ce 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -5377,10 +5377,16 @@ class HArrayGet FINAL : public HExpression<2> {
}
bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE {
// TODO: We can be smarter here.
- // Currently, the array access is always preceded by an ArrayLength or a NullCheck
- // which generates the implicit null check. There are cases when these can be removed
- // to produce better code. If we ever add optimizations to do so we should allow an
- // implicit check here (as long as the address falls in the first page).
+ // Currently, unless the array is the result of NewArray, the array access is always
+ // preceded by some form of null check necessary for the bounds check, usually
+ // an implicit null check on the ArrayLength input to BoundsCheck or Deoptimize for
+ // dynamic BCE. There are cases when these could be removed to produce better code.
+ // If we ever add optimizations to do so we should allow an implicit check here
+ // (as long as the address falls in the first page).
+ //
+ // As an example of such fancy optimization, we could eliminate BoundsCheck for
+ // a = cond ? new int[1] : null;
+ // a[0]; // The Phi does not need bounds check for either input.
return false;
}
diff --git a/dexlayout/dex_ir.cc b/dexlayout/dex_ir.cc
index 6bd9da8194..3f715cf37f 100644
--- a/dexlayout/dex_ir.cc
+++ b/dexlayout/dex_ir.cc
@@ -708,10 +708,12 @@ MethodItem* Collections::GenerateMethodItem(const DexFile& dex_file, ClassDataIt
MethodId* method_item = GetMethodId(cdii.GetMemberIndex());
uint32_t access_flags = cdii.GetRawMemberAccessFlags();
const DexFile::CodeItem* disk_code_item = cdii.GetMethodCodeItem();
- CodeItem* code_item = nullptr;
+ CodeItem* code_item = code_items_.GetExistingObject(cdii.GetMethodCodeItemOffset());
DebugInfoItem* debug_info = nullptr;
if (disk_code_item != nullptr) {
- code_item = CreateCodeItem(dex_file, *disk_code_item, cdii.GetMethodCodeItemOffset());
+ if (code_item == nullptr) {
+ code_item = CreateCodeItem(dex_file, *disk_code_item, cdii.GetMethodCodeItemOffset());
+ }
debug_info = code_item->DebugInfo();
}
if (debug_info != nullptr) {
diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc
index 204af22db7..9f7861fd47 100644
--- a/dexlayout/dexlayout.cc
+++ b/dexlayout/dexlayout.cc
@@ -1638,7 +1638,8 @@ void DexLayout::LayoutStringData(const DexFile* dex_file) {
// Orders code items according to specified class data ordering.
// NOTE: If the section following the code items is byte aligned, the last code item is left in
// place to preserve alignment. Layout needs an overhaul to handle movement of other sections.
-int32_t DexLayout::LayoutCodeItems(std::vector<dex_ir::ClassData*> new_class_data_order) {
+int32_t DexLayout::LayoutCodeItems(const DexFile* dex_file,
+ std::vector<dex_ir::ClassData*> new_class_data_order) {
// Do not move code items if class data section precedes code item section.
// ULEB encoding is variable length, causing problems determining the offset of the code items.
// TODO: We should swap the order of these sections in the future to avoid this issue.
@@ -1649,44 +1650,93 @@ int32_t DexLayout::LayoutCodeItems(std::vector<dex_ir::ClassData*> new_class_dat
}
// Find the last code item so we can leave it in place if the next section is not 4 byte aligned.
+ dex_ir::CodeItem* last_code_item = nullptr;
std::unordered_set<dex_ir::CodeItem*> visited_code_items;
bool is_code_item_aligned = IsNextSectionCodeItemAligned(code_item_offset);
if (!is_code_item_aligned) {
- dex_ir::CodeItem* last_code_item = nullptr;
for (auto& code_item_pair : header_->GetCollections().CodeItems()) {
std::unique_ptr<dex_ir::CodeItem>& code_item = code_item_pair.second;
- if (last_code_item == nullptr || last_code_item->GetOffset() < code_item->GetOffset()) {
+ if (last_code_item == nullptr ||
+ last_code_item->GetOffset() < code_item->GetOffset()) {
last_code_item = code_item.get();
}
}
- // Preserve the last code item by marking it already visited.
- visited_code_items.insert(last_code_item);
- }
-
- int32_t diff = 0;
- for (dex_ir::ClassData* class_data : new_class_data_order) {
- class_data->SetOffset(class_data->GetOffset() + diff);
- for (auto& method : *class_data->DirectMethods()) {
- dex_ir::CodeItem* code_item = method->GetCodeItem();
- if (code_item != nullptr && visited_code_items.find(code_item) == visited_code_items.end()) {
- visited_code_items.insert(code_item);
- diff += UnsignedLeb128Size(code_item_offset) - UnsignedLeb128Size(code_item->GetOffset());
- code_item->SetOffset(code_item_offset);
- code_item_offset += RoundUp(code_item->GetSize(), kDexCodeItemAlignment);
+ }
+
+ enum CodeItemKind {
+ kMethodNotExecuted = 0,
+ kMethodExecuted = 1,
+ kSize = 2,
+ };
+
+ static constexpr InvokeType invoke_types[] = {
+ kDirect,
+ kVirtual
+ };
+
+ std::unordered_set<dex_ir::CodeItem*> code_items[CodeItemKind::kSize];
+ for (InvokeType invoke_type : invoke_types) {
+ for (std::unique_ptr<dex_ir::ClassDef>& class_def : header_->GetCollections().ClassDefs()) {
+ const bool is_profile_class =
+ info_->ContainsClass(*dex_file, dex::TypeIndex(class_def->ClassType()->GetIndex()));
+
+ // Skip classes that are not defined in this dex file.
+ dex_ir::ClassData* class_data = class_def->GetClassData();
+ if (class_data == nullptr) {
+ continue;
+ }
+ for (auto& method : *(invoke_type == InvokeType::kDirect
+ ? class_data->DirectMethods()
+ : class_data->VirtualMethods())) {
+ const dex_ir::MethodId* method_id = method->GetMethodId();
+ dex_ir::CodeItem* code_item = method->GetCodeItem();
+ if (code_item == last_code_item || code_item == nullptr) {
+ continue;
+ }
+ // Separate executed methods (clinits and profiled methods) from unexecuted methods.
+ // TODO: clinits are executed only once, consider separating them further.
+ const bool is_clinit = is_profile_class &&
+ (method->GetAccessFlags() & kAccConstructor) != 0 &&
+ (method->GetAccessFlags() & kAccStatic) != 0;
+ const bool is_method_executed = is_clinit ||
+ info_->ContainsMethod(MethodReference(dex_file, method_id->GetIndex()));
+ code_items[is_method_executed
+ ? CodeItemKind::kMethodExecuted
+ : CodeItemKind::kMethodNotExecuted]
+ .insert(code_item);
}
}
- for (auto& method : *class_data->VirtualMethods()) {
- dex_ir::CodeItem* code_item = method->GetCodeItem();
- if (code_item != nullptr && visited_code_items.find(code_item) == visited_code_items.end()) {
- visited_code_items.insert(code_item);
- diff += UnsignedLeb128Size(code_item_offset) - UnsignedLeb128Size(code_item->GetOffset());
- code_item->SetOffset(code_item_offset);
- code_item_offset += RoundUp(code_item->GetSize(), kDexCodeItemAlignment);
+ }
+
+ // total_diff includes diffs generated by both executed and non-executed methods.
+ int32_t total_diff = 0;
+ // The relative placement has no effect on correctness; it is only used to keep
+ // the layout deterministic.
+ for (std::unordered_set<dex_ir::CodeItem*>& code_items_set : code_items) {
+ // diff is reset for executed and non-executed methods.
+ int32_t diff = 0;
+ for (dex_ir::ClassData* data : new_class_data_order) {
+ data->SetOffset(data->GetOffset() + diff);
+ for (InvokeType invoke_type : invoke_types) {
+ for (auto& method : *(invoke_type == InvokeType::kDirect
+ ? data->DirectMethods()
+ : data->VirtualMethods())) {
+ dex_ir::CodeItem* code_item = method->GetCodeItem();
+ if (code_item != nullptr &&
+ code_items_set.find(code_item) != code_items_set.end()) {
+ diff += UnsignedLeb128Size(code_item_offset)
+ - UnsignedLeb128Size(code_item->GetOffset());
+ code_item->SetOffset(code_item_offset);
+ code_item_offset +=
+ RoundUp(code_item->GetSize(), kDexCodeItemAlignment);
+ }
+ }
}
}
+ total_diff += diff;
}
// Adjust diff to be 4-byte aligned.
- return RoundUp(diff, kDexCodeItemAlignment);
+ return RoundUp(total_diff, kDexCodeItemAlignment);
}
bool DexLayout::IsNextSectionCodeItemAligned(uint32_t offset) {
@@ -1795,7 +1845,7 @@ void DexLayout::FixupSections(uint32_t offset, uint32_t diff) {
void DexLayout::LayoutOutputFile(const DexFile* dex_file) {
LayoutStringData(dex_file);
std::vector<dex_ir::ClassData*> new_class_data_order = LayoutClassDefsAndClassData(dex_file);
- int32_t diff = LayoutCodeItems(new_class_data_order);
+ int32_t diff = LayoutCodeItems(dex_file, new_class_data_order);
// Move sections after ClassData by diff bytes.
FixupSections(header_->GetCollections().ClassDatasOffset(), diff);
// Update file size.
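The essence of the new LayoutCodeItems is a two-bucket pass: code items are first classified as executed (clinits of profile classes plus profiled methods) or not, and each bucket is then laid out by walking the deterministic class-data order and filtering on set membership, so the unordered_set's iteration order never leaks into the output. A simplified sketch with hypothetical types:

    #include <cstdint>
    #include <unordered_set>
    #include <vector>

    struct CodeItem { uint32_t offset = 0; uint32_t size = 0; };

    // Assigns offsets to the items of one bucket, visiting them in the
    // deterministic `order` rather than in hash-set order.
    uint32_t LayoutBucket(const std::vector<CodeItem*>& order,
                          const std::unordered_set<CodeItem*>& bucket,
                          uint32_t next_offset) {
      for (CodeItem* item : order) {
        if (bucket.count(item) != 0) {
          item->offset = next_offset;
          next_offset += (item->size + 3u) & ~3u;  // kDexCodeItemAlignment = 4
        }
      }
      return next_offset;
    }

    // Usage, matching the patch's bucket order (non-executed first):
    //   next = LayoutBucket(order, not_executed, start);
    //   next = LayoutBucket(order, executed, next);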
diff --git a/dexlayout/dexlayout.h b/dexlayout/dexlayout.h
index 69117ad763..531bc98a0c 100644
--- a/dexlayout/dexlayout.h
+++ b/dexlayout/dexlayout.h
@@ -108,7 +108,8 @@ class DexLayout {
void DumpDexFile();
std::vector<dex_ir::ClassData*> LayoutClassDefsAndClassData(const DexFile* dex_file);
- int32_t LayoutCodeItems(std::vector<dex_ir::ClassData*> new_class_data_order);
+ int32_t LayoutCodeItems(const DexFile* dex_file,
+ std::vector<dex_ir::ClassData*> new_class_data_order);
void LayoutStringData(const DexFile* dex_file);
bool IsNextSectionCodeItemAligned(uint32_t offset);
template<class T> void FixupSection(std::map<uint32_t, std::unique_ptr<T>>& map, uint32_t diff);
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
index e988aac86f..877ea923fc 100644
--- a/dexlayout/dexlayout_test.cc
+++ b/dexlayout/dexlayout_test.cc
@@ -205,6 +205,19 @@ static const char kUnknownTypeDebugInfoInputDex[] =
"AAIAAAAEAAAAkAAAAAMAAAACAAAAoAAAAAUAAAADAAAAuAAAAAYAAAABAAAA0AAAAAEgAAACAAAA"
"8AAAAAIgAAAIAAAAHAEAAAMgAAACAAAAVAEAAAAgAAABAAAAYwEAAAAQAAABAAAAdAEAAA==";
+// Dex file with multiple class data items pointing to the same code item.
+// Constructed by hex editing.
+static const char kDuplicateCodeItemInputDex[] =
+ "ZGV4CjAzNQCwKtVglQOmLWuHwldN5jkBOInC7mTMhJMAAgAAcAAAAHhWNBIAAAAAAAAAAHgBAAAH"
+ "AAAAcAAAAAMAAACMAAAAAQAAAJgAAAAAAAAAAAAAAAQAAACkAAAAAQAAAMQAAAAcAQAA5AAAACQB"
+ "AAAsAQAANAEAADkBAABNAQAAUAEAAFMBAAACAAAAAwAAAAQAAAAEAAAAAgAAAAAAAAAAAAAAAAAA"
+ "AAAAAAAFAAAAAAAAAAYAAAABAAAAAAAAAAAAAAABAAAAAQAAAAAAAAABAAAAAAAAAGUBAAAAAAAA"
+ "AQABAAEAAABWAQAABAAAAHAQAwAAAA4AAQABAAAAAABbAQAAAQAAAA4AAAABAAEAAAAAAGABAAAB"
+ "AAAADgAAAAY8aW5pdD4ABkEuamF2YQADTEE7ABJMamF2YS9sYW5nL09iamVjdDsAAVYAAWEAAWIA"
+ "AQAHDgADAAcOAAUABw4AAAABAgCBgATkAQEA/AEBAPwBAAsAAAAAAAAAAQAAAAAAAAABAAAABwAA"
+ "AHAAAAACAAAAAwAAAIwAAAADAAAAAQAAAJgAAAAFAAAABAAAAKQAAAAGAAAAAQAAAMQAAAABIAAA"
+ "AwAAAOQAAAACIAAABwAAACQBAAADIAAAAwAAAFYBAAAAIAAAAQAAAGUBAAAAEAAAAQAAAHgBAAA=";
+
static void WriteBase64ToFile(const char* base64, File* file) {
// Decode base64.
CHECK(base64 != nullptr);
@@ -519,4 +532,17 @@ TEST_F(DexLayoutTest, UnknownTypeDebugInfo) {
dexlayout_exec_argv));
}
+TEST_F(DexLayoutTest, DuplicateCodeItem) {
+ ScratchFile temp_dex;
+ std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
+ EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
+ std::vector<std::string> dexlayout_exec_argv =
+ { dexlayout, "-o", "/dev/null", temp_dex.GetFilename() };
+ ASSERT_TRUE(DexLayoutExec(&temp_dex,
+ kDuplicateCodeItemInputDex,
+ nullptr /* profile_file */,
+ nullptr /* profile_filename */,
+ dexlayout_exec_argv));
+}
+
} // namespace art
diff --git a/runtime/arch/context-inl.h b/runtime/arch/context-inl.h
new file mode 100644
index 0000000000..ddcbbb18e5
--- /dev/null
+++ b/runtime/arch/context-inl.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This file is special-purpose for cases where you want a stack context. Most users should use
+// Context::Create().
+
+#include "context.h"
+
+#ifndef ART_RUNTIME_ARCH_CONTEXT_INL_H_
+#define ART_RUNTIME_ARCH_CONTEXT_INL_H_
+
+#if defined(__arm__)
+#include "arm/context_arm.h"
+#define RUNTIME_CONTEXT_TYPE arm::ArmContext
+#elif defined(__aarch64__)
+#include "arm64/context_arm64.h"
+#define RUNTIME_CONTEXT_TYPE arm64::Arm64Context
+#elif defined(__mips__) && !defined(__LP64__)
+#include "mips/context_mips.h"
+#define RUNTIME_CONTEXT_TYPE mips::MipsContext
+#elif defined(__mips__) && defined(__LP64__)
+#include "mips64/context_mips64.h"
+#define RUNTIME_CONTEXT_TYPE mips64::Mips64Context
+#elif defined(__i386__)
+#include "x86/context_x86.h"
+#define RUNTIME_CONTEXT_TYPE x86::X86Context
+#elif defined(__x86_64__)
+#include "x86_64/context_x86_64.h"
+#define RUNTIME_CONTEXT_TYPE x86_64::X86_64Context
+#else
+#error unimplemented
+#endif
+
+namespace art {
+
+using RuntimeContextType = RUNTIME_CONTEXT_TYPE;
+
+} // namespace art
+
+#undef RUNTIME_CONTEXT_TYPE
+
+#endif // ART_RUNTIME_ARCH_CONTEXT_INL_H_
diff --git a/runtime/arch/context.cc b/runtime/arch/context.cc
index bf40a3f8ce..82d8b6ca00 100644
--- a/runtime/arch/context.cc
+++ b/runtime/arch/context.cc
@@ -14,43 +14,12 @@
* limitations under the License.
*/
-#include "context.h"
-
-#if defined(__arm__)
-#include "arm/context_arm.h"
-#elif defined(__aarch64__)
-#include "arm64/context_arm64.h"
-#elif defined(__mips__) && !defined(__LP64__)
-#include "mips/context_mips.h"
-#elif defined(__mips__) && defined(__LP64__)
-#include "mips64/context_mips64.h"
-#elif defined(__i386__)
-#include "x86/context_x86.h"
-#elif defined(__x86_64__)
-#include "x86_64/context_x86_64.h"
-#else
-#include "base/logging.h"
-#endif
+#include "context-inl.h"
namespace art {
Context* Context::Create() {
-#if defined(__arm__)
- return new arm::ArmContext();
-#elif defined(__aarch64__)
- return new arm64::Arm64Context();
-#elif defined(__mips__) && !defined(__LP64__)
- return new mips::MipsContext();
-#elif defined(__mips__) && defined(__LP64__)
- return new mips64::Mips64Context();
-#elif defined(__i386__)
- return new x86::X86Context();
-#elif defined(__x86_64__)
- return new x86_64::X86_64Context();
-#else
- UNIMPLEMENTED(FATAL);
- return nullptr;
-#endif
+ return new RuntimeContextType;
}
} // namespace art
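The context-inl.h refactoring follows a common pattern: keep the virtual base with its heap factory, but also expose the concrete per-architecture type through an alias so callers on hot paths can put the context on the stack. A self-contained sketch of the shape (simplified to one architecture):

    class Context {
     public:
      virtual ~Context() {}
      static Context* Create();
    };

    class X86_64Context : public Context {};  // one subclass per architecture

    // In ART this alias is selected by the #if defined(...) chain above.
    using RuntimeContextType = X86_64Context;

    Context* Context::Create() { return new RuntimeContextType; }

    void StackWalkExample() {
      RuntimeContextType context;  // stack allocation; no Create()/delete pair
      (void)context;
    }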
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 63794bff6f..d0b50fe820 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -1092,6 +1092,23 @@ JDWP::JdwpError Dbg::GetSignature(JDWP::RefTypeId class_id, std::string* signatu
return JDWP::ERR_NONE;
}
+JDWP::JdwpError Dbg::GetSourceDebugExtension(JDWP::RefTypeId class_id,
+ std::string* extension_data) {
+ JDWP::JdwpError error;
+ mirror::Class* c = DecodeClass(class_id, &error);
+ if (c == nullptr) {
+ return error;
+ }
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::Class> klass(hs.NewHandle(c));
+ const char* data = annotations::GetSourceDebugExtension(klass);
+ if (data == nullptr) {
+ return JDWP::ERR_ABSENT_INFORMATION;
+ }
+ *extension_data = data;
+ return JDWP::ERR_NONE;
+}
+
JDWP::JdwpError Dbg::GetSourceFile(JDWP::RefTypeId class_id, std::string* result) {
JDWP::JdwpError error;
mirror::Class* c = DecodeClass(class_id, &error);
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 27124e19fb..4f3ff40e86 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -288,6 +288,9 @@ class Dbg {
REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetSignature(JDWP::RefTypeId ref_type_id, std::string* signature)
REQUIRES_SHARED(Locks::mutator_lock_);
+ static JDWP::JdwpError GetSourceDebugExtension(JDWP::RefTypeId ref_type_id,
+ std::string* extension_data)
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetSourceFile(JDWP::RefTypeId ref_type_id, std::string* source_file)
REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetObjectTag(JDWP::ObjectId object_id, uint8_t* tag)
diff --git a/runtime/dex_file_annotations.cc b/runtime/dex_file_annotations.cc
index 6b9654dc49..7d56bca6ce 100644
--- a/runtime/dex_file_annotations.cc
+++ b/runtime/dex_file_annotations.cc
@@ -1420,6 +1420,40 @@ mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForClass(Handle<mirro
return GetSignatureValue(data, annotation_set);
}
+const char* GetSourceDebugExtension(Handle<mirror::Class> klass) {
+ ClassData data(klass);
+ const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
+ if (annotation_set == nullptr) {
+ return nullptr;
+ }
+ const DexFile::AnnotationItem* annotation_item = SearchAnnotationSet(
+ data.GetDexFile(),
+ annotation_set,
+ "Ldalvik/annotation/SourceDebugExtension;",
+ DexFile::kDexVisibilitySystem);
+ if (annotation_item == nullptr) {
+ return nullptr;
+ }
+ const uint8_t* annotation =
+ SearchEncodedAnnotation(data.GetDexFile(), annotation_item->annotation_, "value");
+ if (annotation == nullptr) {
+ return nullptr;
+ }
+ DexFile::AnnotationValue annotation_value;
+ if (!ProcessAnnotationValue<false>(data,
+ &annotation,
+ &annotation_value,
+ ScopedNullHandle<mirror::Class>(),
+ DexFile::kAllRaw)) {
+ return nullptr;
+ }
+ if (annotation_value.type_ != DexFile::kDexAnnotationString) {
+ return nullptr;
+ }
+ dex::StringIndex index(static_cast<uint32_t>(annotation_value.value_.GetI()));
+ return data.GetDexFile().StringDataByIdx(index);
+}
+
bool IsClassAnnotationPresent(Handle<mirror::Class> klass, Handle<mirror::Class> annotation_class) {
ClassData data(klass);
const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
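A hedged sketch of how a JDWP handler can surface this value (the actual wiring lands in jdwp_handler.cc below; the handler name and exact reply code here are assumptions modeled on the existing RT_* handlers):

    // Hypothetical ReferenceType.SourceDebugExtension handler:
    static JdwpError RT_SourceDebugExtension(JdwpState*, Request* request,
                                             ExpandBuf* pReply) {
      RefTypeId class_id = request->ReadRefTypeId();
      std::string extension_data;
      JdwpError error = Dbg::GetSourceDebugExtension(class_id, &extension_data);
      if (error != ERR_NONE) {
        return error;  // typically ERR_ABSENT_INFORMATION
      }
      expandBufAddUtf8String(pReply, extension_data);
      return ERR_NONE;
    }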
diff --git a/runtime/dex_file_annotations.h b/runtime/dex_file_annotations.h
index c66c5bdb8b..651c9844eb 100644
--- a/runtime/dex_file_annotations.h
+++ b/runtime/dex_file_annotations.h
@@ -89,6 +89,8 @@ bool GetInnerClassFlags(Handle<mirror::Class> klass, uint32_t* flags)
REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForClass(Handle<mirror::Class> klass)
REQUIRES_SHARED(Locks::mutator_lock_);
+const char* GetSourceDebugExtension(Handle<mirror::Class> klass)
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool IsClassAnnotationPresent(Handle<mirror::Class> klass,
Handle<mirror::Class> annotation_class)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 4f390fd30a..8bdf6b1f50 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -34,7 +34,6 @@
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
-
#include <set>
#include "android-base/stringprintf.h"
@@ -502,9 +501,16 @@ class Hprof : public SingleRootVisitor {
void DumpHeapArray(mirror::Array* obj, mirror::Class* klass)
REQUIRES_SHARED(Locks::mutator_lock_);
- void DumpHeapInstanceObject(mirror::Object* obj, mirror::Class* klass)
+ void DumpFakeObjectArray(mirror::Object* obj, const std::set<mirror::Object*>& elements)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ void DumpHeapInstanceObject(mirror::Object* obj,
+ mirror::Class* klass,
+ const std::set<mirror::Object*>& fake_roots)
REQUIRES_SHARED(Locks::mutator_lock_);
+ bool AddRuntimeInternalObjectsField(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+
void ProcessHeap(bool header_first)
REQUIRES(Locks::mutator_lock_) {
// Reset current heap and object count.
@@ -1062,37 +1068,17 @@ void Hprof::MarkRootObject(const mirror::Object* obj, jobject jni_obj, HprofHeap
++objects_in_segment_;
}
-// Use for visiting the GcRoots held live by ArtFields, ArtMethods, and ClassLoaders.
-class GcRootVisitor {
- public:
- explicit GcRootVisitor(Hprof* hprof) : hprof_(hprof) {}
-
- void operator()(mirror::Object* obj ATTRIBUTE_UNUSED,
- MemberOffset offset ATTRIBUTE_UNUSED,
- bool is_static ATTRIBUTE_UNUSED) const {}
-
- // Note that these don't have read barriers. Its OK however since the GC is guaranteed to not be
- // running during the hprof dumping process.
- void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
- REQUIRES_SHARED(Locks::mutator_lock_) {
- if (!root->IsNull()) {
- VisitRoot(root);
- }
+bool Hprof::AddRuntimeInternalObjectsField(mirror::Class* klass) {
+ if (klass->IsDexCacheClass()) {
+ return true;
}
-
- void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
- REQUIRES_SHARED(Locks::mutator_lock_) {
- mirror::Object* obj = root->AsMirrorPtr();
- // The two cases are either classes or dex cache arrays. If it is a dex cache array, then use
- // VM internal. Otherwise the object is a declaring class of an ArtField or ArtMethod or a
- // class from a ClassLoader.
- hprof_->VisitRoot(obj, RootInfo(obj->IsClass() ? kRootStickyClass : kRootVMInternal));
+ // IsClassLoaderClass is true for subclasses of classloader but we only want to add the fake
+ // field to the java.lang.ClassLoader class.
+ if (klass->IsClassLoaderClass() && klass->GetSuperClass()->IsObjectClass()) {
+ return true;
}
-
-
- private:
- Hprof* const hprof_;
-};
+ return false;
+}
void Hprof::DumpHeapObject(mirror::Object* obj) {
// Ignore classes that are retired.
@@ -1103,8 +1089,41 @@ void Hprof::DumpHeapObject(mirror::Object* obj) {
++total_objects_;
- GcRootVisitor visitor(this);
- obj->VisitReferences(visitor, VoidFunctor());
+ class RootCollector {
+ public:
+ explicit RootCollector() {}
+
+ void operator()(mirror::Object*, MemberOffset, bool) const {}
+
+ // Note that these don't have read barriers. It's OK however since the GC is guaranteed to not be
+ // running during the hprof dumping process.
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ roots_.insert(root->AsMirrorPtr());
+ }
+
+ const std::set<mirror::Object*>& GetRoots() const {
+ return roots_;
+ }
+
+ private:
+ // These roots are actually live from the object. Avoid marking them as roots in hprof to make
+ // it easier to debug class unloading.
+ mutable std::set<mirror::Object*> roots_;
+ };
+
+ RootCollector visitor;
+ // Collect all native roots.
+ if (!obj->IsClass()) {
+ obj->VisitReferences(visitor, VoidFunctor());
+ }
gc::Heap* const heap = Runtime::Current()->GetHeap();
const gc::space::ContinuousSpace* const space = heap->FindContinuousSpaceFromObject(obj, true);
@@ -1112,15 +1131,18 @@ void Hprof::DumpHeapObject(mirror::Object* obj) {
if (space != nullptr) {
if (space->IsZygoteSpace()) {
heap_type = HPROF_HEAP_ZYGOTE;
+ VisitRoot(obj, RootInfo(kRootVMInternal));
} else if (space->IsImageSpace() && heap->ObjectIsInBootImageSpace(obj)) {
// Only count objects in the boot image as HPROF_HEAP_IMAGE, this leaves app image objects as
// HPROF_HEAP_APP. b/35762934
heap_type = HPROF_HEAP_IMAGE;
+ VisitRoot(obj, RootInfo(kRootVMInternal));
}
} else {
const auto* los = heap->GetLargeObjectsSpace();
if (los->Contains(obj) && los->IsZygoteLargeObject(Thread::Current(), obj)) {
heap_type = HPROF_HEAP_ZYGOTE;
+ VisitRoot(obj, RootInfo(kRootVMInternal));
}
}
CheckHeapSegmentConstraints();
@@ -1164,7 +1186,7 @@ void Hprof::DumpHeapObject(mirror::Object* obj) {
} else if (c->IsArrayClass()) {
DumpHeapArray(obj->AsArray(), c);
} else {
- DumpHeapInstanceObject(obj, c);
+ DumpHeapInstanceObject(obj, c, visitor.GetRoots());
}
}
@@ -1269,7 +1291,10 @@ void Hprof::DumpHeapClass(mirror::Class* klass) {
// Instance fields for this class (no superclass fields)
int iFieldCount = klass->NumInstanceFields();
- if (klass->IsStringClass()) {
+  // add_internal_runtime_objects is true only for classes that may keep objects live through
+  // means other than fields. This is never the case for strings.
+ const bool add_internal_runtime_objects = AddRuntimeInternalObjectsField(klass);
+ if (klass->IsStringClass() || add_internal_runtime_objects) {
__ AddU2((uint16_t)iFieldCount + 1);
} else {
__ AddU2((uint16_t)iFieldCount);
@@ -1284,6 +1309,21 @@ void Hprof::DumpHeapClass(mirror::Class* klass) {
if (klass->IsStringClass()) {
__ AddStringId(LookupStringId("value"));
__ AddU1(hprof_basic_object);
+ } else if (add_internal_runtime_objects) {
+ __ AddStringId(LookupStringId("runtimeInternalObjects"));
+ __ AddU1(hprof_basic_object);
+ }
+}
+
+void Hprof::DumpFakeObjectArray(mirror::Object* obj, const std::set<mirror::Object*>& elements) {
+ __ AddU1(HPROF_OBJECT_ARRAY_DUMP);
+ __ AddObjectId(obj);
+ __ AddStackTraceSerialNumber(LookupStackTraceSerialNumber(obj));
+ __ AddU4(elements.size());
+ __ AddClassId(LookupClassId(
+ Runtime::Current()->GetClassLinker()->GetClassRoot(ClassLinker::kObjectArrayClass)));
+ for (mirror::Object* e : elements) {
+ __ AddObjectId(e);
}
}
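For context, DumpFakeObjectArray writes the standard hprof object-array sub-record. A hypothetical serializer for that layout (tag value 0x22 per the hprof format; identifiers are shown as 4 bytes here, while real dumps use the identifier size declared in the hprof header):

#include <cstdint>
#include <vector>

constexpr uint8_t kHprofObjectArrayDump = 0x22;

void AppendU4(std::vector<uint8_t>* out, uint32_t v) {
  for (int shift = 24; shift >= 0; shift -= 8) {
    out->push_back(static_cast<uint8_t>(v >> shift));  // big-endian, as in hprof
  }
}

void AppendObjectArrayDump(std::vector<uint8_t>* out,
                           uint32_t array_id,
                           uint32_t stack_trace_serial,
                           uint32_t array_class_id,
                           const std::vector<uint32_t>& element_ids) {
  out->push_back(kHprofObjectArrayDump);
  AppendU4(out, array_id);
  AppendU4(out, stack_trace_serial);
  AppendU4(out, static_cast<uint32_t>(element_ids.size()));
  AppendU4(out, array_class_id);
  for (uint32_t id : element_ids) {
    AppendU4(out, id);
  }
}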
@@ -1327,7 +1367,9 @@ void Hprof::DumpHeapArray(mirror::Array* obj, mirror::Class* klass) {
}
}
-void Hprof::DumpHeapInstanceObject(mirror::Object* obj, mirror::Class* klass) {
+void Hprof::DumpHeapInstanceObject(mirror::Object* obj,
+ mirror::Class* klass,
+ const std::set<mirror::Object*>& fake_roots) {
// obj is an instance object.
__ AddU1(HPROF_INSTANCE_DUMP);
__ AddObjectId(obj);
@@ -1341,6 +1383,7 @@ void Hprof::DumpHeapInstanceObject(mirror::Object* obj, mirror::Class* klass) {
// What we will use for the string value if the object is a string.
mirror::Object* string_value = nullptr;
+ mirror::Object* fake_object_array = nullptr;
// Write the instance data; fields for this class, followed by super class fields, and so on.
do {
@@ -1396,8 +1439,12 @@ void Hprof::DumpHeapInstanceObject(mirror::Object* obj, mirror::Class* klass) {
}
}
__ AddObjectId(string_value);
+ } else if (AddRuntimeInternalObjectsField(klass)) {
+      // We need an id that is guaranteed to be unused; offset the address by half of the object alignment.
+ fake_object_array = reinterpret_cast<mirror::Object*>(
+ reinterpret_cast<uintptr_t>(obj) + kObjectAlignment / 2);
+ __ AddObjectId(fake_object_array);
}
-
klass = klass->GetSuperClass();
} while (klass != nullptr);
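The synthetic id picked above is collision-free by construction: every real object starts at an address that is a multiple of kObjectAlignment, so an id offset by half the alignment can never equal a real object's id. A minimal illustration (assuming 8-byte alignment, which matches ART's kObjectAlignment):

#include <cassert>
#include <cstdint>

constexpr uintptr_t kObjectAlignment = 8;  // assumption: ART's value

uintptr_t FakeArrayId(uintptr_t object_address) {
  assert(object_address % kObjectAlignment == 0);  // real objects are aligned
  // The result is misaligned by kObjectAlignment / 2, so it cannot be the
  // address (and hence the hprof id) of any real object.
  return object_address + kObjectAlignment / 2;
}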
@@ -1419,6 +1466,8 @@ void Hprof::DumpHeapInstanceObject(mirror::Object* obj, mirror::Class* klass) {
__ AddU1(hprof_basic_char);
__ AddU2List(s->GetValue(), s->GetLength());
}
+ } else if (fake_object_array != nullptr) {
+ DumpFakeObjectArray(fake_object_array, fake_roots);
}
}
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 971d03958c..e8a9904dc6 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -335,7 +335,7 @@ static JdwpError VM_CapabilitiesNew(JdwpState*, Request* request, ExpandBuf* rep
expandBufAdd1(reply, false); // canUnrestrictedlyRedefineClasses
expandBufAdd1(reply, false); // canPopFrames
expandBufAdd1(reply, true); // canUseInstanceFilters
- expandBufAdd1(reply, false); // canGetSourceDebugExtension
+ expandBufAdd1(reply, true); // canGetSourceDebugExtension
expandBufAdd1(reply, false); // canRequestVMDeathEvent
expandBufAdd1(reply, false); // canSetDefaultStratum
expandBufAdd1(reply, true); // 1.6: canGetInstanceInfo
@@ -499,13 +499,18 @@ static JdwpError RT_ClassObject(JdwpState*, Request* request, ExpandBuf* pReply)
/*
* Returns the value of the SourceDebugExtension attribute.
- *
- * JDB seems interested, but DEX files don't currently support this.
*/
-static JdwpError RT_SourceDebugExtension(JdwpState*, Request*, ExpandBuf*)
+static JdwpError RT_SourceDebugExtension(JdwpState*, Request* request, ExpandBuf* pReply)
REQUIRES_SHARED(Locks::mutator_lock_) {
/* referenceTypeId in, string out */
- return ERR_ABSENT_INFORMATION;
+ RefTypeId refTypeId = request->ReadRefTypeId();
+ std::string extension_data;
+ JdwpError status = Dbg::GetSourceDebugExtension(refTypeId, &extension_data);
+ if (status != ERR_NONE) {
+ return status;
+ }
+ expandBufAddUtf8String(pReply, extension_data);
+ return ERR_NONE;
}
static JdwpError RT_Signature(JdwpState*, Request* request, ExpandBuf* pReply, bool with_generic)
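The reply body for ReferenceType.SourceDebugExtension is a single JDWP string. In the JDWP wire format a string is a 4-byte big-endian length followed by UTF-8 data, which is presumably what expandBufAddUtf8String appends; a hypothetical encoder for that shape:

#include <cstdint>
#include <string>
#include <vector>

// Hypothetical helper; ART's real encoder is expandBufAddUtf8String.
void AppendJdwpString(std::vector<uint8_t>* out, const std::string& s) {
  const uint32_t len = static_cast<uint32_t>(s.size());
  for (int shift = 24; shift >= 0; shift -= 8) {
    out->push_back(static_cast<uint8_t>(len >> shift));  // u4 length, big-endian
  }
  out->insert(out->end(), s.begin(), s.end());  // UTF-8 payload
}

Note that the same commit flips canGetSourceDebugExtension to true in VM_CapabilitiesNew above, so debuggers that gate on the capability (e.g. for JSR-45 source maps) will now issue this command.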
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 201701a510..62a616b646 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -35,6 +35,7 @@
#include "android-base/stringprintf.h"
#include "arch/context.h"
+#include "arch/context-inl.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/bit_utils.h"
@@ -3413,11 +3414,10 @@ void Thread::VisitRoots(RootVisitor* visitor) {
verifier->VisitRoots(visitor, RootInfo(kRootNativeStack, thread_id));
}
// Visit roots on this thread's stack
- Context* context = GetLongJumpContext();
+ RuntimeContextType context;
RootCallbackVisitor visitor_to_callback(visitor, thread_id);
- ReferenceMapVisitor<RootCallbackVisitor, kPrecise> mapper(this, context, visitor_to_callback);
+ ReferenceMapVisitor<RootCallbackVisitor, kPrecise> mapper(this, &context, visitor_to_callback);
mapper.template WalkStack<StackVisitor::CountTransitions::kNo>(false);
- ReleaseLongJumpContext(context);
for (instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
visitor->VisitRootIfNonNull(&frame.this_object_, RootInfo(kRootVMInternal, thread_id));
}
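The thread.cc change replaces the cached long-jump context, which had to be explicitly acquired and released, with a stack-allocated RuntimeContextType scoped to the stack walk. A simplified sketch of the before/after shape, using stand-in types (ART's real Context holds a register snapshot):

struct Context { /* register snapshot lives here in the real type */ };

struct Thread {
  Context* GetLongJumpContext() {
    Context* c = cached_ != nullptr ? cached_ : new Context();
    cached_ = nullptr;
    return c;
  }
  void ReleaseLongJumpContext(Context* c) { cached_ = c; }  // cached for reuse
  Context* cached_ = nullptr;
};

void WalkStack(Context* /*context*/) { /* visit each frame's references */ }

void VisitRootsBefore(Thread* self) {
  Context* context = self->GetLongJumpContext();  // may heap-allocate
  WalkStack(context);
  self->ReleaseLongJumpContext(context);          // pairing is easy to get wrong
}

void VisitRootsAfter() {
  Context context;  // scoped to the walk; nothing to release
  WalkStack(&context);
}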
diff --git a/test/911-get-stack-trace/src/art/PrintThread.java b/test/911-get-stack-trace/src/art/PrintThread.java
index f50a66b963..fee5ba00ab 100644
--- a/test/911-get-stack-trace/src/art/PrintThread.java
+++ b/test/911-get-stack-trace/src/art/PrintThread.java
@@ -41,7 +41,8 @@ public class PrintThread {
// We have to ignore some threads when printing all stack traces. These are threads that may or
// may not exist depending on the environment.
public final static String IGNORE_THREAD_NAME_REGEX =
- "Binder:|RenderThread|hwuiTask|Jit thread pool worker|Instr:|JDWP|Profile Saver|main";
+ "Binder:|RenderThread|hwuiTask|Jit thread pool worker|Instr:|JDWP|Profile Saver|main|" +
+ "queued-work-looper";
public final static Matcher IGNORE_THREADS =
Pattern.compile(IGNORE_THREAD_NAME_REGEX).matcher("");
@@ -88,4 +89,4 @@ public class PrintThread {
}
public static native String[][] getStackTrace(Thread thread, int start, int max);
-}
\ No newline at end of file
+}
diff --git a/test/knownfailures.json b/test/knownfailures.json
index 6d207766f2..8df7466cf8 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -659,15 +659,16 @@
"969-iface-super",
"981-dedup-original-dex",
"984-obsolete-invoke",
- "985-re-obsolete"
+ "985-re-obsolete",
+ "987-stack-trace-dumping"
],
- "description": "Tests that use javac/dx to build (not jack).",
+ "description": "The tests above fail with --build-with-javac-dx.",
"env_vars": {"ANDROID_COMPILE_WITH_JACK": "false"},
"bug": "b/37636792"
},
{
"tests": "648-many-direct-methods",
- "variant": "optimizing",
+ "variant": "optimizing | speed-profile | no-image",
"description": "Test disabled with AOT because of dex2oatd timeouts.",
"bug": "b/33650497"
}