-rw-r--r--  Android.mk | 5
-rw-r--r--  cmdline/cmdline_parser_test.cc | 6
-rw-r--r--  cmdline/detail/cmdline_parse_argument_detail.h | 4
-rw-r--r--  cmdline/detail/cmdline_parser_detail.h | 6
-rw-r--r--  compiler/driver/compiler_options.cc | 5
-rw-r--r--  compiler/exception_test.cc | 9
-rw-r--r--  compiler/jit/jit_compiler.cc | 2
-rw-r--r--  compiler/optimizing/code_generator.cc | 29
-rw-r--r--  compiler/optimizing/code_generator.h | 4
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc | 1289
-rw-r--r--  compiler/optimizing/code_generator_arm64.h | 105
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc | 753
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.h | 44
-rw-r--r--  compiler/optimizing/code_generator_mips.cc | 94
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc | 94
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 88
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 86
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc | 207
-rw-r--r--  compiler/optimizing/intrinsics_arm_vixl.cc | 7
-rw-r--r--  compiler/optimizing/nodes.cc | 9
-rw-r--r--  compiler/optimizing/nodes.h | 7
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 48
-rw-r--r--  compiler/optimizing/prepare_for_register_allocation.cc | 4
-rw-r--r--  compiler/optimizing/scheduler.cc | 49
-rw-r--r--  compiler/optimizing/scheduler.h | 42
-rw-r--r--  compiler/optimizing/scheduler_arm.cc | 2
-rw-r--r--  compiler/optimizing/scheduler_arm.h | 5
-rw-r--r--  compiler/optimizing/scheduler_arm64.h | 4
-rw-r--r--  compiler/optimizing/scheduler_test.cc | 15
-rw-r--r--  compiler/optimizing/stack_map_stream.cc | 60
-rw-r--r--  compiler/optimizing/stack_map_stream.h | 20
-rw-r--r--  compiler/optimizing/stack_map_test.cc | 66
-rw-r--r--  dex2oat/linker/oat_writer.cc | 98
-rw-r--r--  dex2oat/linker/oat_writer_test.cc | 16
-rw-r--r--  dexlayout/dex_ir.h | 2
-rw-r--r--  dexlayout/dex_ir_builder.cc | 60
-rw-r--r--  dexlayout/dexlayout_test.cc | 15
-rw-r--r--  libartbase/base/bit_memory_region.h | 156
-rw-r--r--  libartbase/base/bit_memory_region_test.cc | 18
-rw-r--r--  libartbase/base/bit_struct_detail.h | 2
-rw-r--r--  libartbase/base/bit_table.h | 85
-rw-r--r--  libartbase/base/bit_table_test.cc | 26
-rw-r--r--  libartbase/base/mem_map_fuchsia.cc | 8
-rw-r--r--  libartbase/base/transform_iterator.h | 4
-rw-r--r--  libdexfile/dex/class_accessor.h | 19
-rw-r--r--  libdexfile/dex/code_item_accessors.h | 2
-rw-r--r--  libdexfile/dex/dex_file-inl.h | 32
-rw-r--r--  libdexfile/dex/dex_file.cc | 45
-rw-r--r--  libdexfile/dex/dex_file.h | 208
-rw-r--r--  libdexfile/dex/dex_file_verifier_test.cc | 55
-rw-r--r--  libdexfile/dex/modifiers.h | 5
-rw-r--r--  oatdump/oatdump.cc | 2
-rw-r--r--  oatdump/oatdump_test.h | 2
-rw-r--r--  openjdkjvm/OpenjdkJvm.cc | 12
-rw-r--r--  openjdkjvmti/ti_redefine.cc | 11
-rw-r--r--  openjdkjvmti/ti_timers.cc | 2
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S | 4
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S | 6
-rw-r--r--  runtime/arch/mips/entrypoints_init_mips.cc | 10
-rw-r--r--  runtime/arch/mips/quick_entrypoints_mips.S | 4
-rw-r--r--  runtime/arch/mips64/quick_entrypoints_mips64.S | 4
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S | 4
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S | 4
-rw-r--r--  runtime/art_method-inl.h | 40
-rw-r--r--  runtime/entrypoints/quick/quick_default_externs.h | 8
-rw-r--r--  runtime/entrypoints/quick/quick_default_init_entrypoints.h | 6
-rw-r--r--  runtime/entrypoints/quick/quick_dexcache_entrypoints.cc | 28
-rw-r--r--  runtime/entrypoints/quick/quick_entrypoints_list.h | 6
-rw-r--r--  runtime/entrypoints_order_test.cc | 6
-rw-r--r--  runtime/gc/accounting/card_table.cc | 14
-rw-r--r--  runtime/gc/accounting/card_table.h | 8
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc | 6
-rw-r--r--  runtime/gc/space/malloc_space.cc | 2
-rw-r--r--  runtime/gc/space/region_space-inl.h | 48
-rw-r--r--  runtime/gc/space/region_space.cc | 46
-rw-r--r--  runtime/gc/space/region_space.h | 15
-rw-r--r--  runtime/generated/asm_support_gen.h | 4
-rw-r--r--  runtime/hidden_api.cc | 4
-rw-r--r--  runtime/hprof/hprof.cc | 2
-rw-r--r--  runtime/image.cc | 2
-rw-r--r--  runtime/interpreter/unstarted_runtime.cc | 8
-rw-r--r--  runtime/jdwp/jdwp_event.cc | 2
-rw-r--r--  runtime/jni/check_jni.cc | 2
-rw-r--r--  runtime/method_handles.cc | 4
-rw-r--r--  runtime/mirror/dex_cache.h | 4
-rw-r--r--  runtime/native/dalvik_system_DexFile.cc | 8
-rw-r--r--  runtime/native/dalvik_system_VMDebug.cc | 2
-rw-r--r--  runtime/native/dalvik_system_VMRuntime.cc | 8
-rw-r--r--  runtime/native/java_lang_reflect_Constructor.cc | 2
-rw-r--r--  runtime/oat.h | 4
-rw-r--r--  runtime/oat_file.h | 2
-rw-r--r--  runtime/parsed_options.cc | 3
-rw-r--r--  runtime/runtime.cc | 5
-rw-r--r--  runtime/runtime.h | 25
-rw-r--r--  runtime/runtime_options.def | 1
-rw-r--r--  runtime/stack_map.cc | 246
-rw-r--r--  runtime/stack_map.h | 110
-rw-r--r--  runtime/thread.cc | 10
-rw-r--r--  runtime/verifier/method_verifier.cc | 2
-rw-r--r--  test/008-exceptions/multidex.jpp | 27
-rw-r--r--  test/160-read-barrier-stress/src/Main.java | 40
-rw-r--r--  test/162-method-resolution/multidex.jpp | 127
-rw-r--r--  test/462-checker-inlining-dex-files/multidex.jpp | 8
-rw-r--r--  test/556-invoke-super/multidex.jpp | 4
-rw-r--r--  test/563-checker-fakestring/smali/TestCase.smali | 32
-rw-r--r--  test/563-checker-fakestring/src/Main.java | 8
-rw-r--r--  test/569-checker-pattern-replacement/multidex.jpp | 8
-rw-r--r--  test/616-cha-interface-default/multidex.jpp | 3
-rw-r--r--  test/616-cha-proxy-method-inline/multidex.jpp | 3
-rw-r--r--  test/626-const-class-linking/multidex.jpp | 27
-rw-r--r--  test/635-checker-arm64-volatile-load-cc/src/Main.java | 12
-rw-r--r--  test/638-checker-inline-caches/multidex.jpp | 12
-rw-r--r--  test/674-hiddenapi/hiddenapi.cc | 30
-rw-r--r--  test/683-clinit-inline-static-invoke/expected.txt | 0
-rw-r--r--  test/683-clinit-inline-static-invoke/info.txt | 3
-rw-r--r--  test/683-clinit-inline-static-invoke/src-multidex/MyTimeZone.java | 22
-rw-r--r--  test/683-clinit-inline-static-invoke/src/Main.java | 31
-rw-r--r--  test/913-heaps/expected.txt | 52
-rw-r--r--  test/knownfailures.json | 23
-rw-r--r--  tools/Android.mk | 2
-rw-r--r--  tools/art | 2
-rw-r--r--  tools/art_verifier/Android.bp | 48
-rw-r--r--  tools/art_verifier/art_verifier.cc | 267
-rw-r--r--  tools/class2greylist/Android.bp | 1
-rw-r--r--  tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java | 54
-rw-r--r--  tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java | 32
-rw-r--r--  tools/class2greylist/test/src/com/android/javac/AnnotationVisitorTest.java | 186
-rw-r--r--  tools/hiddenapi/hiddenapi.cc | 281
-rw-r--r--  tools/hiddenapi/hiddenapi_test.cc | 73
-rw-r--r--  tools/jfuzz/jfuzz.cc | 2
-rw-r--r--  tools/libcore_failures.txt | 10
-rw-r--r--  tools/libcore_gcstress_debug_failures.txt | 3
-rw-r--r--  tools/libcore_gcstress_failures.txt | 4
-rwxr-xr-x  tools/run-libcore-tests.sh | 6
-rwxr-xr-x  tools/teardown-buildbot-device.sh | 17
-rw-r--r--  tools/veridex/flow_analysis.cc | 8
-rw-r--r--  tools/veridex/flow_analysis.h | 16
-rw-r--r--  tools/veridex/hidden_api_finder.cc | 42
-rw-r--r--  tools/veridex/precise_hidden_api_finder.cc | 37
-rw-r--r--  tools/veridex/precise_hidden_api_finder.h | 2
-rw-r--r--  tools/veridex/resolver.cc | 91
141 files changed, 2970 insertions, 3468 deletions
diff --git a/Android.mk b/Android.mk
index 1c946292ef..19c65a1e67 100644
--- a/Android.mk
+++ b/Android.mk
@@ -98,6 +98,7 @@ include $(art_path)/build/Android.common_test.mk
include $(art_path)/build/Android.gtest.mk
include $(art_path)/test/Android.run-test.mk
+# Make sure /system is writable on the device.
TEST_ART_ADB_ROOT_AND_REMOUNT := \
(adb root && \
adb wait-for-device remount && \
@@ -122,8 +123,10 @@ test-art-target-sync: $(TEST_ART_TARGET_SYNC_DEPS)
$(TEST_ART_ADB_ROOT_AND_REMOUNT)
adb sync system && adb sync data
else
+# TEST_ART_ADB_ROOT_AND_REMOUNT is not needed here, as we are only
+# pushing things to the chroot dir, which is expected to be under
+# /data on the device.
test-art-target-sync: $(TEST_ART_TARGET_SYNC_DEPS)
- $(TEST_ART_ADB_ROOT_AND_REMOUNT)
adb wait-for-device
adb push $(PRODUCT_OUT)/system $(ART_TEST_CHROOT)/
adb push $(PRODUCT_OUT)/data $(ART_TEST_CHROOT)/
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index a52e16328a..a33d53741c 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -59,7 +59,7 @@ namespace art {
template <typename T>
bool UsuallyEquals(const T& expected, const T& actual,
typename std::enable_if<
- detail::SupportsEqualityOperator<T>::value>::type* = 0) {
+ detail::SupportsEqualityOperator<T>::value>::type* = nullptr) {
return expected == actual;
}
@@ -73,8 +73,8 @@ namespace art {
template <typename T, typename ... Ignore>
bool UsuallyEquals(const T& expected, const T& actual,
const Ignore& ... more ATTRIBUTE_UNUSED,
- typename std::enable_if<std::is_pod<T>::value>::type* = 0,
- typename std::enable_if<!detail::SupportsEqualityOperator<T>::value>::type* = 0
+ typename std::enable_if<std::is_pod<T>::value>::type* = nullptr,
+ typename std::enable_if<!detail::SupportsEqualityOperator<T>::value>::type* = nullptr
) {
return memcmp(std::addressof(expected), std::addressof(actual), sizeof(T)) == 0;
}
diff --git a/cmdline/detail/cmdline_parse_argument_detail.h b/cmdline/detail/cmdline_parse_argument_detail.h
index 65c11146aa..d011e7f71d 100644
--- a/cmdline/detail/cmdline_parse_argument_detail.h
+++ b/cmdline/detail/cmdline_parse_argument_detail.h
@@ -90,7 +90,7 @@ template <typename TArg>
struct CmdlineParserArgumentInfo {
// This version will only be used if TArg is arithmetic and thus has the <= operators.
template <typename T = TArg> // Necessary to get SFINAE to kick in.
- bool CheckRange(const TArg& value, typename EnableIfNumeric<T>::type* = 0) {
+ bool CheckRange(const TArg& value, typename EnableIfNumeric<T>::type* = nullptr) {
if (has_range_) {
return min_ <= value && value <= max_;
}
@@ -99,7 +99,7 @@ struct CmdlineParserArgumentInfo {
// This version will be used at other times when TArg is not arithmetic.
template <typename T = TArg>
- bool CheckRange(const TArg&, typename DisableIfNumeric<T>::type* = 0) {
+ bool CheckRange(const TArg&, typename DisableIfNumeric<T>::type* = nullptr) {
assert(!has_range_);
return true;
}
diff --git a/cmdline/detail/cmdline_parser_detail.h b/cmdline/detail/cmdline_parser_detail.h
index 4c26ba3012..2078d7a288 100644
--- a/cmdline/detail/cmdline_parser_detail.h
+++ b/cmdline/detail/cmdline_parser_detail.h
@@ -90,7 +90,7 @@ struct SupportsEqualityOperator : // NOLINT [whitespace/labels] [4]
template <typename T>
std::string ToStringAny(const T& value,
typename std::enable_if<
- SupportsInsertionOperator<T>::value>::type* = 0) {
+ SupportsInsertionOperator<T>::value>::type* = nullptr) {
std::stringstream stream;
stream << value;
return stream.str();
@@ -99,7 +99,7 @@ std::string ToStringAny(const T& value,
template <typename T>
std::string ToStringAny(const std::vector<T> value,
typename std::enable_if<
- SupportsInsertionOperator<T>::value>::type* = 0) {
+ SupportsInsertionOperator<T>::value>::type* = nullptr) {
std::stringstream stream;
stream << "vector{";
@@ -118,7 +118,7 @@ std::string ToStringAny(const std::vector<T> value,
template <typename T>
std::string ToStringAny(const T&,
typename std::enable_if<
- !SupportsInsertionOperator<T>::value>::type* = 0
+ !SupportsInsertionOperator<T>::value>::type* = nullptr
) {
return std::string("(unknown type [no operator<< implemented] for )");
}
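
The three cmdline hunks above all make the same mechanical change: the defaulted std::enable_if pointer parameters used for SFINAE-based overload selection now default to nullptr instead of 0. A minimal, self-contained illustration of that pattern follows; the function name and trait used here are made up for the example and are not part of the ART sources.

#include <iostream>
#include <string>
#include <type_traits>

// Chosen only for arithmetic T. The defaulted pointer parameter exists solely
// so that enable_if can remove this overload from the candidate set (SFINAE);
// defaulting it to nullptr rather than 0 is the style adopted above.
template <typename T>
std::string Describe(const T& value,
                     typename std::enable_if<std::is_arithmetic<T>::value>::type* = nullptr) {
  return "arithmetic: " + std::to_string(value);
}

// Fallback, chosen when the overload above has been removed by SFINAE.
template <typename T>
std::string Describe(const T& /* value */,
                     typename std::enable_if<!std::is_arithmetic<T>::value>::type* = nullptr) {
  return "non-arithmetic";
}

int main() {
  std::cout << Describe(42) << '\n';                  // arithmetic: 42
  std::cout << Describe(std::string("abc")) << '\n';  // non-arithmetic
  return 0;
}
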
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index 62d547de44..8cc6cf10f0 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -116,9 +116,6 @@ bool CompilerOptions::ParseRegisterAllocationStrategy(const std::string& option,
return true;
}
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wframe-larger-than="
-
bool CompilerOptions::ParseCompilerOptions(const std::vector<std::string>& options,
bool ignore_unrecognized,
std::string* error_msg) {
@@ -133,8 +130,6 @@ bool CompilerOptions::ParseCompilerOptions(const std::vector<std::string>& optio
return ReadCompilerOptions(args, this, error_msg);
}
-#pragma GCC diagnostic pop
-
bool CompilerOptions::IsImageClass(const char* descriptor) const {
// Historical note: We used to hold the set indirectly and there was a distinction between an
// empty set and a null, null meaning to include all classes. However, the distinction has been
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index b0e03374a0..fd17364c26 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -81,7 +81,9 @@ class ExceptionTest : public CommonRuntimeTest {
stack_maps.BeginStackMapEntry(kDexPc, native_pc_offset);
stack_maps.EndStackMapEntry();
stack_maps.EndMethod();
- const size_t stack_maps_size = stack_maps.PrepareForFillIn();
+ ScopedArenaVector<uint8_t> stack_map = stack_maps.Encode();
+
+ const size_t stack_maps_size = stack_map.size();
const size_t header_size = sizeof(OatQuickMethodHeader);
const size_t code_alignment = GetInstructionSetAlignment(kRuntimeISA);
@@ -90,9 +92,8 @@ class ExceptionTest : public CommonRuntimeTest {
uint8_t* code_ptr =
AlignUp(&fake_header_code_and_maps_[stack_maps_size + header_size], code_alignment);
- MemoryRegion stack_maps_region(&fake_header_code_and_maps_[0], stack_maps_size);
- stack_maps.FillInCodeInfo(stack_maps_region);
- OatQuickMethodHeader method_header(code_ptr - stack_maps_region.begin(), code_size);
+ memcpy(&fake_header_code_and_maps_[0], stack_map.data(), stack_maps_size);
+ OatQuickMethodHeader method_header(code_ptr - fake_header_code_and_maps_.data(), code_size);
static_assert(std::is_trivially_copyable<OatQuickMethodHeader>::value, "Cannot use memcpy");
memcpy(code_ptr - header_size, &method_header, header_size);
memcpy(code_ptr, fake_code_.data(), fake_code_.size());
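
The updated test reflects the new StackMapStream interface: instead of asking for a size and filling a caller-provided MemoryRegion, the stream returns the encoded bytes as a vector and the caller copies them wherever they belong. A rough, ART-agnostic sketch of that encode-then-copy shape follows; everything below is illustrative and not the real StackMapStream API.

#include <cstdint>
#include <cstring>
#include <vector>

// Stand-in for an encoder such as StackMapStream::Encode(): it serializes a
// few 32-bit values into a byte vector owned by the caller.
std::vector<uint8_t> Encode(const std::vector<uint32_t>& values) {
  std::vector<uint8_t> out(values.size() * sizeof(uint32_t));
  std::memcpy(out.data(), values.data(), out.size());
  return out;
}

int main() {
  std::vector<uint8_t> encoded = Encode({1, 2, 3});
  // The caller decides where the encoded bytes live, e.g. at the start of a
  // larger buffer that also holds a method header and the generated code.
  std::vector<uint8_t> buffer(encoded.size() + 64);
  std::memcpy(buffer.data(), encoded.data(), encoded.size());
  return 0;
}
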
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index a881c5ec98..3fc559e13b 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -88,7 +88,7 @@ extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t cou
JitCompiler::JitCompiler() {
compiler_options_.reset(new CompilerOptions());
// Special case max code units for inlining, whose default is "unset" (implictly
- // meaning no limit). Do this before parsing the actuall passed options.
+ // meaning no limit). Do this before parsing the actual passed options.
compiler_options_->SetInlineMaxCodeUnits(CompilerOptions::kDefaultInlineMaxCodeUnits);
{
std::string error_msg;
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index b0a05da0b1..a13efcaee2 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -737,17 +737,15 @@ void CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(HLoadClass* cls,
void CodeGenerator::GenerateLoadClassRuntimeCall(HLoadClass* cls) {
DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kRuntimeCall);
+ DCHECK(!cls->MustGenerateClinitCheck());
LocationSummary* locations = cls->GetLocations();
MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
if (cls->NeedsAccessCheck()) {
- CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
- InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
- } else if (cls->MustGenerateClinitCheck()) {
- CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
- InvokeRuntime(kQuickInitializeStaticStorage, cls, cls->GetDexPc());
+ CheckEntrypointTypes<kQuickResolveTypeAndVerifyAccess, void*, uint32_t>();
+ InvokeRuntime(kQuickResolveTypeAndVerifyAccess, cls, cls->GetDexPc());
} else {
- CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
- InvokeRuntime(kQuickInitializeType, cls, cls->GetDexPc());
+ CheckEntrypointTypes<kQuickResolveType, void*, uint32_t>();
+ InvokeRuntime(kQuickResolveType, cls, cls->GetDexPc());
}
}
@@ -963,12 +961,6 @@ CodeGenerator::CodeGenerator(HGraph* graph,
CodeGenerator::~CodeGenerator() {}
-void CodeGenerator::ComputeStackMapSize(size_t* stack_map_size) {
- DCHECK(stack_map_size != nullptr);
- StackMapStream* stack_map_stream = GetStackMapStream();
- *stack_map_size = stack_map_stream->PrepareForFillIn();
-}
-
size_t CodeGenerator::GetNumberOfJitRoots() const {
DCHECK(code_generation_data_ != nullptr);
return code_generation_data_->GetNumberOfJitRoots();
@@ -1035,13 +1027,12 @@ static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
}
}
-void CodeGenerator::BuildStackMaps(MemoryRegion stack_map_region,
- const DexFile::CodeItem* code_item_for_osr_check) {
- StackMapStream* stack_map_stream = GetStackMapStream();
- stack_map_stream->FillInCodeInfo(stack_map_region);
- if (kIsDebugBuild && code_item_for_osr_check != nullptr) {
- CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(stack_map_region), *code_item_for_osr_check);
+ScopedArenaVector<uint8_t> CodeGenerator::BuildStackMaps(const DexFile::CodeItem* code_item) {
+ ScopedArenaVector<uint8_t> stack_map = GetStackMapStream()->Encode();
+ if (kIsDebugBuild && code_item != nullptr) {
+ CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(stack_map.data()), *code_item);
}
+ return stack_map;
}
void CodeGenerator::RecordPcInfo(HInstruction* instruction,
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 3d58d29648..e77d621b58 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -350,9 +350,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
void AddSlowPath(SlowPathCode* slow_path);
- void BuildStackMaps(MemoryRegion stack_map_region,
- const DexFile::CodeItem* code_item_for_osr_check);
- void ComputeStackMapSize(size_t* stack_map_size);
+ ScopedArenaVector<uint8_t> BuildStackMaps(const DexFile::CodeItem* code_item_for_osr_check);
size_t GetNumberOfJitRoots() const;
// Fills the `literals` array with literals collected during code generation.
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 760b1dd09b..15e3d274a5 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -89,25 +89,10 @@ static constexpr uint32_t kPackedSwitchCompareJumpThreshold = 7;
// Reference load (except object array loads) is using LDR Wt, [Xn, #offset] which can handle
// offset < 16KiB. For offsets >= 16KiB, the load shall be emitted as two or more instructions.
-// For the Baker read barrier implementation using link-generated thunks we need to split
+// For the Baker read barrier implementation using link-time generated thunks we need to split
// the offset explicitly.
constexpr uint32_t kReferenceLoadMinFarOffset = 16 * KB;
-// Flags controlling the use of link-time generated thunks for Baker read barriers.
-constexpr bool kBakerReadBarrierLinkTimeThunksEnableForFields = true;
-constexpr bool kBakerReadBarrierLinkTimeThunksEnableForArrays = true;
-constexpr bool kBakerReadBarrierLinkTimeThunksEnableForGcRoots = true;
-
-// Some instructions have special requirements for a temporary, for example
-// LoadClass/kBssEntry and LoadString/kBssEntry for Baker read barrier require
-// temp that's not an R0 (to avoid an extra move) and Baker read barrier field
-// loads with large offsets need a fixed register to limit the number of link-time
-// thunks we generate. For these and similar cases, we want to reserve a specific
-// register that's neither callee-save nor an argument register. We choose x15.
-inline Location FixedTempLocation() {
- return Location::RegisterLocation(x15.GetCode());
-}
-
inline Condition ARM64Condition(IfCondition cond) {
switch (cond) {
case kCondEQ: return eq;
@@ -164,6 +149,16 @@ Location InvokeRuntimeCallingConvention::GetReturnLocation(DataType::Type return
return ARM64ReturnLocation(return_type);
}
+static RegisterSet OneRegInReferenceOutSaveEverythingCallerSaves() {
+ InvokeRuntimeCallingConvention calling_convention;
+ RegisterSet caller_saves = RegisterSet::Empty();
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode()));
+ DCHECK_EQ(calling_convention.GetRegisterAt(0).GetCode(),
+ RegisterFrom(calling_convention.GetReturnLocation(DataType::Type::kReference),
+ DataType::Type::kReference).GetCode());
+ return caller_saves;
+}
+
// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
#define __ down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler()-> // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, x).Int32Value()
@@ -307,35 +302,41 @@ class DivZeroCheckSlowPathARM64 : public SlowPathCodeARM64 {
class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
public:
- LoadClassSlowPathARM64(HLoadClass* cls,
- HInstruction* at,
- uint32_t dex_pc,
- bool do_clinit)
- : SlowPathCodeARM64(at),
- cls_(cls),
- dex_pc_(dex_pc),
- do_clinit_(do_clinit) {
+ LoadClassSlowPathARM64(HLoadClass* cls, HInstruction* at)
+ : SlowPathCodeARM64(at), cls_(cls) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
+ DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
Location out = locations->Out();
- CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
+ const uint32_t dex_pc = instruction_->GetDexPc();
+ bool must_resolve_type = instruction_->IsLoadClass() && cls_->MustResolveTypeOnSlowPath();
+ bool must_do_clinit = instruction_->IsClinitCheck() || cls_->MustGenerateClinitCheck();
+ CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- dex::TypeIndex type_index = cls_->GetTypeIndex();
- __ Mov(calling_convention.GetRegisterAt(0).W(), type_index.index_);
- QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
- : kQuickInitializeType;
- arm64_codegen->InvokeRuntime(entrypoint, instruction_, dex_pc_, this);
- if (do_clinit_) {
- CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
+ if (must_resolve_type) {
+ DCHECK(IsSameDexFile(cls_->GetDexFile(), arm64_codegen->GetGraph()->GetDexFile()));
+ dex::TypeIndex type_index = cls_->GetTypeIndex();
+ __ Mov(calling_convention.GetRegisterAt(0).W(), type_index.index_);
+ arm64_codegen->InvokeRuntime(kQuickResolveType, instruction_, dex_pc, this);
+ CheckEntrypointTypes<kQuickResolveType, void*, uint32_t>();
+ // If we also must_do_clinit, the resolved type is now in the correct register.
} else {
- CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
+ DCHECK(must_do_clinit);
+ Location source = instruction_->IsLoadClass() ? out : locations->InAt(0);
+ arm64_codegen->MoveLocation(LocationFrom(calling_convention.GetRegisterAt(0)),
+ source,
+ cls_->GetType());
+ }
+ if (must_do_clinit) {
+ arm64_codegen->InvokeRuntime(kQuickInitializeStaticStorage, instruction_, dex_pc, this);
+ CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, mirror::Class*>();
}
// Move the class to the desired location.
@@ -354,12 +355,6 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
// The class this slow path will load.
HLoadClass* const cls_;
- // The dex PC of `at_`.
- const uint32_t dex_pc_;
-
- // Whether to initialize the class.
- const bool do_clinit_;
-
DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM64);
};
@@ -604,503 +599,6 @@ void JumpTableARM64::EmitTable(CodeGeneratorARM64* codegen) {
}
}
-// Abstract base class for read barrier slow paths marking a reference
-// `ref`.
-//
-// Argument `entrypoint` must be a register location holding the read
-// barrier marking runtime entry point to be invoked or an empty
-// location; in the latter case, the read barrier marking runtime
-// entry point will be loaded by the slow path code itself.
-class ReadBarrierMarkSlowPathBaseARM64 : public SlowPathCodeARM64 {
- protected:
- ReadBarrierMarkSlowPathBaseARM64(HInstruction* instruction, Location ref, Location entrypoint)
- : SlowPathCodeARM64(instruction), ref_(ref), entrypoint_(entrypoint) {
- DCHECK(kEmitCompilerReadBarrier);
- }
-
- const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathBaseARM64"; }
-
- // Generate assembly code calling the read barrier marking runtime
- // entry point (ReadBarrierMarkRegX).
- void GenerateReadBarrierMarkRuntimeCall(CodeGenerator* codegen) {
- // No need to save live registers; it's taken care of by the
- // entrypoint. Also, there is no need to update the stack mask,
- // as this runtime call will not trigger a garbage collection.
- CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
- DCHECK_NE(ref_.reg(), LR);
- DCHECK_NE(ref_.reg(), WSP);
- DCHECK_NE(ref_.reg(), WZR);
- // IP0 is used internally by the ReadBarrierMarkRegX entry point
- // as a temporary, it cannot be the entry point's input/output.
- DCHECK_NE(ref_.reg(), IP0);
- DCHECK(0 <= ref_.reg() && ref_.reg() < kNumberOfWRegisters) << ref_.reg();
- // "Compact" slow path, saving two moves.
- //
- // Instead of using the standard runtime calling convention (input
- // and output in W0):
- //
- // W0 <- ref
- // W0 <- ReadBarrierMark(W0)
- // ref <- W0
- //
- // we just use rX (the register containing `ref`) as input and output
- // of a dedicated entrypoint:
- //
- // rX <- ReadBarrierMarkRegX(rX)
- //
- if (entrypoint_.IsValid()) {
- arm64_codegen->ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction_, this);
- __ Blr(XRegisterFrom(entrypoint_));
- } else {
- // Entrypoint is not already loaded, load from the thread.
- int32_t entry_point_offset =
- Thread::ReadBarrierMarkEntryPointsOffset<kArm64PointerSize>(ref_.reg());
- // This runtime call does not require a stack map.
- arm64_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
- }
- }
-
- // The location (register) of the marked object reference.
- const Location ref_;
-
- // The location of the entrypoint if it is already loaded.
- const Location entrypoint_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathBaseARM64);
-};
-
-// Slow path marking an object reference `ref` during a read
-// barrier. The field `obj.field` in the object `obj` holding this
-// reference does not get updated by this slow path after marking.
-//
-// This means that after the execution of this slow path, `ref` will
-// always be up-to-date, but `obj.field` may not; i.e., after the
-// flip, `ref` will be a to-space reference, but `obj.field` will
-// probably still be a from-space reference (unless it gets updated by
-// another thread, or if another thread installed another object
-// reference (different from `ref`) in `obj.field`).
-//
-// Argument `entrypoint` must be a register location holding the read
-// barrier marking runtime entry point to be invoked or an empty
-// location; in the latter case, the read barrier marking runtime
-// entry point will be loaded by the slow path code itself.
-class ReadBarrierMarkSlowPathARM64 : public ReadBarrierMarkSlowPathBaseARM64 {
- public:
- ReadBarrierMarkSlowPathARM64(HInstruction* instruction,
- Location ref,
- Location entrypoint = Location::NoLocation())
- : ReadBarrierMarkSlowPathBaseARM64(instruction, ref, entrypoint) {
- DCHECK(kEmitCompilerReadBarrier);
- }
-
- const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathARM64"; }
-
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = instruction_->GetLocations();
- DCHECK(locations->CanCall());
- DCHECK(ref_.IsRegister()) << ref_;
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_.reg())) << ref_.reg();
- DCHECK(instruction_->IsLoadClass() || instruction_->IsLoadString())
- << "Unexpected instruction in read barrier marking slow path: "
- << instruction_->DebugName();
-
- __ Bind(GetEntryLabel());
- GenerateReadBarrierMarkRuntimeCall(codegen);
- __ B(GetExitLabel());
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathARM64);
-};
-
-// Slow path loading `obj`'s lock word, loading a reference from
-// object `*(obj + offset + (index << scale_factor))` into `ref`, and
-// marking `ref` if `obj` is gray according to the lock word (Baker
-// read barrier). The field `obj.field` in the object `obj` holding
-// this reference does not get updated by this slow path after marking
-// (see LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARM64
-// below for that).
-//
-// This means that after the execution of this slow path, `ref` will
-// always be up-to-date, but `obj.field` may not; i.e., after the
-// flip, `ref` will be a to-space reference, but `obj.field` will
-// probably still be a from-space reference (unless it gets updated by
-// another thread, or if another thread installed another object
-// reference (different from `ref`) in `obj.field`).
-//
-// Argument `entrypoint` must be a register location holding the read
-// barrier marking runtime entry point to be invoked or an empty
-// location; in the latter case, the read barrier marking runtime
-// entry point will be loaded by the slow path code itself.
-class LoadReferenceWithBakerReadBarrierSlowPathARM64 : public ReadBarrierMarkSlowPathBaseARM64 {
- public:
- LoadReferenceWithBakerReadBarrierSlowPathARM64(HInstruction* instruction,
- Location ref,
- Register obj,
- uint32_t offset,
- Location index,
- size_t scale_factor,
- bool needs_null_check,
- bool use_load_acquire,
- Register temp,
- Location entrypoint = Location::NoLocation())
- : ReadBarrierMarkSlowPathBaseARM64(instruction, ref, entrypoint),
- obj_(obj),
- offset_(offset),
- index_(index),
- scale_factor_(scale_factor),
- needs_null_check_(needs_null_check),
- use_load_acquire_(use_load_acquire),
- temp_(temp) {
- DCHECK(kEmitCompilerReadBarrier);
- DCHECK(kUseBakerReadBarrier);
- }
-
- const char* GetDescription() const OVERRIDE {
- return "LoadReferenceWithBakerReadBarrierSlowPathARM64";
- }
-
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = instruction_->GetLocations();
- DCHECK(locations->CanCall());
- DCHECK(ref_.IsRegister()) << ref_;
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_.reg())) << ref_.reg();
- DCHECK(obj_.IsW());
- DCHECK_NE(ref_.reg(), LocationFrom(temp_).reg());
- DCHECK(instruction_->IsInstanceFieldGet() ||
- instruction_->IsStaticFieldGet() ||
- instruction_->IsArrayGet() ||
- instruction_->IsArraySet() ||
- instruction_->IsInstanceOf() ||
- instruction_->IsCheckCast() ||
- (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()) ||
- (instruction_->IsInvokeStaticOrDirect() && instruction_->GetLocations()->Intrinsified()))
- << "Unexpected instruction in read barrier marking slow path: "
- << instruction_->DebugName();
- // The read barrier instrumentation of object ArrayGet
- // instructions does not support the HIntermediateAddress
- // instruction.
- DCHECK(!(instruction_->IsArrayGet() &&
- instruction_->AsArrayGet()->GetArray()->IsIntermediateAddress()));
-
- // Temporary register `temp_`, used to store the lock word, must
- // not be IP0 nor IP1, as we may use them to emit the reference
- // load (in the call to GenerateRawReferenceLoad below), and we
- // need the lock word to still be in `temp_` after the reference
- // load.
- DCHECK_NE(LocationFrom(temp_).reg(), IP0);
- DCHECK_NE(LocationFrom(temp_).reg(), IP1);
-
- __ Bind(GetEntryLabel());
-
- // When using MaybeGenerateReadBarrierSlow, the read barrier call is
- // inserted after the original load. However, in fast path based
- // Baker's read barriers, we need to perform the load of
- // mirror::Object::monitor_ *before* the original reference load.
- // This load-load ordering is required by the read barrier.
- // The slow path (for Baker's algorithm) should look like:
- //
- // uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
- // lfence; // Load fence or artificial data dependency to prevent load-load reordering
- // HeapReference<mirror::Object> ref = *src; // Original reference load.
- // bool is_gray = (rb_state == ReadBarrier::GrayState());
- // if (is_gray) {
- // ref = entrypoint(ref); // ref = ReadBarrier::Mark(ref); // Runtime entry point call.
- // }
- //
- // Note: the original implementation in ReadBarrier::Barrier is
- // slightly more complex as it performs additional checks that we do
- // not do here for performance reasons.
-
- // /* int32_t */ monitor = obj->monitor_
- uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();
- __ Ldr(temp_, HeapOperand(obj_, monitor_offset));
- if (needs_null_check_) {
- codegen->MaybeRecordImplicitNullCheck(instruction_);
- }
- // /* LockWord */ lock_word = LockWord(monitor)
- static_assert(sizeof(LockWord) == sizeof(int32_t),
- "art::LockWord and int32_t have different sizes.");
-
- // Introduce a dependency on the lock_word including rb_state,
- // to prevent load-load reordering, and without using
- // a memory barrier (which would be more expensive).
- // `obj` is unchanged by this operation, but its value now depends
- // on `temp`.
- __ Add(obj_.X(), obj_.X(), Operand(temp_.X(), LSR, 32));
-
- // The actual reference load.
- // A possible implicit null check has already been handled above.
- CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
- arm64_codegen->GenerateRawReferenceLoad(instruction_,
- ref_,
- obj_,
- offset_,
- index_,
- scale_factor_,
- /* needs_null_check */ false,
- use_load_acquire_);
-
- // Mark the object `ref` when `obj` is gray.
- //
- // if (rb_state == ReadBarrier::GrayState())
- // ref = ReadBarrier::Mark(ref);
- //
- // Given the numeric representation, it's enough to check the low bit of the rb_state.
- static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
- static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
- __ Tbz(temp_, LockWord::kReadBarrierStateShift, GetExitLabel());
- GenerateReadBarrierMarkRuntimeCall(codegen);
-
- __ B(GetExitLabel());
- }
-
- private:
- // The register containing the object holding the marked object reference field.
- Register obj_;
- // The offset, index and scale factor to access the reference in `obj_`.
- uint32_t offset_;
- Location index_;
- size_t scale_factor_;
- // Is a null check required?
- bool needs_null_check_;
- // Should this reference load use Load-Acquire semantics?
- bool use_load_acquire_;
- // A temporary register used to hold the lock word of `obj_`.
- Register temp_;
-
- DISALLOW_COPY_AND_ASSIGN(LoadReferenceWithBakerReadBarrierSlowPathARM64);
-};
-
-// Slow path loading `obj`'s lock word, loading a reference from
-// object `*(obj + offset + (index << scale_factor))` into `ref`, and
-// marking `ref` if `obj` is gray according to the lock word (Baker
-// read barrier). If needed, this slow path also atomically updates
-// the field `obj.field` in the object `obj` holding this reference
-// after marking (contrary to
-// LoadReferenceWithBakerReadBarrierSlowPathARM64 above, which never
-// tries to update `obj.field`).
-//
-// This means that after the execution of this slow path, both `ref`
-// and `obj.field` will be up-to-date; i.e., after the flip, both will
-// hold the same to-space reference (unless another thread installed
-// another object reference (different from `ref`) in `obj.field`).
-//
-// Argument `entrypoint` must be a register location holding the read
-// barrier marking runtime entry point to be invoked or an empty
-// location; in the latter case, the read barrier marking runtime
-// entry point will be loaded by the slow path code itself.
-class LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARM64
- : public ReadBarrierMarkSlowPathBaseARM64 {
- public:
- LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARM64(
- HInstruction* instruction,
- Location ref,
- Register obj,
- uint32_t offset,
- Location index,
- size_t scale_factor,
- bool needs_null_check,
- bool use_load_acquire,
- Register temp,
- Location entrypoint = Location::NoLocation())
- : ReadBarrierMarkSlowPathBaseARM64(instruction, ref, entrypoint),
- obj_(obj),
- offset_(offset),
- index_(index),
- scale_factor_(scale_factor),
- needs_null_check_(needs_null_check),
- use_load_acquire_(use_load_acquire),
- temp_(temp) {
- DCHECK(kEmitCompilerReadBarrier);
- DCHECK(kUseBakerReadBarrier);
- }
-
- const char* GetDescription() const OVERRIDE {
- return "LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARM64";
- }
-
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = instruction_->GetLocations();
- Register ref_reg = WRegisterFrom(ref_);
- DCHECK(locations->CanCall());
- DCHECK(ref_.IsRegister()) << ref_;
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_.reg())) << ref_.reg();
- DCHECK(obj_.IsW());
- DCHECK_NE(ref_.reg(), LocationFrom(temp_).reg());
-
- // This slow path is only used by the UnsafeCASObject intrinsic at the moment.
- DCHECK((instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
- << "Unexpected instruction in read barrier marking and field updating slow path: "
- << instruction_->DebugName();
- DCHECK(instruction_->GetLocations()->Intrinsified());
- DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kUnsafeCASObject);
- DCHECK_EQ(offset_, 0u);
- DCHECK_EQ(scale_factor_, 0u);
- DCHECK_EQ(use_load_acquire_, false);
- // The location of the offset of the marked reference field within `obj_`.
- Location field_offset = index_;
- DCHECK(field_offset.IsRegister()) << field_offset;
-
- // Temporary register `temp_`, used to store the lock word, must
- // not be IP0 nor IP1, as we may use them to emit the reference
- // load (in the call to GenerateRawReferenceLoad below), and we
- // need the lock word to still be in `temp_` after the reference
- // load.
- DCHECK_NE(LocationFrom(temp_).reg(), IP0);
- DCHECK_NE(LocationFrom(temp_).reg(), IP1);
-
- __ Bind(GetEntryLabel());
-
- // The implementation is similar to LoadReferenceWithBakerReadBarrierSlowPathARM64's:
- //
- // uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
- // lfence; // Load fence or artificial data dependency to prevent load-load reordering
- // HeapReference<mirror::Object> ref = *src; // Original reference load.
- // bool is_gray = (rb_state == ReadBarrier::GrayState());
- // if (is_gray) {
- // old_ref = ref;
- // ref = entrypoint(ref); // ref = ReadBarrier::Mark(ref); // Runtime entry point call.
- // compareAndSwapObject(obj, field_offset, old_ref, ref);
- // }
-
- // /* int32_t */ monitor = obj->monitor_
- uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();
- __ Ldr(temp_, HeapOperand(obj_, monitor_offset));
- if (needs_null_check_) {
- codegen->MaybeRecordImplicitNullCheck(instruction_);
- }
- // /* LockWord */ lock_word = LockWord(monitor)
- static_assert(sizeof(LockWord) == sizeof(int32_t),
- "art::LockWord and int32_t have different sizes.");
-
- // Introduce a dependency on the lock_word including rb_state,
- // to prevent load-load reordering, and without using
- // a memory barrier (which would be more expensive).
- // `obj` is unchanged by this operation, but its value now depends
- // on `temp`.
- __ Add(obj_.X(), obj_.X(), Operand(temp_.X(), LSR, 32));
-
- // The actual reference load.
- // A possible implicit null check has already been handled above.
- CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
- arm64_codegen->GenerateRawReferenceLoad(instruction_,
- ref_,
- obj_,
- offset_,
- index_,
- scale_factor_,
- /* needs_null_check */ false,
- use_load_acquire_);
-
- // Mark the object `ref` when `obj` is gray.
- //
- // if (rb_state == ReadBarrier::GrayState())
- // ref = ReadBarrier::Mark(ref);
- //
- // Given the numeric representation, it's enough to check the low bit of the rb_state.
- static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
- static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
- __ Tbz(temp_, LockWord::kReadBarrierStateShift, GetExitLabel());
-
- // Save the old value of the reference before marking it.
- // Note that we cannot use IP to save the old reference, as IP is
- // used internally by the ReadBarrierMarkRegX entry point, and we
- // need the old reference after the call to that entry point.
- DCHECK_NE(LocationFrom(temp_).reg(), IP0);
- __ Mov(temp_.W(), ref_reg);
-
- GenerateReadBarrierMarkRuntimeCall(codegen);
-
- // If the new reference is different from the old reference,
- // update the field in the holder (`*(obj_ + field_offset)`).
- //
- // Note that this field could also hold a different object, if
- // another thread had concurrently changed it. In that case, the
- // LDXR/CMP/BNE sequence of instructions in the compare-and-set
- // (CAS) operation below would abort the CAS, leaving the field
- // as-is.
- __ Cmp(temp_.W(), ref_reg);
- __ B(eq, GetExitLabel());
-
- // Update the the holder's field atomically. This may fail if
- // mutator updates before us, but it's OK. This is achieved
- // using a strong compare-and-set (CAS) operation with relaxed
- // memory synchronization ordering, where the expected value is
- // the old reference and the desired value is the new reference.
-
- MacroAssembler* masm = arm64_codegen->GetVIXLAssembler();
- UseScratchRegisterScope temps(masm);
-
- // Convenience aliases.
- Register base = obj_.W();
- Register offset = XRegisterFrom(field_offset);
- Register expected = temp_.W();
- Register value = ref_reg;
- Register tmp_ptr = temps.AcquireX(); // Pointer to actual memory.
- Register tmp_value = temps.AcquireW(); // Value in memory.
-
- __ Add(tmp_ptr, base.X(), Operand(offset));
-
- if (kPoisonHeapReferences) {
- arm64_codegen->GetAssembler()->PoisonHeapReference(expected);
- if (value.Is(expected)) {
- // Do not poison `value`, as it is the same register as
- // `expected`, which has just been poisoned.
- } else {
- arm64_codegen->GetAssembler()->PoisonHeapReference(value);
- }
- }
-
- // do {
- // tmp_value = [tmp_ptr] - expected;
- // } while (tmp_value == 0 && failure([tmp_ptr] <- r_new_value));
-
- vixl::aarch64::Label loop_head, comparison_failed, exit_loop;
- __ Bind(&loop_head);
- __ Ldxr(tmp_value, MemOperand(tmp_ptr));
- __ Cmp(tmp_value, expected);
- __ B(&comparison_failed, ne);
- __ Stxr(tmp_value, value, MemOperand(tmp_ptr));
- __ Cbnz(tmp_value, &loop_head);
- __ B(&exit_loop);
- __ Bind(&comparison_failed);
- __ Clrex();
- __ Bind(&exit_loop);
-
- if (kPoisonHeapReferences) {
- arm64_codegen->GetAssembler()->UnpoisonHeapReference(expected);
- if (value.Is(expected)) {
- // Do not unpoison `value`, as it is the same register as
- // `expected`, which has just been unpoisoned.
- } else {
- arm64_codegen->GetAssembler()->UnpoisonHeapReference(value);
- }
- }
-
- __ B(GetExitLabel());
- }
-
- private:
- // The register containing the object holding the marked object reference field.
- const Register obj_;
- // The offset, index and scale factor to access the reference in `obj_`.
- uint32_t offset_;
- Location index_;
- size_t scale_factor_;
- // Is a null check required?
- bool needs_null_check_;
- // Should this reference load use Load-Acquire semantics?
- bool use_load_acquire_;
- // A temporary register used to hold the lock word of `obj_`; and
- // also to hold the original reference value, when the reference is
- // marked.
- const Register temp_;
-
- DISALLOW_COPY_AND_ASSIGN(LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARM64);
-};
-
// Slow path generating a read barrier for a heap reference.
class ReadBarrierForHeapReferenceSlowPathARM64 : public SlowPathCodeARM64 {
public:
@@ -1403,7 +901,9 @@ CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph,
jit_string_patches_(StringReferenceValueComparator(),
graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_class_patches_(TypeReferenceValueComparator(),
- graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ jit_baker_read_barrier_slow_paths_(std::less<uint32_t>(),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
// Save the link register (containing the return address) to mimic Quick.
AddAllocatedRegister(LocationFrom(lr));
}
@@ -1418,6 +918,16 @@ void CodeGeneratorARM64::EmitJumpTables() {
void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) {
EmitJumpTables();
+
+ // Emit JIT baker read barrier slow paths.
+ DCHECK(Runtime::Current()->UseJitCompilation() || jit_baker_read_barrier_slow_paths_.empty());
+ for (auto& entry : jit_baker_read_barrier_slow_paths_) {
+ uint32_t encoded_data = entry.first;
+ vixl::aarch64::Label* slow_path_entry = &entry.second.label;
+ __ Bind(slow_path_entry);
+ CompileBakerReadBarrierThunk(*GetAssembler(), encoded_data, /* debug_name */ nullptr);
+ }
+
// Ensure we emit the literal pool.
__ FinalizeCode();
@@ -1444,13 +954,20 @@ void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) {
BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data);
// Check that the next instruction matches the expected LDR.
switch (kind) {
- case BakerReadBarrierKind::kField: {
+ case BakerReadBarrierKind::kField:
+ case BakerReadBarrierKind::kAcquire: {
DCHECK_GE(code.size() - literal_offset, 8u);
uint32_t next_insn = GetInsn(literal_offset + 4u);
- // LDR (immediate) with correct base_reg.
CheckValidReg(next_insn & 0x1fu); // Check destination register.
const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
- CHECK_EQ(next_insn & 0xffc003e0u, 0xb9400000u | (base_reg << 5));
+ if (kind == BakerReadBarrierKind::kField) {
+ // LDR (immediate) with correct base_reg.
+ CHECK_EQ(next_insn & 0xffc003e0u, 0xb9400000u | (base_reg << 5));
+ } else {
+ DCHECK(kind == BakerReadBarrierKind::kAcquire);
+ // LDAR with correct base_reg.
+ CHECK_EQ(next_insn & 0xffffffe0u, 0x88dffc00u | (base_reg << 5));
+ }
break;
}
case BakerReadBarrierKind::kArray: {
@@ -1467,9 +984,12 @@ void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) {
case BakerReadBarrierKind::kGcRoot: {
DCHECK_GE(literal_offset, 4u);
uint32_t prev_insn = GetInsn(literal_offset - 4u);
- // LDR (immediate) with correct root_reg.
const uint32_t root_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
- CHECK_EQ(prev_insn & 0xffc0001fu, 0xb9400000u | root_reg);
+ // Usually LDR (immediate) with correct root_reg but
+ // we may have a "MOV marked, old_value" for UnsafeCASObject.
+ if ((prev_insn & 0xffe0ffff) != (0x2a0003e0 | root_reg)) { // MOV?
+ CHECK_EQ(prev_insn & 0xffc0001fu, 0xb9400000u | root_reg); // LDR?
+ }
break;
}
default:
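
The mask-and-compare checks in the hunk above verify that the instruction next to the patched CBNZ really is the expected load. As a worked example of the kField case, here is a plain function using the same mask and expected value; the sample encoding in main() is computed by hand and is only illustrative.

#include <cstdint>
#include <iostream>

// True if `insn` encodes "LDR Wt, [Xn, #imm]" (32-bit, unsigned offset) whose
// base register Xn, in bits [9:5], equals `base_reg`; this mirrors
// CHECK_EQ(next_insn & 0xffc003e0u, 0xb9400000u | (base_reg << 5)) above.
bool IsLdrImmWithBase(uint32_t insn, uint32_t base_reg) {
  return (insn & 0xffc003e0u) == (0xb9400000u | (base_reg << 5));
}

int main() {
  // LDR w1, [x2, #8] encodes as 0xb9400841 (imm12 = 2 after scaling by 4).
  uint32_t insn = 0xb9400841u;
  std::cout << std::boolalpha
            << IsLdrImmWithBase(insn, 2) << '\n'   // true
            << IsLdrImmWithBase(insn, 3) << '\n';  // false
  return 0;
}
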
@@ -1654,8 +1174,24 @@ void CodeGeneratorARM64::MarkGCCard(Register object, Register value, bool value_
if (value_can_be_null) {
__ Cbz(value, &done);
}
+ // Load the address of the card table into `card`.
__ Ldr(card, MemOperand(tr, Thread::CardTableOffset<kArm64PointerSize>().Int32Value()));
+ // Calculate the offset (in the card table) of the card corresponding to
+ // `object`.
__ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
+ // Write the `art::gc::accounting::CardTable::kCardDirty` value into the
+ // `object`'s card.
+ //
+ // Register `card` contains the address of the card table. Note that the card
+ // table's base is biased during its creation so that it always starts at an
+ // address whose least-significant byte is equal to `kCardDirty` (see
+ // art::gc::accounting::CardTable::Create). Therefore the STRB instruction
+ // below writes the `kCardDirty` (byte) value into the `object`'s card
+ // (located at `card + object >> kCardShift`).
+ //
+ // This dual use of the value in register `card` (1. to calculate the location
+ // of the card to mark; and 2. to load the `kCardDirty` value) saves a load
+ // (no need to explicitly load `kCardDirty` as an immediate value).
__ Strb(card, MemOperand(card, temp.X()));
if (value_can_be_null) {
__ Bind(&done);
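
The new comments in the MarkGCCard hunk describe how a single STRB both locates the card and supplies the kCardDirty value, because the card table base is biased so that its low byte equals kCardDirty. A simplified C++ rendering of that idea follows; the constants match my understanding of ART's CardTable but should be treated as assumptions, and the names are made up for the example.

#include <cstddef>
#include <cstdint>
#include <vector>

// Assumed values: 1 KiB cards (kCardShift == 10) and 0x70 as the dirty marker.
constexpr size_t kCardShift = 10;
constexpr uint8_t kCardDirty = 0x70;

// `biased_begin` is the card table base, pre-biased at creation time so that
// the least-significant byte of its address equals kCardDirty. ART's generated
// code stores that low byte into the card, which equals kCardDirty by
// construction; writing the constant directly, as done here, is equivalent,
// but reusing the address saves loading kCardDirty as a separate immediate.
inline void MarkCard(uint8_t* biased_begin, uintptr_t object_address) {
  uint8_t* card = biased_begin + (object_address >> kCardShift);
  *card = kCardDirty;
}

int main() {
  // Toy setup: object addresses are offsets from the heap start, and the
  // table is not actually biased, so MarkCard just dirties card 4 here.
  std::vector<uint8_t> card_table(1024);  // Covers a 1 MiB toy heap.
  MarkCard(card_table.data(), /* object_address */ 4096);
  return 0;
}
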
@@ -2302,18 +1838,12 @@ void LocationsBuilderARM64::HandleFieldGet(HInstruction* instruction,
: LocationSummary::kNoCall);
if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
- // We need a temporary register for the read barrier marking slow
- // path in CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier.
- if (kBakerReadBarrierLinkTimeThunksEnableForFields &&
- !Runtime::Current()->UseJitCompilation() &&
- !field_info.IsVolatile()) {
- // If link-time thunks for the Baker read barrier are enabled, for AOT
- // non-volatile loads we need a temporary only if the offset is too big.
- if (field_info.GetFieldOffset().Uint32Value() >= kReferenceLoadMinFarOffset) {
- locations->AddTemp(FixedTempLocation());
- }
- } else {
- locations->AddTemp(Location::RequiresRegister());
+ // We need a temporary register for the read barrier load in
+ // CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier()
+ // only if the field is volatile or the offset is too big.
+ if (field_info.IsVolatile() ||
+ field_info.GetFieldOffset().Uint32Value() >= kReferenceLoadMinFarOffset) {
+ locations->AddTemp(FixedTempLocation());
}
}
locations->SetInAt(0, Location::RequiresRegister());
@@ -2776,14 +2306,11 @@ void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
: LocationSummary::kNoCall);
if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
- // We need a temporary register for the read barrier marking slow
- // path in CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier.
- if (kBakerReadBarrierLinkTimeThunksEnableForFields &&
- !Runtime::Current()->UseJitCompilation() &&
- instruction->GetIndex()->IsConstant()) {
+ if (instruction->GetIndex()->IsConstant()) {
// Array loads with constant index are treated as field loads.
- // If link-time thunks for the Baker read barrier are enabled, for AOT
- // constant index loads we need a temporary only if the offset is too big.
+ // We need a temporary register for the read barrier load in
+ // CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier()
+ // only if the offset is too big.
uint32_t offset = CodeGenerator::GetArrayDataOffset(instruction);
uint32_t index = instruction->GetIndex()->AsIntConstant()->GetValue();
offset += index << DataType::SizeShift(DataType::Type::kReference);
@@ -2791,6 +2318,8 @@ void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
locations->AddTemp(FixedTempLocation());
}
} else {
+ // We need a non-scratch temporary for the array data pointer in
+ // CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier().
locations->AddTemp(Location::RequiresRegister());
}
}
@@ -2846,7 +2375,7 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
} else {
Register temp = WRegisterFrom(locations->GetTemp(0));
codegen_->GenerateArrayLoadWithBakerReadBarrier(
- instruction, out, obj.W(), offset, index, temp, /* needs_null_check */ false);
+ out, obj.W(), offset, index, temp, /* needs_null_check */ false);
}
} else {
// General case.
@@ -3178,12 +2707,14 @@ void LocationsBuilderARM64::VisitClinitCheck(HClinitCheck* check) {
if (check->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
}
+ // Rely on the type initialization to save everything we need.
+ locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
}
void InstructionCodeGeneratorARM64::VisitClinitCheck(HClinitCheck* check) {
// We assume the class is not null.
- SlowPathCodeARM64* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathARM64(
- check->GetLoadClass(), check, check->GetDexPc(), true);
+ SlowPathCodeARM64* slow_path =
+ new (codegen_->GetScopedAllocator()) LoadClassSlowPathARM64(check->GetLoadClass(), check);
codegen_->AddSlowPath(slow_path);
GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
}
@@ -4734,9 +4265,18 @@ vixl::aarch64::Label* CodeGeneratorARM64::NewStringBssEntryPatch(
return NewPcRelativePatch(&dex_file, string_index.index_, adrp_label, &string_bss_entry_patches_);
}
-vixl::aarch64::Label* CodeGeneratorARM64::NewBakerReadBarrierPatch(uint32_t custom_data) {
- baker_read_barrier_patches_.emplace_back(custom_data);
- return &baker_read_barrier_patches_.back().label;
+void CodeGeneratorARM64::EmitBakerReadBarrierCbnz(uint32_t custom_data) {
+ DCHECK(!__ AllowMacroInstructions()); // In ExactAssemblyScope.
+ if (Runtime::Current()->UseJitCompilation()) {
+ auto it = jit_baker_read_barrier_slow_paths_.FindOrAdd(custom_data);
+ vixl::aarch64::Label* slow_path_entry = &it->second.label;
+ __ cbnz(mr, slow_path_entry);
+ } else {
+ baker_read_barrier_patches_.emplace_back(custom_data);
+ vixl::aarch64::Label* cbnz_label = &baker_read_barrier_patches_.back().label;
+ __ bind(cbnz_label);
+ __ cbnz(mr, static_cast<int64_t>(0)); // Placeholder, patched at link-time.
+ }
}
vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativePatch(
@@ -5053,13 +4593,7 @@ void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
if (cls->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the type resolution or initialization and marking to save everything we need.
- RegisterSet caller_saves = RegisterSet::Empty();
- InvokeRuntimeCallingConvention calling_convention;
- caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode()));
- DCHECK_EQ(calling_convention.GetRegisterAt(0).GetCode(),
- RegisterFrom(calling_convention.GetReturnLocation(DataType::Type::kReference),
- DataType::Type::kReference).GetCode());
- locations->SetCustomSlowPathCallerSaves(caller_saves);
+ locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
} else {
// For non-Baker read barrier we have a temp-clobbering call.
}
@@ -5171,8 +4705,8 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
bool do_clinit = cls->MustGenerateClinitCheck();
if (generate_null_check || do_clinit) {
DCHECK(cls->CanCallRuntime());
- SlowPathCodeARM64* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathARM64(
- cls, cls, cls->GetDexPc(), do_clinit);
+ SlowPathCodeARM64* slow_path =
+ new (codegen_->GetScopedAllocator()) LoadClassSlowPathARM64(cls, cls);
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
__ Cbz(out, slow_path->GetEntryLabel());
@@ -5257,13 +4791,7 @@ void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
if (load->GetLoadKind() == HLoadString::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the pResolveString and marking to save everything we need.
- RegisterSet caller_saves = RegisterSet::Empty();
- InvokeRuntimeCallingConvention calling_convention;
- caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode()));
- DCHECK_EQ(calling_convention.GetRegisterAt(0).GetCode(),
- RegisterFrom(calling_convention.GetReturnLocation(DataType::Type::kReference),
- DataType::Type::kReference).GetCode());
- locations->SetCustomSlowPathCallerSaves(caller_saves);
+ locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
} else {
// For non-Baker read barrier we have a temp-clobbering call.
}
@@ -6255,76 +5783,39 @@ void CodeGeneratorARM64::GenerateGcRootFieldLoad(
if (kUseBakerReadBarrier) {
// Fast path implementation of art::ReadBarrier::BarrierForRoot when
// Baker's read barrier are used.
- if (kBakerReadBarrierLinkTimeThunksEnableForGcRoots &&
- !Runtime::Current()->UseJitCompilation()) {
- // Query `art::Thread::Current()->GetIsGcMarking()` (stored in
- // the Marking Register) to decide whether we need to enter
- // the slow path to mark the GC root.
- //
- // We use link-time generated thunks for the slow path. That thunk
- // checks the reference and jumps to the entrypoint if needed.
- //
- // lr = &return_address;
- // GcRoot<mirror::Object> root = *(obj+offset); // Original reference load.
- // if (mr) { // Thread::Current()->GetIsGcMarking()
- // goto gc_root_thunk<root_reg>(lr)
- // }
- // return_address:
-
- UseScratchRegisterScope temps(GetVIXLAssembler());
- DCHECK(temps.IsAvailable(ip0));
- DCHECK(temps.IsAvailable(ip1));
- temps.Exclude(ip0, ip1);
- uint32_t custom_data = EncodeBakerReadBarrierGcRootData(root_reg.GetCode());
- vixl::aarch64::Label* cbnz_label = NewBakerReadBarrierPatch(custom_data);
-
- EmissionCheckScope guard(GetVIXLAssembler(), 3 * vixl::aarch64::kInstructionSize);
- vixl::aarch64::Label return_address;
- __ adr(lr, &return_address);
- if (fixup_label != nullptr) {
- __ Bind(fixup_label);
- }
- static_assert(BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_OFFSET == -8,
- "GC root LDR must be 2 instruction (8B) before the return address label.");
- __ ldr(root_reg, MemOperand(obj.X(), offset));
- __ Bind(cbnz_label);
- __ cbnz(mr, static_cast<int64_t>(0)); // Placeholder, patched at link-time.
- __ Bind(&return_address);
- } else {
- // Query `art::Thread::Current()->GetIsGcMarking()` (stored in
- // the Marking Register) to decide whether we need to enter
- // the slow path to mark the GC root.
- //
- // GcRoot<mirror::Object> root = *(obj+offset); // Original reference load.
- // if (mr) { // Thread::Current()->GetIsGcMarking()
- // // Slow path.
- // entrypoint = Thread::Current()->pReadBarrierMarkReg ## root.reg()
- // root = entrypoint(root); // root = ReadBarrier::Mark(root); // Entry point call.
- // }
-
- // Slow path marking the GC root `root`. The entrypoint will
- // be loaded by the slow path code.
- SlowPathCodeARM64* slow_path =
- new (GetScopedAllocator()) ReadBarrierMarkSlowPathARM64(instruction, root);
- AddSlowPath(slow_path);
-
- // /* GcRoot<mirror::Object> */ root = *(obj + offset)
- if (fixup_label == nullptr) {
- __ Ldr(root_reg, MemOperand(obj, offset));
- } else {
- EmitLdrOffsetPlaceholder(fixup_label, root_reg, obj);
- }
- static_assert(
- sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(GcRoot<mirror::Object>),
- "art::mirror::CompressedReference<mirror::Object> and art::GcRoot<mirror::Object> "
- "have different sizes.");
- static_assert(sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(int32_t),
- "art::mirror::CompressedReference<mirror::Object> and int32_t "
- "have different sizes.");
-
- __ Cbnz(mr, slow_path->GetEntryLabel());
- __ Bind(slow_path->GetExitLabel());
+
+ // Query `art::Thread::Current()->GetIsGcMarking()` (stored in
+ // the Marking Register) to decide whether we need to enter
+ // the slow path to mark the GC root.
+ //
+ // We use shared thunks for the slow path; shared within the method
+ // for JIT, across methods for AOT. That thunk checks the reference
+ // and jumps to the entrypoint if needed.
+ //
+ // lr = &return_address;
+ // GcRoot<mirror::Object> root = *(obj+offset); // Original reference load.
+ // if (mr) { // Thread::Current()->GetIsGcMarking()
+ // goto gc_root_thunk<root_reg>(lr)
+ // }
+ // return_address:
+
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ DCHECK(temps.IsAvailable(ip0));
+ DCHECK(temps.IsAvailable(ip1));
+ temps.Exclude(ip0, ip1);
+ uint32_t custom_data = EncodeBakerReadBarrierGcRootData(root_reg.GetCode());
+
+ ExactAssemblyScope guard(GetVIXLAssembler(), 3 * vixl::aarch64::kInstructionSize);
+ vixl::aarch64::Label return_address;
+ __ adr(lr, &return_address);
+ if (fixup_label != nullptr) {
+ __ bind(fixup_label);
}
+ static_assert(BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_OFFSET == -8,
+ "GC root LDR must be 2 instructions (8B) before the return address label.");
+ __ ldr(root_reg, MemOperand(obj.X(), offset));
+ EmitBakerReadBarrierCbnz(custom_data);
+ __ bind(&return_address);
} else {
// GC root loaded through a slow path for read barriers other
// than Baker's.
@@ -6351,95 +5842,123 @@ void CodeGeneratorARM64::GenerateGcRootFieldLoad(
MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
}
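The EmitBakerReadBarrierCbnz() call above replaces the old NewBakerReadBarrierPatch() plus
explicit placeholder CBNZ pair (see the header change below): for AOT it records a patch so
the branch can later be fixed up to a link-time generated thunk, while for JIT it targets a
label kept in a per-method map keyed by the custom data; the ARM file later in this diff
shows the matching loop in Finalize() that binds those labels and emits the thunk bodies
into the method. The stand-alone sketch below shows only the deduplication idea; the map
type and names are illustrative, not the ART API.

#include <cstdint>
#include <map>

struct SlowPathLabel { bool bound = false; };

// One shared slow-path entry per encoded (kind, registers) key within a method.
std::map<uint32_t, SlowPathLabel> jit_slow_path_labels;

SlowPathLabel* GetOrCreateSlowPathLabel(uint32_t custom_data) {
  // Every CBNZ emitted with the same custom data branches to the same label, so the
  // corresponding thunk body is generated only once per compiled method.
  return &jit_slow_path_labels[custom_data];
}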
+void CodeGeneratorARM64::GenerateUnsafeCasOldValueMovWithBakerReadBarrier(
+ vixl::aarch64::Register marked,
+ vixl::aarch64::Register old_value) {
+ DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(kUseBakerReadBarrier);
+
+ // Similar to the Baker RB path in GenerateGcRootFieldLoad(), with a MOV instead of LDR.
+ uint32_t custom_data = EncodeBakerReadBarrierGcRootData(marked.GetCode());
+
+ ExactAssemblyScope guard(GetVIXLAssembler(), 3 * vixl::aarch64::kInstructionSize);
+ vixl::aarch64::Label return_address;
+ __ adr(lr, &return_address);
+ static_assert(BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_OFFSET == -8,
+ "GC root LDR must be 2 instructions (8B) before the return address label.");
+ __ mov(marked, old_value);
+ EmitBakerReadBarrierCbnz(custom_data);
+ __ bind(&return_address);
+}
+
void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
Location ref,
- Register obj,
- uint32_t offset,
- Location maybe_temp,
+ vixl::aarch64::Register obj,
+ const vixl::aarch64::MemOperand& src,
bool needs_null_check,
bool use_load_acquire) {
DCHECK(kEmitCompilerReadBarrier);
DCHECK(kUseBakerReadBarrier);
- if (kBakerReadBarrierLinkTimeThunksEnableForFields &&
- !use_load_acquire &&
- !Runtime::Current()->UseJitCompilation()) {
- // Query `art::Thread::Current()->GetIsGcMarking()` (stored in the
- // Marking Register) to decide whether we need to enter the slow
- // path to mark the reference. Then, in the slow path, check the
- // gray bit in the lock word of the reference's holder (`obj`) to
- // decide whether to mark `ref` or not.
- //
- // We use link-time generated thunks for the slow path. That thunk checks
- // the holder and jumps to the entrypoint if needed. If the holder is not
- // gray, it creates a fake dependency and returns to the LDR instruction.
- //
- // lr = &gray_return_address;
- // if (mr) { // Thread::Current()->GetIsGcMarking()
- // goto field_thunk<holder_reg, base_reg>(lr)
- // }
- // not_gray_return_address:
- // // Original reference load. If the offset is too large to fit
- // // into LDR, we use an adjusted base register here.
- // HeapReference<mirror::Object> reference = *(obj+offset);
- // gray_return_address:
-
- DCHECK_ALIGNED(offset, sizeof(mirror::HeapReference<mirror::Object>));
- Register base = obj;
- if (offset >= kReferenceLoadMinFarOffset) {
- DCHECK(maybe_temp.IsRegister());
- base = WRegisterFrom(maybe_temp);
- static_assert(IsPowerOfTwo(kReferenceLoadMinFarOffset), "Expecting a power of 2.");
- __ Add(base, obj, Operand(offset & ~(kReferenceLoadMinFarOffset - 1u)));
- offset &= (kReferenceLoadMinFarOffset - 1u);
- }
- UseScratchRegisterScope temps(GetVIXLAssembler());
- DCHECK(temps.IsAvailable(ip0));
- DCHECK(temps.IsAvailable(ip1));
- temps.Exclude(ip0, ip1);
- uint32_t custom_data = EncodeBakerReadBarrierFieldData(base.GetCode(), obj.GetCode());
- vixl::aarch64::Label* cbnz_label = NewBakerReadBarrierPatch(custom_data);
+ // Query `art::Thread::Current()->GetIsGcMarking()` (stored in the
+ // Marking Register) to decide whether we need to enter the slow
+ // path to mark the reference. Then, in the slow path, check the
+ // gray bit in the lock word of the reference's holder (`obj`) to
+ // decide whether to mark `ref` or not.
+ //
+ // We use shared thunks for the slow path; shared within the method
+ // for JIT, across methods for AOT. That thunk checks the holder
+ // and jumps to the entrypoint if needed. If the holder is not gray,
+ // it creates a fake dependency and returns to the LDR instruction.
+ //
+ // lr = &gray_return_address;
+ // if (mr) { // Thread::Current()->GetIsGcMarking()
+ // goto field_thunk<holder_reg, base_reg, use_load_acquire>(lr)
+ // }
+ // not_gray_return_address:
+ // // Original reference load. If the offset is too large to fit
+ // // into LDR, we use an adjusted base register here.
+ // HeapReference<mirror::Object> reference = *(obj+offset);
+ // gray_return_address:
- {
- EmissionCheckScope guard(GetVIXLAssembler(),
- (kPoisonHeapReferences ? 4u : 3u) * vixl::aarch64::kInstructionSize);
- vixl::aarch64::Label return_address;
- __ adr(lr, &return_address);
- __ Bind(cbnz_label);
- __ cbnz(mr, static_cast<int64_t>(0)); // Placeholder, patched at link-time.
- static_assert(BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET == (kPoisonHeapReferences ? -8 : -4),
- "Field LDR must be 1 instruction (4B) before the return address label; "
- " 2 instructions (8B) for heap poisoning.");
- Register ref_reg = RegisterFrom(ref, DataType::Type::kReference);
- __ ldr(ref_reg, MemOperand(base.X(), offset));
- if (needs_null_check) {
- MaybeRecordImplicitNullCheck(instruction);
- }
- GetAssembler()->MaybeUnpoisonHeapReference(ref_reg);
- __ Bind(&return_address);
+ DCHECK(src.GetAddrMode() == vixl::aarch64::Offset);
+ DCHECK_ALIGNED(src.GetOffset(), sizeof(mirror::HeapReference<mirror::Object>));
+
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ DCHECK(temps.IsAvailable(ip0));
+ DCHECK(temps.IsAvailable(ip1));
+ temps.Exclude(ip0, ip1);
+ uint32_t custom_data = use_load_acquire
+ ? EncodeBakerReadBarrierAcquireData(src.GetBaseRegister().GetCode(), obj.GetCode())
+ : EncodeBakerReadBarrierFieldData(src.GetBaseRegister().GetCode(), obj.GetCode());
+
+ {
+ ExactAssemblyScope guard(GetVIXLAssembler(),
+ (kPoisonHeapReferences ? 4u : 3u) * vixl::aarch64::kInstructionSize);
+ vixl::aarch64::Label return_address;
+ __ adr(lr, &return_address);
+ EmitBakerReadBarrierCbnz(custom_data);
+ static_assert(BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET == (kPoisonHeapReferences ? -8 : -4),
+ "Field LDR must be 1 instruction (4B) before the return address label; "
+ " 2 instructions (8B) for heap poisoning.");
+ Register ref_reg = RegisterFrom(ref, DataType::Type::kReference);
+ if (use_load_acquire) {
+ DCHECK_EQ(src.GetOffset(), 0);
+ __ ldar(ref_reg, src);
+ } else {
+ __ ldr(ref_reg, src);
}
- MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__, /* temp_loc */ LocationFrom(ip1));
- return;
+ if (needs_null_check) {
+ MaybeRecordImplicitNullCheck(instruction);
+ }
+ // Unpoison the reference explicitly if needed. MaybeUnpoisonHeapReference() uses
+ // macro instructions disallowed in ExactAssemblyScope.
+ if (kPoisonHeapReferences) {
+ __ neg(ref_reg, Operand(ref_reg));
+ }
+ __ bind(&return_address);
}
-
- // /* HeapReference<Object> */ ref = *(obj + offset)
- Register temp = WRegisterFrom(maybe_temp);
- Location no_index = Location::NoLocation();
- size_t no_scale_factor = 0u;
- GenerateReferenceLoadWithBakerReadBarrier(instruction,
- ref,
- obj,
- offset,
- no_index,
- no_scale_factor,
- temp,
- needs_null_check,
- use_load_acquire);
+ MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__, /* temp_loc */ LocationFrom(ip1));
}
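The load-acquire path above must assert a zero offset because LDAR only addresses a bare
base register, while the plain-load thunk can recover the field offset from the LDR
instruction itself, as the kField case of CompileBakerReadBarrierThunk does further down
with UBFX and a scaled load. A minimal sketch of that decoding, assuming the standard
AArch64 LDR (immediate, unsigned offset) encoding:

#include <cstdint>

// imm12 sits in bits [21:10] of the LDR instruction and is scaled by the access size,
// 4 bytes for a 32-bit compressed heap reference.
uint32_t DecodeFieldLdrOffset(uint32_t ldr_instruction) {
  uint32_t imm12 = (ldr_instruction >> 10) & 0xFFFu;  // Ubfx ip0, ip0, #10, #12
  return imm12 << 2;                                  // MemOperand(base, ip0, LSL, 2)
}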
-void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
+void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
Location ref,
Register obj,
+ uint32_t offset,
+ Location maybe_temp,
+ bool needs_null_check,
+ bool use_load_acquire) {
+ DCHECK_ALIGNED(offset, sizeof(mirror::HeapReference<mirror::Object>));
+ Register base = obj;
+ if (use_load_acquire) {
+ DCHECK(maybe_temp.IsRegister());
+ base = WRegisterFrom(maybe_temp);
+ __ Add(base, obj, offset);
+ offset = 0u;
+ } else if (offset >= kReferenceLoadMinFarOffset) {
+ DCHECK(maybe_temp.IsRegister());
+ base = WRegisterFrom(maybe_temp);
+ static_assert(IsPowerOfTwo(kReferenceLoadMinFarOffset), "Expecting a power of 2.");
+ __ Add(base, obj, Operand(offset & ~(kReferenceLoadMinFarOffset - 1u)));
+ offset &= (kReferenceLoadMinFarOffset - 1u);
+ }
+ MemOperand src(base.X(), offset);
+ GenerateFieldLoadWithBakerReadBarrier(
+ instruction, ref, obj, src, needs_null_check, use_load_acquire);
+}
+
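A condensed sketch of the base/offset split this overload performs before delegating to
the MemOperand version: for a load-acquire the whole offset is folded into the temporary
base register, and for a far plain load only the aligned high part is, keeping the LDR
immediate small and bounding the set of base registers that need thunks. The window size
below is an assumed value for illustration; the real kReferenceLoadMinFarOffset constant
is defined elsewhere in this file (the ARM file later in this diff uses 4 * KB).

#include <cstdint>

constexpr uint32_t kAssumedMinFarOffset = 4 * 1024;

struct Split { uint32_t add_to_base; uint32_t ldr_offset; };

Split SplitFieldOffset(uint32_t offset, bool use_load_acquire) {
  if (use_load_acquire) {
    return {offset, 0u};                                 // ADD base, obj, #offset; LDAR [base]
  }
  if (offset >= kAssumedMinFarOffset) {
    return {offset & ~(kAssumedMinFarOffset - 1u),       // ADD base, obj, #aligned_part
            offset & (kAssumedMinFarOffset - 1u)};       // LDR ref, [base, #remainder]
  }
  return {0u, offset};                                   // LDR ref, [obj, #offset]
}
// Example with the assumed 4 KiB window: offset 0x1234 -> ADD #0x1000, LDR #0x234.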
+void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(Location ref,
+ Register obj,
uint32_t data_offset,
Location index,
Register temp,
@@ -6452,266 +5971,57 @@ void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(HInstruction* ins
"art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
size_t scale_factor = DataType::SizeShift(DataType::Type::kReference);
- if (kBakerReadBarrierLinkTimeThunksEnableForArrays &&
- !Runtime::Current()->UseJitCompilation()) {
- // Query `art::Thread::Current()->GetIsGcMarking()` (stored in the
- // Marking Register) to decide whether we need to enter the slow
- // path to mark the reference. Then, in the slow path, check the
- // gray bit in the lock word of the reference's holder (`obj`) to
- // decide whether to mark `ref` or not.
- //
- // We use link-time generated thunks for the slow path. That thunk checks
- // the holder and jumps to the entrypoint if needed. If the holder is not
- // gray, it creates a fake dependency and returns to the LDR instruction.
- //
- // lr = &gray_return_address;
- // if (mr) { // Thread::Current()->GetIsGcMarking()
- // goto array_thunk<base_reg>(lr)
- // }
- // not_gray_return_address:
- // // Original reference load. If the offset is too large to fit
- // // into LDR, we use an adjusted base register here.
- // HeapReference<mirror::Object> reference = data[index];
- // gray_return_address:
-
- DCHECK(index.IsValid());
- Register index_reg = RegisterFrom(index, DataType::Type::kInt32);
- Register ref_reg = RegisterFrom(ref, DataType::Type::kReference);
-
- UseScratchRegisterScope temps(GetVIXLAssembler());
- DCHECK(temps.IsAvailable(ip0));
- DCHECK(temps.IsAvailable(ip1));
- temps.Exclude(ip0, ip1);
- uint32_t custom_data = EncodeBakerReadBarrierArrayData(temp.GetCode());
- vixl::aarch64::Label* cbnz_label = NewBakerReadBarrierPatch(custom_data);
-
- __ Add(temp.X(), obj.X(), Operand(data_offset));
- {
- EmissionCheckScope guard(GetVIXLAssembler(),
- (kPoisonHeapReferences ? 4u : 3u) * vixl::aarch64::kInstructionSize);
- vixl::aarch64::Label return_address;
- __ adr(lr, &return_address);
- __ Bind(cbnz_label);
- __ cbnz(mr, static_cast<int64_t>(0)); // Placeholder, patched at link-time.
- static_assert(BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET == (kPoisonHeapReferences ? -8 : -4),
- "Array LDR must be 1 instruction (4B) before the return address label; "
- " 2 instructions (8B) for heap poisoning.");
- __ ldr(ref_reg, MemOperand(temp.X(), index_reg.X(), LSL, scale_factor));
- DCHECK(!needs_null_check); // The thunk cannot handle the null check.
- GetAssembler()->MaybeUnpoisonHeapReference(ref_reg);
- __ Bind(&return_address);
- }
- MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__, /* temp_loc */ LocationFrom(ip1));
- return;
- }
-
- // Array cells are never volatile variables, therefore array loads
- // never use Load-Acquire instructions on ARM64.
- const bool use_load_acquire = false;
-
- // /* HeapReference<Object> */ ref =
- // *(obj + data_offset + index * sizeof(HeapReference<Object>))
- GenerateReferenceLoadWithBakerReadBarrier(instruction,
- ref,
- obj,
- data_offset,
- index,
- scale_factor,
- temp,
- needs_null_check,
- use_load_acquire);
-}
-
-void CodeGeneratorARM64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
- Location ref,
- Register obj,
- uint32_t offset,
- Location index,
- size_t scale_factor,
- Register temp,
- bool needs_null_check,
- bool use_load_acquire) {
- DCHECK(kEmitCompilerReadBarrier);
- DCHECK(kUseBakerReadBarrier);
- // If we are emitting an array load, we should not be using a
- // Load Acquire instruction. In other words:
- // `instruction->IsArrayGet()` => `!use_load_acquire`.
- DCHECK(!instruction->IsArrayGet() || !use_load_acquire);
-
// Query `art::Thread::Current()->GetIsGcMarking()` (stored in the
// Marking Register) to decide whether we need to enter the slow
// path to mark the reference. Then, in the slow path, check the
// gray bit in the lock word of the reference's holder (`obj`) to
// decide whether to mark `ref` or not.
//
- // if (mr) { // Thread::Current()->GetIsGcMarking()
- // // Slow path.
- // uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
- // lfence; // Load fence or artificial data dependency to prevent load-load reordering
- // HeapReference<mirror::Object> ref = *src; // Original reference load.
- // bool is_gray = (rb_state == ReadBarrier::GrayState());
- // if (is_gray) {
- // entrypoint = Thread::Current()->pReadBarrierMarkReg ## root.reg()
- // ref = entrypoint(ref); // ref = ReadBarrier::Mark(ref); // Runtime entry point call.
- // }
- // } else {
- // HeapReference<mirror::Object> ref = *src; // Original reference load.
- // }
-
- // Slow path marking the object `ref` when the GC is marking. The
- // entrypoint will be loaded by the slow path code.
- SlowPathCodeARM64* slow_path =
- new (GetScopedAllocator()) LoadReferenceWithBakerReadBarrierSlowPathARM64(
- instruction,
- ref,
- obj,
- offset,
- index,
- scale_factor,
- needs_null_check,
- use_load_acquire,
- temp);
- AddSlowPath(slow_path);
-
- __ Cbnz(mr, slow_path->GetEntryLabel());
- // Fast path: the GC is not marking: just load the reference.
- GenerateRawReferenceLoad(
- instruction, ref, obj, offset, index, scale_factor, needs_null_check, use_load_acquire);
- __ Bind(slow_path->GetExitLabel());
- MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
-}
-
-void CodeGeneratorARM64::UpdateReferenceFieldWithBakerReadBarrier(HInstruction* instruction,
- Location ref,
- Register obj,
- Location field_offset,
- Register temp,
- bool needs_null_check,
- bool use_load_acquire) {
- DCHECK(kEmitCompilerReadBarrier);
- DCHECK(kUseBakerReadBarrier);
- // If we are emitting an array load, we should not be using a
- // Load Acquire instruction. In other words:
- // `instruction->IsArrayGet()` => `!use_load_acquire`.
- DCHECK(!instruction->IsArrayGet() || !use_load_acquire);
-
- // Query `art::Thread::Current()->GetIsGcMarking()` (stored in the
- // Marking Register) to decide whether we need to enter the slow
- // path to update the reference field within `obj`. Then, in the
- // slow path, check the gray bit in the lock word of the reference's
- // holder (`obj`) to decide whether to mark `ref` and update the
- // field or not.
+ // We use shared thunks for the slow path; shared within the method
+ // for JIT, across methods for AOT. That thunk checks the holder
+ // and jumps to the entrypoint if needed. If the holder is not gray,
+ // it creates a fake dependency and returns to the LDR instruction.
//
- // if (mr) { // Thread::Current()->GetIsGcMarking()
- // // Slow path.
- // uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
- // lfence; // Load fence or artificial data dependency to prevent load-load reordering
- // HeapReference<mirror::Object> ref = *(obj + field_offset); // Reference load.
- // bool is_gray = (rb_state == ReadBarrier::GrayState());
- // if (is_gray) {
- // old_ref = ref;
- // entrypoint = Thread::Current()->pReadBarrierMarkReg ## root.reg()
- // ref = entrypoint(ref); // ref = ReadBarrier::Mark(ref); // Runtime entry point call.
- // compareAndSwapObject(obj, field_offset, old_ref, ref);
+ // lr = &gray_return_address;
+ // if (mr) { // Thread::Current()->GetIsGcMarking()
+ // goto array_thunk<base_reg>(lr)
// }
- // }
-
- // Slow path updating the object reference at address `obj + field_offset`
- // when the GC is marking. The entrypoint will be loaded by the slow path code.
- SlowPathCodeARM64* slow_path =
- new (GetScopedAllocator()) LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARM64(
- instruction,
- ref,
- obj,
- /* offset */ 0u,
- /* index */ field_offset,
- /* scale_factor */ 0u /* "times 1" */,
- needs_null_check,
- use_load_acquire,
- temp);
- AddSlowPath(slow_path);
-
- __ Cbnz(mr, slow_path->GetEntryLabel());
- // Fast path: the GC is not marking: nothing to do (the field is
- // up-to-date, and we don't need to load the reference).
- __ Bind(slow_path->GetExitLabel());
- MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
-}
+ // not_gray_return_address:
+ // // Original reference load. If the offset is too large to fit
+ // // into LDR, we use an adjusted base register here.
+ // HeapReference<mirror::Object> reference = data[index];
+ // gray_return_address:
-void CodeGeneratorARM64::GenerateRawReferenceLoad(HInstruction* instruction,
- Location ref,
- Register obj,
- uint32_t offset,
- Location index,
- size_t scale_factor,
- bool needs_null_check,
- bool use_load_acquire) {
- DCHECK(obj.IsW());
- DataType::Type type = DataType::Type::kReference;
- Register ref_reg = RegisterFrom(ref, type);
+ DCHECK(index.IsValid());
+ Register index_reg = RegisterFrom(index, DataType::Type::kInt32);
+ Register ref_reg = RegisterFrom(ref, DataType::Type::kReference);
- // If needed, vixl::EmissionCheckScope guards are used to ensure
- // that no pools are emitted between the load (macro) instruction
- // and MaybeRecordImplicitNullCheck.
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ DCHECK(temps.IsAvailable(ip0));
+ DCHECK(temps.IsAvailable(ip1));
+ temps.Exclude(ip0, ip1);
+ uint32_t custom_data = EncodeBakerReadBarrierArrayData(temp.GetCode());
- if (index.IsValid()) {
- // Load types involving an "index": ArrayGet,
- // UnsafeGetObject/UnsafeGetObjectVolatile and UnsafeCASObject
- // intrinsics.
- if (use_load_acquire) {
- // UnsafeGetObjectVolatile intrinsic case.
- // Register `index` is not an index in an object array, but an
- // offset to an object reference field within object `obj`.
- DCHECK(instruction->IsInvoke()) << instruction->DebugName();
- DCHECK(instruction->GetLocations()->Intrinsified());
- DCHECK(instruction->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile)
- << instruction->AsInvoke()->GetIntrinsic();
- DCHECK_EQ(offset, 0u);
- DCHECK_EQ(scale_factor, 0u);
- DCHECK_EQ(needs_null_check, false);
- // /* HeapReference<mirror::Object> */ ref = *(obj + index)
- MemOperand field = HeapOperand(obj, XRegisterFrom(index));
- LoadAcquire(instruction, ref_reg, field, /* needs_null_check */ false);
- } else {
- // ArrayGet and UnsafeGetObject and UnsafeCASObject intrinsics cases.
- // /* HeapReference<mirror::Object> */ ref = *(obj + offset + (index << scale_factor))
- if (index.IsConstant()) {
- uint32_t computed_offset = offset + (Int64FromLocation(index) << scale_factor);
- EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
- Load(type, ref_reg, HeapOperand(obj, computed_offset));
- if (needs_null_check) {
- MaybeRecordImplicitNullCheck(instruction);
- }
- } else {
- UseScratchRegisterScope temps(GetVIXLAssembler());
- Register temp = temps.AcquireW();
- __ Add(temp, obj, offset);
- {
- EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
- Load(type, ref_reg, HeapOperand(temp, XRegisterFrom(index), LSL, scale_factor));
- if (needs_null_check) {
- MaybeRecordImplicitNullCheck(instruction);
- }
- }
- }
- }
- } else {
- // /* HeapReference<mirror::Object> */ ref = *(obj + offset)
- MemOperand field = HeapOperand(obj, offset);
- if (use_load_acquire) {
- // Implicit null checks are handled by CodeGeneratorARM64::LoadAcquire.
- LoadAcquire(instruction, ref_reg, field, needs_null_check);
- } else {
- EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
- Load(type, ref_reg, field);
- if (needs_null_check) {
- MaybeRecordImplicitNullCheck(instruction);
- }
+ __ Add(temp.X(), obj.X(), Operand(data_offset));
+ {
+ ExactAssemblyScope guard(GetVIXLAssembler(),
+ (kPoisonHeapReferences ? 4u : 3u) * vixl::aarch64::kInstructionSize);
+ vixl::aarch64::Label return_address;
+ __ adr(lr, &return_address);
+ EmitBakerReadBarrierCbnz(custom_data);
+ static_assert(BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET == (kPoisonHeapReferences ? -8 : -4),
+ "Array LDR must be 1 instruction (4B) before the return address label; "
+ " 2 instructions (8B) for heap poisoning.");
+ __ ldr(ref_reg, MemOperand(temp.X(), index_reg.X(), LSL, scale_factor));
+ DCHECK(!needs_null_check); // The thunk cannot handle the null check.
+ // Unpoison the reference explicitly if needed. MaybeUnpoisonHeapReference() uses
+ // macro instructions disallowed in ExactAssemblyScope.
+ if (kPoisonHeapReferences) {
+ __ neg(ref_reg, Operand(ref_reg));
}
+ __ bind(&return_address);
}
-
- // Object* ref = ref_addr->AsMirrorPtr()
- GetAssembler()->MaybeUnpoisonHeapReference(ref_reg);
+ MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__, /* temp_loc */ LocationFrom(ip1));
}
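For reference, the address computed by the ADD and the scaled LDR above, written out as
plain arithmetic under the assumption of 32-bit compressed references (scale factor 2):

#include <cstdint>

uint64_t ArrayReferenceAddress(uint64_t obj, uint32_t data_offset, uint32_t index) {
  uint64_t temp = obj + data_offset;                     // ADD temp, obj, #data_offset
  return temp + (static_cast<uint64_t>(index) << 2);     // LDR ref, [temp, index, LSL #2]
}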
void CodeGeneratorARM64::MaybeGenerateMarkingRegisterCheck(int code, Location temp_loc) {
@@ -6890,7 +6200,8 @@ void CodeGeneratorARM64::CompileBakerReadBarrierThunk(Arm64Assembler& assembler,
/*out*/ std::string* debug_name) {
BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data);
switch (kind) {
- case BakerReadBarrierKind::kField: {
+ case BakerReadBarrierKind::kField:
+ case BakerReadBarrierKind::kAcquire: {
auto base_reg =
Register::GetXRegFromCode(BakerReadBarrierFirstRegField::Decode(encoded_data));
CheckValidReg(base_reg.GetCode());
@@ -6916,11 +6227,18 @@ void CodeGeneratorARM64::CompileBakerReadBarrierThunk(Arm64Assembler& assembler,
MemOperand lock_word(holder_reg, mirror::Object::MonitorOffset().Int32Value());
EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path, throw_npe);
__ Bind(&slow_path);
- MemOperand ldr_address(lr, BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET);
- __ Ldr(ip0.W(), ldr_address); // Load the LDR (immediate) unsigned offset.
- LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ip1);
- __ Ubfx(ip0.W(), ip0.W(), 10, 12); // Extract the offset.
- __ Ldr(ip0.W(), MemOperand(base_reg, ip0, LSL, 2)); // Load the reference.
+ if (kind == BakerReadBarrierKind::kField) {
+ MemOperand ldr_address(lr, BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET);
+ __ Ldr(ip0.W(), ldr_address); // Load the LDR (immediate) unsigned offset.
+ LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ip1);
+ __ Ubfx(ip0.W(), ip0.W(), 10, 12); // Extract the offset.
+ __ Ldr(ip0.W(), MemOperand(base_reg, ip0, LSL, 2)); // Load the reference.
+ } else {
+ DCHECK(kind == BakerReadBarrierKind::kAcquire);
+ DCHECK(!base_reg.Is(holder_reg));
+ LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ip1);
+ __ Ldar(ip0.W(), MemOperand(base_reg));
+ }
// Do not unpoison. With heap poisoning enabled, the entrypoint expects a poisoned reference.
__ Br(ip1); // Jump to the entrypoint.
break;
@@ -6988,7 +6306,12 @@ void CodeGeneratorARM64::CompileBakerReadBarrierThunk(Arm64Assembler& assembler,
UNREACHABLE();
}
- if (GetCompilerOptions().GenerateAnyDebugInfo()) {
+ // For JIT, the slow path is considered part of the compiled method,
+ // so JIT should pass null as `debug_name`. Tests may not have a runtime.
+ DCHECK(Runtime::Current() == nullptr ||
+ !Runtime::Current()->UseJitCompilation() ||
+ debug_name == nullptr);
+ if (debug_name != nullptr && GetCompilerOptions().GenerateAnyDebugInfo()) {
std::ostringstream oss;
oss << "BakerReadBarrierThunk";
switch (kind) {
@@ -6996,6 +6319,10 @@ void CodeGeneratorARM64::CompileBakerReadBarrierThunk(Arm64Assembler& assembler,
oss << "Field_r" << BakerReadBarrierFirstRegField::Decode(encoded_data)
<< "_r" << BakerReadBarrierSecondRegField::Decode(encoded_data);
break;
+ case BakerReadBarrierKind::kAcquire:
+ oss << "Acquire_r" << BakerReadBarrierFirstRegField::Decode(encoded_data)
+ << "_r" << BakerReadBarrierSecondRegField::Decode(encoded_data);
+ break;
case BakerReadBarrierKind::kArray:
oss << "Array_r" << BakerReadBarrierFirstRegField::Decode(encoded_data);
DCHECK_EQ(kBakerReadBarrierInvalidEncodedReg,
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 93bab3180c..4f6a44fe4d 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -92,6 +92,16 @@ const vixl::aarch64::CPURegList runtime_reserved_core_registers =
((kEmitCompilerReadBarrier && kUseBakerReadBarrier) ? mr : vixl::aarch64::NoCPUReg),
vixl::aarch64::lr);
+// Some instructions have special requirements for a temporary, for example
+// LoadClass/kBssEntry and LoadString/kBssEntry for Baker read barrier require
+// a temp that's not R0 (to avoid an extra move) and Baker read barrier field
+// loads with large offsets need a fixed register to limit the number of link-time
+// thunks we generate. For these and similar cases, we want to reserve a specific
+// register that's neither callee-save nor an argument register. We choose x15.
+inline Location FixedTempLocation() {
+ return Location::RegisterLocation(vixl::aarch64::x15.GetCode());
+}
+
// Callee-save registers AAPCS64, without x19 (Thread Register) (nor
// x20 (Marking Register) when emitting Baker read barriers).
const vixl::aarch64::CPURegList callee_saved_core_registers(
@@ -619,9 +629,9 @@ class CodeGeneratorARM64 : public CodeGenerator {
dex::StringIndex string_index,
vixl::aarch64::Label* adrp_label = nullptr);
- // Add a new baker read barrier patch and return the label to be bound
- // before the CBNZ instruction.
- vixl::aarch64::Label* NewBakerReadBarrierPatch(uint32_t custom_data);
+ // Emit the CBNZ instruction for baker read barrier and record
+ // the associated patch for AOT or slow path for JIT.
+ void EmitBakerReadBarrierCbnz(uint32_t custom_data);
vixl::aarch64::Literal<uint32_t>* DeduplicateBootImageAddressLiteral(uint64_t address);
vixl::aarch64::Literal<uint32_t>* DeduplicateJitStringLiteral(const DexFile& dex_file,
@@ -661,6 +671,18 @@ class CodeGeneratorARM64 : public CodeGenerator {
uint32_t offset,
vixl::aarch64::Label* fixup_label,
ReadBarrierOption read_barrier_option);
+ // Generate MOV for the `old_value` in UnsafeCASObject and mark it with Baker read barrier.
+ void GenerateUnsafeCasOldValueMovWithBakerReadBarrier(vixl::aarch64::Register marked,
+ vixl::aarch64::Register old_value);
+ // Fast path implementation of ReadBarrier::Barrier for a heap
+ // reference field load when Baker's read barriers are used.
+ // Overload suitable for Unsafe.getObject/-Volatile() intrinsic.
+ void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location ref,
+ vixl::aarch64::Register obj,
+ const vixl::aarch64::MemOperand& src,
+ bool needs_null_check,
+ bool use_load_acquire);
// Fast path implementation of ReadBarrier::Barrier for a heap
// reference field load when Baker's read barriers are used.
void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
@@ -672,58 +694,12 @@ class CodeGeneratorARM64 : public CodeGenerator {
bool use_load_acquire);
// Fast path implementation of ReadBarrier::Barrier for a heap
// reference array load when Baker's read barriers are used.
- void GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
- Location ref,
+ void GenerateArrayLoadWithBakerReadBarrier(Location ref,
vixl::aarch64::Register obj,
uint32_t data_offset,
Location index,
vixl::aarch64::Register temp,
bool needs_null_check);
- // Factored implementation, used by GenerateFieldLoadWithBakerReadBarrier,
- // GenerateArrayLoadWithBakerReadBarrier and some intrinsics.
- //
- // Load the object reference located at the address
- // `obj + offset + (index << scale_factor)`, held by object `obj`, into
- // `ref`, and mark it if needed.
- void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
- Location ref,
- vixl::aarch64::Register obj,
- uint32_t offset,
- Location index,
- size_t scale_factor,
- vixl::aarch64::Register temp,
- bool needs_null_check,
- bool use_load_acquire);
-
- // Generate code checking whether the the reference field at the
- // address `obj + field_offset`, held by object `obj`, needs to be
- // marked, and if so, marking it and updating the field within `obj`
- // with the marked value.
- //
- // This routine is used for the implementation of the
- // UnsafeCASObject intrinsic with Baker read barriers.
- //
- // This method has a structure similar to
- // GenerateReferenceLoadWithBakerReadBarrier, but note that argument
- // `ref` is only as a temporary here, and thus its value should not
- // be used afterwards.
- void UpdateReferenceFieldWithBakerReadBarrier(HInstruction* instruction,
- Location ref,
- vixl::aarch64::Register obj,
- Location field_offset,
- vixl::aarch64::Register temp,
- bool needs_null_check,
- bool use_load_acquire);
-
- // Generate a heap reference load (with no read barrier).
- void GenerateRawReferenceLoad(HInstruction* instruction,
- Location ref,
- vixl::aarch64::Register obj,
- uint32_t offset,
- Location index,
- size_t scale_factor,
- bool needs_null_check,
- bool use_load_acquire);
// Emit code checking the status of the Marking Register, and
// aborting the program if MR does not match the value stored in the
@@ -798,9 +774,10 @@ class CodeGeneratorARM64 : public CodeGenerator {
// Encoding of thunk type and data for link-time generated thunks for Baker read barriers.
enum class BakerReadBarrierKind : uint8_t {
- kField, // Field get or array get with constant offset (i.e. constant index).
- kArray, // Array get with index in register.
- kGcRoot, // GC root load.
+ kField, // Field get or array get with constant offset (i.e. constant index).
+ kAcquire, // Volatile field get.
+ kArray, // Array get with index in register.
+ kGcRoot, // GC root load.
kLast = kGcRoot
};
@@ -833,6 +810,15 @@ class CodeGeneratorARM64 : public CodeGenerator {
BakerReadBarrierSecondRegField::Encode(holder_reg);
}
+ static inline uint32_t EncodeBakerReadBarrierAcquireData(uint32_t base_reg, uint32_t holder_reg) {
+ CheckValidReg(base_reg);
+ CheckValidReg(holder_reg);
+ DCHECK_NE(base_reg, holder_reg);
+ return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kAcquire) |
+ BakerReadBarrierFirstRegField::Encode(base_reg) |
+ BakerReadBarrierSecondRegField::Encode(holder_reg);
+ }
+
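For orientation, these encoders pack the thunk kind together with one or two register codes
into a single integer, so identical (kind, registers) combinations produce the same value
and therefore share one thunk. A sketch with assumed shift amounts; the real layout comes
from the BitField definitions in this header.

#include <cstdint>

enum class Kind : uint32_t { kField = 0, kAcquire = 1, kArray = 2, kGcRoot = 3 };

constexpr uint32_t kAssumedRegBits = 5;  // enough for register codes 0..30

constexpr uint32_t EncodeAcquire(uint32_t base_reg, uint32_t holder_reg) {
  return (static_cast<uint32_t>(Kind::kAcquire) << (2 * kAssumedRegBits)) |
         (holder_reg << kAssumedRegBits) |
         base_reg;
}
// Two acquire loads that use the same base and holder registers map to the same encoded
// value and therefore branch to the same shared thunk.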
static inline uint32_t EncodeBakerReadBarrierArrayData(uint32_t base_reg) {
CheckValidReg(base_reg);
return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kArray) |
@@ -928,6 +914,19 @@ class CodeGeneratorARM64 : public CodeGenerator {
// Patches for class literals in JIT compiled code.
TypeToLiteralMap jit_class_patches_;
+ // Baker read barrier slow paths, mapping custom data (uint32_t) to label.
+ // Wrap the label to work around vixl::aarch64::Label being non-copyable
+ // and non-moveable and as such unusable in ArenaSafeMap<>.
+ struct LabelWrapper {
+ LabelWrapper(const LabelWrapper& src)
+ : label() {
+ DCHECK(!src.label.IsLinked() && !src.label.IsBound());
+ }
+ LabelWrapper() = default;
+ vixl::aarch64::Label label;
+ };
+ ArenaSafeMap<uint32_t, LabelWrapper> jit_baker_read_barrier_slow_paths_;
+
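The LabelWrapper indirection exists purely to satisfy the container's value requirements:
copying only ever happens before the label is linked or bound, so the copy can start from
a fresh label (hence the DCHECK). A stand-alone sketch of the same idea with a stand-in
label type; std::map and all names here are for illustration only.

#include <cassert>
#include <cstdint>
#include <map>

class FakeLabel {  // stand-in for the non-copyable, non-moveable vixl label
 public:
  FakeLabel() = default;
  FakeLabel(const FakeLabel&) = delete;
  FakeLabel& operator=(const FakeLabel&) = delete;
  bool IsUsed() const { return used_; }
 private:
  bool used_ = false;
};

struct Wrapper {
  Wrapper() = default;
  Wrapper(const Wrapper& src) : label() { assert(!src.label.IsUsed()); }  // copy = fresh label
  FakeLabel label;
};

std::map<uint32_t, Wrapper> slow_paths;  // now usable as a map value type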
friend class linker::Arm64RelativePatcherTest;
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARM64);
};
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 6d6d1a2aa9..f62421645e 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -85,15 +85,10 @@ static constexpr uint32_t kPackedSwitchCompareJumpThreshold = 7;
// Reference load (except object array loads) is using LDR Rt, [Rn, #offset] which can handle
// offset < 4KiB. For offsets >= 4KiB, the load shall be emitted as two or more instructions.
-// For the Baker read barrier implementation using link-generated thunks we need to split
+// For the Baker read barrier implementation using link-time generated thunks we need to split
// the offset explicitly.
constexpr uint32_t kReferenceLoadMinFarOffset = 4 * KB;
-// Flags controlling the use of link-time generated thunks for Baker read barriers.
-constexpr bool kBakerReadBarrierLinkTimeThunksEnableForFields = true;
-constexpr bool kBakerReadBarrierLinkTimeThunksEnableForArrays = true;
-constexpr bool kBakerReadBarrierLinkTimeThunksEnableForGcRoots = true;
-
// Using a base helps identify when we hit Marking Register check breakpoints.
constexpr int kMarkingRegisterCheckBreakCodeBaseCode = 0x10;
@@ -108,14 +103,6 @@ constexpr int kMarkingRegisterCheckBreakCodeBaseCode = 0x10;
// Marker that code is yet to be, and must, be implemented.
#define TODO_VIXL32(level) LOG(level) << __PRETTY_FUNCTION__ << " unimplemented "
-static inline void EmitPlaceholderBne(CodeGeneratorARMVIXL* codegen, vixl32::Label* patch_label) {
- ExactAssemblyScope eas(codegen->GetVIXLAssembler(), kMaxInstructionSizeInBytes);
- __ bind(patch_label);
- vixl32::Label placeholder_label;
- __ b(ne, EncodingSize(Wide), &placeholder_label); // Placeholder, patched at link-time.
- __ bind(&placeholder_label);
-}
-
static inline bool CanEmitNarrowLdr(vixl32::Register rt, vixl32::Register rn, uint32_t offset) {
return rt.IsLow() && rn.IsLow() && offset < 32u;
}
@@ -150,6 +137,15 @@ class EmitAdrCode {
int32_t adr_location_;
};
+static RegisterSet OneRegInReferenceOutSaveEverythingCallerSaves() {
+ InvokeRuntimeCallingConventionARMVIXL calling_convention;
+ RegisterSet caller_saves = RegisterSet::Empty();
+ caller_saves.Add(LocationFrom(calling_convention.GetRegisterAt(0)));
+ // TODO: Add GetReturnLocation() to the calling convention so that we can DCHECK()
+ // that the kPrimNot result register is the same as the first argument register.
+ return caller_saves;
+}
+
// SaveLiveRegisters and RestoreLiveRegisters from SlowPathCodeARM operate on sets of S registers,
// for each live D registers they treat two corresponding S registers as live ones.
//
@@ -509,29 +505,39 @@ class BoundsCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
class LoadClassSlowPathARMVIXL : public SlowPathCodeARMVIXL {
public:
- LoadClassSlowPathARMVIXL(HLoadClass* cls, HInstruction* at, uint32_t dex_pc, bool do_clinit)
- : SlowPathCodeARMVIXL(at), cls_(cls), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ LoadClassSlowPathARMVIXL(HLoadClass* cls, HInstruction* at)
+ : SlowPathCodeARMVIXL(at), cls_(cls) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
+ DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
Location out = locations->Out();
+ const uint32_t dex_pc = instruction_->GetDexPc();
+ bool must_resolve_type = instruction_->IsLoadClass() && cls_->MustResolveTypeOnSlowPath();
+ bool must_do_clinit = instruction_->IsClinitCheck() || cls_->MustGenerateClinitCheck();
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
- dex::TypeIndex type_index = cls_->GetTypeIndex();
- __ Mov(calling_convention.GetRegisterAt(0), type_index.index_);
- QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
- : kQuickInitializeType;
- arm_codegen->InvokeRuntime(entrypoint, instruction_, dex_pc_, this);
- if (do_clinit_) {
- CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
+ if (must_resolve_type) {
+ DCHECK(IsSameDexFile(cls_->GetDexFile(), arm_codegen->GetGraph()->GetDexFile()));
+ dex::TypeIndex type_index = cls_->GetTypeIndex();
+ __ Mov(calling_convention.GetRegisterAt(0), type_index.index_);
+ arm_codegen->InvokeRuntime(kQuickResolveType, instruction_, dex_pc, this);
+ CheckEntrypointTypes<kQuickResolveType, void*, uint32_t>();
+ // If we also must_do_clinit, the resolved type is now in the correct register.
} else {
- CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
+ DCHECK(must_do_clinit);
+ Location source = instruction_->IsLoadClass() ? out : locations->InAt(0);
+ arm_codegen->Move32(LocationFrom(calling_convention.GetRegisterAt(0)), source);
+ }
+ if (must_do_clinit) {
+ arm_codegen->InvokeRuntime(kQuickInitializeStaticStorage, instruction_, dex_pc, this);
+ CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, mirror::Class*>();
}
// Move the class to the desired location.
@@ -549,12 +555,6 @@ class LoadClassSlowPathARMVIXL : public SlowPathCodeARMVIXL {
// The class this slow path will load.
HLoadClass* const cls_;
- // The dex PC of `at_`.
- const uint32_t dex_pc_;
-
- // Whether to initialize the class.
- const bool do_clinit_;
-
DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARMVIXL);
};
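The rewritten slow path derives what to call from the instruction at code-generation time
instead of storing a dex PC and a clinit flag. A condensed sketch of that decision; the
names are illustrative, not the ART API:

struct SlowPathPlan {
  bool call_resolve_type;               // kQuickResolveType
  bool call_initialize_static_storage;  // kQuickInitializeStaticStorage
};

SlowPathPlan PlanLoadClassSlowPath(bool is_load_class,
                                   bool must_resolve_type_on_slow_path,
                                   bool must_generate_clinit_check) {
  SlowPathPlan plan;
  plan.call_resolve_type = is_load_class && must_resolve_type_on_slow_path;
  // HClinitCheck always initializes; HLoadClass only when it must generate the check.
  plan.call_initialize_static_storage = !is_load_class || must_generate_clinit_check;
  return plan;
}
// When both are set, kQuickResolveType leaves the resolved class in the first argument
// register, which is exactly where kQuickInitializeStaticStorage expects it.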
@@ -783,207 +783,12 @@ class ReadBarrierMarkSlowPathBaseARMVIXL : public SlowPathCodeARMVIXL {
DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathBaseARMVIXL);
};
-// Slow path marking an object reference `ref` during a read
-// barrier. The field `obj.field` in the object `obj` holding this
-// reference does not get updated by this slow path after marking.
-//
-// This means that after the execution of this slow path, `ref` will
-// always be up-to-date, but `obj.field` may not; i.e., after the
-// flip, `ref` will be a to-space reference, but `obj.field` will
-// probably still be a from-space reference (unless it gets updated by
-// another thread, or if another thread installed another object
-// reference (different from `ref`) in `obj.field`).
-//
-// Argument `entrypoint` must be a register location holding the read
-// barrier marking runtime entry point to be invoked or an empty
-// location; in the latter case, the read barrier marking runtime
-// entry point will be loaded by the slow path code itself.
-class ReadBarrierMarkSlowPathARMVIXL : public ReadBarrierMarkSlowPathBaseARMVIXL {
- public:
- ReadBarrierMarkSlowPathARMVIXL(HInstruction* instruction,
- Location ref,
- Location entrypoint = Location::NoLocation())
- : ReadBarrierMarkSlowPathBaseARMVIXL(instruction, ref, entrypoint) {
- DCHECK(kEmitCompilerReadBarrier);
- }
-
- const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathARMVIXL"; }
-
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = instruction_->GetLocations();
- DCHECK(locations->CanCall());
- DCHECK(ref_.IsRegister()) << ref_;
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_.reg())) << ref_.reg();
- DCHECK(instruction_->IsLoadClass() || instruction_->IsLoadString())
- << "Unexpected instruction in read barrier marking slow path: "
- << instruction_->DebugName();
-
- __ Bind(GetEntryLabel());
- GenerateReadBarrierMarkRuntimeCall(codegen);
- __ B(GetExitLabel());
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathARMVIXL);
-};
-
-// Slow path loading `obj`'s lock word, loading a reference from
-// object `*(obj + offset + (index << scale_factor))` into `ref`, and
-// marking `ref` if `obj` is gray according to the lock word (Baker
-// read barrier). The field `obj.field` in the object `obj` holding
-// this reference does not get updated by this slow path after marking
-// (see LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARMVIXL
-// below for that).
-//
-// This means that after the execution of this slow path, `ref` will
-// always be up-to-date, but `obj.field` may not; i.e., after the
-// flip, `ref` will be a to-space reference, but `obj.field` will
-// probably still be a from-space reference (unless it gets updated by
-// another thread, or if another thread installed another object
-// reference (different from `ref`) in `obj.field`).
-//
-// Argument `entrypoint` must be a register location holding the read
-// barrier marking runtime entry point to be invoked or an empty
-// location; in the latter case, the read barrier marking runtime
-// entry point will be loaded by the slow path code itself.
-class LoadReferenceWithBakerReadBarrierSlowPathARMVIXL : public ReadBarrierMarkSlowPathBaseARMVIXL {
- public:
- LoadReferenceWithBakerReadBarrierSlowPathARMVIXL(HInstruction* instruction,
- Location ref,
- vixl32::Register obj,
- uint32_t offset,
- Location index,
- ScaleFactor scale_factor,
- bool needs_null_check,
- vixl32::Register temp,
- Location entrypoint = Location::NoLocation())
- : ReadBarrierMarkSlowPathBaseARMVIXL(instruction, ref, entrypoint),
- obj_(obj),
- offset_(offset),
- index_(index),
- scale_factor_(scale_factor),
- needs_null_check_(needs_null_check),
- temp_(temp) {
- DCHECK(kEmitCompilerReadBarrier);
- DCHECK(kUseBakerReadBarrier);
- }
-
- const char* GetDescription() const OVERRIDE {
- return "LoadReferenceWithBakerReadBarrierSlowPathARMVIXL";
- }
-
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = instruction_->GetLocations();
- vixl32::Register ref_reg = RegisterFrom(ref_);
- DCHECK(locations->CanCall());
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg.GetCode())) << ref_reg;
- DCHECK(instruction_->IsInstanceFieldGet() ||
- instruction_->IsStaticFieldGet() ||
- instruction_->IsArrayGet() ||
- instruction_->IsArraySet() ||
- instruction_->IsInstanceOf() ||
- instruction_->IsCheckCast() ||
- (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()) ||
- (instruction_->IsInvokeStaticOrDirect() && instruction_->GetLocations()->Intrinsified()))
- << "Unexpected instruction in read barrier marking slow path: "
- << instruction_->DebugName();
- // The read barrier instrumentation of object ArrayGet
- // instructions does not support the HIntermediateAddress
- // instruction.
- DCHECK(!(instruction_->IsArrayGet() &&
- instruction_->AsArrayGet()->GetArray()->IsIntermediateAddress()));
-
- // Temporary register `temp_`, used to store the lock word, must
- // not be IP, as we may use it to emit the reference load (in the
- // call to GenerateRawReferenceLoad below), and we need the lock
- // word to still be in `temp_` after the reference load.
- DCHECK(!temp_.Is(ip));
-
- __ Bind(GetEntryLabel());
-
- // When using MaybeGenerateReadBarrierSlow, the read barrier call is
- // inserted after the original load. However, in fast path based
- // Baker's read barriers, we need to perform the load of
- // mirror::Object::monitor_ *before* the original reference load.
- // This load-load ordering is required by the read barrier.
- // The slow path (for Baker's algorithm) should look like:
- //
- // uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
- // lfence; // Load fence or artificial data dependency to prevent load-load reordering
- // HeapReference<mirror::Object> ref = *src; // Original reference load.
- // bool is_gray = (rb_state == ReadBarrier::GrayState());
- // if (is_gray) {
- // ref = entrypoint(ref); // ref = ReadBarrier::Mark(ref); // Runtime entry point call.
- // }
- //
- // Note: the original implementation in ReadBarrier::Barrier is
- // slightly more complex as it performs additional checks that we do
- // not do here for performance reasons.
-
- CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
-
- // /* int32_t */ monitor = obj->monitor_
- uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();
- arm_codegen->GetAssembler()->LoadFromOffset(kLoadWord, temp_, obj_, monitor_offset);
- if (needs_null_check_) {
- codegen->MaybeRecordImplicitNullCheck(instruction_);
- }
- // /* LockWord */ lock_word = LockWord(monitor)
- static_assert(sizeof(LockWord) == sizeof(int32_t),
- "art::LockWord and int32_t have different sizes.");
-
- // Introduce a dependency on the lock_word including the rb_state,
- // which shall prevent load-load reordering without using
- // a memory barrier (which would be more expensive).
- // `obj` is unchanged by this operation, but its value now depends
- // on `temp`.
- __ Add(obj_, obj_, Operand(temp_, ShiftType::LSR, 32));
-
- // The actual reference load.
- // A possible implicit null check has already been handled above.
- arm_codegen->GenerateRawReferenceLoad(
- instruction_, ref_, obj_, offset_, index_, scale_factor_, /* needs_null_check */ false);
-
- // Mark the object `ref` when `obj` is gray.
- //
- // if (rb_state == ReadBarrier::GrayState())
- // ref = ReadBarrier::Mark(ref);
- //
- // Given the numeric representation, it's enough to check the low bit of the
- // rb_state. We do that by shifting the bit out of the lock word with LSRS
- // which can be a 16-bit instruction unlike the TST immediate.
- static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
- static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
- __ Lsrs(temp_, temp_, LockWord::kReadBarrierStateShift + 1);
- __ B(cc, GetExitLabel()); // Carry flag is the last bit shifted out by LSRS.
- GenerateReadBarrierMarkRuntimeCall(codegen);
-
- __ B(GetExitLabel());
- }
-
- private:
- // The register containing the object holding the marked object reference field.
- vixl32::Register obj_;
- // The offset, index and scale factor to access the reference in `obj_`.
- uint32_t offset_;
- Location index_;
- ScaleFactor scale_factor_;
- // Is a null check required?
- bool needs_null_check_;
- // A temporary register used to hold the lock word of `obj_`.
- vixl32::Register temp_;
-
- DISALLOW_COPY_AND_ASSIGN(LoadReferenceWithBakerReadBarrierSlowPathARMVIXL);
-};
-
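Both the removed slow path above and the shared thunks order the lock-word load before the
reference load without a memory barrier, by making the reference load address-dependent on
the lock word: adding `lock_word LSR #32` (always zero) to the base register. A sketch of
the idea in plain code; it only illustrates the machine-level dependency, and an optimizing
C++ compiler would be free to discard it.

#include <cstdint>

uint32_t LoadFieldAfterLockWord(const uint32_t* monitor, const uint32_t* field) {
  uint32_t lock_word = *monitor;                                         // LDR temp, [obj, #monitor_offset]
  uint64_t zero_but_dependent = static_cast<uint64_t>(lock_word) >> 32;  // always 0
  const uint32_t* addr = field + zero_but_dependent;                     // ADD obj, obj, temp, LSR #32
  return *addr;                                                          // original reference load
}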
// Slow path loading `obj`'s lock word, loading a reference from
// object `*(obj + offset + (index << scale_factor))` into `ref`, and
// marking `ref` if `obj` is gray according to the lock word (Baker
// read barrier). If needed, this slow path also atomically updates
// the field `obj.field` in the object `obj` holding this reference
-// after marking (contrary to
-// LoadReferenceWithBakerReadBarrierSlowPathARMVIXL above, which never
-// tries to update `obj.field`).
+// after marking.
//
// This means that after the execution of this slow path, both `ref`
// and `obj.field` will be up-to-date; i.e., after the flip, both will
@@ -1050,7 +855,7 @@ class LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARMVIXL
__ Bind(GetEntryLabel());
- // The implementation is similar to LoadReferenceWithBakerReadBarrierSlowPathARMVIXL's:
+ // The implementation is:
//
// uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
// lfence; // Load fence or artificial data dependency to prevent load-load reordering
@@ -2352,7 +2157,9 @@ CodeGeneratorARMVIXL::CodeGeneratorARMVIXL(HGraph* graph,
jit_string_patches_(StringReferenceValueComparator(),
graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_class_patches_(TypeReferenceValueComparator(),
- graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ jit_baker_read_barrier_slow_paths_(std::less<uint32_t>(),
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
// Always save the LR register to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(LR));
// Give D30 and D31 as scratch register to VIXL. The register allocator only works on
@@ -2408,6 +2215,16 @@ void CodeGeneratorARMVIXL::FixJumpTables() {
void CodeGeneratorARMVIXL::Finalize(CodeAllocator* allocator) {
FixJumpTables();
+
+ // Emit JIT baker read barrier slow paths.
+ DCHECK(Runtime::Current()->UseJitCompilation() || jit_baker_read_barrier_slow_paths_.empty());
+ for (auto& entry : jit_baker_read_barrier_slow_paths_) {
+ uint32_t encoded_data = entry.first;
+ vixl::aarch32::Label* slow_path_entry = &entry.second.label;
+ __ Bind(slow_path_entry);
+ CompileBakerReadBarrierThunk(*GetAssembler(), encoded_data, /* debug_name */ nullptr);
+ }
+
GetAssembler()->FinalizeCode();
CodeGenerator::Finalize(allocator);
@@ -5947,16 +5764,10 @@ void LocationsBuilderARMVIXL::HandleFieldGet(HInstruction* instruction,
locations->AddTemp(Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
} else if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
- // We need a temporary register for the read barrier marking slow
- // path in CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier.
- if (kBakerReadBarrierLinkTimeThunksEnableForFields &&
- !Runtime::Current()->UseJitCompilation()) {
- // If link-time thunks for the Baker read barrier are enabled, for AOT
- // loads we need a temporary only if the offset is too big.
- if (field_info.GetFieldOffset().Uint32Value() >= kReferenceLoadMinFarOffset) {
- locations->AddTemp(Location::RequiresRegister());
- }
- } else {
+ // We need a temporary register for the read barrier load in
+ // CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier()
+ // only if the offset is too big.
+ if (field_info.GetFieldOffset().Uint32Value() >= kReferenceLoadMinFarOffset) {
locations->AddTemp(Location::RequiresRegister());
}
}
@@ -6371,12 +6182,11 @@ void LocationsBuilderARMVIXL::VisitArrayGet(HArrayGet* instruction) {
object_array_get_with_read_barrier ? Location::kOutputOverlap : Location::kNoOutputOverlap);
}
if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
- if (kBakerReadBarrierLinkTimeThunksEnableForFields &&
- !Runtime::Current()->UseJitCompilation() &&
- instruction->GetIndex()->IsConstant()) {
+ if (instruction->GetIndex()->IsConstant()) {
// Array loads with constant index are treated as field loads.
- // If link-time thunks for the Baker read barrier are enabled, for AOT
- // constant index loads we need a temporary only if the offset is too big.
+ // We need a temporary register for the read barrier load in
+ // CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier()
+ // only if the offset is too big.
uint32_t offset = CodeGenerator::GetArrayDataOffset(instruction);
uint32_t index = instruction->GetIndex()->AsIntConstant()->GetValue();
offset += index << DataType::SizeShift(DataType::Type::kReference);
@@ -6384,9 +6194,8 @@ void LocationsBuilderARMVIXL::VisitArrayGet(HArrayGet* instruction) {
locations->AddTemp(Location::RequiresRegister());
}
} else {
- // If using introspection, we need a non-scratch temporary for the array data pointer.
- // Otherwise, we need a temporary register for the read barrier marking slow
- // path in CodeGeneratorARMVIXL::GenerateArrayLoadWithBakerReadBarrier.
+ // We need a non-scratch temporary for the array data pointer in
+ // CodeGeneratorARMVIXL::GenerateArrayLoadWithBakerReadBarrier().
locations->AddTemp(Location::RequiresRegister());
}
} else if (mirror::kUseStringCompression && instruction->IsStringCharAt()) {
@@ -6516,7 +6325,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) {
} else {
Location temp = locations->GetTemp(0);
codegen_->GenerateArrayLoadWithBakerReadBarrier(
- instruction, out_loc, obj, data_offset, index, temp, /* needs_null_check */ false);
+ out_loc, obj, data_offset, index, temp, /* needs_null_check */ false);
}
} else {
vixl32::Register out = OutputRegister(instruction);
@@ -7035,9 +6844,25 @@ void CodeGeneratorARMVIXL::MarkGCCard(vixl32::Register temp,
if (can_be_null) {
__ CompareAndBranchIfZero(value, &is_null);
}
+ // Load the address of the card table into `card`.
GetAssembler()->LoadFromOffset(
kLoadWord, card, tr, Thread::CardTableOffset<kArmPointerSize>().Int32Value());
+ // Calculate the offset (in the card table) of the card corresponding to
+ // `object`.
__ Lsr(temp, object, Operand::From(gc::accounting::CardTable::kCardShift));
+ // Write the `art::gc::accounting::CardTable::kCardDirty` value into the
+ // `object`'s card.
+ //
+ // Register `card` contains the address of the card table. Note that the card
+ // table's base is biased during its creation so that it always starts at an
+ // address whose least-significant byte is equal to `kCardDirty` (see
+ // art::gc::accounting::CardTable::Create). Therefore the STRB instruction
+ // below writes the `kCardDirty` (byte) value into the `object`'s card
+ // (located at `card + object >> kCardShift`).
+ //
+ // This dual use of the value in register `card` (1. to calculate the location
+ // of the card to mark; and 2. to load the `kCardDirty` value) saves a load
+ // (no need to explicitly load `kCardDirty` as an immediate value).
__ Strb(card, MemOperand(card, temp));
if (can_be_null) {
__ Bind(&is_null);
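A stand-alone sketch of the card-marking arithmetic the comments above describe: the
biased card-table base serves both as the store base and, through its least-significant
byte, as the kCardDirty value. The card shift below is an assumed value for illustration;
the real constants live in art::gc::accounting::CardTable.

#include <cstdint>

constexpr unsigned kAssumedCardShift = 10;  // 1 KiB cards, assumed

void MarkCard(uint8_t* biased_card_table_base, uintptr_t object_address) {
  // Lsr temp, object, #kCardShift;  Strb card, [card, temp]
  uint8_t* card = biased_card_table_base + (object_address >> kAssumedCardShift);
  // The byte stored is the low byte of the biased base address, which the card table
  // arranged to equal kCardDirty, so no separate immediate load is needed.
  *card = static_cast<uint8_t>(reinterpret_cast<uintptr_t>(biased_card_table_base));
}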
@@ -7412,12 +7237,7 @@ void LocationsBuilderARMVIXL::VisitLoadClass(HLoadClass* cls) {
if (load_kind == HLoadClass::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the type resolution or initialization and marking to save everything we need.
- RegisterSet caller_saves = RegisterSet::Empty();
- InvokeRuntimeCallingConventionARMVIXL calling_convention;
- caller_saves.Add(LocationFrom(calling_convention.GetRegisterAt(0)));
- // TODO: Add GetReturnLocation() to the calling convention so that we can DCHECK()
- // that the the kPrimNot result register is the same as the first argument register.
- locations->SetCustomSlowPathCallerSaves(caller_saves);
+ locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
} else {
// For non-Baker read barrier we have a temp-clobbering call.
}
@@ -7504,8 +7324,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
LoadClassSlowPathARMVIXL* slow_path =
- new (codegen_->GetScopedAllocator()) LoadClassSlowPathARMVIXL(
- cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
+ new (codegen_->GetScopedAllocator()) LoadClassSlowPathARMVIXL(cls, cls);
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
__ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
@@ -7546,15 +7365,14 @@ void LocationsBuilderARMVIXL::VisitClinitCheck(HClinitCheck* check) {
if (check->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
}
+ // Rely on the type initialization to save everything we need.
+ locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
}
void InstructionCodeGeneratorARMVIXL::VisitClinitCheck(HClinitCheck* check) {
// We assume the class is not null.
LoadClassSlowPathARMVIXL* slow_path =
- new (codegen_->GetScopedAllocator()) LoadClassSlowPathARMVIXL(check->GetLoadClass(),
- check,
- check->GetDexPc(),
- /* do_clinit */ true);
+ new (codegen_->GetScopedAllocator()) LoadClassSlowPathARMVIXL(check->GetLoadClass(), check);
codegen_->AddSlowPath(slow_path);
GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
}
@@ -7668,12 +7486,7 @@ void LocationsBuilderARMVIXL::VisitLoadString(HLoadString* load) {
if (load_kind == HLoadString::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the pResolveString and marking to save everything we need, including temps.
- RegisterSet caller_saves = RegisterSet::Empty();
- InvokeRuntimeCallingConventionARMVIXL calling_convention;
- caller_saves.Add(LocationFrom(calling_convention.GetRegisterAt(0)));
- // TODO: Add GetReturnLocation() to the calling convention so that we can DCHECK()
- // that the the kPrimNot result register is the same as the first argument register.
- locations->SetCustomSlowPathCallerSaves(caller_saves);
+ locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
} else {
// For non-Baker read barrier we have a temp-clobbering call.
}
@@ -8792,73 +8605,41 @@ void CodeGeneratorARMVIXL::GenerateGcRootFieldLoad(
if (kUseBakerReadBarrier) {
// Fast path implementation of art::ReadBarrier::BarrierForRoot when
// Baker's read barrier are used.
- if (kBakerReadBarrierLinkTimeThunksEnableForGcRoots &&
- !Runtime::Current()->UseJitCompilation()) {
- // Query `art::Thread::Current()->GetIsGcMarking()` (stored in
- // the Marking Register) to decide whether we need to enter
- // the slow path to mark the GC root.
- //
- // We use link-time generated thunks for the slow path. That thunk
- // checks the reference and jumps to the entrypoint if needed.
- //
- // lr = &return_address;
- // GcRoot<mirror::Object> root = *(obj+offset); // Original reference load.
- // if (mr) { // Thread::Current()->GetIsGcMarking()
- // goto gc_root_thunk<root_reg>(lr)
- // }
- // return_address:
- UseScratchRegisterScope temps(GetVIXLAssembler());
- temps.Exclude(ip);
- bool narrow = CanEmitNarrowLdr(root_reg, obj, offset);
- uint32_t custom_data = EncodeBakerReadBarrierGcRootData(root_reg.GetCode(), narrow);
- vixl32::Label* bne_label = NewBakerReadBarrierPatch(custom_data);
-
- vixl::EmissionCheckScope guard(GetVIXLAssembler(), 4 * vixl32::kMaxInstructionSizeInBytes);
- vixl32::Label return_address;
- EmitAdrCode adr(GetVIXLAssembler(), lr, &return_address);
- __ cmp(mr, Operand(0));
- // Currently the offset is always within range. If that changes,
- // we shall have to split the load the same way as for fields.
- DCHECK_LT(offset, kReferenceLoadMinFarOffset);
- ptrdiff_t old_offset = GetVIXLAssembler()->GetBuffer()->GetCursorOffset();
- __ ldr(EncodingSize(narrow ? Narrow : Wide), root_reg, MemOperand(obj, offset));
- EmitPlaceholderBne(this, bne_label);
- __ Bind(&return_address);
- DCHECK_EQ(old_offset - GetVIXLAssembler()->GetBuffer()->GetCursorOffset(),
- narrow ? BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_NARROW_OFFSET
- : BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_WIDE_OFFSET);
- } else {
- // Query `art::Thread::Current()->GetIsGcMarking()` (stored in
- // the Marking Register) to decide whether we need to enter
- // the slow path to mark the GC root.
- //
- // GcRoot<mirror::Object> root = *(obj+offset); // Original reference load.
- // if (mr) { // Thread::Current()->GetIsGcMarking()
- // // Slow path.
- // entrypoint = Thread::Current()->pReadBarrierMarkReg ## root.reg()
- // root = entrypoint(root); // root = ReadBarrier::Mark(root); // Entry point call.
- // }
-
- // Slow path marking the GC root `root`. The entrypoint will
- // be loaded by the slow path code.
- SlowPathCodeARMVIXL* slow_path =
- new (GetScopedAllocator()) ReadBarrierMarkSlowPathARMVIXL(instruction, root);
- AddSlowPath(slow_path);
+ // Query `art::Thread::Current()->GetIsGcMarking()` (stored in
+ // the Marking Register) to decide whether we need to enter
+ // the slow path to mark the GC root.
+ //
+ // We use shared thunks for the slow path; shared within the method
+ // for JIT, across methods for AOT. That thunk checks the reference
+ // and jumps to the entrypoint if needed.
+ //
+ // lr = &return_address;
+ // GcRoot<mirror::Object> root = *(obj+offset); // Original reference load.
+ // if (mr) { // Thread::Current()->GetIsGcMarking()
+ // goto gc_root_thunk<root_reg>(lr)
+ // }
+ // return_address:
- // /* GcRoot<mirror::Object> */ root = *(obj + offset)
- GetAssembler()->LoadFromOffset(kLoadWord, root_reg, obj, offset);
- static_assert(
- sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(GcRoot<mirror::Object>),
- "art::mirror::CompressedReference<mirror::Object> and art::GcRoot<mirror::Object> "
- "have different sizes.");
- static_assert(sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(int32_t),
- "art::mirror::CompressedReference<mirror::Object> and int32_t "
- "have different sizes.");
-
- __ CompareAndBranchIfNonZero(mr, slow_path->GetEntryLabel());
- __ Bind(slow_path->GetExitLabel());
- }
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ temps.Exclude(ip);
+ bool narrow = CanEmitNarrowLdr(root_reg, obj, offset);
+ uint32_t custom_data = EncodeBakerReadBarrierGcRootData(root_reg.GetCode(), narrow);
+
+ vixl::EmissionCheckScope guard(GetVIXLAssembler(), 4 * vixl32::kMaxInstructionSizeInBytes);
+ vixl32::Label return_address;
+ EmitAdrCode adr(GetVIXLAssembler(), lr, &return_address);
+ __ cmp(mr, Operand(0));
+ // Currently the offset is always within range. If that changes,
+ // we shall have to split the load the same way as for fields.
+ DCHECK_LT(offset, kReferenceLoadMinFarOffset);
+ ptrdiff_t old_offset = GetVIXLAssembler()->GetBuffer()->GetCursorOffset();
+ __ ldr(EncodingSize(narrow ? Narrow : Wide), root_reg, MemOperand(obj, offset));
+ EmitBakerReadBarrierBne(custom_data);
+ __ Bind(&return_address);
+ DCHECK_EQ(old_offset - GetVIXLAssembler()->GetBuffer()->GetCursorOffset(),
+ narrow ? BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_NARROW_OFFSET
+ : BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_WIDE_OFFSET);
} else {
// GC root loaded through a slow path for read barriers other
// than Baker's.
@@ -8880,94 +8661,92 @@ void CodeGeneratorARMVIXL::GenerateGcRootFieldLoad(
void CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
Location ref,
vixl32::Register obj,
- uint32_t offset,
- Location temp,
+ const vixl32::MemOperand& src,
bool needs_null_check) {
DCHECK(kEmitCompilerReadBarrier);
DCHECK(kUseBakerReadBarrier);
- if (kBakerReadBarrierLinkTimeThunksEnableForFields &&
- !Runtime::Current()->UseJitCompilation()) {
- // Query `art::Thread::Current()->GetIsGcMarking()` (stored in the
- // Marking Register) to decide whether we need to enter the slow
- // path to mark the reference. Then, in the slow path, check the
- // gray bit in the lock word of the reference's holder (`obj`) to
- // decide whether to mark `ref` or not.
- //
- // We use link-time generated thunks for the slow path. That thunk checks
- // the holder and jumps to the entrypoint if needed. If the holder is not
- // gray, it creates a fake dependency and returns to the LDR instruction.
- //
- // lr = &gray_return_address;
- // if (mr) { // Thread::Current()->GetIsGcMarking()
- // goto field_thunk<holder_reg, base_reg>(lr)
- // }
- // not_gray_return_address:
- // // Original reference load. If the offset is too large to fit
- // // into LDR, we use an adjusted base register here.
- // HeapReference<mirror::Object> reference = *(obj+offset);
- // gray_return_address:
-
- DCHECK_ALIGNED(offset, sizeof(mirror::HeapReference<mirror::Object>));
- vixl32::Register ref_reg = RegisterFrom(ref, DataType::Type::kReference);
- bool narrow = CanEmitNarrowLdr(ref_reg, obj, offset);
- vixl32::Register base = obj;
- if (offset >= kReferenceLoadMinFarOffset) {
- base = RegisterFrom(temp);
- static_assert(IsPowerOfTwo(kReferenceLoadMinFarOffset), "Expecting a power of 2.");
- __ Add(base, obj, Operand(offset & ~(kReferenceLoadMinFarOffset - 1u)));
- offset &= (kReferenceLoadMinFarOffset - 1u);
- // Use narrow LDR only for small offsets. Generating narrow encoding LDR for the large
- // offsets with `(offset & (kReferenceLoadMinFarOffset - 1u)) < 32u` would most likely
- // increase the overall code size when taking the generated thunks into account.
- DCHECK(!narrow);
- }
- UseScratchRegisterScope temps(GetVIXLAssembler());
- temps.Exclude(ip);
- uint32_t custom_data = EncodeBakerReadBarrierFieldData(base.GetCode(), obj.GetCode(), narrow);
- vixl32::Label* bne_label = NewBakerReadBarrierPatch(custom_data);
+ // Query `art::Thread::Current()->GetIsGcMarking()` (stored in the
+ // Marking Register) to decide whether we need to enter the slow
+ // path to mark the reference. Then, in the slow path, check the
+ // gray bit in the lock word of the reference's holder (`obj`) to
+ // decide whether to mark `ref` or not.
+ //
+ // We use shared thunks for the slow path; shared within the method
+ // for JIT, across methods for AOT. That thunk checks the holder
+ // and jumps to the entrypoint if needed. If the holder is not gray,
+ // it creates a fake dependency and returns to the LDR instruction.
+ //
+ // lr = &gray_return_address;
+ // if (mr) { // Thread::Current()->GetIsGcMarking()
+ // goto field_thunk<holder_reg, base_reg>(lr)
+ // }
+ // not_gray_return_address:
+ // // Original reference load. If the offset is too large to fit
+ // // into LDR, we use an adjusted base register here.
+ // HeapReference<mirror::Object> reference = *(obj+offset);
+ // gray_return_address:
- {
- vixl::EmissionCheckScope guard(
- GetVIXLAssembler(),
- (kPoisonHeapReferences ? 5u : 4u) * vixl32::kMaxInstructionSizeInBytes);
- vixl32::Label return_address;
- EmitAdrCode adr(GetVIXLAssembler(), lr, &return_address);
- __ cmp(mr, Operand(0));
- EmitPlaceholderBne(this, bne_label);
- ptrdiff_t old_offset = GetVIXLAssembler()->GetBuffer()->GetCursorOffset();
- __ ldr(EncodingSize(narrow ? Narrow : Wide), ref_reg, MemOperand(base, offset));
- if (needs_null_check) {
- MaybeRecordImplicitNullCheck(instruction);
- }
- // Note: We need a specific width for the unpoisoning NEG.
- if (kPoisonHeapReferences) {
- if (narrow) {
- // The only 16-bit encoding is T1 which sets flags outside IT block (i.e. RSBS, not RSB).
- __ rsbs(EncodingSize(Narrow), ref_reg, ref_reg, Operand(0));
- } else {
- __ rsb(EncodingSize(Wide), ref_reg, ref_reg, Operand(0));
- }
+ DCHECK(src.GetAddrMode() == vixl32::Offset);
+ DCHECK_ALIGNED(src.GetOffsetImmediate(), sizeof(mirror::HeapReference<mirror::Object>));
+ vixl32::Register ref_reg = RegisterFrom(ref, DataType::Type::kReference);
+ bool narrow = CanEmitNarrowLdr(ref_reg, src.GetBaseRegister(), src.GetOffsetImmediate());
+
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ temps.Exclude(ip);
+ uint32_t custom_data =
+ EncodeBakerReadBarrierFieldData(src.GetBaseRegister().GetCode(), obj.GetCode(), narrow);
+
+ {
+ vixl::EmissionCheckScope guard(
+ GetVIXLAssembler(),
+ (kPoisonHeapReferences ? 5u : 4u) * vixl32::kMaxInstructionSizeInBytes);
+ vixl32::Label return_address;
+ EmitAdrCode adr(GetVIXLAssembler(), lr, &return_address);
+ __ cmp(mr, Operand(0));
+ EmitBakerReadBarrierBne(custom_data);
+ ptrdiff_t old_offset = GetVIXLAssembler()->GetBuffer()->GetCursorOffset();
+ __ ldr(EncodingSize(narrow ? Narrow : Wide), ref_reg, src);
+ if (needs_null_check) {
+ MaybeRecordImplicitNullCheck(instruction);
+ }
+ // Note: We need a specific width for the unpoisoning NEG.
+ if (kPoisonHeapReferences) {
+ if (narrow) {
+ // The only 16-bit encoding is T1 which sets flags outside IT block (i.e. RSBS, not RSB).
+ __ rsbs(EncodingSize(Narrow), ref_reg, ref_reg, Operand(0));
+ } else {
+ __ rsb(EncodingSize(Wide), ref_reg, ref_reg, Operand(0));
}
- __ Bind(&return_address);
- DCHECK_EQ(old_offset - GetVIXLAssembler()->GetBuffer()->GetCursorOffset(),
- narrow ? BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_OFFSET
- : BAKER_MARK_INTROSPECTION_FIELD_LDR_WIDE_OFFSET);
}
- MaybeGenerateMarkingRegisterCheck(/* code */ 20, /* temp_loc */ LocationFrom(ip));
- return;
+ __ Bind(&return_address);
+ DCHECK_EQ(old_offset - GetVIXLAssembler()->GetBuffer()->GetCursorOffset(),
+ narrow ? BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_OFFSET
+ : BAKER_MARK_INTROSPECTION_FIELD_LDR_WIDE_OFFSET);
}
-
- // /* HeapReference<Object> */ ref = *(obj + offset)
- Location no_index = Location::NoLocation();
- ScaleFactor no_scale_factor = TIMES_1;
- GenerateReferenceLoadWithBakerReadBarrier(
- instruction, ref, obj, offset, no_index, no_scale_factor, temp, needs_null_check);
+ MaybeGenerateMarkingRegisterCheck(/* code */ 20, /* temp_loc */ LocationFrom(ip));
}
-void CodeGeneratorARMVIXL::GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
+void CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
Location ref,
vixl32::Register obj,
+ uint32_t offset,
+ Location temp,
+ bool needs_null_check) {
+ DCHECK_ALIGNED(offset, sizeof(mirror::HeapReference<mirror::Object>));
+ vixl32::Register base = obj;
+ if (offset >= kReferenceLoadMinFarOffset) {
+ base = RegisterFrom(temp);
+ static_assert(IsPowerOfTwo(kReferenceLoadMinFarOffset), "Expecting a power of 2.");
+ __ Add(base, obj, Operand(offset & ~(kReferenceLoadMinFarOffset - 1u)));
+ offset &= (kReferenceLoadMinFarOffset - 1u);
+ }
+ GenerateFieldLoadWithBakerReadBarrier(
+ instruction, ref, obj, MemOperand(base, offset), needs_null_check);
+}
+
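This wrapper keeps the old far-offset handling: it peels the high bits of the offset into an ADD so the remaining immediate fits the LDR. A small sketch, assuming a 4 KiB threshold purely for illustration (the real constant is kReferenceLoadMinFarOffset and may differ):

    #include <cstdint>

    // Illustration only: stands in for kReferenceLoadMinFarOffset (a power of two).
    constexpr uint32_t kMinFarOffset = 4 * 1024;

    struct Split { uint32_t base_adjustment; uint32_t ldr_offset; };

    Split SplitFarOffset(uint32_t offset) {
      // e.g. offset == 0x1234 -> ADD base, obj, #0x1000; LDR ref, [base, #0x234]
      return { offset & ~(kMinFarOffset - 1u), offset & (kMinFarOffset - 1u) };
    }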
+void CodeGeneratorARMVIXL::GenerateArrayLoadWithBakerReadBarrier(Location ref,
+ vixl32::Register obj,
uint32_t data_offset,
Location index,
Location temp,
@@ -8980,113 +8759,57 @@ void CodeGeneratorARMVIXL::GenerateArrayLoadWithBakerReadBarrier(HInstruction* i
"art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
ScaleFactor scale_factor = TIMES_4;
- if (kBakerReadBarrierLinkTimeThunksEnableForArrays &&
- !Runtime::Current()->UseJitCompilation()) {
- // Query `art::Thread::Current()->GetIsGcMarking()` (stored in the
- // Marking Register) to decide whether we need to enter the slow
- // path to mark the reference. Then, in the slow path, check the
- // gray bit in the lock word of the reference's holder (`obj`) to
- // decide whether to mark `ref` or not.
- //
- // We use link-time generated thunks for the slow path. That thunk checks
- // the holder and jumps to the entrypoint if needed. If the holder is not
- // gray, it creates a fake dependency and returns to the LDR instruction.
- //
- // lr = &gray_return_address;
- // if (mr) { // Thread::Current()->GetIsGcMarking()
- // goto array_thunk<base_reg>(lr)
- // }
- // not_gray_return_address:
- // // Original reference load. If the offset is too large to fit
- // // into LDR, we use an adjusted base register here.
- // HeapReference<mirror::Object> reference = data[index];
- // gray_return_address:
-
- DCHECK(index.IsValid());
- vixl32::Register index_reg = RegisterFrom(index, DataType::Type::kInt32);
- vixl32::Register ref_reg = RegisterFrom(ref, DataType::Type::kReference);
- vixl32::Register data_reg = RegisterFrom(temp, DataType::Type::kInt32); // Raw pointer.
-
- UseScratchRegisterScope temps(GetVIXLAssembler());
- temps.Exclude(ip);
- uint32_t custom_data = EncodeBakerReadBarrierArrayData(data_reg.GetCode());
- vixl32::Label* bne_label = NewBakerReadBarrierPatch(custom_data);
-
- __ Add(data_reg, obj, Operand(data_offset));
- {
- vixl::EmissionCheckScope guard(
- GetVIXLAssembler(),
- (kPoisonHeapReferences ? 5u : 4u) * vixl32::kMaxInstructionSizeInBytes);
- vixl32::Label return_address;
- EmitAdrCode adr(GetVIXLAssembler(), lr, &return_address);
- __ cmp(mr, Operand(0));
- EmitPlaceholderBne(this, bne_label);
- ptrdiff_t old_offset = GetVIXLAssembler()->GetBuffer()->GetCursorOffset();
- __ ldr(ref_reg, MemOperand(data_reg, index_reg, vixl32::LSL, scale_factor));
- DCHECK(!needs_null_check); // The thunk cannot handle the null check.
- // Note: We need a Wide NEG for the unpoisoning.
- if (kPoisonHeapReferences) {
- __ rsb(EncodingSize(Wide), ref_reg, ref_reg, Operand(0));
- }
- __ Bind(&return_address);
- DCHECK_EQ(old_offset - GetVIXLAssembler()->GetBuffer()->GetCursorOffset(),
- BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET);
- }
- MaybeGenerateMarkingRegisterCheck(/* code */ 21, /* temp_loc */ LocationFrom(ip));
- return;
- }
-
- // /* HeapReference<Object> */ ref =
- // *(obj + data_offset + index * sizeof(HeapReference<Object>))
- GenerateReferenceLoadWithBakerReadBarrier(
- instruction, ref, obj, data_offset, index, scale_factor, temp, needs_null_check);
-}
-
-void CodeGeneratorARMVIXL::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
- Location ref,
- vixl32::Register obj,
- uint32_t offset,
- Location index,
- ScaleFactor scale_factor,
- Location temp,
- bool needs_null_check) {
- DCHECK(kEmitCompilerReadBarrier);
- DCHECK(kUseBakerReadBarrier);
-
// Query `art::Thread::Current()->GetIsGcMarking()` (stored in the
// Marking Register) to decide whether we need to enter the slow
// path to mark the reference. Then, in the slow path, check the
// gray bit in the lock word of the reference's holder (`obj`) to
// decide whether to mark `ref` or not.
//
- // if (mr) { // Thread::Current()->GetIsGcMarking()
- // // Slow path.
- // uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
- // lfence; // Load fence or artificial data dependency to prevent load-load reordering
- // HeapReference<mirror::Object> ref = *src; // Original reference load.
- // bool is_gray = (rb_state == ReadBarrier::GrayState());
- // if (is_gray) {
- // entrypoint = Thread::Current()->pReadBarrierMarkReg ## root.reg()
- // ref = entrypoint(ref); // ref = ReadBarrier::Mark(ref); // Runtime entry point call.
+ // We use shared thunks for the slow path; shared within the method
+ // for JIT, across methods for AOT. That thunk checks the holder
+ // and jumps to the entrypoint if needed. If the holder is not gray,
+ // it creates a fake dependency and returns to the LDR instruction.
+ //
+ // lr = &gray_return_address;
+ // if (mr) { // Thread::Current()->GetIsGcMarking()
+ // goto array_thunk<base_reg>(lr)
// }
- // } else {
- // HeapReference<mirror::Object> ref = *src; // Original reference load.
- // }
+ // not_gray_return_address:
+ // // Original reference load. If the offset is too large to fit
+ // // into LDR, we use an adjusted base register here.
+ // HeapReference<mirror::Object> reference = data[index];
+ // gray_return_address:
- vixl32::Register temp_reg = RegisterFrom(temp);
+ DCHECK(index.IsValid());
+ vixl32::Register index_reg = RegisterFrom(index, DataType::Type::kInt32);
+ vixl32::Register ref_reg = RegisterFrom(ref, DataType::Type::kReference);
+ vixl32::Register data_reg = RegisterFrom(temp, DataType::Type::kInt32); // Raw pointer.
- // Slow path marking the object `ref` when the GC is marking. The
- // entrypoint will be loaded by the slow path code.
- SlowPathCodeARMVIXL* slow_path =
- new (GetScopedAllocator()) LoadReferenceWithBakerReadBarrierSlowPathARMVIXL(
- instruction, ref, obj, offset, index, scale_factor, needs_null_check, temp_reg);
- AddSlowPath(slow_path);
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ temps.Exclude(ip);
+ uint32_t custom_data = EncodeBakerReadBarrierArrayData(data_reg.GetCode());
- __ CompareAndBranchIfNonZero(mr, slow_path->GetEntryLabel());
- // Fast path: the GC is not marking: just load the reference.
- GenerateRawReferenceLoad(instruction, ref, obj, offset, index, scale_factor, needs_null_check);
- __ Bind(slow_path->GetExitLabel());
- MaybeGenerateMarkingRegisterCheck(/* code */ 22);
+ __ Add(data_reg, obj, Operand(data_offset));
+ {
+ vixl::EmissionCheckScope guard(
+ GetVIXLAssembler(),
+ (kPoisonHeapReferences ? 5u : 4u) * vixl32::kMaxInstructionSizeInBytes);
+ vixl32::Label return_address;
+ EmitAdrCode adr(GetVIXLAssembler(), lr, &return_address);
+ __ cmp(mr, Operand(0));
+ EmitBakerReadBarrierBne(custom_data);
+ ptrdiff_t old_offset = GetVIXLAssembler()->GetBuffer()->GetCursorOffset();
+ __ ldr(ref_reg, MemOperand(data_reg, index_reg, vixl32::LSL, scale_factor));
+ DCHECK(!needs_null_check); // The thunk cannot handle the null check.
+ // Note: We need a Wide NEG for the unpoisoning.
+ if (kPoisonHeapReferences) {
+ __ rsb(EncodingSize(Wide), ref_reg, ref_reg, Operand(0));
+ }
+ __ Bind(&return_address);
+ DCHECK_EQ(old_offset - GetVIXLAssembler()->GetBuffer()->GetCursorOffset(),
+ BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET);
+ }
+ MaybeGenerateMarkingRegisterCheck(/* code */ 21, /* temp_loc */ LocationFrom(ip));
}
void CodeGeneratorARMVIXL::UpdateReferenceFieldWithBakerReadBarrier(HInstruction* instruction,
@@ -9491,9 +9214,20 @@ CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativePa
return &patches->back();
}
-vixl32::Label* CodeGeneratorARMVIXL::NewBakerReadBarrierPatch(uint32_t custom_data) {
- baker_read_barrier_patches_.emplace_back(custom_data);
- return &baker_read_barrier_patches_.back().label;
+void CodeGeneratorARMVIXL::EmitBakerReadBarrierBne(uint32_t custom_data) {
+ ExactAssemblyScope eas(GetVIXLAssembler(), 1 * k32BitT32InstructionSizeInBytes);
+ if (Runtime::Current()->UseJitCompilation()) {
+ auto it = jit_baker_read_barrier_slow_paths_.FindOrAdd(custom_data);
+ vixl::aarch32::Label* slow_path_entry = &it->second.label;
+ __ b(ne, EncodingSize(Wide), slow_path_entry);
+ } else {
+ baker_read_barrier_patches_.emplace_back(custom_data);
+ vixl::aarch32::Label* patch_label = &baker_read_barrier_patches_.back().label;
+ __ bind(patch_label);
+ vixl32::Label placeholder_label;
+ __ b(ne, EncodingSize(Wide), &placeholder_label); // Placeholder, patched at link-time.
+ __ bind(&placeholder_label);
+ }
}
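EmitBakerReadBarrierBne gives JIT code one shared slow-path label per custom_data within the method, while AOT code records a patch and emits a placeholder branch for the linker. A simplified sketch of the JIT-side de-duplication, with std::map standing in for ArenaSafeMap and a stub Label type:

    #include <cstdint>
    #include <map>

    struct Label {};  // stand-in for vixl::aarch32::Label

    std::map<uint32_t, Label> jit_baker_slow_paths;

    Label* GetOrCreateSlowPathLabel(uint32_t custom_data) {
      // The first BNE with this custom_data creates the entry; later BNEs reuse it,
      // so the thunk body is emitted once per encoded (kind, registers) combination
      // and the method-local branches all converge on the same label. For AOT the
      // BNE is a placeholder that the linker later redirects to a shared thunk.
      return &jit_baker_slow_paths[custom_data];
    }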
VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateBootImageAddressLiteral(uint32_t address) {
@@ -10085,7 +9819,12 @@ void CodeGeneratorARMVIXL::CompileBakerReadBarrierThunk(ArmVIXLAssembler& assemb
UNREACHABLE();
}
- if (GetCompilerOptions().GenerateAnyDebugInfo()) {
+ // For JIT, the slow path is considered part of the compiled method,
+ // so JIT should pass null as `debug_name`. Tests may not have a runtime.
+ DCHECK(Runtime::Current() == nullptr ||
+ !Runtime::Current()->UseJitCompilation() ||
+ debug_name == nullptr);
+ if (debug_name != nullptr && GetCompilerOptions().GenerateAnyDebugInfo()) {
std::ostringstream oss;
oss << "BakerReadBarrierThunk";
switch (kind) {
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index fc8cf98173..2fd18cab47 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -589,9 +589,9 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
PcRelativePatchInfo* NewStringBssEntryPatch(const DexFile& dex_file,
dex::StringIndex string_index);
- // Add a new baker read barrier patch and return the label to be bound
- // before the BNE instruction.
- vixl::aarch32::Label* NewBakerReadBarrierPatch(uint32_t custom_data);
+ // Emit the BNE instruction for baker read barrier and record
+ // the associated patch for AOT or slow path for JIT.
+ void EmitBakerReadBarrierBne(uint32_t custom_data);
VIXLUInt32Literal* DeduplicateBootImageAddressLiteral(uint32_t address);
VIXLUInt32Literal* DeduplicateJitStringLiteral(const DexFile& dex_file,
@@ -624,6 +624,14 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
ReadBarrierOption read_barrier_option);
// Fast path implementation of ReadBarrier::Barrier for a heap
// reference field load when Baker's read barriers are used.
+ // Overload suitable for Unsafe.getObject/-Volatile() intrinsic.
+ void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location ref,
+ vixl::aarch32::Register obj,
+ const vixl::aarch32::MemOperand& src,
+ bool needs_null_check);
+ // Fast path implementation of ReadBarrier::Barrier for a heap
+ // reference field load when Baker's read barriers are used.
void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
Location ref,
vixl::aarch32::Register obj,
@@ -632,27 +640,12 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
bool needs_null_check);
// Fast path implementation of ReadBarrier::Barrier for a heap
// reference array load when Baker's read barriers are used.
- void GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
- Location ref,
+ void GenerateArrayLoadWithBakerReadBarrier(Location ref,
vixl::aarch32::Register obj,
uint32_t data_offset,
Location index,
Location temp,
bool needs_null_check);
- // Factored implementation, used by GenerateFieldLoadWithBakerReadBarrier,
- // GenerateArrayLoadWithBakerReadBarrier and some intrinsics.
- //
- // Load the object reference located at the address
- // `obj + offset + (index << scale_factor)`, held by object `obj`, into
- // `ref`, and mark it if needed.
- void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
- Location ref,
- vixl::aarch32::Register obj,
- uint32_t offset,
- Location index,
- ScaleFactor scale_factor,
- Location temp,
- bool needs_null_check);
  // Generate code checking whether the reference field at the
// address `obj + field_offset`, held by object `obj`, needs to be
@@ -916,6 +909,19 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
// Patches for class literals in JIT compiled code.
TypeToLiteralMap jit_class_patches_;
+ // Baker read barrier slow paths, mapping custom data (uint32_t) to label.
+ // Wrap the label to work around vixl::aarch32::Label being non-copyable
+ // and non-moveable and as such unusable in ArenaSafeMap<>.
+ struct LabelWrapper {
+ LabelWrapper(const LabelWrapper& src)
+ : label() {
+ DCHECK(!src.label.IsReferenced() && !src.label.IsBound());
+ }
+ LabelWrapper() = default;
+ vixl::aarch32::Label label;
+ };
+ ArenaSafeMap<uint32_t, LabelWrapper> jit_baker_read_barrier_slow_paths_;
+
friend class linker::Thumb2RelativePatcherTest;
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARMVIXL);
};
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 4aed2c091c..476e8ab944 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -160,6 +160,14 @@ Location InvokeRuntimeCallingConvention::GetReturnLocation(DataType::Type type)
return MipsReturnLocation(type);
}
+static RegisterSet OneRegInReferenceOutSaveEverythingCallerSaves() {
+ InvokeRuntimeCallingConvention calling_convention;
+ RegisterSet caller_saves = RegisterSet::Empty();
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ // The reference is returned in the same register. This differs from the standard return location.
+ return caller_saves;
+}
+
// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
#define __ down_cast<CodeGeneratorMIPS*>(codegen)->GetAssembler()-> // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, x).Int32Value()
@@ -222,35 +230,41 @@ class DivZeroCheckSlowPathMIPS : public SlowPathCodeMIPS {
class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
public:
- LoadClassSlowPathMIPS(HLoadClass* cls,
- HInstruction* at,
- uint32_t dex_pc,
- bool do_clinit)
- : SlowPathCodeMIPS(at),
- cls_(cls),
- dex_pc_(dex_pc),
- do_clinit_(do_clinit) {
+ LoadClassSlowPathMIPS(HLoadClass* cls, HInstruction* at)
+ : SlowPathCodeMIPS(at), cls_(cls) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
+ DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
Location out = locations->Out();
+ const uint32_t dex_pc = instruction_->GetDexPc();
+ bool must_resolve_type = instruction_->IsLoadClass() && cls_->MustResolveTypeOnSlowPath();
+ bool must_do_clinit = instruction_->IsClinitCheck() || cls_->MustGenerateClinitCheck();
+
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
- InvokeRuntimeCallingConvention calling_convention;
- DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
- dex::TypeIndex type_index = cls_->GetTypeIndex();
- __ LoadConst32(calling_convention.GetRegisterAt(0), type_index.index_);
- QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
- : kQuickInitializeType;
- mips_codegen->InvokeRuntime(entrypoint, instruction_, dex_pc_, this);
- if (do_clinit_) {
- CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
+ InvokeRuntimeCallingConvention calling_convention;
+ if (must_resolve_type) {
+ DCHECK(IsSameDexFile(cls_->GetDexFile(), mips_codegen->GetGraph()->GetDexFile()));
+ dex::TypeIndex type_index = cls_->GetTypeIndex();
+ __ LoadConst32(calling_convention.GetRegisterAt(0), type_index.index_);
+ mips_codegen->InvokeRuntime(kQuickResolveType, instruction_, dex_pc, this);
+ CheckEntrypointTypes<kQuickResolveType, void*, uint32_t>();
+ // If we also must_do_clinit, the resolved type is now in the correct register.
} else {
- CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
+ DCHECK(must_do_clinit);
+ Location source = instruction_->IsLoadClass() ? out : locations->InAt(0);
+ mips_codegen->MoveLocation(Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ source,
+ cls_->GetType());
+ }
+ if (must_do_clinit) {
+ mips_codegen->InvokeRuntime(kQuickInitializeStaticStorage, instruction_, dex_pc, this);
+ CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, mirror::Class*>();
}
// Move the class to the desired location.
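The slow path now distinguishes resolving the type (kQuickResolveType) from initializing its storage (kQuickInitializeStaticStorage takes the resolved class rather than a type index); the same structure is repeated for the other back ends below. A hedged C++ sketch of that control flow, with the entrypoints reduced to stubs:

    #include <cstdint>

    // Stand-ins for the quick entrypoints (illustrative stubs, not the real runtime).
    void* ResolveType(uint32_t /*type_index*/) { return nullptr; }
    void* InitializeStaticStorage(void* klass) { return klass; }

    void* LoadClassSlowPath(bool must_resolve_type,
                            bool must_do_clinit,
                            uint32_t type_index,
                            void* already_resolved_class) {
      void* klass = already_resolved_class;
      if (must_resolve_type) {
        // kQuickResolveType leaves the resolved class in the argument/return register.
        klass = ResolveType(type_index);
      } else {
        // HClinitCheck, or an HLoadClass that is already resolved: just move the
        // existing class into the calling-convention register (the MoveLocation above).
      }
      if (must_do_clinit) {
        // kQuickInitializeStaticStorage now takes the resolved class, not a type index.
        klass = InitializeStaticStorage(klass);
      }
      return klass;
    }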
@@ -272,12 +286,6 @@ class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
// The class this slow path will load.
HLoadClass* const cls_;
- // The dex PC of `at_`.
- const uint32_t dex_pc_;
-
- // Whether to initialize the class.
- const bool do_clinit_;
-
DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS);
};
@@ -1860,12 +1868,27 @@ void CodeGeneratorMIPS::MarkGCCard(Register object,
if (value_can_be_null) {
__ Beqz(value, &done);
}
+ // Load the address of the card table into `card`.
__ LoadFromOffset(kLoadWord,
card,
TR,
Thread::CardTableOffset<kMipsPointerSize>().Int32Value());
+ // Calculate the address of the card corresponding to `object`.
__ Srl(temp, object, gc::accounting::CardTable::kCardShift);
__ Addu(temp, card, temp);
+ // Write the `art::gc::accounting::CardTable::kCardDirty` value into the
+ // `object`'s card.
+ //
+ // Register `card` contains the address of the card table. Note that the card
+ // table's base is biased during its creation so that it always starts at an
+ // address whose least-significant byte is equal to `kCardDirty` (see
+ // art::gc::accounting::CardTable::Create). Therefore the SB instruction
+ // below writes the `kCardDirty` (byte) value into the `object`'s card
+ // (located at `card + object >> kCardShift`).
+ //
+ // This dual use of the value in register `card` (1. to calculate the location
+ // of the card to mark; and 2. to load the `kCardDirty` value) saves a load
+ // (no need to explicitly load `kCardDirty` as an immediate value).
__ Sb(card, temp, 0);
if (value_can_be_null) {
__ Bind(&done);
@@ -3594,15 +3617,14 @@ void LocationsBuilderMIPS::VisitClinitCheck(HClinitCheck* check) {
if (check->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
}
+ // Rely on the type initialization to save everything we need.
+ locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
}
void InstructionCodeGeneratorMIPS::VisitClinitCheck(HClinitCheck* check) {
// We assume the class is not null.
- SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS(
- check->GetLoadClass(),
- check,
- check->GetDexPc(),
- true);
+ SlowPathCodeMIPS* slow_path =
+ new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS(check->GetLoadClass(), check);
codegen_->AddSlowPath(slow_path);
GenerateClassInitializationCheck(slow_path,
check->GetLocations()->InAt(0).AsRegister<Register>());
@@ -8137,10 +8159,7 @@ void LocationsBuilderMIPS::VisitLoadClass(HLoadClass* cls) {
if (load_kind == HLoadClass::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the type resolution or initialization and marking to save everything we need.
- RegisterSet caller_saves = RegisterSet::Empty();
- InvokeRuntimeCallingConvention calling_convention;
- caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetCustomSlowPathCallerSaves(caller_saves);
+ locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
} else {
// For non-Baker read barriers we have a temp-clobbering call.
}
@@ -8277,8 +8296,8 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
- SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS(
- cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
+ SlowPathCodeMIPS* slow_path =
+ new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS(cls, cls);
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
__ Beqz(out, slow_path->GetEntryLabel());
@@ -8371,10 +8390,7 @@ void LocationsBuilderMIPS::VisitLoadString(HLoadString* load) {
if (load_kind == HLoadString::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the pResolveString and marking to save everything we need.
- RegisterSet caller_saves = RegisterSet::Empty();
- InvokeRuntimeCallingConvention calling_convention;
- caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetCustomSlowPathCallerSaves(caller_saves);
+ locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
} else {
// For non-Baker read barriers we have a temp-clobbering call.
}
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 75169139cd..c05f62722c 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -112,6 +112,14 @@ Location InvokeRuntimeCallingConvention::GetReturnLocation(DataType::Type type)
return Mips64ReturnLocation(type);
}
+static RegisterSet OneRegInReferenceOutSaveEverythingCallerSaves() {
+ InvokeRuntimeCallingConvention calling_convention;
+ RegisterSet caller_saves = RegisterSet::Empty();
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ // The reference is returned in the same register. This differs from the standard return location.
+ return caller_saves;
+}
+
// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
#define __ down_cast<CodeGeneratorMIPS64*>(codegen)->GetAssembler()-> // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize, x).Int32Value()
@@ -175,35 +183,41 @@ class DivZeroCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
public:
- LoadClassSlowPathMIPS64(HLoadClass* cls,
- HInstruction* at,
- uint32_t dex_pc,
- bool do_clinit)
- : SlowPathCodeMIPS64(at),
- cls_(cls),
- dex_pc_(dex_pc),
- do_clinit_(do_clinit) {
+ LoadClassSlowPathMIPS64(HLoadClass* cls, HInstruction* at)
+ : SlowPathCodeMIPS64(at), cls_(cls) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
+ DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
Location out = locations->Out();
+ const uint32_t dex_pc = instruction_->GetDexPc();
+ bool must_resolve_type = instruction_->IsLoadClass() && cls_->MustResolveTypeOnSlowPath();
+ bool must_do_clinit = instruction_->IsClinitCheck() || cls_->MustGenerateClinitCheck();
+
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
- InvokeRuntimeCallingConvention calling_convention;
- DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
- dex::TypeIndex type_index = cls_->GetTypeIndex();
- __ LoadConst32(calling_convention.GetRegisterAt(0), type_index.index_);
- QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
- : kQuickInitializeType;
- mips64_codegen->InvokeRuntime(entrypoint, instruction_, dex_pc_, this);
- if (do_clinit_) {
- CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
+ InvokeRuntimeCallingConvention calling_convention;
+ if (must_resolve_type) {
+ DCHECK(IsSameDexFile(cls_->GetDexFile(), mips64_codegen->GetGraph()->GetDexFile()));
+ dex::TypeIndex type_index = cls_->GetTypeIndex();
+ __ LoadConst32(calling_convention.GetRegisterAt(0), type_index.index_);
+ mips64_codegen->InvokeRuntime(kQuickResolveType, instruction_, dex_pc, this);
+ CheckEntrypointTypes<kQuickResolveType, void*, uint32_t>();
+ // If we also must_do_clinit, the resolved type is now in the correct register.
} else {
- CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
+ DCHECK(must_do_clinit);
+ Location source = instruction_->IsLoadClass() ? out : locations->InAt(0);
+ mips64_codegen->MoveLocation(Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ source,
+ cls_->GetType());
+ }
+ if (must_do_clinit) {
+ mips64_codegen->InvokeRuntime(kQuickInitializeStaticStorage, instruction_, dex_pc, this);
+ CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, mirror::Class*>();
}
// Move the class to the desired location.
@@ -225,12 +239,6 @@ class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
// The class this slow path will load.
HLoadClass* const cls_;
- // The dex PC of `at_`.
- const uint32_t dex_pc_;
-
- // Whether to initialize the class.
- const bool do_clinit_;
-
DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS64);
};
@@ -1482,12 +1490,27 @@ void CodeGeneratorMIPS64::MarkGCCard(GpuRegister object,
if (value_can_be_null) {
__ Beqzc(value, &done);
}
+ // Load the address of the card table into `card`.
__ LoadFromOffset(kLoadDoubleword,
card,
TR,
Thread::CardTableOffset<kMips64PointerSize>().Int32Value());
+ // Calculate the address of the card corresponding to `object`.
__ Dsrl(temp, object, gc::accounting::CardTable::kCardShift);
__ Daddu(temp, card, temp);
+ // Write the `art::gc::accounting::CardTable::kCardDirty` value into the
+ // `object`'s card.
+ //
+ // Register `card` contains the address of the card table. Note that the card
+ // table's base is biased during its creation so that it always starts at an
+ // address whose least-significant byte is equal to `kCardDirty` (see
+ // art::gc::accounting::CardTable::Create). Therefore the SB instruction
+ // below writes the `kCardDirty` (byte) value into the `object`'s card
+ // (located at `card + object >> kCardShift`).
+ //
+ // This dual use of the value in register `card` (1. to calculate the location
+ // of the card to mark; and 2. to load the `kCardDirty` value) saves a load
+ // (no need to explicitly load `kCardDirty` as an immediate value).
__ Sb(card, temp, 0);
if (value_can_be_null) {
__ Bind(&done);
@@ -3149,15 +3172,14 @@ void LocationsBuilderMIPS64::VisitClinitCheck(HClinitCheck* check) {
if (check->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
}
+ // Rely on the type initialization to save everything we need.
+ locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
}
void InstructionCodeGeneratorMIPS64::VisitClinitCheck(HClinitCheck* check) {
// We assume the class is not null.
- SlowPathCodeMIPS64* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS64(
- check->GetLoadClass(),
- check,
- check->GetDexPc(),
- true);
+ SlowPathCodeMIPS64* slow_path =
+ new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS64(check->GetLoadClass(), check);
codegen_->AddSlowPath(slow_path);
GenerateClassInitializationCheck(slow_path,
check->GetLocations()->InAt(0).AsRegister<GpuRegister>());
@@ -6209,10 +6231,7 @@ void LocationsBuilderMIPS64::VisitLoadClass(HLoadClass* cls) {
if (load_kind == HLoadClass::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the type resolution or initialization and marking to save everything we need.
- RegisterSet caller_saves = RegisterSet::Empty();
- InvokeRuntimeCallingConvention calling_convention;
- caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetCustomSlowPathCallerSaves(caller_saves);
+ locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
} else {
// For non-Baker read barriers we have a temp-clobbering call.
}
@@ -6315,8 +6334,8 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
- SlowPathCodeMIPS64* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS64(
- cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
+ SlowPathCodeMIPS64* slow_path =
+ new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS64(cls, cls);
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
__ Beqzc(out, slow_path->GetEntryLabel());
@@ -6384,10 +6403,7 @@ void LocationsBuilderMIPS64::VisitLoadString(HLoadString* load) {
if (load_kind == HLoadString::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the pResolveString and marking to save everything we need.
- RegisterSet caller_saves = RegisterSet::Empty();
- InvokeRuntimeCallingConvention calling_convention;
- caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetCustomSlowPathCallerSaves(caller_saves);
+ locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
} else {
// For non-Baker read barriers we have a temp-clobbering call.
}
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 30436eef9c..63bd8413eb 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -55,6 +55,15 @@ static constexpr int kFakeReturnRegister = Register(8);
static constexpr int64_t kDoubleNaN = INT64_C(0x7FF8000000000000);
static constexpr int32_t kFloatNaN = INT32_C(0x7FC00000);
+static RegisterSet OneRegInReferenceOutSaveEverythingCallerSaves() {
+ InvokeRuntimeCallingConvention calling_convention;
+ RegisterSet caller_saves = RegisterSet::Empty();
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ // TODO: Add GetReturnLocation() to the calling convention so that we can DCHECK()
+  // that the kPrimNot result register is the same as the first argument register.
+ return caller_saves;
+}
+
// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
#define __ down_cast<X86Assembler*>(codegen->GetAssembler())-> // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, x).Int32Value()
@@ -255,36 +264,42 @@ class LoadStringSlowPathX86 : public SlowPathCode {
class LoadClassSlowPathX86 : public SlowPathCode {
public:
- LoadClassSlowPathX86(HLoadClass* cls,
- HInstruction* at,
- uint32_t dex_pc,
- bool do_clinit)
- : SlowPathCode(at), cls_(cls), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ LoadClassSlowPathX86(HLoadClass* cls, HInstruction* at)
+ : SlowPathCode(at), cls_(cls) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
+ DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
+ Location out = locations->Out();
+ const uint32_t dex_pc = instruction_->GetDexPc();
+ bool must_resolve_type = instruction_->IsLoadClass() && cls_->MustResolveTypeOnSlowPath();
+ bool must_do_clinit = instruction_->IsClinitCheck() || cls_->MustGenerateClinitCheck();
+
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConvention calling_convention;
- dex::TypeIndex type_index = cls_->GetTypeIndex();
- __ movl(calling_convention.GetRegisterAt(0), Immediate(type_index.index_));
- x86_codegen->InvokeRuntime(do_clinit_ ? kQuickInitializeStaticStorage
- : kQuickInitializeType,
- instruction_,
- dex_pc_,
- this);
- if (do_clinit_) {
- CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
+ if (must_resolve_type) {
+ DCHECK(IsSameDexFile(cls_->GetDexFile(), x86_codegen->GetGraph()->GetDexFile()));
+ dex::TypeIndex type_index = cls_->GetTypeIndex();
+ __ movl(calling_convention.GetRegisterAt(0), Immediate(type_index.index_));
+ x86_codegen->InvokeRuntime(kQuickResolveType, instruction_, dex_pc, this);
+ CheckEntrypointTypes<kQuickResolveType, void*, uint32_t>();
+ // If we also must_do_clinit, the resolved type is now in the correct register.
} else {
- CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
+ DCHECK(must_do_clinit);
+ Location source = instruction_->IsLoadClass() ? out : locations->InAt(0);
+ x86_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), source);
+ }
+ if (must_do_clinit) {
+ x86_codegen->InvokeRuntime(kQuickInitializeStaticStorage, instruction_, dex_pc, this);
+ CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, mirror::Class*>();
}
// Move the class to the desired location.
- Location out = locations->Out();
if (out.IsValid()) {
DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
x86_codegen->Move32(out, Location::RegisterLocation(EAX));
@@ -299,12 +314,6 @@ class LoadClassSlowPathX86 : public SlowPathCode {
// The class this slow path will load.
HLoadClass* const cls_;
- // The dex PC of `at_`.
- const uint32_t dex_pc_;
-
- // Whether to initialize the class.
- const bool do_clinit_;
-
DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathX86);
};
@@ -5095,9 +5104,25 @@ void CodeGeneratorX86::MarkGCCard(Register temp,
__ testl(value, value);
__ j(kEqual, &is_null);
}
+ // Load the address of the card table into `card`.
__ fs()->movl(card, Address::Absolute(Thread::CardTableOffset<kX86PointerSize>().Int32Value()));
+ // Calculate the offset (in the card table) of the card corresponding to
+ // `object`.
__ movl(temp, object);
__ shrl(temp, Immediate(gc::accounting::CardTable::kCardShift));
+ // Write the `art::gc::accounting::CardTable::kCardDirty` value into the
+ // `object`'s card.
+ //
+ // Register `card` contains the address of the card table. Note that the card
+ // table's base is biased during its creation so that it always starts at an
+ // address whose least-significant byte is equal to `kCardDirty` (see
+ // art::gc::accounting::CardTable::Create). Therefore the MOVB instruction
+ // below writes the `kCardDirty` (byte) value into the `object`'s card
+ // (located at `card + object >> kCardShift`).
+ //
+ // This dual use of the value in register `card` (1. to calculate the location
+ // of the card to mark; and 2. to load the `kCardDirty` value) saves a load
+ // (no need to explicitly load `kCardDirty` as an immediate value).
__ movb(Address(temp, card, TIMES_1, 0),
X86ManagedRegister::FromCpuRegister(card).AsByteRegister());
if (value_can_be_null) {
@@ -6489,10 +6514,7 @@ void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) {
if (load_kind == HLoadClass::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the type resolution and/or initialization to save everything.
- RegisterSet caller_saves = RegisterSet::Empty();
- InvokeRuntimeCallingConvention calling_convention;
- caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetCustomSlowPathCallerSaves(caller_saves);
+ locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
} else {
// For non-Baker read barrier we have a temp-clobbering call.
}
@@ -6588,8 +6610,7 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFE
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
- SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathX86(
- cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
+ SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathX86(cls, cls);
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
@@ -6632,12 +6653,14 @@ void LocationsBuilderX86::VisitClinitCheck(HClinitCheck* check) {
if (check->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
}
+ // Rely on the type initialization to save everything we need.
+ locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
}
void InstructionCodeGeneratorX86::VisitClinitCheck(HClinitCheck* check) {
// We assume the class to not be null.
- SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathX86(
- check->GetLoadClass(), check, check->GetDexPc(), true);
+ SlowPathCode* slow_path =
+ new (codegen_->GetScopedAllocator()) LoadClassSlowPathX86(check->GetLoadClass(), check);
codegen_->AddSlowPath(slow_path);
GenerateClassInitializationCheck(slow_path,
check->GetLocations()->InAt(0).AsRegister<Register>());
@@ -6711,10 +6734,7 @@ void LocationsBuilderX86::VisitLoadString(HLoadString* load) {
if (load_kind == HLoadString::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the pResolveString to save everything.
- RegisterSet caller_saves = RegisterSet::Empty();
- InvokeRuntimeCallingConvention calling_convention;
- caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetCustomSlowPathCallerSaves(caller_saves);
+ locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
} else {
// For non-Baker read barrier we have a temp-clobbering call.
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 0d7837e70f..0bd7319677 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -56,6 +56,13 @@ static constexpr FloatRegister kFpuCalleeSaves[] = { XMM12, XMM13, XMM14, XMM15
static constexpr int kC2ConditionMask = 0x400;
+static RegisterSet OneRegInReferenceOutSaveEverythingCallerSaves() {
+ // Custom calling convention: RAX serves as both input and output.
+ RegisterSet caller_saves = RegisterSet::Empty();
+ caller_saves.Add(Location::RegisterLocation(RAX));
+ return caller_saves;
+}
+
// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
#define __ down_cast<X86_64Assembler*>(codegen->GetAssembler())-> // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize, x).Int32Value()
@@ -239,34 +246,41 @@ class BoundsCheckSlowPathX86_64 : public SlowPathCode {
class LoadClassSlowPathX86_64 : public SlowPathCode {
public:
- LoadClassSlowPathX86_64(HLoadClass* cls,
- HInstruction* at,
- uint32_t dex_pc,
- bool do_clinit)
- : SlowPathCode(at), cls_(cls), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ LoadClassSlowPathX86_64(HLoadClass* cls, HInstruction* at)
+ : SlowPathCode(at), cls_(cls) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
+ DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
+ Location out = locations->Out();
+ const uint32_t dex_pc = instruction_->GetDexPc();
+ bool must_resolve_type = instruction_->IsLoadClass() && cls_->MustResolveTypeOnSlowPath();
+ bool must_do_clinit = instruction_->IsClinitCheck() || cls_->MustGenerateClinitCheck();
+
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
-
SaveLiveRegisters(codegen, locations);
// Custom calling convention: RAX serves as both input and output.
- __ movl(CpuRegister(RAX), Immediate(cls_->GetTypeIndex().index_));
- x86_64_codegen->InvokeRuntime(do_clinit_ ? kQuickInitializeStaticStorage : kQuickInitializeType,
- instruction_,
- dex_pc_,
- this);
- if (do_clinit_) {
- CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
+ if (must_resolve_type) {
+ DCHECK(IsSameDexFile(cls_->GetDexFile(), x86_64_codegen->GetGraph()->GetDexFile()));
+ dex::TypeIndex type_index = cls_->GetTypeIndex();
+ __ movl(CpuRegister(RAX), Immediate(type_index.index_));
+ x86_64_codegen->InvokeRuntime(kQuickResolveType, instruction_, dex_pc, this);
+ CheckEntrypointTypes<kQuickResolveType, void*, uint32_t>();
+ // If we also must_do_clinit, the resolved type is now in the correct register.
} else {
- CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
+ DCHECK(must_do_clinit);
+ Location source = instruction_->IsLoadClass() ? out : locations->InAt(0);
+ x86_64_codegen->Move(Location::RegisterLocation(RAX), source);
+ }
+ if (must_do_clinit) {
+ x86_64_codegen->InvokeRuntime(kQuickInitializeStaticStorage, instruction_, dex_pc, this);
+ CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, mirror::Class*>();
}
- Location out = locations->Out();
// Move the class to the desired location.
if (out.IsValid()) {
DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
@@ -283,12 +297,6 @@ class LoadClassSlowPathX86_64 : public SlowPathCode {
// The class this slow path will load.
HLoadClass* const cls_;
- // The dex PC of `at_`.
- const uint32_t dex_pc_;
-
- // Whether to initialize the class.
- const bool do_clinit_;
-
DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathX86_64);
};
@@ -5428,10 +5436,26 @@ void CodeGeneratorX86_64::MarkGCCard(CpuRegister temp,
__ testl(value, value);
__ j(kEqual, &is_null);
}
+ // Load the address of the card table into `card`.
__ gs()->movq(card, Address::Absolute(Thread::CardTableOffset<kX86_64PointerSize>().Int32Value(),
/* no_rip */ true));
+ // Calculate the offset (in the card table) of the card corresponding to
+ // `object`.
__ movq(temp, object);
__ shrq(temp, Immediate(gc::accounting::CardTable::kCardShift));
+ // Write the `art::gc::accounting::CardTable::kCardDirty` value into the
+ // `object`'s card.
+ //
+ // Register `card` contains the address of the card table. Note that the card
+ // table's base is biased during its creation so that it always starts at an
+ // address whose least-significant byte is equal to `kCardDirty` (see
+ // art::gc::accounting::CardTable::Create). Therefore the MOVB instruction
+ // below writes the `kCardDirty` (byte) value into the `object`'s card
+ // (located at `card + object >> kCardShift`).
+ //
+ // This dual use of the value in register `card` (1. to calculate the location
+ // of the card to mark; and 2. to load the `kCardDirty` value) saves a load
+ // (no need to explicitly load `kCardDirty` as an immediate value).
__ movb(Address(temp, card, TIMES_1, 0), card);
if (value_can_be_null) {
__ Bind(&is_null);
@@ -5831,10 +5855,7 @@ void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) {
if (load_kind == HLoadClass::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the type resolution and/or initialization to save everything.
- // Custom calling convention: RAX serves as both input and output.
- RegisterSet caller_saves = RegisterSet::Empty();
- caller_saves.Add(Location::RegisterLocation(RAX));
- locations->SetCustomSlowPathCallerSaves(caller_saves);
+ locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
} else {
// For non-Baker read barrier we have a temp-clobbering call.
}
@@ -5927,8 +5948,8 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
- SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathX86_64(
- cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
+ SlowPathCode* slow_path =
+ new (codegen_->GetScopedAllocator()) LoadClassSlowPathX86_64(cls, cls);
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
__ testl(out, out);
@@ -5949,6 +5970,8 @@ void LocationsBuilderX86_64::VisitClinitCheck(HClinitCheck* check) {
if (check->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
}
+ // Rely on the type initialization to save everything we need.
+ locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
}
void LocationsBuilderX86_64::VisitLoadMethodHandle(HLoadMethodHandle* load) {
@@ -5973,8 +5996,8 @@ void InstructionCodeGeneratorX86_64::VisitLoadMethodType(HLoadMethodType* load)
void InstructionCodeGeneratorX86_64::VisitClinitCheck(HClinitCheck* check) {
// We assume the class to not be null.
- SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathX86_64(
- check->GetLoadClass(), check, check->GetDexPc(), true);
+ SlowPathCode* slow_path =
+ new (codegen_->GetScopedAllocator()) LoadClassSlowPathX86_64(check->GetLoadClass(), check);
codegen_->AddSlowPath(slow_path);
GenerateClassInitializationCheck(slow_path,
check->GetLocations()->InAt(0).AsRegister<CpuRegister>());
@@ -6008,10 +6031,7 @@ void LocationsBuilderX86_64::VisitLoadString(HLoadString* load) {
if (load->GetLoadKind() == HLoadString::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the pResolveString to save everything.
- // Custom calling convention: RAX serves as both input and output.
- RegisterSet caller_saves = RegisterSet::Empty();
- caller_saves.Add(Location::RegisterLocation(RAX));
- locations->SetCustomSlowPathCallerSaves(caller_saves);
+ locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
} else {
// For non-Baker read barrier we have a temp-clobbering call.
}
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 4b2bcc8ca8..74d4a8f63b 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -745,15 +745,15 @@ static void GenUnsafeGet(HInvoke* invoke,
if (type == DataType::Type::kReference && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// UnsafeGetObject/UnsafeGetObjectVolatile with Baker's read barrier case.
Register temp = WRegisterFrom(locations->GetTemp(0));
- codegen->GenerateReferenceLoadWithBakerReadBarrier(invoke,
- trg_loc,
- base,
- /* offset */ 0u,
- /* index */ offset_loc,
- /* scale_factor */ 0u,
- temp,
- /* needs_null_check */ false,
- is_volatile);
+ MacroAssembler* masm = codegen->GetVIXLAssembler();
+ // Piggy-back on the field load path using introspection for the Baker read barrier.
+ __ Add(temp, base, offset.W()); // Offset should not exceed 32 bits.
+ codegen->GenerateFieldLoadWithBakerReadBarrier(invoke,
+ trg_loc,
+ base,
+ MemOperand(temp.X()),
+ /* needs_null_check */ false,
+ is_volatile);
} else {
// Other cases.
MemOperand mem_op(base.X(), offset);
@@ -782,9 +782,9 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* in
kIntrinsified);
if (can_call && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
- // We need a temporary register for the read barrier marking slow
- // path in CodeGeneratorARM64::GenerateReferenceLoadWithBakerReadBarrier.
- locations->AddTemp(Location::RequiresRegister());
+ // We need a temporary register for the read barrier load in order to use
+ // CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier().
+ locations->AddTemp(FixedTempLocation());
}
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
@@ -984,106 +984,155 @@ static void CreateIntIntIntIntIntToInt(ArenaAllocator* allocator,
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall,
kIntrinsified);
+ if (can_call) {
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
+ }
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
locations->SetInAt(3, Location::RequiresRegister());
locations->SetInAt(4, Location::RequiresRegister());
- // If heap poisoning is enabled, we don't want the unpoisoning
- // operations to potentially clobber the output. Likewise when
- // emitting a (Baker) read barrier, which may call.
- Location::OutputOverlap overlaps =
- ((kPoisonHeapReferences && type == DataType::Type::kReference) || can_call)
- ? Location::kOutputOverlap
- : Location::kNoOutputOverlap;
- locations->SetOut(Location::RequiresRegister(), overlaps);
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
if (type == DataType::Type::kReference && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
- // Temporary register for (Baker) read barrier.
+ // We need two non-scratch temporary registers for (Baker) read barrier.
+ locations->AddTemp(Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
}
}
+class BakerReadBarrierCasSlowPathARM64 : public SlowPathCodeARM64 {
+ public:
+ explicit BakerReadBarrierCasSlowPathARM64(HInvoke* invoke)
+ : SlowPathCodeARM64(invoke) {}
+
+ const char* GetDescription() const OVERRIDE { return "BakerReadBarrierCasSlowPathARM64"; }
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
+ Arm64Assembler* assembler = arm64_codegen->GetAssembler();
+ MacroAssembler* masm = assembler->GetVIXLAssembler();
+ __ Bind(GetEntryLabel());
+
+ // Get the locations.
+ LocationSummary* locations = instruction_->GetLocations();
+ Register base = WRegisterFrom(locations->InAt(1)); // Object pointer.
+ Register offset = XRegisterFrom(locations->InAt(2)); // Long offset.
+ Register expected = WRegisterFrom(locations->InAt(3)); // Expected.
+ Register value = WRegisterFrom(locations->InAt(4)); // Value.
+
+ Register old_value = WRegisterFrom(locations->GetTemp(0)); // The old value from main path.
+ Register marked = WRegisterFrom(locations->GetTemp(1)); // The marked old value.
+
+ // Mark the `old_value` from the main path and compare with `expected`. This clobbers the
+ // `tmp_ptr` scratch register but we do not want to allocate another non-scratch temporary.
+ arm64_codegen->GenerateUnsafeCasOldValueMovWithBakerReadBarrier(marked, old_value);
+ __ Cmp(marked, expected);
+ __ B(GetExitLabel(), ne); // If taken, Z=false indicates failure.
+
+ // The `old_value` we have read did not match `expected` (which is always a to-space reference)
+ // but after the read barrier in GenerateUnsafeCasOldValueMovWithBakerReadBarrier() the marked
+ // to-space value matched, so the `old_value` must be a from-space reference to the same
+ // object. Do the same CAS loop as the main path but check for both `expected` and the unmarked
+ // old value representing the to-space and from-space references for the same object.
+
+ UseScratchRegisterScope temps(masm);
+ Register tmp_ptr = temps.AcquireX();
+ Register tmp = temps.AcquireSameSizeAs(value);
+
+ // Recalculate the `tmp_ptr` clobbered above.
+ __ Add(tmp_ptr, base.X(), Operand(offset));
+
+ // do {
+  //   tmp = [tmp_ptr];
+  // } while ((tmp == expected || tmp == old_value) && failure([tmp_ptr] <- value));
+  // result = (tmp == expected || tmp == old_value);
+
+ vixl::aarch64::Label loop_head;
+ __ Bind(&loop_head);
+ __ Ldaxr(tmp, MemOperand(tmp_ptr));
+ assembler->MaybeUnpoisonHeapReference(tmp);
+ __ Cmp(tmp, expected);
+ __ Ccmp(tmp, old_value, ZFlag, ne);
+ __ B(GetExitLabel(), ne); // If taken, Z=false indicates failure.
+ assembler->MaybePoisonHeapReference(value);
+ __ Stlxr(tmp.W(), value, MemOperand(tmp_ptr));
+ assembler->MaybeUnpoisonHeapReference(value);
+ __ Cbnz(tmp.W(), &loop_head);
+
+ // Z=true from the above CMP+CCMP indicates success.
+ __ B(GetExitLabel());
+ }
+};
+
static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorARM64* codegen) {
- MacroAssembler* masm = codegen->GetVIXLAssembler();
+ Arm64Assembler* assembler = codegen->GetAssembler();
+ MacroAssembler* masm = assembler->GetVIXLAssembler();
LocationSummary* locations = invoke->GetLocations();
- Location out_loc = locations->Out();
- Register out = WRegisterFrom(out_loc); // Boolean result.
-
- Register base = WRegisterFrom(locations->InAt(1)); // Object pointer.
- Location offset_loc = locations->InAt(2);
- Register offset = XRegisterFrom(offset_loc); // Long offset.
- Register expected = RegisterFrom(locations->InAt(3), type); // Expected.
- Register value = RegisterFrom(locations->InAt(4), type); // Value.
+ Register out = WRegisterFrom(locations->Out()); // Boolean result.
+ Register base = WRegisterFrom(locations->InAt(1)); // Object pointer.
+ Register offset = XRegisterFrom(locations->InAt(2)); // Long offset.
+ Register expected = RegisterFrom(locations->InAt(3), type); // Expected.
+ Register value = RegisterFrom(locations->InAt(4), type); // Value.
// This needs to be before the temp registers, as MarkGCCard also uses VIXL temps.
if (type == DataType::Type::kReference) {
// Mark card for object assuming new value is stored.
bool value_can_be_null = true; // TODO: Worth finding out this information?
codegen->MarkGCCard(base, value, value_can_be_null);
-
- // The only read barrier implementation supporting the
- // UnsafeCASObject intrinsic is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
-
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
- Register temp = WRegisterFrom(locations->GetTemp(0));
- // Need to make sure the reference stored in the field is a to-space
- // one before attempting the CAS or the CAS could fail incorrectly.
- codegen->UpdateReferenceFieldWithBakerReadBarrier(
- invoke,
- out_loc, // Unused, used only as a "temporary" within the read barrier.
- base,
- /* field_offset */ offset_loc,
- temp,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
- }
}
UseScratchRegisterScope temps(masm);
Register tmp_ptr = temps.AcquireX(); // Pointer to actual memory.
- Register tmp_value = temps.AcquireSameSizeAs(value); // Value in memory.
+ Register old_value; // Value in memory.
- Register tmp_32 = tmp_value.W();
+ vixl::aarch64::Label exit_loop_label;
+ vixl::aarch64::Label* exit_loop = &exit_loop_label;
+ vixl::aarch64::Label* failure = &exit_loop_label;
- __ Add(tmp_ptr, base.X(), Operand(offset));
+ if (kEmitCompilerReadBarrier && type == DataType::Type::kReference) {
+ // The only read barrier implementation supporting the
+ // UnsafeCASObject intrinsic is the Baker-style read barriers.
+ DCHECK(kUseBakerReadBarrier);
- if (kPoisonHeapReferences && type == DataType::Type::kReference) {
- codegen->GetAssembler()->PoisonHeapReference(expected);
- if (value.Is(expected)) {
- // Do not poison `value`, as it is the same register as
- // `expected`, which has just been poisoned.
- } else {
- codegen->GetAssembler()->PoisonHeapReference(value);
- }
+ BakerReadBarrierCasSlowPathARM64* slow_path =
+ new (codegen->GetScopedAllocator()) BakerReadBarrierCasSlowPathARM64(invoke);
+ codegen->AddSlowPath(slow_path);
+ exit_loop = slow_path->GetExitLabel();
+ failure = slow_path->GetEntryLabel();
+ // We need to store the `old_value` in a non-scratch register to make sure
+ // the Baker read barrier in the slow path does not clobber it.
+ old_value = WRegisterFrom(locations->GetTemp(0));
+ } else {
+ old_value = temps.AcquireSameSizeAs(value);
}
+ __ Add(tmp_ptr, base.X(), Operand(offset));
+
// do {
- // tmp_value = [tmp_ptr] - expected;
- // } while (tmp_value == 0 && failure([tmp_ptr] <- r_new_value));
- // result = tmp_value != 0;
+ // tmp_value = [tmp_ptr];
+ // } while (tmp_value == expected && failure([tmp_ptr] <- r_new_value));
+ // result = tmp_value == expected;
- vixl::aarch64::Label loop_head, exit_loop;
+ vixl::aarch64::Label loop_head;
__ Bind(&loop_head);
- __ Ldaxr(tmp_value, MemOperand(tmp_ptr));
- __ Cmp(tmp_value, expected);
- __ B(&exit_loop, ne);
- __ Stlxr(tmp_32, value, MemOperand(tmp_ptr));
- __ Cbnz(tmp_32, &loop_head);
- __ Bind(&exit_loop);
- __ Cset(out, eq);
-
- if (kPoisonHeapReferences && type == DataType::Type::kReference) {
- codegen->GetAssembler()->UnpoisonHeapReference(expected);
- if (value.Is(expected)) {
- // Do not unpoison `value`, as it is the same register as
- // `expected`, which has just been unpoisoned.
- } else {
- codegen->GetAssembler()->UnpoisonHeapReference(value);
- }
+ __ Ldaxr(old_value, MemOperand(tmp_ptr));
+ if (type == DataType::Type::kReference) {
+ assembler->MaybeUnpoisonHeapReference(old_value);
}
+ __ Cmp(old_value, expected);
+ __ B(failure, ne);
+ if (type == DataType::Type::kReference) {
+ assembler->MaybePoisonHeapReference(value);
+ }
+ __ Stlxr(old_value.W(), value, MemOperand(tmp_ptr)); // Reuse `old_value` for STLXR result.
+ if (type == DataType::Type::kReference) {
+ assembler->MaybeUnpoisonHeapReference(value);
+ }
+ __ Cbnz(old_value.W(), &loop_head);
+ __ Bind(exit_loop);
+ __ Cset(out, eq);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafeCASInt(HInvoke* invoke) {
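
For readers less familiar with LDAXR/STLXR, the following portable C++ sketch models the acceptance rule implemented by the slow path above (read barriers and heap-reference poisoning are ignored; the function and the use of std::atomic are illustrative, not ART's implementation). The CAS succeeds while the field holds either the expected to-space reference or the from-space `old_value` that the read barrier mapped to it.

    #include <atomic>
    #include <cstdint>

    // Illustrative model only. `expected` is a to-space reference; `old_value` is the
    // unmarked from-space value read on the main path that marks to `expected`.
    bool CasAcceptingEitherSpace(std::atomic<uint32_t>* field,
                                 uint32_t expected,
                                 uint32_t old_value,
                                 uint32_t new_value) {
      while (true) {
        uint32_t current = field->load(std::memory_order_acquire);  // LDAXR
        if (current != expected && current != old_value) {
          return false;  // CMP/CCMP leave Z = 0; branch to the exit label.
        }
        if (field->compare_exchange_weak(current, new_value,
                                         std::memory_order_acq_rel,
                                         std::memory_order_acquire)) {  // STLXR
          return true;  // Z = 1, so Cset(out, eq) reports success.
        }
        // Store failed (the CBNZ on the STLXR status register): retry from loop_head.
      }
    }
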
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 2963308da8..b92075053e 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -638,8 +638,11 @@ static void GenUnsafeGet(HInvoke* invoke,
if (kEmitCompilerReadBarrier) {
if (kUseBakerReadBarrier) {
Location temp = locations->GetTemp(0);
- codegen->GenerateReferenceLoadWithBakerReadBarrier(
- invoke, trg_loc, base, 0U, offset_loc, TIMES_1, temp, /* needs_null_check */ false);
+ // Piggy-back on the field load path using introspection for the Baker read barrier.
+ __ Add(RegisterFrom(temp), base, Operand(offset));
+ MemOperand src(RegisterFrom(temp), 0);
+ codegen->GenerateFieldLoadWithBakerReadBarrier(
+ invoke, trg_loc, base, src, /* needs_null_check */ false);
if (is_volatile) {
__ Dmb(vixl32::ISH);
}
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 8f822cce5a..79a7e2c858 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -1301,6 +1301,15 @@ void HInstruction::ReplaceUsesDominatedBy(HInstruction* dominator, HInstruction*
++it;
if (dominator->StrictlyDominates(user)) {
user->ReplaceInput(replacement, index);
+ } else if (user->IsPhi() && !user->AsPhi()->IsCatchPhi()) {
+ // If the input flows from a block dominated by `dominator`, we can replace it.
+ // We do not perform this for catch phis as we don't have control flow support
+ // for their inputs.
+ const ArenaVector<HBasicBlock*>& predecessors = user->GetBlock()->GetPredecessors();
+ HBasicBlock* predecessor = predecessors[index];
+ if (dominator->GetBlock()->Dominates(predecessor)) {
+ user->ReplaceInput(replacement, index);
+ }
}
}
}
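
A small self-contained example of the new phi rule above, using a toy dominator tree (a hypothetical model, not ART's HGraph API): what must be dominated is the predecessor block feeding the phi input, not the phi's own block.

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct Block {
      std::vector<Block*> predecessors;
      Block* idom = nullptr;  // Immediate dominator.
      bool Dominates(const Block* other) const {
        for (const Block* b = other; b != nullptr; b = b->idom) {
          if (b == this) return true;
        }
        return false;
      }
    };

    // For a non-catch phi, input `index` flows along the edge from predecessor `index`,
    // so it may be replaced when that predecessor is dominated by the dominator's block.
    bool CanReplacePhiInput(const Block* dominator_block, const Block* phi_block, size_t index) {
      return dominator_block->Dominates(phi_block->predecessors[index]);
    }

    int main() {
      Block b1, b2, b3, b4;  // Diamond: b1 -> {b2, b3} -> b4.
      b2.idom = &b1; b3.idom = &b1; b4.idom = &b1;
      b4.predecessors = {&b2, &b3};
      // If the replacement is defined in b2, only the input arriving along the b2 edge
      // qualifies, even though b4 itself is not dominated by b2.
      assert(CanReplacePhiInput(&b2, &b4, 0));
      assert(!CanReplacePhiInput(&b2, &b4, 1));
      return 0;
    }
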
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 16a7417301..8b9e1da0d3 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -6284,6 +6284,13 @@ class HLoadClass FINAL : public HInstruction {
bool IsInBootImage() const { return GetPackedFlag<kFlagIsInBootImage>(); }
bool MustGenerateClinitCheck() const { return GetPackedFlag<kFlagGenerateClInitCheck>(); }
+ bool MustResolveTypeOnSlowPath() const {
+ // Check that this instruction has a slow path.
+ DCHECK(GetLoadKind() != LoadKind::kRuntimeCall); // kRuntimeCall calls on main path.
+ DCHECK(GetLoadKind() == LoadKind::kBssEntry || MustGenerateClinitCheck());
+ return GetLoadKind() == LoadKind::kBssEntry;
+ }
+
void MarkInBootImage() {
SetPackedFlag<kFlagIsInBootImage>(true);
}
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index d96746fdd7..c40cbcf52a 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -711,15 +711,7 @@ CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* allocator,
CodeGenerator* codegen,
const DexFile::CodeItem* code_item_for_osr_check) const {
ArenaVector<linker::LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
- ArenaVector<uint8_t> stack_map(allocator->Adapter(kArenaAllocStackMaps));
- ArenaVector<uint8_t> method_info(allocator->Adapter(kArenaAllocStackMaps));
- size_t stack_map_size = 0;
- size_t method_info_size = 0;
- codegen->ComputeStackMapSize(&stack_map_size);
- stack_map.resize(stack_map_size);
- method_info.resize(method_info_size);
- codegen->BuildStackMaps(MemoryRegion(stack_map.data(), stack_map.size()),
- code_item_for_osr_check);
+ ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item_for_osr_check);
CompiledMethod* compiled_method = CompiledMethod::SwapAllocCompiledMethod(
GetCompilerDriver(),
@@ -1097,19 +1089,19 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
return compiled_method;
}
-static void CreateJniStackMap(ArenaStack* arena_stack,
- const JniCompiledMethod& jni_compiled_method,
- /* out */ ArenaVector<uint8_t>* stack_map) {
- ScopedArenaAllocator allocator(arena_stack);
- StackMapStream stack_map_stream(&allocator, jni_compiled_method.GetInstructionSet());
- stack_map_stream.BeginMethod(
+static ScopedArenaVector<uint8_t> CreateJniStackMap(ScopedArenaAllocator* allocator,
+ const JniCompiledMethod& jni_compiled_method) {
+ // StackMapStream is quite large, so allocate it using the ScopedArenaAllocator
+ // to stay clear of the frame size limit.
+ std::unique_ptr<StackMapStream> stack_map_stream(
+ new (allocator) StackMapStream(allocator, jni_compiled_method.GetInstructionSet()));
+ stack_map_stream->BeginMethod(
jni_compiled_method.GetFrameSize(),
jni_compiled_method.GetCoreSpillMask(),
jni_compiled_method.GetFpSpillMask(),
/* num_dex_registers */ 0);
- stack_map_stream.EndMethod();
- stack_map->resize(stack_map_stream.PrepareForFillIn());
- stack_map_stream.FillInCodeInfo(MemoryRegion(stack_map->data(), stack_map->size()));
+ stack_map_stream->EndMethod();
+ return stack_map_stream->Encode();
}
CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
@@ -1163,8 +1155,9 @@ CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
compiler_options, access_flags, method_idx, dex_file);
MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledNativeStub);
- ArenaVector<uint8_t> stack_map(allocator.Adapter(kArenaAllocStackMaps));
- CreateJniStackMap(&arena_stack, jni_compiled_method, &stack_map);
+ ScopedArenaAllocator stack_map_allocator(&arena_stack); // Will hold the stack map.
+ ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(&stack_map_allocator,
+ jni_compiled_method);
return CompiledMethod::SwapAllocCompiledMethod(
GetCompilerDriver(),
jni_compiled_method.GetInstructionSet(),
@@ -1229,11 +1222,11 @@ bool OptimizingCompiler::JitCompile(Thread* self,
ScopedNullHandle<mirror::ObjectArray<mirror::Object>> roots;
ArenaSet<ArtMethod*, std::less<ArtMethod*>> cha_single_implementation_list(
allocator.Adapter(kArenaAllocCHA));
- ArenaVector<uint8_t> stack_map(allocator.Adapter(kArenaAllocStackMaps));
ArenaStack arena_stack(runtime->GetJitArenaPool());
   // StackMapStream is large and does not fit into this frame, so we need a helper method.
- // TODO: Try to avoid the extra memory copy that results from this.
- CreateJniStackMap(&arena_stack, jni_compiled_method, &stack_map);
+ ScopedArenaAllocator stack_map_allocator(&arena_stack); // Will hold the stack map.
+ ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(&stack_map_allocator,
+ jni_compiled_method);
uint8_t* stack_map_data = nullptr;
uint8_t* roots_data = nullptr;
uint32_t data_size = code_cache->ReserveData(self,
@@ -1326,8 +1319,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
}
}
- size_t stack_map_size = 0;
- codegen->ComputeStackMapSize(&stack_map_size);
+ ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item);
size_t number_of_roots = codegen->GetNumberOfJitRoots();
// We allocate an object array to ensure the JIT roots that we will collect in EmitJitRoots
// will be visible by the GC between EmitLiterals and CommitCode. Once CommitCode is
@@ -1345,7 +1337,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
uint8_t* stack_map_data = nullptr;
uint8_t* roots_data = nullptr;
uint32_t data_size = code_cache->ReserveData(self,
- stack_map_size,
+ stack_map.size(),
number_of_roots,
method,
&stack_map_data,
@@ -1354,7 +1346,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
return false;
}
- codegen->BuildStackMaps(MemoryRegion(stack_map_data, stack_map_size), code_item);
+ memcpy(stack_map_data, stack_map.data(), stack_map.size());
codegen->EmitJitRoots(code_allocator.GetData(), roots, roots_data);
const void* code = code_cache->CommitCode(
@@ -1395,7 +1387,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
info.code_address = code_address;
info.code_size = code_allocator.GetMemory().size();
info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
- info.code_info = stack_map_size == 0 ? nullptr : stack_map_data;
+ info.code_info = stack_map.size() == 0 ? nullptr : stack_map_data;
info.cfi = ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data());
GenerateJitDebugInfo(method, info);
}
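
The CreateJniStackMap() comment above notes that StackMapStream is placement-allocated from the arena to keep it out of the stack frame. A generic, self-contained sketch of that pattern (a toy arena, not ART's ScopedArenaAllocator):

    #include <cstddef>
    #include <memory>
    #include <vector>

    // Toy arena for illustration only.
    class ToyArena {
     public:
      void* Alloc(size_t bytes) {
        storage_.emplace_back(new unsigned char[bytes]);
        return storage_.back().get();
      }
     private:
      std::vector<std::unique_ptr<unsigned char[]>> storage_;
    };

    // Placement-new overload so `new (&arena) T(...)` draws from the arena.
    inline void* operator new(size_t bytes, ToyArena* arena) { return arena->Alloc(bytes); }
    inline void operator delete(void*, ToyArena*) {}  // Matching overload for exception safety.

    struct BigHelper {
      unsigned char scratch[64 * 1024];  // Too large to keep in a stack frame.
    };

    void BuildSomething() {
      ToyArena arena;
      // Only a pointer lives in this frame; the 64 KiB object lives in the arena,
      // which is why a large helper object does not blow the frame-size limit.
      BigHelper* helper = new (&arena) BigHelper();
      helper->scratch[0] = 1;
      helper->~BigHelper();  // The arena releases the memory when it goes out of scope.
    }
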
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index 831bccc90a..060613d349 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -150,7 +150,9 @@ void PrepareForRegisterAllocation::VisitClinitCheck(HClinitCheck* check) {
if (can_merge_with_load_class && !load_class->HasUses()) {
load_class->GetBlock()->RemoveInstruction(load_class);
}
- } else if (can_merge_with_load_class && !load_class->NeedsAccessCheck()) {
+ } else if (can_merge_with_load_class &&
+ load_class->GetLoadKind() != HLoadClass::LoadKind::kRuntimeCall) {
+ DCHECK(!load_class->NeedsAccessCheck());
// Pass the initialization duty to the `HLoadClass` instruction,
// and remove the instruction from the graph.
DCHECK(load_class->HasEnvironment());
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index 588ea03d69..1aa16f45bc 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -545,60 +545,67 @@ SchedulingNode* CriticalPathSchedulingNodeSelector::GetHigherPrioritySchedulingN
void HScheduler::Schedule(HGraph* graph) {
// We run lsa here instead of in a separate pass to better control whether we
// should run the analysis or not.
+ const HeapLocationCollector* heap_location_collector = nullptr;
LoadStoreAnalysis lsa(graph);
if (!only_optimize_loop_blocks_ || graph->HasLoops()) {
lsa.Run();
- scheduling_graph_.SetHeapLocationCollector(lsa.GetHeapLocationCollector());
+ heap_location_collector = &lsa.GetHeapLocationCollector();
}
for (HBasicBlock* block : graph->GetReversePostOrder()) {
if (IsSchedulable(block)) {
- Schedule(block);
+ Schedule(block, heap_location_collector);
}
}
}
-void HScheduler::Schedule(HBasicBlock* block) {
- ScopedArenaVector<SchedulingNode*> scheduling_nodes(allocator_->Adapter(kArenaAllocScheduler));
+void HScheduler::Schedule(HBasicBlock* block,
+ const HeapLocationCollector* heap_location_collector) {
+ ScopedArenaAllocator allocator(block->GetGraph()->GetArenaStack());
+ ScopedArenaVector<SchedulingNode*> scheduling_nodes(allocator.Adapter(kArenaAllocScheduler));
// Build the scheduling graph.
- scheduling_graph_.Clear();
+ SchedulingGraph scheduling_graph(this, &allocator, heap_location_collector);
for (HBackwardInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* instruction = it.Current();
CHECK_EQ(instruction->GetBlock(), block)
<< instruction->DebugName()
<< " is in block " << instruction->GetBlock()->GetBlockId()
<< ", and expected in block " << block->GetBlockId();
- SchedulingNode* node = scheduling_graph_.AddNode(instruction, IsSchedulingBarrier(instruction));
+ SchedulingNode* node = scheduling_graph.AddNode(instruction, IsSchedulingBarrier(instruction));
CalculateLatency(node);
scheduling_nodes.push_back(node);
}
- if (scheduling_graph_.Size() <= 1) {
- scheduling_graph_.Clear();
+ if (scheduling_graph.Size() <= 1) {
return;
}
cursor_ = block->GetLastInstruction();
+ // The list of candidates for scheduling. A node becomes a candidate when all
+ // its predecessors have been scheduled.
+ ScopedArenaVector<SchedulingNode*> candidates(allocator.Adapter(kArenaAllocScheduler));
+
// Find the initial candidates for scheduling.
- candidates_.clear();
for (SchedulingNode* node : scheduling_nodes) {
if (!node->HasUnscheduledSuccessors()) {
node->MaybeUpdateCriticalPath(node->GetLatency());
- candidates_.push_back(node);
+ candidates.push_back(node);
}
}
- ScopedArenaVector<SchedulingNode*> initial_candidates(allocator_->Adapter(kArenaAllocScheduler));
+ ScopedArenaVector<SchedulingNode*> initial_candidates(allocator.Adapter(kArenaAllocScheduler));
if (kDumpDotSchedulingGraphs) {
// Remember the list of initial candidates for debug output purposes.
- initial_candidates.assign(candidates_.begin(), candidates_.end());
+ initial_candidates.assign(candidates.begin(), candidates.end());
}
// Schedule all nodes.
- while (!candidates_.empty()) {
- Schedule(selector_->PopHighestPriorityNode(&candidates_, scheduling_graph_));
+ selector_->Reset();
+ while (!candidates.empty()) {
+ SchedulingNode* node = selector_->PopHighestPriorityNode(&candidates, scheduling_graph);
+ Schedule(node, &candidates);
}
if (kDumpDotSchedulingGraphs) {
@@ -607,11 +614,12 @@ void HScheduler::Schedule(HBasicBlock* block) {
std::stringstream description;
description << graph->GetDexFile().PrettyMethod(graph->GetMethodIdx())
<< " B" << block->GetBlockId();
- scheduling_graph_.DumpAsDotGraph(description.str(), initial_candidates);
+ scheduling_graph.DumpAsDotGraph(description.str(), initial_candidates);
}
}
-void HScheduler::Schedule(SchedulingNode* scheduling_node) {
+void HScheduler::Schedule(SchedulingNode* scheduling_node,
+ /*inout*/ ScopedArenaVector<SchedulingNode*>* candidates) {
// Check whether any of the node's predecessors will be valid candidates after
// this node is scheduled.
uint32_t path_to_node = scheduling_node->GetCriticalPath();
@@ -620,7 +628,7 @@ void HScheduler::Schedule(SchedulingNode* scheduling_node) {
path_to_node + predecessor->GetInternalLatency() + predecessor->GetLatency());
predecessor->DecrementNumberOfUnscheduledSuccessors();
if (!predecessor->HasUnscheduledSuccessors()) {
- candidates_.push_back(predecessor);
+ candidates->push_back(predecessor);
}
}
for (SchedulingNode* predecessor : scheduling_node->GetOtherPredecessors()) {
@@ -630,7 +638,7 @@ void HScheduler::Schedule(SchedulingNode* scheduling_node) {
// correctness. So we do not use them to compute the critical path.
predecessor->DecrementNumberOfUnscheduledSuccessors();
if (!predecessor->HasUnscheduledSuccessors()) {
- candidates_.push_back(predecessor);
+ candidates->push_back(predecessor);
}
}
@@ -779,7 +787,6 @@ bool HInstructionScheduling::Run(bool only_optimize_loop_blocks,
#if defined(ART_ENABLE_CODEGEN_arm64) || defined(ART_ENABLE_CODEGEN_arm)
// Phase-local allocator that allocates scheduler internal data structures like
   // scheduling nodes, internal nodes map, dependencies, etc.
- ScopedArenaAllocator allocator(graph_->GetArenaStack());
CriticalPathSchedulingNodeSelector critical_path_selector;
RandomSchedulingNodeSelector random_selector;
SchedulingNodeSelector* selector = schedule_randomly
@@ -795,7 +802,7 @@ bool HInstructionScheduling::Run(bool only_optimize_loop_blocks,
switch (instruction_set_) {
#ifdef ART_ENABLE_CODEGEN_arm64
case InstructionSet::kArm64: {
- arm64::HSchedulerARM64 scheduler(&allocator, selector);
+ arm64::HSchedulerARM64 scheduler(selector);
scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks);
scheduler.Schedule(graph_);
break;
@@ -805,7 +812,7 @@ bool HInstructionScheduling::Run(bool only_optimize_loop_blocks,
case InstructionSet::kThumb2:
case InstructionSet::kArm: {
arm::SchedulingLatencyVisitorARM arm_latency_visitor(codegen_);
- arm::HSchedulerARM scheduler(&allocator, selector, &arm_latency_visitor);
+ arm::HSchedulerARM scheduler(selector, &arm_latency_visitor);
scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks);
scheduler.Schedule(graph_);
break;
diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h
index c7683e04a7..fd48d844e6 100644
--- a/compiler/optimizing/scheduler.h
+++ b/compiler/optimizing/scheduler.h
@@ -251,12 +251,14 @@ class SchedulingNode : public DeletableArenaObject<kArenaAllocScheduler> {
*/
class SchedulingGraph : public ValueObject {
public:
- SchedulingGraph(const HScheduler* scheduler, ScopedArenaAllocator* allocator)
+ SchedulingGraph(const HScheduler* scheduler,
+ ScopedArenaAllocator* allocator,
+ const HeapLocationCollector* heap_location_collector)
: scheduler_(scheduler),
allocator_(allocator),
contains_scheduling_barrier_(false),
nodes_map_(allocator_->Adapter(kArenaAllocScheduler)),
- heap_location_collector_(nullptr) {}
+ heap_location_collector_(heap_location_collector) {}
SchedulingNode* AddNode(HInstruction* instr, bool is_scheduling_barrier = false) {
std::unique_ptr<SchedulingNode> node(
@@ -268,15 +270,6 @@ class SchedulingGraph : public ValueObject {
return result;
}
- void Clear() {
- nodes_map_.clear();
- contains_scheduling_barrier_ = false;
- }
-
- void SetHeapLocationCollector(const HeapLocationCollector& heap_location_collector) {
- heap_location_collector_ = &heap_location_collector;
- }
-
SchedulingNode* GetNode(const HInstruction* instr) const {
auto it = nodes_map_.find(instr);
if (it == nodes_map_.end()) {
@@ -329,7 +322,7 @@ class SchedulingGraph : public ValueObject {
ScopedArenaHashMap<const HInstruction*, std::unique_ptr<SchedulingNode>> nodes_map_;
- const HeapLocationCollector* heap_location_collector_;
+ const HeapLocationCollector* const heap_location_collector_;
};
/*
@@ -377,6 +370,7 @@ class SchedulingLatencyVisitor : public HGraphDelegateVisitor {
class SchedulingNodeSelector : public ArenaObject<kArenaAllocScheduler> {
public:
+ virtual void Reset() {}
virtual SchedulingNode* PopHighestPriorityNode(ScopedArenaVector<SchedulingNode*>* nodes,
const SchedulingGraph& graph) = 0;
virtual ~SchedulingNodeSelector() {}
@@ -418,6 +412,7 @@ class CriticalPathSchedulingNodeSelector : public SchedulingNodeSelector {
public:
CriticalPathSchedulingNodeSelector() : prev_select_(nullptr) {}
+ void Reset() OVERRIDE { prev_select_ = nullptr; }
SchedulingNode* PopHighestPriorityNode(ScopedArenaVector<SchedulingNode*>* nodes,
const SchedulingGraph& graph) OVERRIDE;
@@ -434,16 +429,11 @@ class CriticalPathSchedulingNodeSelector : public SchedulingNodeSelector {
class HScheduler {
public:
- HScheduler(ScopedArenaAllocator* allocator,
- SchedulingLatencyVisitor* latency_visitor,
- SchedulingNodeSelector* selector)
- : allocator_(allocator),
- latency_visitor_(latency_visitor),
+ HScheduler(SchedulingLatencyVisitor* latency_visitor, SchedulingNodeSelector* selector)
+ : latency_visitor_(latency_visitor),
selector_(selector),
only_optimize_loop_blocks_(true),
- scheduling_graph_(this, allocator),
- cursor_(nullptr),
- candidates_(allocator_->Adapter(kArenaAllocScheduler)) {}
+ cursor_(nullptr) {}
virtual ~HScheduler() {}
void Schedule(HGraph* graph);
@@ -454,8 +444,9 @@ class HScheduler {
virtual bool IsSchedulingBarrier(const HInstruction* instruction) const;
protected:
- void Schedule(HBasicBlock* block);
- void Schedule(SchedulingNode* scheduling_node);
+ void Schedule(HBasicBlock* block, const HeapLocationCollector* heap_location_collector);
+ void Schedule(SchedulingNode* scheduling_node,
+ /*inout*/ ScopedArenaVector<SchedulingNode*>* candidates);
void Schedule(HInstruction* instruction);
// Any instruction returning `false` via this method will prevent its
@@ -476,19 +467,12 @@ class HScheduler {
node->SetInternalLatency(latency_visitor_->GetLastVisitedInternalLatency());
}
- ScopedArenaAllocator* const allocator_;
SchedulingLatencyVisitor* const latency_visitor_;
SchedulingNodeSelector* const selector_;
bool only_optimize_loop_blocks_;
- // We instantiate the members below as part of this class to avoid
- // instantiating them locally for every chunk scheduled.
- SchedulingGraph scheduling_graph_;
// A pointer indicating where the next instruction to be scheduled will be inserted.
HInstruction* cursor_;
- // The list of candidates for scheduling. A node becomes a candidate when all
- // its predecessors have been scheduled.
- ScopedArenaVector<SchedulingNode*> candidates_;
private:
DISALLOW_COPY_AND_ASSIGN(HScheduler);
diff --git a/compiler/optimizing/scheduler_arm.cc b/compiler/optimizing/scheduler_arm.cc
index 8dcadaad2e..d89d1171a1 100644
--- a/compiler/optimizing/scheduler_arm.cc
+++ b/compiler/optimizing/scheduler_arm.cc
@@ -679,7 +679,7 @@ void SchedulingLatencyVisitorARM::VisitArrayGet(HArrayGet* instruction) {
} else {
last_visited_internal_latency_ += kArmIntegerOpLatency;
}
- last_visited_internal_latency_ = kArmMemoryLoadLatency;
+ last_visited_latency_ = kArmMemoryLoadLatency;
}
}
break;
diff --git a/compiler/optimizing/scheduler_arm.h b/compiler/optimizing/scheduler_arm.h
index 0cb8684376..2f369486b3 100644
--- a/compiler/optimizing/scheduler_arm.h
+++ b/compiler/optimizing/scheduler_arm.h
@@ -137,10 +137,9 @@ class SchedulingLatencyVisitorARM : public SchedulingLatencyVisitor {
class HSchedulerARM : public HScheduler {
public:
- HSchedulerARM(ScopedArenaAllocator* allocator,
- SchedulingNodeSelector* selector,
+ HSchedulerARM(SchedulingNodeSelector* selector,
SchedulingLatencyVisitorARM* arm_latency_visitor)
- : HScheduler(allocator, arm_latency_visitor, selector) {}
+ : HScheduler(arm_latency_visitor, selector) {}
~HSchedulerARM() OVERRIDE {}
bool IsSchedulable(const HInstruction* instruction) const OVERRIDE {
diff --git a/compiler/optimizing/scheduler_arm64.h b/compiler/optimizing/scheduler_arm64.h
index 4f394d5e16..0d2f8d9fa0 100644
--- a/compiler/optimizing/scheduler_arm64.h
+++ b/compiler/optimizing/scheduler_arm64.h
@@ -134,8 +134,8 @@ class SchedulingLatencyVisitorARM64 : public SchedulingLatencyVisitor {
class HSchedulerARM64 : public HScheduler {
public:
- HSchedulerARM64(ScopedArenaAllocator* allocator, SchedulingNodeSelector* selector)
- : HScheduler(allocator, &arm64_latency_visitor_, selector) {}
+ explicit HSchedulerARM64(SchedulingNodeSelector* selector)
+ : HScheduler(&arm64_latency_visitor_, selector) {}
~HSchedulerARM64() OVERRIDE {}
bool IsSchedulable(const HInstruction* instruction) const OVERRIDE {
diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc
index 7079e07ae1..fe23fb4cff 100644
--- a/compiler/optimizing/scheduler_test.cc
+++ b/compiler/optimizing/scheduler_test.cc
@@ -146,7 +146,9 @@ class SchedulerTest : public OptimizingUnitTest {
environment->SetRawEnvAt(1, mul);
mul->AddEnvUseAt(div_check->GetEnvironment(), 1);
- SchedulingGraph scheduling_graph(scheduler, GetScopedAllocator());
+ SchedulingGraph scheduling_graph(scheduler,
+ GetScopedAllocator(),
+ /* heap_location_collector */ nullptr);
// Instructions must be inserted in reverse order into the scheduling graph.
for (HInstruction* instr : ReverseRange(block_instructions)) {
scheduling_graph.AddNode(instr);
@@ -276,11 +278,10 @@ class SchedulerTest : public OptimizingUnitTest {
entry->AddInstruction(instr);
}
- SchedulingGraph scheduling_graph(scheduler, GetScopedAllocator());
HeapLocationCollector heap_location_collector(graph_);
heap_location_collector.VisitBasicBlock(entry);
heap_location_collector.BuildAliasingMatrix();
- scheduling_graph.SetHeapLocationCollector(heap_location_collector);
+ SchedulingGraph scheduling_graph(scheduler, GetScopedAllocator(), &heap_location_collector);
for (HInstruction* instr : ReverseRange(block_instructions)) {
// Build scheduling graph with memory access aliasing information
@@ -354,13 +355,13 @@ class SchedulerTest : public OptimizingUnitTest {
#if defined(ART_ENABLE_CODEGEN_arm64)
TEST_F(SchedulerTest, DependencyGraphAndSchedulerARM64) {
CriticalPathSchedulingNodeSelector critical_path_selector;
- arm64::HSchedulerARM64 scheduler(GetScopedAllocator(), &critical_path_selector);
+ arm64::HSchedulerARM64 scheduler(&critical_path_selector);
TestBuildDependencyGraphAndSchedule(&scheduler);
}
TEST_F(SchedulerTest, ArrayAccessAliasingARM64) {
CriticalPathSchedulingNodeSelector critical_path_selector;
- arm64::HSchedulerARM64 scheduler(GetScopedAllocator(), &critical_path_selector);
+ arm64::HSchedulerARM64 scheduler(&critical_path_selector);
TestDependencyGraphOnAliasingArrayAccesses(&scheduler);
}
#endif
@@ -369,14 +370,14 @@ TEST_F(SchedulerTest, ArrayAccessAliasingARM64) {
TEST_F(SchedulerTest, DependencyGraphAndSchedulerARM) {
CriticalPathSchedulingNodeSelector critical_path_selector;
arm::SchedulingLatencyVisitorARM arm_latency_visitor(/*CodeGenerator*/ nullptr);
- arm::HSchedulerARM scheduler(GetScopedAllocator(), &critical_path_selector, &arm_latency_visitor);
+ arm::HSchedulerARM scheduler(&critical_path_selector, &arm_latency_visitor);
TestBuildDependencyGraphAndSchedule(&scheduler);
}
TEST_F(SchedulerTest, ArrayAccessAliasingARM) {
CriticalPathSchedulingNodeSelector critical_path_selector;
arm::SchedulingLatencyVisitorARM arm_latency_visitor(/*CodeGenerator*/ nullptr);
- arm::HSchedulerARM scheduler(GetScopedAllocator(), &critical_path_selector, &arm_latency_visitor);
+ arm::HSchedulerARM scheduler(&critical_path_selector, &arm_latency_visitor);
TestDependencyGraphOnAliasingArrayAccesses(&scheduler);
}
#endif
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 429054cec7..60ca61c133 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -45,9 +45,10 @@ void StackMapStream::BeginMethod(size_t frame_size_in_bytes,
uint32_t num_dex_registers) {
DCHECK(!in_method_) << "Mismatched Begin/End calls";
in_method_ = true;
- DCHECK_EQ(frame_size_in_bytes_, 0u) << "BeginMethod was already called";
+ DCHECK_EQ(packed_frame_size_, 0u) << "BeginMethod was already called";
- frame_size_in_bytes_ = frame_size_in_bytes;
+ DCHECK_ALIGNED(frame_size_in_bytes, kStackAlignment);
+ packed_frame_size_ = frame_size_in_bytes / kStackAlignment;
core_spill_mask_ = core_spill_mask;
fp_spill_mask_ = fp_spill_mask;
num_dex_registers_ = num_dex_registers;
@@ -56,6 +57,15 @@ void StackMapStream::BeginMethod(size_t frame_size_in_bytes,
void StackMapStream::EndMethod() {
DCHECK(in_method_) << "Mismatched Begin/End calls";
in_method_ = false;
+
+ // Read the stack masks now. The compiler might have updated them.
+ for (size_t i = 0; i < lazy_stack_masks_.size(); i++) {
+ BitVector* stack_mask = lazy_stack_masks_[i];
+ if (stack_mask != nullptr && stack_mask->GetNumberOfBits() != 0) {
+ stack_maps_[i][StackMap::kStackMaskIndex] =
+ stack_masks_.Dedup(stack_mask->GetRawStorage(), stack_mask->GetNumberOfBits());
+ }
+ }
}
void StackMapStream::BeginStackMapEntry(uint32_t dex_pc,
@@ -280,46 +290,28 @@ ALWAYS_INLINE static void EncodeTable(Writer& out, const Builder& bit_table) {
bit_table.Encode(out);
}
-size_t StackMapStream::PrepareForFillIn() {
- DCHECK_EQ(out_.size(), 0u);
-
- // Read the stack masks now. The compiler might have updated them.
- for (size_t i = 0; i < lazy_stack_masks_.size(); i++) {
- BitVector* stack_mask = lazy_stack_masks_[i];
- if (stack_mask != nullptr && stack_mask->GetNumberOfBits() != 0) {
- stack_maps_[i][StackMap::kStackMaskIndex] =
- stack_masks_.Dedup(stack_mask->GetRawStorage(), stack_mask->GetNumberOfBits());
- }
- }
+ScopedArenaVector<uint8_t> StackMapStream::Encode() {
+ DCHECK(in_stack_map_ == false) << "Mismatched Begin/End calls";
+ DCHECK(in_inline_info_ == false) << "Mismatched Begin/End calls";
- EncodeUnsignedLeb128(&out_, frame_size_in_bytes_);
- EncodeUnsignedLeb128(&out_, core_spill_mask_);
- EncodeUnsignedLeb128(&out_, fp_spill_mask_);
- EncodeUnsignedLeb128(&out_, num_dex_registers_);
- BitMemoryWriter<ScopedArenaVector<uint8_t>> out(&out_, out_.size() * kBitsPerByte);
+ ScopedArenaVector<uint8_t> buffer(allocator_->Adapter(kArenaAllocStackMapStream));
+ BitMemoryWriter<ScopedArenaVector<uint8_t>> out(&buffer);
+ out.WriteVarint(packed_frame_size_);
+ out.WriteVarint(core_spill_mask_);
+ out.WriteVarint(fp_spill_mask_);
+ out.WriteVarint(num_dex_registers_);
EncodeTable(out, stack_maps_);
- EncodeTable(out, inline_infos_);
- EncodeTable(out, method_infos_);
EncodeTable(out, register_masks_);
EncodeTable(out, stack_masks_);
+ EncodeTable(out, inline_infos_);
+ EncodeTable(out, method_infos_);
EncodeTable(out, dex_register_masks_);
EncodeTable(out, dex_register_maps_);
EncodeTable(out, dex_register_catalog_);
- return out_.size();
-}
-
-void StackMapStream::FillInCodeInfo(MemoryRegion region) {
- DCHECK(in_stack_map_ == false) << "Mismatched Begin/End calls";
- DCHECK(in_inline_info_ == false) << "Mismatched Begin/End calls";
- DCHECK_NE(0u, out_.size()) << "PrepareForFillIn not called before FillIn";
- DCHECK_EQ(region.size(), out_.size());
-
- region.CopyFromVector(0, out_);
-
// Verify that we can load the CodeInfo and check some essentials.
- CodeInfo code_info(region);
- CHECK_EQ(code_info.Size(), out_.size());
+ CodeInfo code_info(buffer.data());
+ CHECK_EQ(code_info.Size(), buffer.size());
CHECK_EQ(code_info.GetNumberOfStackMaps(), stack_maps_.size());
// Verify all written data (usually only in debug builds).
@@ -328,6 +320,8 @@ void StackMapStream::FillInCodeInfo(MemoryRegion region) {
dcheck(code_info);
}
}
+
+ return buffer;
}
} // namespace art
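
A condensed usage sketch of the new single-call API, assuming the surrounding ART types and an ArenaStack named arena_stack (it mirrors the updated call sites in stack_map_test.cc below and replaces the removed PrepareForFillIn()/FillInCodeInfo() pair):

    ScopedArenaAllocator allocator(&arena_stack);
    StackMapStream stream(&allocator, InstructionSet::kArm64);
    stream.BeginMethod(/* frame_size_in_bytes */ 64,  // Must now be kStackAlignment-aligned.
                       /* core_spill_mask */ 0u,
                       /* fp_spill_mask */ 0u,
                       /* num_dex_registers */ 0u);
    stream.EndMethod();
    // Encode() sizes the output, writes the varint header and the BitTables, and
    // verifies the result; the buffer comes from the allocator passed to the stream.
    ScopedArenaVector<uint8_t> encoded = stream.Encode();
    CodeInfo code_info(encoded.data());
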
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index de79f4921e..01c6bf9e0e 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -34,10 +34,11 @@ namespace art {
* Collects and builds stack maps for a method. All the stack maps
* for a method are placed in a CodeInfo object.
*/
-class StackMapStream : public ValueObject {
+class StackMapStream : public DeletableArenaObject<kArenaAllocStackMapStream> {
public:
explicit StackMapStream(ScopedArenaAllocator* allocator, InstructionSet instruction_set)
- : instruction_set_(instruction_set),
+ : allocator_(allocator),
+ instruction_set_(instruction_set),
stack_maps_(allocator),
inline_infos_(allocator),
method_infos_(allocator),
@@ -46,13 +47,13 @@ class StackMapStream : public ValueObject {
dex_register_masks_(allocator),
dex_register_maps_(allocator),
dex_register_catalog_(allocator),
- out_(allocator->Adapter(kArenaAllocStackMapStream)),
lazy_stack_masks_(allocator->Adapter(kArenaAllocStackMapStream)),
current_stack_map_(),
current_inline_infos_(allocator->Adapter(kArenaAllocStackMapStream)),
current_dex_registers_(allocator->Adapter(kArenaAllocStackMapStream)),
previous_dex_registers_(allocator->Adapter(kArenaAllocStackMapStream)),
dex_register_timestamp_(allocator->Adapter(kArenaAllocStackMapStream)),
+ expected_num_dex_registers_(0u),
temp_dex_register_mask_(allocator, 32, true, kArenaAllocStackMapStream),
temp_dex_register_map_(allocator->Adapter(kArenaAllocStackMapStream)) {
}
@@ -87,18 +88,18 @@ class StackMapStream : public ValueObject {
uint32_t GetStackMapNativePcOffset(size_t i);
void SetStackMapNativePcOffset(size_t i, uint32_t native_pc_offset);
- // Prepares the stream to fill in a memory region. Must be called before FillIn.
- // Returns the size (in bytes) needed to store this stream.
- size_t PrepareForFillIn();
- void FillInCodeInfo(MemoryRegion region);
+ // Encode all stack map data.
+ // The returned vector is allocated using the allocator passed to the StackMapStream.
+ ScopedArenaVector<uint8_t> Encode();
private:
static constexpr uint32_t kNoValue = -1;
void CreateDexRegisterMap();
+ ScopedArenaAllocator* allocator_;
const InstructionSet instruction_set_;
- uint32_t frame_size_in_bytes_ = 0;
+ uint32_t packed_frame_size_ = 0;
uint32_t core_spill_mask_ = 0;
uint32_t fp_spill_mask_ = 0;
uint32_t num_dex_registers_ = 0;
@@ -108,9 +109,8 @@ class StackMapStream : public ValueObject {
BitTableBuilder<RegisterMask> register_masks_;
BitmapTableBuilder stack_masks_;
BitmapTableBuilder dex_register_masks_;
- BitTableBuilder<MaskInfo> dex_register_maps_;
+ BitTableBuilder<DexRegisterMapInfo> dex_register_maps_;
BitTableBuilder<DexRegisterInfo> dex_register_catalog_;
- ScopedArenaVector<uint8_t> out_;
ScopedArenaVector<BitVector*> lazy_stack_masks_;
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 16a9216311..d28f09fbba 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -62,12 +62,9 @@ TEST(StackMapTest, Test1) {
stream.EndStackMapEntry();
stream.EndMethod();
- size_t size = stream.PrepareForFillIn();
- void* memory = allocator.Alloc(size, kArenaAllocMisc);
- MemoryRegion region(memory, size);
- stream.FillInCodeInfo(region);
+ ScopedArenaVector<uint8_t> memory = stream.Encode();
- CodeInfo code_info(region);
+ CodeInfo code_info(memory.data());
ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
@@ -151,12 +148,9 @@ TEST(StackMapTest, Test2) {
stream.EndStackMapEntry();
stream.EndMethod();
- size_t size = stream.PrepareForFillIn();
- void* memory = allocator.Alloc(size, kArenaAllocMisc);
- MemoryRegion region(memory, size);
- stream.FillInCodeInfo(region);
+ ScopedArenaVector<uint8_t> memory = stream.Encode();
- CodeInfo code_info(region);
+ CodeInfo code_info(memory.data());
ASSERT_EQ(4u, code_info.GetNumberOfStackMaps());
uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
@@ -324,12 +318,9 @@ TEST(StackMapTest, TestDeduplicateInlineInfoDexRegisterMap) {
stream.EndStackMapEntry();
stream.EndMethod();
- size_t size = stream.PrepareForFillIn();
- void* memory = allocator.Alloc(size, kArenaAllocMisc);
- MemoryRegion region(memory, size);
- stream.FillInCodeInfo(region);
+ ScopedArenaVector<uint8_t> memory = stream.Encode();
- CodeInfo code_info(region);
+ CodeInfo code_info(memory.data());
ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
@@ -382,12 +373,9 @@ TEST(StackMapTest, TestNonLiveDexRegisters) {
stream.EndStackMapEntry();
stream.EndMethod();
- size_t size = stream.PrepareForFillIn();
- void* memory = allocator.Alloc(size, kArenaAllocMisc);
- MemoryRegion region(memory, size);
- stream.FillInCodeInfo(region);
+ ScopedArenaVector<uint8_t> memory = stream.Encode();
- CodeInfo code_info(region);
+ CodeInfo code_info(memory.data());
ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
@@ -444,12 +432,9 @@ TEST(StackMapTest, TestShareDexRegisterMap) {
stream.EndStackMapEntry();
stream.EndMethod();
- size_t size = stream.PrepareForFillIn();
- void* memory = allocator.Alloc(size, kArenaAllocMisc);
- MemoryRegion region(memory, size);
- stream.FillInCodeInfo(region);
+ ScopedArenaVector<uint8_t> memory = stream.Encode();
- CodeInfo ci(region);
+ CodeInfo ci(memory.data());
// Verify first stack map.
StackMap sm0 = ci.GetStackMapAt(0);
@@ -495,12 +480,9 @@ TEST(StackMapTest, TestNoDexRegisterMap) {
stream.EndStackMapEntry();
stream.EndMethod();
- size_t size = stream.PrepareForFillIn();
- void* memory = allocator.Alloc(size, kArenaAllocMisc);
- MemoryRegion region(memory, size);
- stream.FillInCodeInfo(region);
+ ScopedArenaVector<uint8_t> memory = stream.Encode();
- CodeInfo code_info(region);
+ CodeInfo code_info(memory.data());
ASSERT_EQ(2u, code_info.GetNumberOfStackMaps());
uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
@@ -597,12 +579,9 @@ TEST(StackMapTest, InlineTest) {
stream.EndStackMapEntry();
stream.EndMethod();
- size_t size = stream.PrepareForFillIn();
- void* memory = allocator.Alloc(size, kArenaAllocMisc);
- MemoryRegion region(memory, size);
- stream.FillInCodeInfo(region);
+ ScopedArenaVector<uint8_t> memory = stream.Encode();
- CodeInfo ci(region);
+ CodeInfo ci(memory.data());
{
// Verify first stack map.
@@ -744,12 +723,9 @@ TEST(StackMapTest, TestDeduplicateStackMask) {
stream.EndStackMapEntry();
stream.EndMethod();
- size_t size = stream.PrepareForFillIn();
- void* memory = allocator.Alloc(size, kArenaAllocMisc);
- MemoryRegion region(memory, size);
- stream.FillInCodeInfo(region);
+ ScopedArenaVector<uint8_t> memory = stream.Encode();
- CodeInfo code_info(region);
+ CodeInfo code_info(memory.data());
ASSERT_EQ(2u, code_info.GetNumberOfStackMaps());
StackMap stack_map1 = code_info.GetStackMapForNativePcOffset(4 * kPcAlign);
@@ -771,14 +747,12 @@ TEST(StackMapTest, TestDedupeBitTables) {
stream.EndStackMapEntry();
stream.EndMethod();
- std::vector<uint8_t> memory(stream.PrepareForFillIn());
- MemoryRegion region(memory.data(), memory.size());
- stream.FillInCodeInfo(region);
+ ScopedArenaVector<uint8_t> memory = stream.Encode();
std::vector<uint8_t> out;
- CodeInfo::DedupeMap dedupe_map;
- size_t deduped1 = CodeInfo::Dedupe(&out, memory.data(), &dedupe_map);
- size_t deduped2 = CodeInfo::Dedupe(&out, memory.data(), &dedupe_map);
+ CodeInfo::Deduper deduper(&out);
+ size_t deduped1 = deduper.Dedupe(memory.data());
+ size_t deduped2 = deduper.Dedupe(memory.data());
for (size_t deduped : { deduped1, deduped2 }) {
CodeInfo code_info(out.data() + deduped);
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index a1a547c8e5..8bac7206c6 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -38,6 +38,7 @@
#include "compiled_method-inl.h"
#include "debug/method_debug_info.h"
#include "dex/art_dex_file_loader.h"
+#include "dex/class_accessor-inl.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_loader.h"
#include "dex/dex_file_types.h"
@@ -791,7 +792,7 @@ class OatWriter::DexMethodVisitor {
return true;
}
- virtual bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it) = 0;
+ virtual bool VisitMethod(size_t class_def_method_index, const ClassAccessor::Method& method) = 0;
virtual bool EndClass() {
if (kIsDebugBuild) {
@@ -861,10 +862,10 @@ class OatWriter::InitBssLayoutMethodVisitor : public DexMethodVisitor {
: DexMethodVisitor(writer, /* offset */ 0u) {}
bool VisitMethod(size_t class_def_method_index ATTRIBUTE_UNUSED,
- const ClassDataItemIterator& it) OVERRIDE {
+ const ClassAccessor::Method& method) OVERRIDE {
// Look for patches with .bss references and prepare maps with placeholders for their offsets.
CompiledMethod* compiled_method = writer_->compiler_driver_->GetCompiledMethod(
- MethodReference(dex_file_, it.GetMemberIndex()));
+ MethodReference(dex_file_, method.GetIndex()));
if (HasCompiledCode(compiled_method)) {
for (const LinkerPatch& patch : compiled_method->GetPatches()) {
if (patch.GetType() == LinkerPatch::Type::kDataBimgRelRo) {
@@ -943,12 +944,12 @@ class OatWriter::InitOatClassesMethodVisitor : public DexMethodVisitor {
}
bool VisitMethod(size_t class_def_method_index ATTRIBUTE_UNUSED,
- const ClassDataItemIterator& it) OVERRIDE {
+ const ClassAccessor::Method& method) OVERRIDE {
// Fill in the compiled_methods_ array for methods that have a
// CompiledMethod. We track the number of non-null entries in
// compiled_methods_with_code_ since we only want to allocate
// OatMethodOffsets for the compiled methods.
- uint32_t method_idx = it.GetMemberIndex();
+ uint32_t method_idx = method.GetIndex();
CompiledMethod* compiled_method =
writer_->compiler_driver_->GetCompiledMethod(MethodReference(dex_file_, method_idx));
compiled_methods_.push_back(compiled_method);
@@ -1150,7 +1151,7 @@ class OatWriter::LayoutCodeMethodVisitor : public OatDexMethodVisitor {
}
bool VisitMethod(size_t class_def_method_index,
- const ClassDataItemIterator& it)
+ const ClassAccessor::Method& method)
OVERRIDE
REQUIRES_SHARED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
@@ -1182,7 +1183,7 @@ class OatWriter::LayoutCodeMethodVisitor : public OatDexMethodVisitor {
}
}
- MethodReference method_ref(dex_file_, it.GetMemberIndex());
+ MethodReference method_ref(dex_file_, method.GetIndex());
// Lookup method hotness from profile, if available.
// Otherwise assume a default of none-hotness.
@@ -1199,8 +1200,8 @@ class OatWriter::LayoutCodeMethodVisitor : public OatDexMethodVisitor {
method_ref,
method_offsets_index_,
class_def_index_,
- it.GetMethodAccessFlags(),
- it.GetMethodCodeItem(),
+ method.GetAccessFlags(),
+ method.GetCodeItem(),
debug_info_idx
};
ordered_methods_.push_back(method_data);
@@ -1437,12 +1438,13 @@ class OatWriter::LayoutReserveOffsetCodeMethodVisitor : public OrderedMethodVisi
class OatWriter::InitMapMethodVisitor : public OatDexMethodVisitor {
public:
- static constexpr bool kDebugVerifyDedupedCodeInfo = false;
-
InitMapMethodVisitor(OatWriter* writer, size_t offset)
- : OatDexMethodVisitor(writer, offset) {}
+ : OatDexMethodVisitor(writer, offset),
+ dedupe_bit_table_(&writer_->code_info_data_) {
+ }
- bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it ATTRIBUTE_UNUSED)
+ bool VisitMethod(size_t class_def_method_index,
+ const ClassAccessor::Method& method ATTRIBUTE_UNUSED)
OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
@@ -1453,21 +1455,9 @@ class OatWriter::InitMapMethodVisitor : public OatDexMethodVisitor {
ArrayRef<const uint8_t> map = compiled_method->GetVmapTable();
if (map.size() != 0u) {
- // Deduplicate the inner bittables within the CodeInfo.
- std::vector<uint8_t>* data = &writer_->code_info_data_;
size_t offset = dedupe_code_info_.GetOrCreate(map.data(), [=]() {
- size_t deduped_offset = CodeInfo::Dedupe(data, map.data(), &dedupe_bit_table_);
- if (kDebugVerifyDedupedCodeInfo) {
- InstructionSet isa = writer_->GetCompilerOptions().GetInstructionSet();
- std::stringstream old_code_info;
- VariableIndentationOutputStream old_vios(&old_code_info);
- std::stringstream new_code_info;
- VariableIndentationOutputStream new_vios(&new_code_info);
- CodeInfo(map.data()).Dump(&old_vios, 0, true, isa);
- CodeInfo(data->data() + deduped_offset).Dump(&new_vios, 0, true, isa);
- DCHECK_EQ(old_code_info.str(), new_code_info.str());
- }
- return offset_ + deduped_offset;
+ // Deduplicate the inner BitTable<>s within the CodeInfo.
+ return offset_ + dedupe_bit_table_.Dedupe(map.data());
});
// Code offset is not initialized yet, so set the map offset to 0u-offset.
DCHECK_EQ(oat_class->method_offsets_[method_offsets_index_].code_offset_, 0u);
@@ -1485,8 +1475,8 @@ class OatWriter::InitMapMethodVisitor : public OatDexMethodVisitor {
// The compiler already deduplicated the pointers but it did not dedupe the tables.
SafeMap<const uint8_t*, size_t> dedupe_code_info_;
- // Deduplicate at BitTable level. The value is bit offset within code_info_data_.
- std::map<BitMemoryRegion, uint32_t, BitMemoryRegion::Less> dedupe_bit_table_;
+ // Deduplicate at BitTable level.
+ CodeInfo::Deduper dedupe_bit_table_;
};
class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor {
@@ -1543,7 +1533,7 @@ class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor {
return true;
}
- bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it) OVERRIDE
+ bool VisitMethod(size_t class_def_method_index, const ClassAccessor::Method& method) OVERRIDE
REQUIRES_SHARED(Locks::mutator_lock_) {
// Skip methods that are not in the image.
if (!IsImageClass()) {
@@ -1562,22 +1552,22 @@ class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor {
Thread* self = Thread::Current();
ObjPtr<mirror::DexCache> dex_cache = class_linker_->FindDexCache(self, *dex_file_);
- ArtMethod* method;
+ ArtMethod* resolved_method;
if (writer_->GetCompilerOptions().IsBootImage()) {
- const InvokeType invoke_type = it.GetMethodInvokeType(
- dex_file_->GetClassDef(class_def_index_));
+ const InvokeType invoke_type = method.GetInvokeType(
+ dex_file_->GetClassDef(class_def_index_).access_flags_);
// Unchecked as we hold mutator_lock_ on entry.
ScopedObjectAccessUnchecked soa(self);
StackHandleScope<1> hs(self);
- method = class_linker_->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
- it.GetMemberIndex(),
+ resolved_method = class_linker_->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
+ method.GetIndex(),
hs.NewHandle(dex_cache),
ScopedNullHandle<mirror::ClassLoader>(),
/* referrer */ nullptr,
invoke_type);
- if (method == nullptr) {
+ if (resolved_method == nullptr) {
LOG(FATAL_WITHOUT_ABORT) << "Unexpected failure to resolve a method: "
- << dex_file_->PrettyMethod(it.GetMemberIndex(), true);
+ << dex_file_->PrettyMethod(method.GetIndex(), true);
self->AssertPendingException();
mirror::Throwable* exc = self->GetException();
std::string dump = exc->Dump();
@@ -1588,12 +1578,14 @@ class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor {
// Should already have been resolved by the compiler.
// It may not be resolved if the class failed to verify, in this case, don't set the
// entrypoint. This is not fatal since we shall use a resolution method.
- method = class_linker_->LookupResolvedMethod(it.GetMemberIndex(), dex_cache, class_loader_);
+ resolved_method = class_linker_->LookupResolvedMethod(method.GetIndex(),
+ dex_cache,
+ class_loader_);
}
- if (method != nullptr &&
+ if (resolved_method != nullptr &&
compiled_method != nullptr &&
compiled_method->GetQuickCode().size() != 0) {
- method->SetEntryPointFromQuickCompiledCodePtrSize(
+ resolved_method->SetEntryPointFromQuickCompiledCodePtrSize(
reinterpret_cast<void*>(offsets.code_offset_), pointer_size_);
}
@@ -1904,7 +1896,7 @@ class OatWriter::WriteCodeMethodVisitor : public OrderedMethodVisitor {
DCHECK(target != nullptr);
const void* oat_code_offset =
target->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size_);
- if (oat_code_offset != 0) {
+ if (oat_code_offset != nullptr) {
DCHECK(!writer_->GetCompilerOptions().IsBootImage());
DCHECK(!Runtime::Current()->GetClassLinker()->IsQuickResolutionStub(oat_code_offset));
DCHECK(!Runtime::Current()->GetClassLinker()->IsQuickToInterpreterBridge(oat_code_offset));
@@ -2001,26 +1993,17 @@ class OatWriter::WriteCodeMethodVisitor : public OrderedMethodVisitor {
// Visit all methods from all classes in all dex files with the specified visitor.
bool OatWriter::VisitDexMethods(DexMethodVisitor* visitor) {
for (const DexFile* dex_file : *dex_files_) {
- const size_t class_def_count = dex_file->NumClassDefs();
- for (size_t class_def_index = 0; class_def_index != class_def_count; ++class_def_index) {
- if (UNLIKELY(!visitor->StartClass(dex_file, class_def_index))) {
+ for (ClassAccessor accessor : dex_file->GetClasses()) {
+ if (UNLIKELY(!visitor->StartClass(dex_file, accessor.GetClassDefIndex()))) {
return false;
}
if (MayHaveCompiledMethods()) {
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
- const uint8_t* class_data = dex_file->GetClassData(class_def);
- if (class_data != nullptr) { // ie not an empty class, such as a marker interface
- ClassDataItemIterator it(*dex_file, class_data);
- it.SkipAllFields();
- size_t class_def_method_index = 0u;
- while (it.HasNextMethod()) {
- if (!visitor->VisitMethod(class_def_method_index, it)) {
- return false;
- }
- ++class_def_method_index;
- it.Next();
+ size_t class_def_method_index = 0u;
+ for (const ClassAccessor::Method& method : accessor.GetMethods()) {
+ if (!visitor->VisitMethod(class_def_method_index, method)) {
+ return false;
}
- DCHECK(!it.HasNext());
+ ++class_def_method_index;
}
}
if (UNLIKELY(!visitor->EndClass())) {
@@ -2087,6 +2070,7 @@ size_t OatWriter::InitOatMaps(size_t offset) {
InitMapMethodVisitor visitor(this, offset);
bool success = VisitDexMethods(&visitor);
DCHECK(success);
+ code_info_data_.shrink_to_fit();
offset += code_info_data_.size();
}
return offset;
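Note on the change above: InitMapMethodVisitor now feeds each method's CodeInfo through CodeInfo::Deduper, which keys on the encoded contents so identical tables are written once and later methods reuse the earlier offset. A rough byte-level analogue of that content-keyed map (BlobDeduper is illustrative only; the real deduper works on the inner BitTable bit regions, not whole byte blobs):

#include <cstdint>
#include <map>
#include <vector>

// Identical blobs are appended to `out` once; later callers get the first copy's offset.
class BlobDeduper {
 public:
  explicit BlobDeduper(std::vector<uint8_t>* out) : out_(out) {}

  size_t Dedupe(const uint8_t* data, size_t size) {
    std::vector<uint8_t> key(data, data + size);
    auto it = seen_.find(key);
    if (it != seen_.end()) {
      return it->second;  // Already emitted; reuse the stored offset.
    }
    size_t offset = out_->size();
    out_->insert(out_->end(), data, data + size);
    seen_.emplace(std::move(key), offset);
    return offset;
  }

 private:
  std::vector<uint8_t>* out_;
  std::map<std::vector<uint8_t>, size_t> seen_;
};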
diff --git a/dex2oat/linker/oat_writer_test.cc b/dex2oat/linker/oat_writer_test.cc
index bb27e8c80b..d73f10a6ed 100644
--- a/dex2oat/linker/oat_writer_test.cc
+++ b/dex2oat/linker/oat_writer_test.cc
@@ -26,6 +26,7 @@
#include "compiled_method-inl.h"
#include "compiler.h"
#include "debug/method_debug_info.h"
+#include "dex/class_accessor-inl.h"
#include "dex/dex_file_loader.h"
#include "dex/quick_compiler_callbacks.h"
#include "dex/test_dex_file_builder.h"
@@ -428,22 +429,15 @@ TEST_F(OatTest, WriteRead) {
CHECK_EQ(dex_file.GetLocationChecksum(), oat_dex_file->GetDexFileLocationChecksum());
ScopedObjectAccess soa(Thread::Current());
auto pointer_size = class_linker->GetImagePointerSize();
- for (size_t i = 0; i < dex_file.NumClassDefs(); i++) {
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(i);
- const uint8_t* class_data = dex_file.GetClassData(class_def);
-
- size_t num_virtual_methods = 0;
- if (class_data != nullptr) {
- ClassDataItemIterator it(dex_file, class_data);
- num_virtual_methods = it.NumVirtualMethods();
- }
+ for (ClassAccessor accessor : dex_file.GetClasses()) {
+ size_t num_virtual_methods = accessor.NumVirtualMethods();
- const char* descriptor = dex_file.GetClassDescriptor(class_def);
+ const char* descriptor = accessor.GetDescriptor();
ObjPtr<mirror::Class> klass = class_linker->FindClass(soa.Self(),
descriptor,
ScopedNullHandle<mirror::ClassLoader>());
- const OatFile::OatClass oat_class = oat_dex_file->GetOatClass(i);
+ const OatFile::OatClass oat_class = oat_dex_file->GetOatClass(accessor.GetClassDefIndex());
CHECK_EQ(ClassStatus::kNotReady, oat_class.GetStatus()) << descriptor;
CHECK_EQ(kCompile ? OatClassType::kOatClassAllCompiled : OatClassType::kOatClassNoneCompiled,
oat_class.GetType()) << descriptor;
diff --git a/dexlayout/dex_ir.h b/dexlayout/dex_ir.h
index 9f355ba9e8..8f853eaeb5 100644
--- a/dexlayout/dex_ir.h
+++ b/dexlayout/dex_ir.h
@@ -926,8 +926,6 @@ class ClassDef : public IndexedItem {
ClassData* GetClassData() { return class_data_; }
EncodedArrayItem* StaticValues() { return static_values_; }
- MethodItem* GenerateMethodItem(Header& header, ClassDataItemIterator& cdii);
-
void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
private:
diff --git a/dexlayout/dex_ir_builder.cc b/dexlayout/dex_ir_builder.cc
index a04a2349c4..a83a46b7e2 100644
--- a/dexlayout/dex_ir_builder.cc
+++ b/dexlayout/dex_ir_builder.cc
@@ -21,6 +21,7 @@
#include "dex_ir_builder.h"
+#include "dex/class_accessor-inl.h"
#include "dex/code_item_accessors-inl.h"
#include "dex/dex_file_exception_helpers.h"
#include "dexlayout.h"
@@ -162,7 +163,7 @@ class BuilderMaps {
const DexFile::CodeItem* disk_code_item,
uint32_t offset,
uint32_t dex_method_index);
- ClassData* CreateClassData(const DexFile& dex_file, const uint8_t* encoded_data, uint32_t offset);
+ ClassData* CreateClassData(const DexFile& dex_file, const DexFile::ClassDef& class_def);
void AddAnnotationsFromMapListSection(const DexFile& dex_file,
uint32_t start_offset,
@@ -197,7 +198,7 @@ class BuilderMaps {
uint8_t length,
EncodedValue* item);
- MethodItem GenerateMethodItem(const DexFile& dex_file, ClassDataItemIterator& cdii);
+ MethodItem GenerateMethodItem(const DexFile& dex_file, const ClassAccessor::Method& method);
ParameterAnnotation* GenerateParameterAnnotation(
const DexFile& dex_file,
@@ -488,8 +489,7 @@ void BuilderMaps::CreateClassDef(const DexFile& dex_file, uint32_t i) {
const uint8_t* static_data = dex_file.GetEncodedStaticFieldValuesArray(disk_class_def);
EncodedArrayItem* static_values =
CreateEncodedArrayItem(dex_file, static_data, disk_class_def.static_values_off_);
- ClassData* class_data = CreateClassData(
- dex_file, dex_file.GetClassData(disk_class_def), disk_class_def.class_data_off_);
+ ClassData* class_data = CreateClassData(dex_file, disk_class_def);
CreateAndAddIndexedItem(header_->ClassDefs(),
header_->ClassDefs().GetOffset() + i * ClassDef::ItemSize(),
i,
@@ -894,36 +894,43 @@ CodeItem* BuilderMaps::DedupeOrCreateCodeItem(const DexFile& dex_file,
return code_item;
}
-ClassData* BuilderMaps::CreateClassData(
- const DexFile& dex_file, const uint8_t* encoded_data, uint32_t offset) {
+ClassData* BuilderMaps::CreateClassData(const DexFile& dex_file,
+ const DexFile::ClassDef& class_def) {
// Read the fields and methods defined by the class, resolving the circular reference from those
// to classes by setting class at the same time.
+ const uint32_t offset = class_def.class_data_off_;
ClassData* class_data = class_datas_map_.GetExistingObject(offset);
- if (class_data == nullptr && encoded_data != nullptr) {
- ClassDataItemIterator cdii(dex_file, encoded_data);
+ if (class_data == nullptr && offset != 0u) {
+ ClassAccessor accessor(dex_file, class_def);
// Static fields.
FieldItemVector* static_fields = new FieldItemVector();
- for (; cdii.HasNextStaticField(); cdii.Next()) {
- FieldId* field_item = header_->FieldIds()[cdii.GetMemberIndex()];
- uint32_t access_flags = cdii.GetRawMemberAccessFlags();
+ for (const ClassAccessor::Field& field : accessor.GetStaticFields()) {
+ FieldId* field_item = header_->FieldIds()[field.GetIndex()];
+ uint32_t access_flags = field.GetRawAccessFlags();
static_fields->emplace_back(access_flags, field_item);
}
- // Instance fields.
FieldItemVector* instance_fields = new FieldItemVector();
- for (; cdii.HasNextInstanceField(); cdii.Next()) {
- FieldId* field_item = header_->FieldIds()[cdii.GetMemberIndex()];
- uint32_t access_flags = cdii.GetRawMemberAccessFlags();
+ for (const ClassAccessor::Field& field : accessor.GetInstanceFields()) {
+ FieldId* field_item = header_->FieldIds()[field.GetIndex()];
+ uint32_t access_flags = field.GetRawAccessFlags();
instance_fields->emplace_back(access_flags, field_item);
}
// Direct methods.
MethodItemVector* direct_methods = new MethodItemVector();
- for (; cdii.HasNextDirectMethod(); cdii.Next()) {
- direct_methods->push_back(GenerateMethodItem(dex_file, cdii));
+ auto direct_methods_it = accessor.GetDirectMethods();
+ for (auto it = direct_methods_it.begin(); it != direct_methods_it.end(); ++it) {
+ direct_methods->push_back(GenerateMethodItem(dex_file, *it));
}
// Virtual methods.
MethodItemVector* virtual_methods = new MethodItemVector();
- for (; cdii.HasNextVirtualMethod(); cdii.Next()) {
- virtual_methods->push_back(GenerateMethodItem(dex_file, cdii));
+ auto virtual_methods_it = accessor.GetVirtualMethods();
+ const uint8_t* last_data_ptr;
+ for (auto it = virtual_methods_it.begin(); ; ++it) {
+ if (it == virtual_methods_it.end()) {
+ last_data_ptr = it->GetDataPointer();
+ break;
+ }
+ virtual_methods->push_back(GenerateMethodItem(dex_file, *it));
}
class_data = class_datas_map_.CreateAndAddItem(header_->ClassDatas(),
eagerly_assign_offsets_,
@@ -932,7 +939,7 @@ ClassData* BuilderMaps::CreateClassData(
instance_fields,
direct_methods,
virtual_methods);
- class_data->SetSize(cdii.EndDataPointer() - encoded_data);
+ class_data->SetSize(last_data_ptr - dex_file.GetClassData(class_def));
}
return class_data;
}
@@ -1168,16 +1175,17 @@ void BuilderMaps::ReadEncodedValue(const DexFile& dex_file,
}
}
-MethodItem BuilderMaps::GenerateMethodItem(const DexFile& dex_file, ClassDataItemIterator& cdii) {
- MethodId* method_id = header_->MethodIds()[cdii.GetMemberIndex()];
- uint32_t access_flags = cdii.GetRawMemberAccessFlags();
- const DexFile::CodeItem* disk_code_item = cdii.GetMethodCodeItem();
+MethodItem BuilderMaps::GenerateMethodItem(const DexFile& dex_file,
+ const ClassAccessor::Method& method) {
+ MethodId* method_id = header_->MethodIds()[method.GetIndex()];
+ uint32_t access_flags = method.GetRawAccessFlags();
+ const DexFile::CodeItem* disk_code_item = method.GetCodeItem();
// Temporary hack to prevent incorrectly deduping code items if they have the same offset since
// they may have different debug info streams.
CodeItem* code_item = DedupeOrCreateCodeItem(dex_file,
disk_code_item,
- cdii.GetMethodCodeItemOffset(),
- cdii.GetMemberIndex());
+ method.GetCodeItemOffset(),
+ method.GetIndex());
return MethodItem(access_flags, method_id, code_item);
}
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
index 2b1352db16..a20930b28b 100644
--- a/dexlayout/dexlayout_test.cc
+++ b/dexlayout/dexlayout_test.cc
@@ -26,6 +26,7 @@
#include "common_runtime_test.h"
#include "dex/art_dex_file_loader.h"
#include "dex/base64_test_util.h"
+#include "dex/class_accessor-inl.h"
#include "dex/code_item_accessors-inl.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_loader.h"
@@ -682,16 +683,9 @@ TEST_F(DexLayoutTest, CodeItemOverrun) {
MutateDexFile(temp_dex.GetFile(), GetTestDexFileName("ManyMethods"), [] (DexFile* dex) {
bool mutated_successfully = false;
// Change the dex instructions to make an opcode that spans past the end of the code item.
- for (size_t i = 0; i < dex->NumClassDefs(); ++i) {
- const DexFile::ClassDef& def = dex->GetClassDef(i);
- const uint8_t* data = dex->GetClassData(def);
- if (data == nullptr) {
- continue;
- }
- ClassDataItemIterator it(*dex, data);
- it.SkipAllFields();
- while (it.HasNextMethod()) {
- DexFile::CodeItem* item = const_cast<DexFile::CodeItem*>(it.GetMethodCodeItem());
+ for (ClassAccessor accessor : dex->GetClasses()) {
+ for (const ClassAccessor::Method& method : accessor.GetMethods()) {
+ DexFile::CodeItem* item = const_cast<DexFile::CodeItem*>(method.GetCodeItem());
if (item != nullptr) {
CodeItemInstructionAccessor instructions(*dex, item);
if (instructions.begin() != instructions.end()) {
@@ -714,7 +708,6 @@ TEST_F(DexLayoutTest, CodeItemOverrun) {
}
}
}
- it.Next();
}
}
CHECK(mutated_successfully)
diff --git a/libartbase/base/bit_memory_region.h b/libartbase/base/bit_memory_region.h
index 7d8de399b9..5668b6cd79 100644
--- a/libartbase/base/bit_memory_region.h
+++ b/libartbase/base/bit_memory_region.h
@@ -29,47 +29,43 @@ namespace art {
class BitMemoryRegion FINAL : public ValueObject {
public:
struct Less {
- constexpr bool operator()(const BitMemoryRegion& lhs, const BitMemoryRegion& rhs) const {
- if (lhs.size_in_bits() != rhs.size_in_bits()) {
- return lhs.size_in_bits() < rhs.size_in_bits();
- }
- size_t bit = 0;
- constexpr size_t kNumBits = BitSizeOf<uint32_t>();
- for (; bit + kNumBits <= lhs.size_in_bits(); bit += kNumBits) {
- uint32_t lhs_bits = lhs.LoadBits(bit, kNumBits);
- uint32_t rhs_bits = rhs.LoadBits(bit, kNumBits);
- if (lhs_bits != rhs_bits) {
- return lhs_bits < rhs_bits;
- }
- }
- size_t num_bits = lhs.size_in_bits() - bit;
- return lhs.LoadBits(bit, num_bits) < rhs.LoadBits(bit, num_bits);
+ bool operator()(const BitMemoryRegion& lhs, const BitMemoryRegion& rhs) const {
+ return Compare(lhs, rhs) < 0;
}
};
BitMemoryRegion() = default;
- ALWAYS_INLINE BitMemoryRegion(void* data, size_t bit_start, size_t bit_size)
- : data_(reinterpret_cast<uintptr_t*>(AlignDown(data, sizeof(uintptr_t)))),
- bit_start_(bit_start + 8 * (reinterpret_cast<uintptr_t>(data) % sizeof(uintptr_t))),
- bit_size_(bit_size) {
+ ALWAYS_INLINE BitMemoryRegion(uint8_t* data, ssize_t bit_start, size_t bit_size) {
+ // Normalize the data pointer. Note that bit_start may be negative.
+ uint8_t* aligned_data = AlignDown(data + (bit_start >> kBitsPerByteLog2), sizeof(uintptr_t));
+ data_ = reinterpret_cast<uintptr_t*>(aligned_data);
+ bit_start_ = bit_start + kBitsPerByte * (data - aligned_data);
+ bit_size_ = bit_size;
+ DCHECK_LT(bit_start_, static_cast<size_t>(kBitsPerIntPtrT));
}
ALWAYS_INLINE explicit BitMemoryRegion(MemoryRegion region)
: BitMemoryRegion(region.begin(), /* bit_start */ 0, region.size_in_bits()) {
}
ALWAYS_INLINE BitMemoryRegion(MemoryRegion region, size_t bit_offset, size_t bit_length)
: BitMemoryRegion(region) {
- DCHECK_LE(bit_offset, bit_size_);
- DCHECK_LE(bit_length, bit_size_ - bit_offset);
- bit_start_ += bit_offset;
- bit_size_ = bit_length;
+ *this = Subregion(bit_offset, bit_length);
}
ALWAYS_INLINE bool IsValid() const { return data_ != nullptr; }
+ const uint8_t* data() const {
+ DCHECK_ALIGNED(bit_start_, kBitsPerByte);
+ return reinterpret_cast<const uint8_t*>(data_) + bit_start_ / kBitsPerByte;
+ }
+
size_t size_in_bits() const {
return bit_size_;
}
+ void Resize(size_t bit_size) {
+ bit_size_ = bit_size;
+ }
+
ALWAYS_INLINE BitMemoryRegion Subregion(size_t bit_offset, size_t bit_length) const {
DCHECK_LE(bit_offset, bit_size_);
DCHECK_LE(bit_length, bit_size_ - bit_offset);
@@ -79,12 +75,11 @@ class BitMemoryRegion FINAL : public ValueObject {
return result;
}
- // Increase the size of the region and return the newly added range (starting at the old end).
- ALWAYS_INLINE BitMemoryRegion Extend(size_t bit_length) {
+ ALWAYS_INLINE BitMemoryRegion Subregion(size_t bit_offset) const {
+ DCHECK_LE(bit_offset, bit_size_);
BitMemoryRegion result = *this;
- result.bit_start_ += result.bit_size_;
- result.bit_size_ = bit_length;
- bit_size_ += bit_length;
+ result.bit_start_ += bit_offset;
+ result.bit_size_ -= bit_offset;
return result;
}
@@ -183,10 +178,26 @@ class BitMemoryRegion FINAL : public ValueObject {
return count;
}
- ALWAYS_INLINE bool Equals(const BitMemoryRegion& other) const {
- return data_ == other.data_ &&
- bit_start_ == other.bit_start_ &&
- bit_size_ == other.bit_size_;
+ static int Compare(const BitMemoryRegion& lhs, const BitMemoryRegion& rhs) {
+ if (lhs.size_in_bits() != rhs.size_in_bits()) {
+ return (lhs.size_in_bits() < rhs.size_in_bits()) ? -1 : 1;
+ }
+ size_t bit = 0;
+ constexpr size_t kNumBits = BitSizeOf<uint32_t>();
+ for (; bit + kNumBits <= lhs.size_in_bits(); bit += kNumBits) {
+ uint32_t lhs_bits = lhs.LoadBits(bit, kNumBits);
+ uint32_t rhs_bits = rhs.LoadBits(bit, kNumBits);
+ if (lhs_bits != rhs_bits) {
+ return (lhs_bits < rhs_bits) ? -1 : 1;
+ }
+ }
+ size_t num_bits = lhs.size_in_bits() - bit;
+ uint32_t lhs_bits = lhs.LoadBits(bit, num_bits);
+ uint32_t rhs_bits = rhs.LoadBits(bit, num_bits);
+ if (lhs_bits != rhs_bits) {
+ return (lhs_bits < rhs_bits) ? -1 : 1;
+ }
+ return 0;
}
private:
@@ -196,30 +207,49 @@ class BitMemoryRegion FINAL : public ValueObject {
size_t bit_size_ = 0;
};
+constexpr uint32_t kVarintHeaderBits = 4;
+constexpr uint32_t kVarintSmallValue = 11; // Maximum value which is stored as-is.
+
class BitMemoryReader {
public:
- explicit BitMemoryReader(const uint8_t* data, size_t bit_offset = 0)
- : finished_region_(const_cast<uint8_t*>(data), /* bit_start */ 0, bit_offset) {
- DCHECK_EQ(GetBitOffset(), bit_offset);
+ BitMemoryReader(BitMemoryReader&&) = default;
+ explicit BitMemoryReader(BitMemoryRegion data)
+ : finished_region_(data.Subregion(0, 0) /* set the length to zero */ ) {
+ }
+ explicit BitMemoryReader(const uint8_t* data, ssize_t bit_offset = 0)
+ : finished_region_(const_cast<uint8_t*>(data), bit_offset, /* bit_length */ 0) {
}
- size_t GetBitOffset() const { return finished_region_.size_in_bits(); }
+ const uint8_t* data() const { return finished_region_.data(); }
- ALWAYS_INLINE BitMemoryRegion Skip(size_t bit_length) {
- return finished_region_.Extend(bit_length);
- }
+ BitMemoryRegion GetReadRegion() const { return finished_region_; }
+
+ size_t NumberOfReadBits() const { return finished_region_.size_in_bits(); }
- // Get the most recently read bits.
- ALWAYS_INLINE BitMemoryRegion Tail(size_t bit_length) {
- return finished_region_.Subregion(finished_region_.size_in_bits() - bit_length, bit_length);
+ ALWAYS_INLINE BitMemoryRegion ReadRegion(size_t bit_length) {
+ size_t bit_offset = finished_region_.size_in_bits();
+ finished_region_.Resize(bit_offset + bit_length);
+ return finished_region_.Subregion(bit_offset, bit_length);
}
ALWAYS_INLINE uint32_t ReadBits(size_t bit_length) {
- return finished_region_.Extend(bit_length).LoadBits(0, bit_length);
+ return ReadRegion(bit_length).LoadBits(/* bit_offset */ 0, bit_length);
}
ALWAYS_INLINE bool ReadBit() {
- return finished_region_.Extend(1).LoadBit(0);
+ return ReadRegion(/* bit_length */ 1).LoadBit(/* bit_offset */ 0);
+ }
+
+ // Read variable-length bit-packed integer.
+ // The first four bits determine the variable length of the encoded integer:
+ // Values 0..11 represent the result as-is, with no further following bits.
+ // Values 12..15 mean the result is in the next 8/16/24/32-bits respectively.
+ ALWAYS_INLINE uint32_t ReadVarint() {
+ uint32_t x = ReadBits(kVarintHeaderBits);
+ if (x > kVarintSmallValue) {
+ x = ReadBits((x - kVarintSmallValue) * kBitsPerByte);
+ }
+ return x;
}
private:
@@ -234,36 +264,58 @@ template<typename Vector>
class BitMemoryWriter {
public:
explicit BitMemoryWriter(Vector* out, size_t bit_offset = 0)
- : out_(out), bit_offset_(bit_offset) {
- DCHECK_EQ(GetBitOffset(), bit_offset);
+ : out_(out), bit_start_(bit_offset), bit_offset_(bit_offset) {
+ DCHECK_EQ(NumberOfWrittenBits(), 0u);
+ }
+
+ BitMemoryRegion GetWrittenRegion() const {
+ return BitMemoryRegion(out_->data(), bit_start_, bit_offset_ - bit_start_);
}
const uint8_t* data() const { return out_->data(); }
- size_t GetBitOffset() const { return bit_offset_; }
+ size_t NumberOfWrittenBits() const { return bit_offset_ - bit_start_; }
ALWAYS_INLINE BitMemoryRegion Allocate(size_t bit_length) {
out_->resize(BitsToBytesRoundUp(bit_offset_ + bit_length));
- BitMemoryRegion region(MemoryRegion(out_->data(), out_->size()), bit_offset_, bit_length);
+ BitMemoryRegion region(out_->data(), bit_offset_, bit_length);
DCHECK_LE(bit_length, std::numeric_limits<size_t>::max() - bit_offset_) << "Overflow";
bit_offset_ += bit_length;
return region;
}
+ ALWAYS_INLINE void WriteRegion(const BitMemoryRegion& region) {
+ Allocate(region.size_in_bits()).StoreBits(/* bit_offset */ 0, region, region.size_in_bits());
+ }
+
ALWAYS_INLINE void WriteBits(uint32_t value, size_t bit_length) {
- Allocate(bit_length).StoreBits(0, value, bit_length);
+ Allocate(bit_length).StoreBits(/* bit_offset */ 0, value, bit_length);
}
ALWAYS_INLINE void WriteBit(bool value) {
- Allocate(1).StoreBit(0, value);
+ Allocate(1).StoreBit(/* bit_offset */ 0, value);
}
- ALWAYS_INLINE void WriteRegion(const BitMemoryRegion& region) {
- Allocate(region.size_in_bits()).StoreBits(0, region, region.size_in_bits());
+ // Write variable-length bit-packed integer.
+ ALWAYS_INLINE void WriteVarint(uint32_t value) {
+ if (value <= kVarintSmallValue) {
+ WriteBits(value, kVarintHeaderBits);
+ } else {
+ uint32_t num_bits = RoundUp(MinimumBitsToStore(value), kBitsPerByte);
+ uint32_t header = kVarintSmallValue + num_bits / kBitsPerByte;
+ WriteBits(header, kVarintHeaderBits);
+ WriteBits(value, num_bits);
+ }
+ }
+
+ ALWAYS_INLINE void ByteAlign() {
+ size_t end = bit_start_ + bit_offset_;
+ bit_offset_ += RoundUp(end, kBitsPerByte) - end;
}
private:
Vector* out_;
+ size_t bit_start_;
size_t bit_offset_;
DISALLOW_COPY_AND_ASSIGN(BitMemoryWriter);
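The WriteVarint/ReadVarint pair above stores values 0..11 directly in the 4-bit header and otherwise spills the value into the next 1..4 whole bytes. Worked example: 300 needs 9 bits, rounded up to 16, so the header is 11 + 16/8 = 13 and the encoding takes 4 + 16 = 20 bits. A minimal standalone sketch of the same scheme, packing into one uint64_t for simplicity (local names, not the ART classes):

#include <cassert>
#include <cstdint>

constexpr uint32_t kHeaderBits = 4;
constexpr uint32_t kSmallValue = 11;  // 0..11 fit in the header itself.

static uint32_t MinBits(uint32_t v) {
  uint32_t n = 0;
  while (v != 0) { v >>= 1; ++n; }
  return n;
}

// Header goes in the low bits, payload (if any) right after it.
static uint64_t EncodeVarint(uint32_t value, uint32_t* num_bits) {
  if (value <= kSmallValue) {
    *num_bits = kHeaderBits;
    return value;
  }
  uint32_t payload_bits = ((MinBits(value) + 7) / 8) * 8;  // 8, 16, 24 or 32
  *num_bits = kHeaderBits + payload_bits;
  uint64_t header = kSmallValue + payload_bits / 8;        // 12..15
  return header | (static_cast<uint64_t>(value) << kHeaderBits);
}

static uint32_t DecodeVarint(uint64_t bits) {
  uint32_t header = bits & ((1u << kHeaderBits) - 1);
  if (header <= kSmallValue) {
    return header;
  }
  uint32_t payload_bits = (header - kSmallValue) * 8;
  return static_cast<uint32_t>((bits >> kHeaderBits) & ((1ull << payload_bits) - 1));
}

int main() {
  uint32_t n;
  assert(DecodeVarint(EncodeVarint(7, &n)) == 7 && n == 4);       // header only
  assert(DecodeVarint(EncodeVarint(300, &n)) == 300 && n == 20);  // header 13 + 16 bits
  return 0;
}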
diff --git a/libartbase/base/bit_memory_region_test.cc b/libartbase/base/bit_memory_region_test.cc
index b7546985a9..02623bf040 100644
--- a/libartbase/base/bit_memory_region_test.cc
+++ b/libartbase/base/bit_memory_region_test.cc
@@ -33,6 +33,24 @@ static void CheckBits(uint8_t* data,
}
}
+TEST(BitMemoryRegion, TestVarint) {
+ for (size_t start_bit_offset = 0; start_bit_offset <= 32; start_bit_offset++) {
+ uint32_t values[] = { 0, 1, 11, 12, 15, 16, 255, 256, 1u << 16, 1u << 24, ~1u, ~0u };
+ for (uint32_t value : values) {
+ std::vector<uint8_t> buffer;
+ BitMemoryWriter<std::vector<uint8_t>> writer(&buffer, start_bit_offset);
+ writer.WriteVarint(value);
+
+ BitMemoryReader reader(buffer.data(), start_bit_offset);
+ uint32_t result = reader.ReadVarint();
+ uint32_t upper_bound = RoundUp(MinimumBitsToStore(value), kBitsPerByte) + kVarintHeaderBits;
+ EXPECT_EQ(writer.NumberOfWrittenBits(), reader.NumberOfReadBits());
+ EXPECT_EQ(value, result);
+ EXPECT_GE(upper_bound, writer.NumberOfWrittenBits());
+ }
+ }
+}
+
TEST(BitMemoryRegion, TestBit) {
uint8_t data[sizeof(uint32_t) * 2];
for (size_t bit_offset = 0; bit_offset < 2 * sizeof(uint32_t) * kBitsPerByte; ++bit_offset) {
diff --git a/libartbase/base/bit_struct_detail.h b/libartbase/base/bit_struct_detail.h
index 68c2e4461f..60de1b68ef 100644
--- a/libartbase/base/bit_struct_detail.h
+++ b/libartbase/base/bit_struct_detail.h
@@ -85,7 +85,7 @@ struct HasUnderscoreField {
static constexpr FalseT Test(...);
public:
- static constexpr bool value = decltype(Test<T>(0))::value;
+ static constexpr bool value = decltype(Test<T>(nullptr))::value;
};
// Infer the type of the member of &T::M.
diff --git a/libartbase/base/bit_table.h b/libartbase/base/bit_table.h
index 1c7614b695..54e88618cb 100644
--- a/libartbase/base/bit_table.h
+++ b/libartbase/base/bit_table.h
@@ -33,34 +33,6 @@
namespace art {
-constexpr uint32_t kVarintHeaderBits = 4;
-constexpr uint32_t kVarintSmallValue = 11; // Maximum value which is stored as-is.
-
-// Load variable-length bit-packed integer from `data` starting at `bit_offset`.
-// The first four bits determine the variable length of the encoded integer:
-// Values 0..11 represent the result as-is, with no further following bits.
-// Values 12..15 mean the result is in the next 8/16/24/32-bits respectively.
-ALWAYS_INLINE static inline uint32_t DecodeVarintBits(BitMemoryReader& reader) {
- uint32_t x = reader.ReadBits(kVarintHeaderBits);
- if (x > kVarintSmallValue) {
- x = reader.ReadBits((x - kVarintSmallValue) * kBitsPerByte);
- }
- return x;
-}
-
-// Store variable-length bit-packed integer from `data` starting at `bit_offset`.
-template<typename Vector>
-ALWAYS_INLINE static inline void EncodeVarintBits(BitMemoryWriter<Vector>& out, uint32_t value) {
- if (value <= kVarintSmallValue) {
- out.WriteBits(value, kVarintHeaderBits);
- } else {
- uint32_t num_bits = RoundUp(MinimumBitsToStore(value), kBitsPerByte);
- uint32_t header = kVarintSmallValue + num_bits / kBitsPerByte;
- out.WriteBits(header, kVarintHeaderBits);
- out.WriteBits(value, num_bits);
- }
-}
-
// Generic purpose table of uint32_t values, which are tightly packed at bit level.
// It has its own header with the number of rows and the bit-widths of all columns.
// The values are accessible by (row, column). The value -1 is stored efficiently.
@@ -77,23 +49,21 @@ class BitTableBase {
ALWAYS_INLINE void Decode(BitMemoryReader& reader) {
// Decode row count and column sizes from the table header.
- size_t initial_bit_offset = reader.GetBitOffset();
- num_rows_ = DecodeVarintBits(reader);
+ num_rows_ = reader.ReadVarint();
if (num_rows_ != 0) {
column_offset_[0] = 0;
for (uint32_t i = 0; i < kNumColumns; i++) {
- size_t column_end = column_offset_[i] + DecodeVarintBits(reader);
+ size_t column_end = column_offset_[i] + reader.ReadVarint();
column_offset_[i + 1] = dchecked_integral_cast<uint16_t>(column_end);
}
}
- header_bit_size_ = reader.GetBitOffset() - initial_bit_offset;
// Record the region which contains the table data and skip past it.
- table_data_ = reader.Skip(num_rows_ * NumRowBits());
+ table_data_ = reader.ReadRegion(num_rows_ * NumRowBits());
}
ALWAYS_INLINE uint32_t Get(uint32_t row, uint32_t column = 0) const {
- DCHECK_NE(header_bit_size_, 0u) << "Table has not been loaded";
+ DCHECK(table_data_.IsValid()) << "Table has not been loaded";
DCHECK_LT(row, num_rows_);
DCHECK_LT(column, kNumColumns);
size_t offset = row * NumRowBits() + column_offset_[column];
@@ -101,7 +71,7 @@ class BitTableBase {
}
ALWAYS_INLINE BitMemoryRegion GetBitMemoryRegion(uint32_t row, uint32_t column = 0) const {
- DCHECK_NE(header_bit_size_, 0u) << "Table has not been loaded";
+ DCHECK(table_data_.IsValid()) << "Table has not been loaded";
DCHECK_LT(row, num_rows_);
DCHECK_LT(column, kNumColumns);
size_t offset = row * NumRowBits() + column_offset_[column];
@@ -118,16 +88,20 @@ class BitTableBase {
return column_offset_[column + 1] - column_offset_[column];
}
- size_t HeaderBitSize() const { return header_bit_size_; }
+ size_t DataBitSize() const { return table_data_.size_in_bits(); }
- size_t BitSize() const { return header_bit_size_ + table_data_.size_in_bits(); }
+ bool Equals(const BitTableBase& other) const {
+ return num_rows_ == other.num_rows_ &&
+ std::equal(column_offset_, column_offset_ + kNumColumns, other.column_offset_) &&
+ BitMemoryRegion::Compare(table_data_, other.table_data_) == 0;
+ }
protected:
BitMemoryRegion table_data_;
size_t num_rows_ = 0;
-
uint16_t column_offset_[kNumColumns + 1] = {};
- uint16_t header_bit_size_ = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(BitTableBase);
};
// Helper class which can be used to create BitTable accessors with named getters.
@@ -151,9 +125,10 @@ class BitTableAccessor {
}
// Helper macro to create constructors and per-table utilities in derived class.
-#define BIT_TABLE_HEADER() \
+#define BIT_TABLE_HEADER(NAME) \
using BitTableAccessor<kNumColumns>::BitTableAccessor; /* inherit constructors */ \
template<int COLUMN, int UNUSED /*needed to compile*/> struct ColumnName; \
+ static constexpr const char* kTableName = #NAME; \
// Helper macro to create named column accessors in derived class.
#define BIT_TABLE_COLUMN(COLUMN, NAME) \
@@ -176,12 +151,6 @@ static const char* const* GetBitTableColumnNamesImpl(std::index_sequence<Columns
return names;
}
-// Returns the names of all columns in the given accessor.
-template<typename Accessor>
-static const char* const* GetBitTableColumnNames() {
- return GetBitTableColumnNamesImpl<Accessor>(std::make_index_sequence<Accessor::kNumColumns>());
-}
-
// Wrapper which makes it easier to use named accessors for the individual rows.
template<typename Accessor>
class BitTable : public BitTableBase<Accessor::kNumColumns> {
@@ -239,6 +208,14 @@ class BitTable : public BitTableBase<Accessor::kNumColumns> {
ALWAYS_INLINE Accessor GetInvalidRow() const {
return Accessor(this, static_cast<uint32_t>(-1));
}
+
+ const char* GetName() const {
+ return Accessor::kTableName;
+ }
+
+ const char* const* GetColumnNames() const {
+ return GetBitTableColumnNamesImpl<Accessor>(std::make_index_sequence<Accessor::kNumColumns>());
+ }
};
template<typename Accessor>
@@ -376,15 +353,15 @@ class BitTableBuilderBase {
// Encode the stored data into a BitTable.
template<typename Vector>
void Encode(BitMemoryWriter<Vector>& out) const {
- size_t initial_bit_offset = out.GetBitOffset();
+ size_t initial_bit_offset = out.NumberOfWrittenBits();
std::array<uint32_t, kNumColumns> column_bits;
Measure(&column_bits);
- EncodeVarintBits(out, size());
+ out.WriteVarint(size());
if (size() != 0) {
// Write table header.
for (uint32_t c = 0; c < kNumColumns; c++) {
- EncodeVarintBits(out, column_bits[c]);
+ out.WriteVarint(column_bits[c]);
}
// Write table data.
@@ -398,7 +375,7 @@ class BitTableBuilderBase {
// Verify the written data.
if (kIsDebugBuild) {
BitTableBase<kNumColumns> table;
- BitMemoryReader reader(out.data(), initial_bit_offset);
+ BitMemoryReader reader(out.GetWrittenRegion().Subregion(initial_bit_offset));
table.Decode(reader);
DCHECK_EQ(size(), table.NumRows());
for (uint32_t c = 0; c < kNumColumns; c++) {
@@ -467,11 +444,11 @@ class BitmapTableBuilder {
// Encode the stored data into a BitTable.
template<typename Vector>
void Encode(BitMemoryWriter<Vector>& out) const {
- size_t initial_bit_offset = out.GetBitOffset();
+ size_t initial_bit_offset = out.NumberOfWrittenBits();
- EncodeVarintBits(out, size());
+ out.WriteVarint(size());
if (size() != 0) {
- EncodeVarintBits(out, max_num_bits_);
+ out.WriteVarint(max_num_bits_);
// Write table data.
for (MemoryRegion row : rows_) {
@@ -484,7 +461,7 @@ class BitmapTableBuilder {
// Verify the written data.
if (kIsDebugBuild) {
BitTableBase<1> table;
- BitMemoryReader reader(out.data(), initial_bit_offset);
+ BitMemoryReader reader(out.GetWrittenRegion().Subregion(initial_bit_offset));
table.Decode(reader);
DCHECK_EQ(size(), table.NumRows());
DCHECK_EQ(max_num_bits_, table.NumColumnBits(0));
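Per the Decode/Encode paths above, an encoded BitTable is the row count as a varint, then (for a non-empty table) one varint per column giving that column's bit width, then the rows packed back to back at NumRowBits() each. For example, a table with 4 rows and two columns of widths 5 and 3 takes three 4-bit header varints (4, 5 and 3 are all at most 11) plus 4 * (5 + 3) = 32 bits of row data, 44 bits in total.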
diff --git a/libartbase/base/bit_table_test.cc b/libartbase/base/bit_table_test.cc
index 2fd9052516..bf32dc6e00 100644
--- a/libartbase/base/bit_table_test.cc
+++ b/libartbase/base/bit_table_test.cc
@@ -26,22 +26,6 @@
namespace art {
-TEST(BitTableTest, TestVarint) {
- for (size_t start_bit_offset = 0; start_bit_offset <= 32; start_bit_offset++) {
- uint32_t values[] = { 0, 1, 11, 12, 15, 16, 255, 256, ~1u, ~0u };
- for (uint32_t value : values) {
- std::vector<uint8_t> buffer;
- BitMemoryWriter<std::vector<uint8_t>> writer(&buffer, start_bit_offset);
- EncodeVarintBits(writer, value);
-
- BitMemoryReader reader(buffer.data(), start_bit_offset);
- uint32_t result = DecodeVarintBits(reader);
- EXPECT_EQ(writer.GetBitOffset(), reader.GetBitOffset());
- EXPECT_EQ(value, result);
- }
- }
-}
-
TEST(BitTableTest, TestEmptyTable) {
MallocArenaPool pool;
ArenaStack arena_stack(&pool);
@@ -54,7 +38,7 @@ TEST(BitTableTest, TestEmptyTable) {
BitMemoryReader reader(buffer.data());
BitTableBase<1> table(reader);
- EXPECT_EQ(writer.GetBitOffset(), reader.GetBitOffset());
+ EXPECT_EQ(writer.NumberOfWrittenBits(), reader.NumberOfReadBits());
EXPECT_EQ(0u, table.NumRows());
}
@@ -75,7 +59,7 @@ TEST(BitTableTest, TestSingleColumnTable) {
BitMemoryReader reader(buffer.data());
BitTableBase<1> table(reader);
- EXPECT_EQ(writer.GetBitOffset(), reader.GetBitOffset());
+ EXPECT_EQ(writer.NumberOfWrittenBits(), reader.NumberOfReadBits());
EXPECT_EQ(4u, table.NumRows());
EXPECT_EQ(42u, table.Get(0));
EXPECT_EQ(kNoValue, table.Get(1));
@@ -98,7 +82,7 @@ TEST(BitTableTest, TestUnalignedTable) {
BitMemoryReader reader(buffer.data(), start_bit_offset);
BitTableBase<1> table(reader);
- EXPECT_EQ(writer.GetBitOffset(), reader.GetBitOffset());
+ EXPECT_EQ(writer.NumberOfWrittenBits(), reader.NumberOfReadBits());
EXPECT_EQ(1u, table.NumRows());
EXPECT_EQ(42u, table.Get(0));
}
@@ -119,7 +103,7 @@ TEST(BitTableTest, TestBigTable) {
BitMemoryReader reader(buffer.data());
BitTableBase<4> table(reader);
- EXPECT_EQ(writer.GetBitOffset(), reader.GetBitOffset());
+ EXPECT_EQ(writer.NumberOfWrittenBits(), reader.NumberOfReadBits());
EXPECT_EQ(2u, table.NumRows());
EXPECT_EQ(42u, table.Get(0, 0));
EXPECT_EQ(kNoValue, table.Get(0, 1));
@@ -169,7 +153,7 @@ TEST(BitTableTest, TestBitmapTable) {
BitMemoryReader reader(buffer.data());
BitTableBase<1> table(reader);
- EXPECT_EQ(writer.GetBitOffset(), reader.GetBitOffset());
+ EXPECT_EQ(writer.NumberOfWrittenBits(), reader.NumberOfReadBits());
for (auto it : indicies) {
uint64_t expected = it.first;
BitMemoryRegion actual = table.GetBitMemoryRegion(it.second);
diff --git a/libartbase/base/mem_map_fuchsia.cc b/libartbase/base/mem_map_fuchsia.cc
index db31efb1c0..d1c92ce4d6 100644
--- a/libartbase/base/mem_map_fuchsia.cc
+++ b/libartbase/base/mem_map_fuchsia.cc
@@ -41,8 +41,8 @@ void MemMap::TargetMMapInit() {
ZX_INFO_VMAR,
&vmarinfo,
sizeof(vmarinfo),
- NULL,
- NULL), ZX_OK) << "could not find info from root vmar";
+ nullptr,
+ nullptr), ZX_OK) << "could not find info from root vmar";
uintptr_t lower_mem_start = FUCHSIA_LOWER_MEM_START - vmarinfo.base;
fuchsia_lowmem_size = FUCHSIA_LOWER_MEM_SIZE;
@@ -97,8 +97,8 @@ void* MemMap::TargetMMap(void* start, size_t len, int prot, int flags, int fd, o
ZX_INFO_VMAR,
&vmarinfo,
sizeof(vmarinfo),
- NULL,
- NULL);
+ nullptr,
+ nullptr);
if (status < 0 || reinterpret_cast<uintptr_t>(start) < vmarinfo.base) {
errno = EINVAL;
return MAP_FAILED;
diff --git a/libartbase/base/transform_iterator.h b/libartbase/base/transform_iterator.h
index 92655438f4..5b0574d0e4 100644
--- a/libartbase/base/transform_iterator.h
+++ b/libartbase/base/transform_iterator.h
@@ -71,7 +71,7 @@ class TransformIterator {
return *this;
}
- TransformIterator& operator++(int) {
+ TransformIterator operator++(int) {
TransformIterator tmp(*this);
++*this;
return tmp;
@@ -86,7 +86,7 @@ class TransformIterator {
return *this;
}
- TransformIterator& operator--(int) {
+ TransformIterator operator--(int) {
TransformIterator tmp(*this);
--*this;
return tmp;
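The postfix operators above previously declared a reference return type while returning the local copy tmp, which would dangle; returning by value is the standard pattern. A minimal illustration of that pattern (not ART code):

struct Counter {
  int value = 0;
  Counter& operator++() { ++value; return *this; }                      // prefix: advance in place
  Counter operator++(int) { Counter tmp(*this); ++*this; return tmp; }  // postfix: return the old value by value
};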
diff --git a/libdexfile/dex/class_accessor.h b/libdexfile/dex/class_accessor.h
index 896fcadb10..d40577f31f 100644
--- a/libdexfile/dex/class_accessor.h
+++ b/libdexfile/dex/class_accessor.h
@@ -31,7 +31,7 @@ class ClassIteratorData;
// Classes to access Dex data.
class ClassAccessor {
- private:
+ public:
class BaseItem {
public:
explicit BaseItem(const DexFile& dex_file,
@@ -61,6 +61,18 @@ class ClassAccessor {
return dex_file_;
}
+ const uint8_t* GetDataPointer() const {
+ return ptr_pos_;
+ }
+
+ bool MemberIsNative() const {
+ return GetRawAccessFlags() & kAccNative;
+ }
+
+ bool MemberIsFinal() const {
+ return GetRawAccessFlags() & kAccFinal;
+ }
+
protected:
// Internal data pointer for reading.
const DexFile& dex_file_;
@@ -69,7 +81,6 @@ class ClassAccessor {
uint32_t access_flags_ = 0u;
};
- public:
// A decoded version of the method of a class_data_item.
class Method : public BaseItem {
public:
@@ -346,6 +357,10 @@ class ClassAccessor {
return class_def_index_;
}
+ const DexFile::ClassDef& GetClassDef() const {
+ return dex_file_.GetClassDef(GetClassDefIndex());
+ }
+
protected:
// Template visitor to reduce copy paste for visiting elements.
// No thread safety analysis since the visitor may require capabilities.
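ClassAccessor replaces the manual SkipAllFields/HasNext/Next protocol of ClassDataItemIterator with range-based iteration over fields and methods. A sketch of typical use, assuming dex/class_accessor-inl.h is included and dex_file is a valid DexFile (the logging is illustrative):

void DumpNativeMethods(const DexFile& dex_file) {
  for (ClassAccessor accessor : dex_file.GetClasses()) {
    for (const ClassAccessor::Method& method : accessor.GetMethods()) {
      if (method.MemberIsNative()) {
        LOG(INFO) << dex_file.PrettyMethod(method.GetIndex(), /* with_signature */ true)
                  << " access_flags=0x" << std::hex << method.GetRawAccessFlags();
      }
    }
  }
}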
diff --git a/libdexfile/dex/code_item_accessors.h b/libdexfile/dex/code_item_accessors.h
index 5786d3f611..695cc7b1b2 100644
--- a/libdexfile/dex/code_item_accessors.h
+++ b/libdexfile/dex/code_item_accessors.h
@@ -80,7 +80,7 @@ class CodeItemInstructionAccessor {
uint32_t insns_size_in_code_units_ = 0;
// Pointer to the instructions, null if there is no code item.
- const uint16_t* insns_ = 0;
+ const uint16_t* insns_ = nullptr;
};
// Abstracts accesses to code item fields other than debug info for CompactDexFile and
diff --git a/libdexfile/dex/dex_file-inl.h b/libdexfile/dex/dex_file-inl.h
index 09668594dd..c512361586 100644
--- a/libdexfile/dex/dex_file-inl.h
+++ b/libdexfile/dex/dex_file-inl.h
@@ -204,26 +204,6 @@ inline bool Signature::operator==(const Signature& rhs) const {
return true;
}
-inline
-InvokeType ClassDataItemIterator::GetMethodInvokeType(const DexFile::ClassDef& class_def) const {
- if (HasNextDirectMethod()) {
- if ((GetRawMemberAccessFlags() & kAccStatic) != 0) {
- return kStatic;
- } else {
- return kDirect;
- }
- } else {
- DCHECK_EQ(GetRawMemberAccessFlags() & kAccStatic, 0U);
- if ((class_def.access_flags_ & kAccInterface) != 0) {
- return kInterface;
- } else if ((GetRawMemberAccessFlags() & kAccConstructor) != 0) {
- return kSuper;
- } else {
- return kVirtual;
- }
- }
-}
-
template<typename NewLocalCallback, typename IndexToStringData, typename TypeIndexToStringData>
bool DexFile::DecodeDebugLocalInfo(const uint8_t* stream,
const std::string& location,
@@ -518,18 +498,6 @@ inline const uint8_t* DexFile::GetCatchHandlerData(const DexInstructionIterator&
return handler_data + offset;
}
-template <typename Visitor>
-inline void DexFile::ClassDef::VisitMethods(const DexFile* dex_file, const Visitor& visitor) const {
- const uint8_t* class_data = dex_file->GetClassData(*this);
- if (class_data != nullptr) {
- ClassDataItemIterator it(*dex_file, class_data);
- it.SkipAllFields();
- for (; it.HasNext(); it.Next()) {
- visitor(it);
- }
- }
-}
-
inline IterationRange<ClassIterator> DexFile::GetClasses() const {
return { ClassIterator(*this, 0u), ClassIterator(*this, NumClassDefs()) };
}
diff --git a/libdexfile/dex/dex_file.cc b/libdexfile/dex/dex_file.cc
index f1f896058c..a2198b7c98 100644
--- a/libdexfile/dex/dex_file.cc
+++ b/libdexfile/dex/dex_file.cc
@@ -31,6 +31,7 @@
#include "base/enums.h"
#include "base/leb128.h"
#include "base/stl_util.h"
+#include "class_accessor-inl.h"
#include "descriptors_names.h"
#include "dex_file-inl.h"
#include "standard_dex_file.h"
@@ -219,21 +220,12 @@ const DexFile::ClassDef* DexFile::FindClassDef(dex::TypeIndex type_idx) const {
uint32_t DexFile::FindCodeItemOffset(const DexFile::ClassDef& class_def,
uint32_t method_idx) const {
- const uint8_t* class_data = GetClassData(class_def);
- CHECK(class_data != nullptr);
- ClassDataItemIterator it(*this, class_data);
- it.SkipAllFields();
- while (it.HasNextDirectMethod()) {
- if (it.GetMemberIndex() == method_idx) {
- return it.GetMethodCodeItemOffset();
+ ClassAccessor accessor(*this, class_def);
+ CHECK(accessor.HasClassData());
+ for (const ClassAccessor::Method& method : accessor.GetMethods()) {
+ if (method.GetIndex() == method_idx) {
+ return method.GetCodeItemOffset();
}
- it.Next();
- }
- while (it.HasNextVirtualMethod()) {
- if (it.GetMemberIndex() == method_idx) {
- return it.GetMethodCodeItemOffset();
- }
- it.Next();
}
LOG(FATAL) << "Unable to find method " << method_idx;
UNREACHABLE();
@@ -684,31 +676,6 @@ std::ostream& operator<<(std::ostream& os, const Signature& sig) {
return os << sig.ToString();
}
-// Decodes the header section from the class data bytes.
-void ClassDataItemIterator::ReadClassDataHeader() {
- CHECK(ptr_pos_ != nullptr);
- header_.static_fields_size_ = DecodeUnsignedLeb128(&ptr_pos_);
- header_.instance_fields_size_ = DecodeUnsignedLeb128(&ptr_pos_);
- header_.direct_methods_size_ = DecodeUnsignedLeb128(&ptr_pos_);
- header_.virtual_methods_size_ = DecodeUnsignedLeb128(&ptr_pos_);
-}
-
-void ClassDataItemIterator::ReadClassDataField() {
- field_.field_idx_delta_ = DecodeUnsignedLeb128(&ptr_pos_);
- field_.access_flags_ = DecodeUnsignedLeb128(&ptr_pos_);
- // The user of the iterator is responsible for checking if there
- // are unordered or duplicate indexes.
-}
-
-void ClassDataItemIterator::ReadClassDataMethod() {
- method_.method_idx_delta_ = DecodeUnsignedLeb128(&ptr_pos_);
- method_.access_flags_ = DecodeUnsignedLeb128(&ptr_pos_);
- method_.code_off_ = DecodeUnsignedLeb128(&ptr_pos_);
- if (last_idx_ != 0 && method_.method_idx_delta_ == 0) {
- LOG(WARNING) << "Duplicate method in " << dex_file_.GetLocation();
- }
-}
-
EncodedArrayValueIterator::EncodedArrayValueIterator(const DexFile& dex_file,
const uint8_t* array_data)
: dex_file_(dex_file),
diff --git a/libdexfile/dex/dex_file.h b/libdexfile/dex/dex_file.h
index 25cd2f4ddc..98787d1dd0 100644
--- a/libdexfile/dex/dex_file.h
+++ b/libdexfile/dex/dex_file.h
@@ -238,9 +238,6 @@ class DexFile {
}
}
- template <typename Visitor>
- void VisitMethods(const DexFile* dex_file, const Visitor& visitor) const;
-
private:
DISALLOW_COPY_AND_ASSIGN(ClassDef);
};
@@ -1174,211 +1171,6 @@ class Signature : public ValueObject {
};
std::ostream& operator<<(std::ostream& os, const Signature& sig);
-// Iterate and decode class_data_item
-class ClassDataItemIterator {
- public:
- ClassDataItemIterator(const DexFile& dex_file, const uint8_t* raw_class_data_item)
- : dex_file_(dex_file), pos_(0), ptr_pos_(raw_class_data_item), last_idx_(0) {
- ReadClassDataHeader();
- if (EndOfInstanceFieldsPos() > 0) {
- ReadClassDataField();
- } else if (EndOfVirtualMethodsPos() > 0) {
- ReadClassDataMethod();
- }
- }
- uint32_t NumStaticFields() const {
- return header_.static_fields_size_;
- }
- uint32_t NumInstanceFields() const {
- return header_.instance_fields_size_;
- }
- uint32_t NumDirectMethods() const {
- return header_.direct_methods_size_;
- }
- uint32_t NumVirtualMethods() const {
- return header_.virtual_methods_size_;
- }
- bool IsAtMethod() const {
- return pos_ >= EndOfInstanceFieldsPos();
- }
- bool IsAtVirtualMethod() const {
- return pos_ >= EndOfDirectMethodsPos();
- }
- bool HasNextStaticField() const {
- return pos_ < EndOfStaticFieldsPos();
- }
- bool HasNextInstanceField() const {
- return pos_ >= EndOfStaticFieldsPos() && pos_ < EndOfInstanceFieldsPos();
- }
- bool HasNextDirectMethod() const {
- return pos_ >= EndOfInstanceFieldsPos() && pos_ < EndOfDirectMethodsPos();
- }
- bool HasNextVirtualMethod() const {
- return pos_ >= EndOfDirectMethodsPos() && pos_ < EndOfVirtualMethodsPos();
- }
- bool HasNextMethod() const {
- const bool result = pos_ >= EndOfInstanceFieldsPos() && pos_ < EndOfVirtualMethodsPos();
- DCHECK_EQ(result, HasNextDirectMethod() || HasNextVirtualMethod());
- return result;
- }
- void SkipStaticFields() {
- while (HasNextStaticField()) {
- Next();
- }
- }
- void SkipInstanceFields() {
- while (HasNextInstanceField()) {
- Next();
- }
- }
- void SkipAllFields() {
- SkipStaticFields();
- SkipInstanceFields();
- }
- void SkipDirectMethods() {
- while (HasNextDirectMethod()) {
- Next();
- }
- }
- void SkipVirtualMethods() {
- while (HasNextVirtualMethod()) {
- Next();
- }
- }
- bool HasNext() const {
- return pos_ < EndOfVirtualMethodsPos();
- }
- inline void Next() {
- pos_++;
- if (pos_ < EndOfStaticFieldsPos()) {
- last_idx_ = GetMemberIndex();
- ReadClassDataField();
- } else if (pos_ == EndOfStaticFieldsPos() && NumInstanceFields() > 0) {
- last_idx_ = 0; // transition to next array, reset last index
- ReadClassDataField();
- } else if (pos_ < EndOfInstanceFieldsPos()) {
- last_idx_ = GetMemberIndex();
- ReadClassDataField();
- } else if (pos_ == EndOfInstanceFieldsPos() && NumDirectMethods() > 0) {
- last_idx_ = 0; // transition to next array, reset last index
- ReadClassDataMethod();
- } else if (pos_ < EndOfDirectMethodsPos()) {
- last_idx_ = GetMemberIndex();
- ReadClassDataMethod();
- } else if (pos_ == EndOfDirectMethodsPos() && NumVirtualMethods() > 0) {
- last_idx_ = 0; // transition to next array, reset last index
- ReadClassDataMethod();
- } else if (pos_ < EndOfVirtualMethodsPos()) {
- last_idx_ = GetMemberIndex();
- ReadClassDataMethod();
- } else {
- DCHECK(!HasNext());
- }
- }
- uint32_t GetMemberIndex() const {
- if (pos_ < EndOfInstanceFieldsPos()) {
- return last_idx_ + field_.field_idx_delta_;
- } else {
- DCHECK_LT(pos_, EndOfVirtualMethodsPos());
- return last_idx_ + method_.method_idx_delta_;
- }
- }
- uint32_t GetRawMemberAccessFlags() const {
- if (pos_ < EndOfInstanceFieldsPos()) {
- return field_.access_flags_;
- } else {
- DCHECK_LT(pos_, EndOfVirtualMethodsPos());
- return method_.access_flags_;
- }
- }
- uint32_t GetFieldAccessFlags() const {
- return GetMemberAccessFlags() & kAccValidFieldFlags;
- }
- uint32_t GetMethodAccessFlags() const {
- return GetMemberAccessFlags() & kAccValidMethodFlags;
- }
- uint32_t GetMemberAccessFlags() const {
- return HiddenApiAccessFlags::RemoveFromDex(GetRawMemberAccessFlags());
- }
- HiddenApiAccessFlags::ApiList DecodeHiddenAccessFlags() const {
- return HiddenApiAccessFlags::DecodeFromDex(GetRawMemberAccessFlags());
- }
- bool MemberIsNative() const {
- return GetRawMemberAccessFlags() & kAccNative;
- }
- bool MemberIsFinal() const {
- return GetRawMemberAccessFlags() & kAccFinal;
- }
- ALWAYS_INLINE InvokeType GetMethodInvokeType(const DexFile::ClassDef& class_def) const;
- const DexFile::CodeItem* GetMethodCodeItem() const {
- return dex_file_.GetCodeItem(method_.code_off_);
- }
- uint32_t GetMethodCodeItemOffset() const {
- return method_.code_off_;
- }
- const uint8_t* DataPointer() const {
- return ptr_pos_;
- }
- const uint8_t* EndDataPointer() const {
- CHECK(!HasNext());
- return ptr_pos_;
- }
-
- private:
- // A dex file's class_data_item is leb128 encoded, this structure holds a decoded form of the
- // header for a class_data_item
- struct ClassDataHeader {
- uint32_t static_fields_size_; // the number of static fields
- uint32_t instance_fields_size_; // the number of instance fields
- uint32_t direct_methods_size_; // the number of direct methods
- uint32_t virtual_methods_size_; // the number of virtual methods
- } header_;
-
- // Read and decode header from a class_data_item stream into header
- void ReadClassDataHeader();
-
- uint32_t EndOfStaticFieldsPos() const {
- return header_.static_fields_size_;
- }
- uint32_t EndOfInstanceFieldsPos() const {
- return EndOfStaticFieldsPos() + header_.instance_fields_size_;
- }
- uint32_t EndOfDirectMethodsPos() const {
- return EndOfInstanceFieldsPos() + header_.direct_methods_size_;
- }
- uint32_t EndOfVirtualMethodsPos() const {
- return EndOfDirectMethodsPos() + header_.virtual_methods_size_;
- }
-
- // A decoded version of the field of a class_data_item
- struct ClassDataField {
- uint32_t field_idx_delta_; // delta of index into the field_ids array for FieldId
- uint32_t access_flags_; // access flags for the field
- ClassDataField() : field_idx_delta_(0), access_flags_(0) {}
- };
- ClassDataField field_;
-
- // Read and decode a field from a class_data_item stream into field
- void ReadClassDataField();
-
- // A decoded version of the method of a class_data_item
- struct ClassDataMethod {
- uint32_t method_idx_delta_; // delta of index into the method_ids array for MethodId
- uint32_t access_flags_;
- uint32_t code_off_;
- ClassDataMethod() : method_idx_delta_(0), access_flags_(0), code_off_(0) {}
- };
- ClassDataMethod method_;
-
- // Read and decode a method from a class_data_item stream into method
- void ReadClassDataMethod();
-
- const DexFile& dex_file_;
- size_t pos_; // integral number of items passed
- const uint8_t* ptr_pos_; // pointer into stream of class_data_item
- uint32_t last_idx_; // last read field or method index to apply delta to
-};
-
class EncodedArrayValueIterator {
public:
EncodedArrayValueIterator(const DexFile& dex_file, const uint8_t* array_data);
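The removed ClassDataItemIterator documented the on-disk class_data_item layout: four ULEB128 counts (static fields, instance fields, direct methods, virtual methods), then one (idx_delta, access_flags) pair per field and one (idx_delta, access_flags, code_off) triple per method, with member indices delta-encoded against the previous entry. A small self-contained decode of that layout for a class with a single direct method (example bytes, not from a real dex file):

#include <cstdint>
#include <cstdio>

static uint32_t ReadUleb128(const uint8_t** p) {
  uint32_t result = 0;
  int shift = 0;
  uint8_t byte;
  do {
    byte = *(*p)++;
    result |= static_cast<uint32_t>(byte & 0x7f) << shift;
    shift += 7;
  } while ((byte & 0x80) != 0);
  return result;
}

int main() {
  // 0 static fields, 0 instance fields, 1 direct method, 0 virtual methods;
  // the method: idx_delta = 2, access_flags = 0x1 (public), code_off = 0x70.
  const uint8_t class_data[] = {0, 0, 1, 0, 2, 0x01, 0x70};
  const uint8_t* p = class_data;
  uint32_t counts[4];
  for (uint32_t& c : counts) c = ReadUleb128(&p);
  // Fields would be decoded here; this example has none.
  uint32_t method_idx = 0;
  for (uint32_t i = 0; i < counts[2] + counts[3]; ++i) {
    method_idx += ReadUleb128(&p);  // delta-encoded member index
    uint32_t access_flags = ReadUleb128(&p);
    uint32_t code_off = ReadUleb128(&p);
    std::printf("method_idx=%u access_flags=0x%x code_off=0x%x\n",
                method_idx, access_flags, code_off);
  }
  return 0;
}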
diff --git a/libdexfile/dex/dex_file_verifier_test.cc b/libdexfile/dex/dex_file_verifier_test.cc
index 78b53a0bd4..a22a457dbe 100644
--- a/libdexfile/dex/dex_file_verifier_test.cc
+++ b/libdexfile/dex/dex_file_verifier_test.cc
@@ -25,6 +25,7 @@
#include "base/leb128.h"
#include "base/macros.h"
#include "base64_test_util.h"
+#include "class_accessor-inl.h"
#include "descriptors_names.h"
#include "dex_file-inl.h"
#include "dex_file_loader.h"
@@ -238,27 +239,10 @@ static const char kMethodFlagsTestDex[] =
static const uint8_t* FindMethodData(const DexFile* dex_file,
const char* name,
/*out*/ uint32_t* method_idx = nullptr) {
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(0);
- const uint8_t* class_data = dex_file->GetClassData(class_def);
-
- ClassDataItemIterator it(*dex_file, class_data);
-
- const uint8_t* trailing = class_data;
- // Need to manually decode the four entries. DataPointer() doesn't work for this, as the first
- // element has already been loaded into the iterator.
- DecodeUnsignedLeb128(&trailing);
- DecodeUnsignedLeb128(&trailing);
- DecodeUnsignedLeb128(&trailing);
- DecodeUnsignedLeb128(&trailing);
-
- // Skip all fields.
- while (it.HasNextStaticField() || it.HasNextInstanceField()) {
- trailing = it.DataPointer();
- it.Next();
- }
+ ClassAccessor accessor(*dex_file, dex_file->GetClassDef(0));
- while (it.HasNextMethod()) {
- uint32_t method_index = it.GetMemberIndex();
+ for (const ClassAccessor::Method& method : accessor.GetMethods()) {
+ uint32_t method_index = method.GetIndex();
dex::StringIndex name_index = dex_file->GetMethodId(method_index).name_idx_;
const DexFile::StringId& string_id = dex_file->GetStringId(name_index);
const char* str = dex_file->GetStringData(string_id);
@@ -266,12 +250,11 @@ static const uint8_t* FindMethodData(const DexFile* dex_file,
if (method_idx != nullptr) {
*method_idx = method_index;
}
- DecodeUnsignedLeb128(&trailing);
+ // Go back 2 lebs to the access flags.
+ const uint8_t* trailing = ReverseSearchUnsignedLeb128(method.GetDataPointer());
+ trailing = ReverseSearchUnsignedLeb128(trailing);
return trailing;
}
-
- trailing = it.DataPointer();
- it.Next();
}
return nullptr;
@@ -849,31 +832,17 @@ TEST_F(DexFileVerifierTest, MethodAccessFlagsInterfaces) {
// is to the access flags, so that the caller doesn't have to handle the leb128-encoded method-index
// delta.
static const uint8_t* FindFieldData(const DexFile* dex_file, const char* name) {
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(0);
- const uint8_t* class_data = dex_file->GetClassData(class_def);
-
- ClassDataItemIterator it(*dex_file, class_data);
+ ClassAccessor accessor(*dex_file, dex_file->GetClassDef(0));
- const uint8_t* trailing = class_data;
- // Need to manually decode the four entries. DataPointer() doesn't work for this, as the first
- // element has already been loaded into the iterator.
- DecodeUnsignedLeb128(&trailing);
- DecodeUnsignedLeb128(&trailing);
- DecodeUnsignedLeb128(&trailing);
- DecodeUnsignedLeb128(&trailing);
-
- while (it.HasNextStaticField() || it.HasNextInstanceField()) {
- uint32_t field_index = it.GetMemberIndex();
+ for (const ClassAccessor::Field& field : accessor.GetFields()) {
+ uint32_t field_index = field.GetIndex();
dex::StringIndex name_index = dex_file->GetFieldId(field_index).name_idx_;
const DexFile::StringId& string_id = dex_file->GetStringId(name_index);
const char* str = dex_file->GetStringData(string_id);
if (strcmp(name, str) == 0) {
- DecodeUnsignedLeb128(&trailing);
- return trailing;
+ // Go to the back of the access flags.
+ return ReverseSearchUnsignedLeb128(field.GetDataPointer());
}
-
- trailing = it.DataPointer();
- it.Next();
}
return nullptr;
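The tests above now locate the access-flags bytes by stepping backwards from the member's data pointer instead of tracking a trailing pointer on every iteration. A sketch of how a reverse unsigned-LEB128 search can work (an assumption about the helper's behaviour, not the ART implementation; it presumes the byte before the value is the final byte of a preceding value, i.e. has its continuation bit clear):

#include <cstdint>

// Given a pointer just past an unsigned LEB128 value, return a pointer to its first
// byte: the last byte of a value has bit 7 clear, every earlier byte has bit 7 set.
static const uint8_t* ReverseSearchLeb128Start(const uint8_t* end_ptr) {
  const uint8_t* ptr = end_ptr - 1;   // last byte of the value
  while ((ptr[-1] & 0x80) != 0) {     // the previous byte continues into this one
    --ptr;
  }
  return ptr;
}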
diff --git a/libdexfile/dex/modifiers.h b/libdexfile/dex/modifiers.h
index be82fff65c..38f8455b64 100644
--- a/libdexfile/dex/modifiers.h
+++ b/libdexfile/dex/modifiers.h
@@ -42,9 +42,8 @@ static constexpr uint32_t kAccEnum = 0x4000; // class, field, ic (1.5)
static constexpr uint32_t kAccJavaFlagsMask = 0xffff; // bits set from Java sources (low 16)
-// The following flags are used to insert hidden API access flags into boot
-// class path dex files. They are decoded by DexFile::ClassDataItemIterator and
-// removed from the access flags before used by the runtime.
+// The following flags are used to insert hidden API access flags into boot class path dex files.
+// They are decoded by ClassAccessor and removed from the access flags before used by the runtime.
static constexpr uint32_t kAccDexHiddenBit = 0x00000020; // field, method (not native)
static constexpr uint32_t kAccDexHiddenBitNative = 0x00000200; // method (native)
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 9d73879979..c04c50e027 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1578,7 +1578,7 @@ class OatDumper {
// The optimizing compiler outputs its CodeInfo data in the vmap table.
StackMapsHelper helper(oat_method.GetVmapTable(), instruction_set_);
if (AddStatsObject(oat_method.GetVmapTable())) {
- helper.GetCodeInfo().AddSizeStats(&stats_);
+ helper.GetCodeInfo().CollectSizeStats(oat_method.GetVmapTable(), &stats_);
}
const uint8_t* quick_native_pc = reinterpret_cast<const uint8_t*>(quick_code);
size_t offset = 0;
diff --git a/oatdump/oatdump_test.h b/oatdump/oatdump_test.h
index 2c28f06b2e..4ee510130b 100644
--- a/oatdump/oatdump_test.h
+++ b/oatdump/oatdump_test.h
@@ -171,7 +171,7 @@ class OatDumpTest : public CommonRuntimeTest {
// Code and dex code do not show up if list only.
expected_prefixes.push_back("DEX CODE:");
expected_prefixes.push_back("CODE:");
- expected_prefixes.push_back("InlineInfos");
+ expected_prefixes.push_back("InlineInfo");
}
if (mode == kModeArt) {
exec_argv.push_back("--image=" + core_art_location_);
diff --git a/openjdkjvm/OpenjdkJvm.cc b/openjdkjvm/OpenjdkJvm.cc
index df002b6efa..8d0200c346 100644
--- a/openjdkjvm/OpenjdkJvm.cc
+++ b/openjdkjvm/OpenjdkJvm.cc
@@ -187,7 +187,7 @@ JNIEXPORT int jio_fprintf(FILE* fp, const char* fmt, ...) {
}
JNIEXPORT int jio_vfprintf(FILE* fp, const char* fmt, va_list args) {
- assert(fp != NULL);
+ assert(fp != nullptr);
return vfprintf(fp, fmt, args);
}
@@ -203,7 +203,7 @@ JNIEXPORT void* JVM_FindLibraryEntry(void* handle, const char* name) {
JNIEXPORT jlong JVM_CurrentTimeMillis(JNIEnv* env ATTRIBUTE_UNUSED,
jclass clazz ATTRIBUTE_UNUSED) {
struct timeval tv;
- gettimeofday(&tv, (struct timezone *) NULL);
+ gettimeofday(&tv, (struct timezone *) nullptr);
jlong when = tv.tv_sec * 1000LL + tv.tv_usec / 1000;
return when;
}
@@ -319,8 +319,8 @@ JNIEXPORT jstring JVM_NativeLoad(JNIEnv* env,
jstring javaFilename,
jobject javaLoader) {
ScopedUtfChars filename(env, javaFilename);
- if (filename.c_str() == NULL) {
- return NULL;
+ if (filename.c_str() == nullptr) {
+ return nullptr;
}
std::string error_msg;
@@ -348,7 +348,7 @@ JNIEXPORT void JVM_SetThreadPriority(JNIEnv* env, jobject jthread, jint prio) {
art::ScopedObjectAccess soa(env);
art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
art::Thread* thread = art::Thread::FromManagedThread(soa, jthread);
- if (thread != NULL) {
+ if (thread != nullptr) {
thread->SetNativePriority(prio);
}
}
@@ -421,7 +421,7 @@ JNIEXPORT void JVM_SetNativeThreadName(JNIEnv* env, jobject jthread, jstring jav
art::SuspendReason::kInternal,
&timed_out);
}
- if (thread != NULL) {
+ if (thread != nullptr) {
{
art::ScopedObjectAccess soa(env);
thread->SetThreadName(name.c_str());
diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc
index 1476880f45..dd0428dfcf 100644
--- a/openjdkjvmti/ti_redefine.cc
+++ b/openjdkjvmti/ti_redefine.cc
@@ -368,17 +368,6 @@ jvmtiError Redefiner::RedefineClasses(ArtJvmTiEnv* env,
if (res != OK) {
return res;
}
- // We make a copy of the class_bytes to pass into the retransformation.
- // This makes cleanup easier (since we unambiguously own the bytes) and also is useful since we
- // will need to keep the original bytes around unaltered for subsequent RetransformClasses calls
- // to get the passed in bytes.
- unsigned char* class_bytes_copy = nullptr;
- res = env->Allocate(definitions[i].class_byte_count, &class_bytes_copy);
- if (res != OK) {
- return res;
- }
- memcpy(class_bytes_copy, definitions[i].class_bytes, definitions[i].class_byte_count);
-
ArtClassDefinition def;
res = def.Init(self, definitions[i]);
if (res != OK) {
diff --git a/openjdkjvmti/ti_timers.cc b/openjdkjvmti/ti_timers.cc
index 24fb0419ee..11b58c452e 100644
--- a/openjdkjvmti/ti_timers.cc
+++ b/openjdkjvmti/ti_timers.cc
@@ -83,7 +83,7 @@ jvmtiError TimerUtil::GetTime(jvmtiEnv* env ATTRIBUTE_UNUSED, jlong* nanos_ptr)
// No CLOCK_MONOTONIC support on older Mac OS.
struct timeval t;
t.tv_sec = t.tv_usec = 0;
- gettimeofday(&t, NULL);
+ gettimeofday(&t, nullptr);
*nanos_ptr = static_cast<jlong>(t.tv_sec)*1000000000LL + static_cast<jlong>(t.tv_usec)*1000LL;
#endif
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 96d7c9629d..c86baa1057 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -1036,8 +1036,8 @@ END \name
.endm
ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_static_storage, artInitializeStaticStorageFromCode
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_type, artInitializeTypeFromCode
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_resolve_type, artResolveTypeFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_type_and_verify_access, artResolveTypeAndVerifyAccessFromCode
ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_handle, artResolveMethodHandleFromCode
ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_type, artResolveMethodTypeFromCode
ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_string, artResolveStringFromCode
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 6b0de4848b..40a8dbc008 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1561,8 +1561,8 @@ TWO_ARG_REF_DOWNCALL art_quick_handle_fill_data, artHandleFillArrayDataFromCode,
* returned.
*/
ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_static_storage, artInitializeStaticStorageFromCode
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_type, artInitializeTypeFromCode
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_resolve_type, artResolveTypeFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_type_and_verify_access, artResolveTypeAndVerifyAccessFromCode
ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_handle, artResolveMethodHandleFromCode
ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_type, artResolveMethodTypeFromCode
ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_string, artResolveStringFromCode
@@ -2785,6 +2785,8 @@ READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg29, w29, x29
* the root register to IP0 and jumps to the customized entrypoint,
* art_quick_read_barrier_mark_introspection_gc_roots. The thunk also
* performs all the fast-path checks, so we need just the slow path.
+ * The UnsafeCASObject intrinsic also uses the GC root entrypoint, with a MOV
+ * instead of an LDR; the destination register is encoded in the same bits.
*
* The code structure is
* art_quick_read_barrier_mark_introspection:
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 2b69c1753b..05172dbe43 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -192,15 +192,15 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
qpoints->pCheckInstanceOf = art_quick_check_instance_of;
static_assert(!IsDirectEntrypoint(kQuickCheckInstanceOf), "Non-direct C stub marked direct.");
- // DexCache
+ // Resolution and initialization
qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage;
static_assert(!IsDirectEntrypoint(kQuickInitializeStaticStorage),
"Non-direct C stub marked direct.");
- qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access;
- static_assert(!IsDirectEntrypoint(kQuickInitializeTypeAndVerifyAccess),
+ qpoints->pResolveTypeAndVerifyAccess = art_quick_resolve_type_and_verify_access;
+ static_assert(!IsDirectEntrypoint(kQuickResolveTypeAndVerifyAccess),
"Non-direct C stub marked direct.");
- qpoints->pInitializeType = art_quick_initialize_type;
- static_assert(!IsDirectEntrypoint(kQuickInitializeType), "Non-direct C stub marked direct.");
+ qpoints->pResolveType = art_quick_resolve_type;
+ static_assert(!IsDirectEntrypoint(kQuickResolveType), "Non-direct C stub marked direct.");
qpoints->pResolveString = art_quick_resolve_string;
static_assert(!IsDirectEntrypoint(kQuickResolveString), "Non-direct C stub marked direct.");
qpoints->pResolveMethodHandle = art_quick_resolve_method_handle;
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 303333cd0e..b10d1fc849 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -2086,13 +2086,13 @@ ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_static_storage,
/*
* Entry from managed code when dex cache misses for a type_idx.
*/
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_type, artInitializeTypeFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_resolve_type, artResolveTypeFromCode
/*
* Entry from managed code when type_idx needs to be checked for access and dex cache may also
* miss.
*/
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_type_and_verify_access, artResolveTypeAndVerifyAccessFromCode
/*
* Called by managed code when the value in rSUSPEND has been decremented to 0.
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index f35cb16b03..ebf1d5b0b4 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -1986,13 +1986,13 @@ ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_static_storage,
/*
* Entry from managed code when dex cache misses for a type_idx.
*/
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_type, artInitializeTypeFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_resolve_type, artResolveTypeFromCode
/*
* Entry from managed code when type_idx needs to be checked for access and dex cache may also
* miss.
*/
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_type_and_verify_access, artResolveTypeAndVerifyAccessFromCode
/*
* Called by managed code when the value in rSUSPEND has been decremented to 0.
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 9fe41ca83b..b0bed56ce4 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1281,8 +1281,8 @@ GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_tlab, artAllocArrayFr
GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_64
ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_static_storage, artInitializeStaticStorageFromCode
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_type, artInitializeTypeFromCode
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_resolve_type, artResolveTypeFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_type_and_verify_access, artResolveTypeAndVerifyAccessFromCode
ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_handle, artResolveMethodHandleFromCode
ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_type, artResolveMethodTypeFromCode
ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_string, artResolveStringFromCode
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index c41d3e4685..a8a648f2ca 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1301,8 +1301,8 @@ DEFINE_FUNCTION art_quick_alloc_object_initialized_region_tlab
END_FUNCTION art_quick_alloc_object_initialized_region_tlab
ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_static_storage, artInitializeStaticStorageFromCode
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_type, artInitializeTypeFromCode
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_resolve_type, artResolveTypeFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_type_and_verify_access, artResolveTypeAndVerifyAccessFromCode
ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_handle, artResolveMethodHandleFromCode
ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_type, artResolveMethodTypeFromCode
ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_string, artResolveStringFromCode
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index ac22f07a34..f693524a6c 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -374,12 +374,50 @@ inline HiddenApiAccessFlags::ApiList ArtMethod::GetHiddenApiAccessFlags()
case Intrinsics::kSystemArrayCopyChar:
case Intrinsics::kStringGetCharsNoCheck:
case Intrinsics::kReferenceGetReferent:
+ case Intrinsics::kMemoryPeekByte:
+ case Intrinsics::kMemoryPokeByte:
+ case Intrinsics::kUnsafeCASInt:
+ case Intrinsics::kUnsafeCASLong:
+ case Intrinsics::kUnsafeCASObject:
+ case Intrinsics::kUnsafeGet:
+ case Intrinsics::kUnsafeGetAndAddInt:
+ case Intrinsics::kUnsafeGetAndAddLong:
+ case Intrinsics::kUnsafeGetAndSetInt:
+ case Intrinsics::kUnsafeGetAndSetLong:
+ case Intrinsics::kUnsafeGetAndSetObject:
+ case Intrinsics::kUnsafeGetLong:
+ case Intrinsics::kUnsafeGetLongVolatile:
+ case Intrinsics::kUnsafeGetObject:
+ case Intrinsics::kUnsafeGetObjectVolatile:
+ case Intrinsics::kUnsafeGetVolatile:
+ case Intrinsics::kUnsafePut:
+ case Intrinsics::kUnsafePutLong:
+ case Intrinsics::kUnsafePutLongOrdered:
+ case Intrinsics::kUnsafePutLongVolatile:
+ case Intrinsics::kUnsafePutObject:
+ case Intrinsics::kUnsafePutObjectOrdered:
+ case Intrinsics::kUnsafePutObjectVolatile:
+ case Intrinsics::kUnsafePutOrdered:
+ case Intrinsics::kUnsafePutVolatile:
+ case Intrinsics::kUnsafeLoadFence:
+ case Intrinsics::kUnsafeStoreFence:
+ case Intrinsics::kUnsafeFullFence:
// These intrinsics are on the light greylist and will fail a DCHECK in
// SetIntrinsic() if their flags change on the respective dex methods.
// Note that the DCHECK currently won't fail if the dex methods are
// whitelisted, e.g. in the core image (b/77733081). As a result, we
// might print warnings but we won't change the semantics.
return HiddenApiAccessFlags::kLightGreylist;
+ case Intrinsics::kStringNewStringFromBytes:
+ case Intrinsics::kStringNewStringFromChars:
+ case Intrinsics::kStringNewStringFromString:
+ case Intrinsics::kMemoryPeekIntNative:
+ case Intrinsics::kMemoryPeekLongNative:
+ case Intrinsics::kMemoryPeekShortNative:
+ case Intrinsics::kMemoryPokeIntNative:
+ case Intrinsics::kMemoryPokeLongNative:
+ case Intrinsics::kMemoryPokeShortNative:
+ return HiddenApiAccessFlags::kDarkGreylist;
case Intrinsics::kVarHandleFullFence:
case Intrinsics::kVarHandleAcquireFence:
case Intrinsics::kVarHandleReleaseFence:
@@ -475,7 +513,7 @@ inline void ArtMethod::SetIntrinsic(uint32_t intrinsic) {
// (b) only VarHandle intrinsics are blacklisted at the moment and they
// should not be used outside tests with disabled API checks.
if (hidden_api_flags != HiddenApiAccessFlags::kWhitelist) {
- DCHECK_EQ(hidden_api_flags, GetHiddenApiAccessFlags());
+ DCHECK_EQ(hidden_api_flags, GetHiddenApiAccessFlags()) << PrettyMethod();
}
} else {
SetAccessFlags(new_value);
diff --git a/runtime/entrypoints/quick/quick_default_externs.h b/runtime/entrypoints/quick/quick_default_externs.h
index 938489b730..aa32113eb8 100644
--- a/runtime/entrypoints/quick/quick_default_externs.h
+++ b/runtime/entrypoints/quick/quick_default_externs.h
@@ -33,10 +33,10 @@ class ArtMethod;
// Cast entrypoints.
extern "C" void art_quick_check_instance_of(art::mirror::Object*, art::mirror::Class*);
-// DexCache entrypoints.
-extern "C" void* art_quick_initialize_static_storage(uint32_t);
-extern "C" void* art_quick_initialize_type(uint32_t);
-extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t);
+// Resolution and initialization entrypoints.
+extern "C" void* art_quick_initialize_static_storage(art::mirror::Class*);
+extern "C" void* art_quick_resolve_type(uint32_t);
+extern "C" void* art_quick_resolve_type_and_verify_access(uint32_t);
extern "C" void* art_quick_resolve_method_handle(uint32_t);
extern "C" void* art_quick_resolve_method_type(uint32_t);
extern "C" void* art_quick_resolve_string(uint32_t);
diff --git a/runtime/entrypoints/quick/quick_default_init_entrypoints.h b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
index 5dcece4208..8e784c164c 100644
--- a/runtime/entrypoints/quick/quick_default_init_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
@@ -33,10 +33,10 @@ static void DefaultInitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qp
// Alloc
ResetQuickAllocEntryPoints(qpoints, /* is_marking */ true);
- // DexCache
+ // Resolution and initialization
qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage;
- qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access;
- qpoints->pInitializeType = art_quick_initialize_type;
+ qpoints->pResolveTypeAndVerifyAccess = art_quick_resolve_type_and_verify_access;
+ qpoints->pResolveType = art_quick_resolve_type;
qpoints->pResolveMethodHandle = art_quick_resolve_method_handle;
qpoints->pResolveMethodType = art_quick_resolve_method_type;
qpoints->pResolveString = art_quick_resolve_string;
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index 85d633f6a6..c4d85a3ef8 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -95,7 +95,7 @@ static inline void StoreTypeInBss(ArtMethod* outer_method,
static inline void StoreStringInBss(ArtMethod* outer_method,
dex::StringIndex string_idx,
ObjPtr<mirror::String> resolved_string)
- REQUIRES_SHARED(Locks::mutator_lock_) __attribute__((optnone)) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile* dex_file = outer_method->GetDexFile();
DCHECK(dex_file != nullptr);
const OatDexFile* oat_dex_file = dex_file->GetOatDexFile();
@@ -129,27 +129,25 @@ static ALWAYS_INLINE bool CanReferenceBss(ArtMethod* outer_method, ArtMethod* ca
return outer_method->GetDexFile() == caller->GetDexFile();
}
-extern "C" mirror::Class* artInitializeStaticStorageFromCode(uint32_t type_idx, Thread* self)
+extern "C" mirror::Class* artInitializeStaticStorageFromCode(mirror::Class* klass, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
// Called to ensure static storage base is initialized for direct static field reads and writes.
// A class may be accessing another class' fields when it doesn't have access, as access has been
// given by inheritance.
ScopedQuickEntrypointChecks sqec(self);
- auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(
- self, CalleeSaveType::kSaveEverythingForClinit);
- ArtMethod* caller = caller_and_outer.caller;
- ObjPtr<mirror::Class> result = ResolveVerifyAndClinit(dex::TypeIndex(type_idx),
- caller,
- self,
- /* can_run_clinit */ true,
- /* verify_access */ false);
- if (LIKELY(result != nullptr) && CanReferenceBss(caller_and_outer.outer_method, caller)) {
- StoreTypeInBss(caller_and_outer.outer_method, dex::TypeIndex(type_idx), result);
+ DCHECK(klass != nullptr);
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_klass = hs.NewHandle(klass);
+ bool success = class_linker->EnsureInitialized(
+ self, h_klass, /* can_init_fields */ true, /* can_init_parents */ true);
+ if (UNLIKELY(!success)) {
+ return nullptr;
}
- return result.Ptr();
+ return h_klass.Get();
}
-extern "C" mirror::Class* artInitializeTypeFromCode(uint32_t type_idx, Thread* self)
+extern "C" mirror::Class* artResolveTypeFromCode(uint32_t type_idx, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
// Called when the .bss slot was empty or for main-path runtime call.
ScopedQuickEntrypointChecks sqec(self);
@@ -167,7 +165,7 @@ extern "C" mirror::Class* artInitializeTypeFromCode(uint32_t type_idx, Thread* s
return result.Ptr();
}
-extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, Thread* self)
+extern "C" mirror::Class* artResolveTypeAndVerifyAccessFromCode(uint32_t type_idx, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
// Called when caller isn't guaranteed to have access to a type.
ScopedQuickEntrypointChecks sqec(self);
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index 4ce954c48a..42b680e05a 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -38,9 +38,9 @@
V(InstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*) \
V(CheckInstanceOf, void, mirror::Object*, mirror::Class*) \
\
- V(InitializeStaticStorage, void*, uint32_t) \
- V(InitializeTypeAndVerifyAccess, void*, uint32_t) \
- V(InitializeType, void*, uint32_t) \
+ V(InitializeStaticStorage, void*, mirror::Class*) \
+ V(ResolveTypeAndVerifyAccess, void*, uint32_t) \
+ V(ResolveType, void*, uint32_t) \
V(ResolveMethodHandle, void*, uint32_t) \
V(ResolveMethodType, void*, uint32_t) \
V(ResolveString, void*, uint32_t) \
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index c3cd793780..cb85804986 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -181,11 +181,11 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInstanceofNonTrivial, pCheckInstanceOf, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCheckInstanceOf, pInitializeStaticStorage,
sizeof(void*));
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInitializeStaticStorage, pInitializeTypeAndVerifyAccess,
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInitializeStaticStorage, pResolveTypeAndVerifyAccess,
sizeof(void*));
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInitializeTypeAndVerifyAccess, pInitializeType,
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pResolveTypeAndVerifyAccess, pResolveType,
sizeof(void*));
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInitializeType, pResolveMethodHandle, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pResolveType, pResolveMethodHandle, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pResolveMethodHandle, pResolveMethodType, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pResolveMethodType, pResolveString, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pResolveString, pSet8Instance, sizeof(void*));
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index c7f936fb11..22104a30fe 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -41,10 +41,10 @@ constexpr uint8_t CardTable::kCardDirty;
* non-null values to heap addresses should go through an entry in
* WriteBarrier, and from there to here.
*
- * The heap is divided into "cards" of GC_CARD_SIZE bytes, as
- * determined by GC_CARD_SHIFT. The card table contains one byte of
+ * The heap is divided into "cards" of `kCardSize` bytes, as
+ * determined by `kCardShift`. The card table contains one byte of
* data per card, to be used by the GC. The value of the byte will be
- * one of GC_CARD_CLEAN or GC_CARD_DIRTY.
+ * one of `kCardClean` or `kCardDirty`.
*
* After any store of a non-null object pointer into a heap object,
* code is obliged to mark the card dirty. The setters in
@@ -53,9 +53,9 @@ constexpr uint8_t CardTable::kCardDirty;
*
* The card table's base [the "biased card table"] gets set to a
* rather strange value. In order to keep the JIT from having to
- * fabricate or load GC_DIRTY_CARD to store into the card table,
+ * fabricate or load `kCardDirty` to store into the card table,
* biased base is within the mmap allocation at a point where its low
- * byte is equal to GC_DIRTY_CARD. See CardTable::Create for details.
+ * byte is equal to `kCardDirty`. See CardTable::Create for details.
*/
CardTable* CardTable::Create(const uint8_t* heap_begin, size_t heap_capacity) {
@@ -75,8 +75,8 @@ CardTable* CardTable::Create(const uint8_t* heap_begin, size_t heap_capacity) {
uint8_t* cardtable_begin = mem_map->Begin();
CHECK(cardtable_begin != nullptr);
- // We allocated up to a bytes worth of extra space to allow biased_begin's byte value to equal
- // kCardDirty, compute a offset value to make this the case
+ // We allocated up to a byte's worth of extra space to allow `biased_begin`'s byte value to equal
+ // `kCardDirty`; compute an offset value to make this the case.
size_t offset = 0;
uint8_t* biased_begin = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(cardtable_begin) -
(reinterpret_cast<uintptr_t>(heap_begin) >> kCardShift));
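To make the biased-base comment above concrete, here is a minimal, self-contained sketch of the card marking arithmetic. It is illustrative only: the constant values and helper names are assumptions, not the CardTable API.

#include <cstddef>
#include <cstdint>

namespace sketch {
// Assumed values; the real constants live in art::gc::accounting::CardTable.
constexpr size_t kCardShift = 10;      // CARD_TABLE_CARD_SHIFT is 0xa in the generated header below.
constexpr uint8_t kCardDirty = 0x70;   // Placeholder dirty-card value.

// biased_begin is chosen so that no heap-base subtraction is needed here and so
// that its low byte equals kCardDirty (the value the JIT stores into the card).
inline uint8_t* CardFromAddr(uint8_t* biased_begin, const void* addr) {
  return biased_begin + (reinterpret_cast<uintptr_t>(addr) >> kCardShift);
}

inline void MarkCard(uint8_t* biased_begin, const void* addr) {
  *CardFromAddr(biased_begin, addr) = kCardDirty;
}
}  // namespace sketch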
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index f3548f7ce5..b8520b7dc0 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -56,7 +56,7 @@ class CardTable {
static CardTable* Create(const uint8_t* heap_begin, size_t heap_capacity);
~CardTable();
- // Set the card associated with the given address to GC_CARD_DIRTY.
+ // Set the card associated with the given address to `kCardDirty`.
ALWAYS_INLINE void MarkCard(const void *addr) {
*CardFromAddr(addr) = kCardDirty;
}
@@ -84,8 +84,8 @@ class CardTable {
}
}
- // Returns a value that when added to a heap address >> GC_CARD_SHIFT will address the appropriate
- // card table byte. For convenience this value is cached in every Thread
+ // Returns a value that, when added to a heap address >> `kCardShift`, will address the
+ // appropriate card table byte. For convenience this value is cached in every Thread.
uint8_t* GetBiasedBegin() const {
return biased_begin_;
}
@@ -148,7 +148,7 @@ class CardTable {
// Value used to compute card table addresses from object addresses, see GetBiasedBegin
uint8_t* const biased_begin_;
// Card table doesn't begin at the beginning of the mem_map_, instead it is displaced by offset
- // to allow the byte value of biased_begin_ to equal GC_CARD_DIRTY
+ // to allow the byte value of `biased_begin_` to equal `kCardDirty`.
const size_t offset_;
DISALLOW_IMPLICIT_CONSTRUCTORS(CardTable);
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index edaa043ce6..b03f67152b 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -53,9 +53,9 @@ static constexpr size_t kDefaultGcMarkStackSize = 2 * MB;
// If kFilterModUnionCards then we attempt to filter cards that don't need to be dirty in the mod
// union table. Disabled since it does not seem to help the pause much.
static constexpr bool kFilterModUnionCards = kIsDebugBuild;
-// If kDisallowReadBarrierDuringScan is true then the GC aborts if there are any that occur during
-// ConcurrentCopying::Scan. May be used to diagnose possibly unnecessary read barriers.
-// Only enabled for kIsDebugBuild to avoid performance hit.
+// If kDisallowReadBarrierDuringScan is true then the GC aborts if any read barriers occur during
+// ConcurrentCopying::Scan in the GC thread. May be used to diagnose possibly unnecessary read
+// barriers. Only enabled for kIsDebugBuild to avoid a performance hit.
static constexpr bool kDisallowReadBarrierDuringScan = kIsDebugBuild;
// Slow path mark stack size, increase this if the stack is getting full and it is causing
// performance problems.
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 0965560e2c..6936fdc6d4 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -247,7 +247,7 @@ void MallocSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* ar
}
}
// Use a bulk free, that merges consecutive objects before freeing or free per object?
- // Documentation suggests better free performance with merging, but this may be at the expensive
+ // Documentation suggests better free performance with merging, but this may be at the expense
// of allocation.
context->freed.objects += num_ptrs;
context->freed.bytes += space->FreeList(self, num_ptrs, ptrs);
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index c6ec174a13..436eb2c09b 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -247,14 +247,14 @@ inline mirror::Object* RegionSpace::AllocLarge(size_t num_bytes,
/* out */ size_t* bytes_tl_bulk_allocated) {
DCHECK_ALIGNED(num_bytes, kAlignment);
DCHECK_GT(num_bytes, kRegionSize);
- size_t num_regs = RoundUp(num_bytes, kRegionSize) / kRegionSize;
- DCHECK_GT(num_regs, 0U);
- DCHECK_LT((num_regs - 1) * kRegionSize, num_bytes);
- DCHECK_LE(num_bytes, num_regs * kRegionSize);
+ size_t num_regs_in_large_region = RoundUp(num_bytes, kRegionSize) / kRegionSize;
+ DCHECK_GT(num_regs_in_large_region, 0U);
+ DCHECK_LT((num_regs_in_large_region - 1) * kRegionSize, num_bytes);
+ DCHECK_LE(num_bytes, num_regs_in_large_region * kRegionSize);
MutexLock mu(Thread::Current(), region_lock_);
if (!kForEvac) {
// Retain sufficient free regions for full evacuation.
- if ((num_non_free_regions_ + num_regs) * 2 > num_regions_) {
+ if ((num_non_free_regions_ + num_regs_in_large_region) * 2 > num_regions_) {
return nullptr;
}
}
@@ -265,7 +265,7 @@ inline mirror::Object* RegionSpace::AllocLarge(size_t num_bytes,
size_t next_region1 = -1;
mirror::Object* region1 = AllocLargeInRange<kForEvac>(cyclic_alloc_region_index_,
num_regions_,
- num_regs,
+ num_regs_in_large_region,
bytes_allocated,
usable_size,
bytes_tl_bulk_allocated,
@@ -280,16 +280,16 @@ inline mirror::Object* RegionSpace::AllocLarge(size_t num_bytes,
}
// If the previous attempt failed, try to find a range of free regions within
- // [0, cyclic_alloc_region_index_ + num_regions_ - 1).
+ // [0, min(cyclic_alloc_region_index_ + num_regs_in_large_region - 1, num_regions_)).
size_t next_region2 = -1;
- mirror::Object* region2 =
- AllocLargeInRange<kForEvac>(0,
- cyclic_alloc_region_index_ + num_regions_ - 1,
- num_regs,
- bytes_allocated,
- usable_size,
- bytes_tl_bulk_allocated,
- &next_region2);
+ mirror::Object* region2 = AllocLargeInRange<kForEvac>(
+ 0,
+ std::min(cyclic_alloc_region_index_ + num_regs_in_large_region - 1, num_regions_),
+ num_regs_in_large_region,
+ bytes_allocated,
+ usable_size,
+ bytes_tl_bulk_allocated,
+ &next_region2);
if (region2 != nullptr) {
DCHECK_LT(0u, next_region2);
DCHECK_LE(next_region2, num_regions_);
@@ -302,7 +302,7 @@ inline mirror::Object* RegionSpace::AllocLarge(size_t num_bytes,
// Try to find a range of free regions within [0, num_regions_).
mirror::Object* region = AllocLargeInRange<kForEvac>(0,
num_regions_,
- num_regs,
+ num_regs_in_large_region,
bytes_allocated,
usable_size,
bytes_tl_bulk_allocated);
@@ -316,17 +316,21 @@ inline mirror::Object* RegionSpace::AllocLarge(size_t num_bytes,
template<bool kForEvac>
inline mirror::Object* RegionSpace::AllocLargeInRange(size_t begin,
size_t end,
- size_t num_regs,
+ size_t num_regs_in_large_region,
/* out */ size_t* bytes_allocated,
/* out */ size_t* usable_size,
/* out */ size_t* bytes_tl_bulk_allocated,
/* out */ size_t* next_region) {
+ DCHECK_LE(0u, begin);
+ DCHECK_LT(begin, end);
+ DCHECK_LE(end, num_regions_);
size_t left = begin;
- while (left + num_regs - 1 < end) {
+ while (left + num_regs_in_large_region - 1 < end) {
bool found = true;
size_t right = left;
- DCHECK_LT(right, left + num_regs) << "The inner loop should iterate at least once";
- while (right < left + num_regs) {
+ DCHECK_LT(right, left + num_regs_in_large_region)
+ << "The inner loop should iterate at least once";
+ while (right < left + num_regs_in_large_region) {
if (regions_[right].IsFree()) {
++right;
// Ensure `right` is not going beyond the past-the-end index of the region space.
@@ -338,7 +342,7 @@ inline mirror::Object* RegionSpace::AllocLargeInRange(size_t begin,
}
if (found) {
// `right` points to the one region past the last free region.
- DCHECK_EQ(left + num_regs, right);
+ DCHECK_EQ(left + num_regs_in_large_region, right);
Region* first_reg = &regions_[left];
DCHECK(first_reg->IsFree());
first_reg->UnfreeLarge(this, time_);
@@ -347,7 +351,7 @@ inline mirror::Object* RegionSpace::AllocLargeInRange(size_t begin,
} else {
++num_non_free_regions_;
}
- size_t allocated = num_regs * kRegionSize;
+ size_t allocated = num_regs_in_large_region * kRegionSize;
// We make 'top' all usable bytes, as the caller of this
// allocation may use all of 'usable_size' (see mirror::Array::Alloc).
first_reg->SetTop(first_reg->Begin() + allocated);
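A small worked example of the renamed num_regs_in_large_region computation in AllocLarge, assuming a 256 KiB region size (the actual RegionSpace::kRegionSize may differ):

#include <cstddef>

namespace sketch {
constexpr size_t kRegionSize = 256 * 1024;  // Assumed value for illustration.

// RoundUp(num_bytes, kRegionSize) / kRegionSize, as computed in AllocLarge above.
constexpr size_t NumRegsInLargeRegion(size_t num_bytes) {
  return (num_bytes + kRegionSize - 1) / kRegionSize;
}

// A large allocation spans the "large" region plus zero or more "large tail" regions.
static_assert(NumRegsInLargeRegion(kRegionSize + 1) == 2, "one byte over needs a tail region");
static_assert(NumRegsInLargeRegion(3 * kRegionSize) == 3, "exact multiple needs no extra tail");
}  // namespace sketch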
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 0701330e81..0569092bcd 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -16,6 +16,7 @@
#include "bump_pointer_space-inl.h"
#include "bump_pointer_space.h"
+#include "base/dumpable.h"
#include "gc/accounting/read_barrier_table.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
@@ -42,6 +43,9 @@ static constexpr bool kPoisonDeadObjectsInUnevacuatedRegions = true;
// points to a valid, non-protected memory area.
static constexpr uint32_t kPoisonDeadObject = 0xBADDB01D; // "BADDROID"
+// Whether we check a region's live bytes count against the region bitmap.
+static constexpr bool kCheckLiveBytesAgainstRegionBitmap = kIsDebugBuild;
+
MemMap* RegionSpace::CreateMemMap(const std::string& name, size_t capacity,
uint8_t* requested_begin) {
CHECK_ALIGNED(capacity, kRegionSize);
@@ -316,6 +320,9 @@ void RegionSpace::ClearFromSpace(/* out */ uint64_t* cleared_bytes,
};
for (size_t i = 0; i < std::min(num_regions_, non_free_region_index_limit_); ++i) {
Region* r = &regions_[i];
+ if (kCheckLiveBytesAgainstRegionBitmap) {
+ CheckLiveBytesAgainstRegionBitmap(r);
+ }
if (r->IsInFromSpace()) {
*cleared_bytes += r->BytesAllocated();
*cleared_objects += r->ObjectsAllocated();
@@ -404,6 +411,42 @@ void RegionSpace::ClearFromSpace(/* out */ uint64_t* cleared_bytes,
num_evac_regions_ = 0;
}
+void RegionSpace::CheckLiveBytesAgainstRegionBitmap(Region* r) {
+ if (r->LiveBytes() == static_cast<size_t>(-1)) {
+ // Live bytes count is undefined for `r`; nothing to check here.
+ return;
+ }
+
+ // Functor walking the region space bitmap for the range corresponding
+ // to region `r` and calculating the sum of live bytes.
+ size_t live_bytes_recount = 0u;
+ auto recount_live_bytes =
+ [&r, &live_bytes_recount](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK_ALIGNED(obj, kAlignment);
+ if (r->IsLarge()) {
+ // If `r` is a large region, then it contains at most one
+ // object, which must start at the beginning of the
+ // region. The live byte count in that case is equal to the size of
+ // the allocated regions (the large region plus its large tail regions).
+ DCHECK_EQ(reinterpret_cast<uint8_t*>(obj), r->Begin());
+ DCHECK_EQ(live_bytes_recount, 0u);
+ live_bytes_recount = r->Top() - r->Begin();
+ } else {
+ DCHECK(r->IsAllocated())
+ << "r->State()=" << r->State() << " r->LiveBytes()=" << r->LiveBytes();
+ size_t obj_size = obj->SizeOf<kDefaultVerifyFlags>();
+ size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
+ live_bytes_recount += alloc_size;
+ }
+ };
+ // Visit live objects in `r` and recount the live bytes.
+ GetLiveBitmap()->VisitMarkedRange(reinterpret_cast<uintptr_t>(r->Begin()),
+ reinterpret_cast<uintptr_t>(r->Top()),
+ recount_live_bytes);
+ // Check that this recount matches the region's current live bytes count.
+ DCHECK_EQ(live_bytes_recount, r->LiveBytes());
+}
+
// Poison the memory area in range [`begin`, `end`) with value `kPoisonDeadObject`.
static void PoisonUnevacuatedRange(uint8_t* begin, uint8_t* end) {
static constexpr size_t kPoisonDeadObjectSize = sizeof(kPoisonDeadObject);
@@ -423,7 +466,8 @@ void RegionSpace::PoisonDeadObjectsInUnevacuatedRegion(Region* r) {
// The live byte count of `r` should be different from -1, as this
// region should neither be a newly allocated region nor an
// evacuated region.
- DCHECK_NE(r->LiveBytes(), static_cast<size_t>(-1));
+ DCHECK_NE(r->LiveBytes(), static_cast<size_t>(-1))
+ << "Unexpected live bytes count of -1 in " << Dumpable<Region>(*r);
// Past-the-end address of the previously visited (live) object (or
// the beginning of the region, if `maybe_poison` has not run yet).
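The CheckLiveBytesAgainstRegionBitmap recount added above reduces to two accounting rules; the sketch below restates them outside the Region machinery (the alignment value is an assumption):

#include <cstddef>
#include <cstdint>
#include <initializer_list>

namespace sketch {
constexpr size_t kAlignment = 8;  // Assumed RegionSpace::kAlignment.

constexpr size_t RoundUp(size_t x, size_t n) { return (x + n - 1) / n * n; }

// Non-large (allocated) region: live bytes are the object sizes rounded up to the alignment.
inline size_t LiveBytesFromObjects(std::initializer_list<size_t> object_sizes) {
  size_t total = 0;
  for (size_t s : object_sizes) {
    total += RoundUp(s, kAlignment);
  }
  return total;
}

// Large region: it holds a single object starting at Begin(), so the recount is
// simply Top() - Begin(), i.e. the large region plus its large tail regions.
inline size_t LiveBytesFromLargeRegion(const uint8_t* begin, const uint8_t* top) {
  return static_cast<size_t>(top - begin);
}
}  // namespace sketch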
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index a12917140b..90f1f1dd2a 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -586,22 +586,27 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
Region* AllocateRegion(bool for_evac) REQUIRES(region_lock_);
// Scan region range [`begin`, `end`) in increasing order to try to
- // allocate a large region having a size of `num_regs` regions. If
- // there is no space in the region space to allocate this large
- // region, return null.
+ // allocate a large region having a size of `num_regs_in_large_region`
+ // regions. If there is no space in the region space to allocate this
+ // large region, return null.
//
// If argument `next_region` is not null, use `*next_region` to
// return the index to the region next to the allocated large region
// returned by this method.
template<bool kForEvac>
- mirror::Object* AllocLargeInRange(size_t num_regs,
- size_t begin,
+ mirror::Object* AllocLargeInRange(size_t begin,
size_t end,
+ size_t num_regs_in_large_region,
/* out */ size_t* bytes_allocated,
/* out */ size_t* usable_size,
/* out */ size_t* bytes_tl_bulk_allocated,
/* out */ size_t* next_region = nullptr) REQUIRES(region_lock_);
+ // Check that the value of `r->LiveBytes()` matches the number of
+ // (allocated) bytes used by live objects according to the live bits
+ // in the region space bitmap range corresponding to region `r`.
+ void CheckLiveBytesAgainstRegionBitmap(Region* r);
+
// Poison memory areas used by dead objects within unevacuated
// region `r`. This is meant to detect dangling references to dead
// objects earlier in debug mode.
diff --git a/runtime/generated/asm_support_gen.h b/runtime/generated/asm_support_gen.h
index 464c2b749f..a9230e0bee 100644
--- a/runtime/generated/asm_support_gen.h
+++ b/runtime/generated/asm_support_gen.h
@@ -80,9 +80,9 @@ DEFINE_CHECK_EQ(static_cast<int32_t>(STRING_DEX_CACHE_SIZE_MINUS_ONE), (static_c
DEFINE_CHECK_EQ(static_cast<int32_t>(STRING_DEX_CACHE_HASH_BITS), (static_cast<int32_t>(art::LeastSignificantBit(art::mirror::DexCache::kDexCacheStringCacheSize))))
#define STRING_DEX_CACHE_ELEMENT_SIZE 8
DEFINE_CHECK_EQ(static_cast<int32_t>(STRING_DEX_CACHE_ELEMENT_SIZE), (static_cast<int32_t>(sizeof(art::mirror::StringDexCachePair))))
-#define METHOD_DEX_CACHE_SIZE_MINUS_ONE 1023
+#define METHOD_DEX_CACHE_SIZE_MINUS_ONE 511
DEFINE_CHECK_EQ(static_cast<int32_t>(METHOD_DEX_CACHE_SIZE_MINUS_ONE), (static_cast<int32_t>(art::mirror::DexCache::kDexCacheMethodCacheSize - 1)))
-#define METHOD_DEX_CACHE_HASH_BITS 10
+#define METHOD_DEX_CACHE_HASH_BITS 9
DEFINE_CHECK_EQ(static_cast<int32_t>(METHOD_DEX_CACHE_HASH_BITS), (static_cast<int32_t>(art::LeastSignificantBit(art::mirror::DexCache::kDexCacheMethodCacheSize))))
#define CARD_TABLE_CARD_SHIFT 0xa
DEFINE_CHECK_EQ(static_cast<size_t>(CARD_TABLE_CARD_SHIFT), (static_cast<size_t>(art::gc::accounting::CardTable::kCardShift)))
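The halved method dex cache in this change keeps the generated constants consistent: the index mask is size - 1 and the hash bit count is log2 of the (power-of-two) size. A standalone restatement, using a GCC/Clang builtin for the bit count:

#include <cstdint>

namespace sketch {
// kDexCacheMethodCacheSize drops from 1024 to 512 (see runtime/mirror/dex_cache.h below),
// so the derived constants change together.
constexpr uint32_t kMethodCacheSize = 512;
static_assert((kMethodCacheSize & (kMethodCacheSize - 1)) == 0,
              "must stay a power of two for the entrypoint assumptions");
static_assert(kMethodCacheSize - 1 == 511, "METHOD_DEX_CACHE_SIZE_MINUS_ONE");
static_assert(__builtin_ctz(kMethodCacheSize) == 9, "METHOD_DEX_CACHE_HASH_BITS");
}  // namespace sketch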
diff --git a/runtime/hidden_api.cc b/runtime/hidden_api.cc
index e41d1d3eb9..5729800bb0 100644
--- a/runtime/hidden_api.cc
+++ b/runtime/hidden_api.cc
@@ -174,6 +174,10 @@ void MemberSignature::LogAccessToEventLog(AccessMethod access_method, Action act
if (action_taken == kDeny) {
log_maker.AddTaggedData(FIELD_HIDDEN_API_ACCESS_DENIED, 1);
}
+ const std::string& package_name = Runtime::Current()->GetProcessPackageName();
+ if (!package_name.empty()) {
+ log_maker.SetPackageName(package_name);
+ }
std::ostringstream signature_str;
Dump(signature_str);
log_maker.AddTaggedData(FIELD_HIDDEN_API_SIGNATURE, signature_str.str());
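The package tagging added here depends on the Runtime::GetProcessPackageName/SetProcessPackageName plumbing introduced later in this patch (runtime.h and dalvik_system_VMRuntime.cc). A minimal sketch of the intended flow, with an illustrative stand-in for the statsd LogMaker:

#include <iostream>
#include <string>

namespace sketch {
// Illustrative stand-in; the real type is android::util::LogMaker.
struct LogMaker {
  void SetPackageName(const std::string& name) { std::cout << "package=" << name << "\n"; }
};

// Mirrors the new LogAccessToEventLog logic: tag the event only when the embedder
// has provided a package name (an empty string means "unknown", so no tag is added).
inline void TagWithPackageIfKnown(LogMaker& log_maker, const std::string& package_name) {
  if (!package_name.empty()) {
    log_maker.SetPackageName(package_name);
  }
}
}  // namespace sketch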
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index dc42cfa4fe..3f44928e3a 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -1590,7 +1590,7 @@ void Hprof::VisitRoot(mirror::Object* obj, const RootInfo& info) {
if (obj == nullptr) {
return;
}
- MarkRootObject(obj, 0, xlate[info.GetType()], info.GetThreadId());
+ MarkRootObject(obj, nullptr, xlate[info.GetType()], info.GetThreadId());
}
// If "direct_to_ddms" is true, the other arguments are ignored, and data is
diff --git a/runtime/image.cc b/runtime/image.cc
index 028c515c91..b7a872c821 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -26,7 +26,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '6', '3', '\0' }; // Image relocations.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '6', '4', '\0' }; // Half DexCache F&M arrays.
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 22a6e9d941..74aa787db7 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -1077,6 +1077,8 @@ void UnstartedRuntime::UnstartedThreadCurrentThread(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset ATTRIBUTE_UNUSED) {
if (CheckCallers(shadow_frame,
{ "void java.lang.Thread.init(java.lang.ThreadGroup, java.lang.Runnable, "
+ "java.lang.String, long, java.security.AccessControlContext)",
+ "void java.lang.Thread.init(java.lang.ThreadGroup, java.lang.Runnable, "
"java.lang.String, long)",
"void java.lang.Thread.<init>()",
"void java.util.logging.LogManager$Cleaner.<init>("
@@ -1111,6 +1113,8 @@ void UnstartedRuntime::UnstartedThreadGetNativeState(
{ "java.lang.Thread$State java.lang.Thread.getState()",
"java.lang.ThreadGroup java.lang.Thread.getThreadGroup()",
"void java.lang.Thread.init(java.lang.ThreadGroup, java.lang.Runnable, "
+ "java.lang.String, long, java.security.AccessControlContext)",
+ "void java.lang.Thread.init(java.lang.ThreadGroup, java.lang.Runnable, "
"java.lang.String, long)",
"void java.lang.Thread.<init>()",
"void java.util.logging.LogManager$Cleaner.<init>("
@@ -1965,7 +1969,7 @@ void UnstartedRuntime::Invoke(Thread* self, const CodeItemDataAccessor& accessor
const auto& iter = invoke_handlers_.find(name);
if (iter != invoke_handlers_.end()) {
// Clear out the result in case it's not zeroed out.
- result->SetL(0);
+ result->SetL(nullptr);
// Push the shadow frame. This is so the failing method can be seen in abort dumps.
self->PushShadowFrame(shadow_frame);
@@ -1986,7 +1990,7 @@ void UnstartedRuntime::Jni(Thread* self, ArtMethod* method, mirror::Object* rece
const auto& iter = jni_handlers_.find(name);
if (iter != jni_handlers_.end()) {
// Clear out the result in case it's not zeroed out.
- result->SetL(0);
+ result->SetL(nullptr);
(*iter->second)(self, method, receiver, args, result);
} else if (Runtime::Current()->IsActiveTransaction()) {
AbortTransactionF(self, "Attempt to invoke native method in non-started runtime: %s",
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 9409b7661f..0353ea7462 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -1159,7 +1159,7 @@ void JdwpState::PostException(const EventLocation* pThrowLoc, mirror::Throwable*
}
basket.className = Dbg::GetClassName(basket.locationClass.Get());
basket.exceptionClass.Assign(exception_object->GetClass());
- basket.caught = (pCatchLoc->method != 0);
+ basket.caught = (pCatchLoc->method != nullptr);
basket.thisPtr.Assign(thisPtr);
/* don't try to post an exception caused by the debugger */
diff --git a/runtime/jni/check_jni.cc b/runtime/jni/check_jni.cc
index 66bd74b504..c5e8830d29 100644
--- a/runtime/jni/check_jni.cc
+++ b/runtime/jni/check_jni.cc
@@ -1462,7 +1462,7 @@ class ScopedCheck {
break;
}
}
- return 0;
+ return nullptr;
}
void AbortF(const char* fmt, ...) __attribute__((__format__(__printf__, 2, 3))) {
diff --git a/runtime/method_handles.cc b/runtime/method_handles.cc
index 01a32a2288..570fc48272 100644
--- a/runtime/method_handles.cc
+++ b/runtime/method_handles.cc
@@ -484,7 +484,7 @@ static inline bool MethodHandleInvokeMethod(ArtMethod* called_method,
first_dest_reg,
new_shadow_frame)) {
DCHECK(self->IsExceptionPending());
- result->SetL(0);
+ result->SetL(nullptr);
return false;
}
} else {
@@ -500,7 +500,7 @@ static inline bool MethodHandleInvokeMethod(ArtMethod* called_method,
operands,
new_shadow_frame)) {
DCHECK(self->IsExceptionPending());
- result->SetL(0);
+ result->SetL(nullptr);
return false;
}
}
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index ab5fb85dc0..87f4f0ab7b 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -157,12 +157,12 @@ class MANAGED DexCache FINAL : public Object {
"String dex cache size is not a power of 2.");
// Size of field dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
- static constexpr size_t kDexCacheFieldCacheSize = 1024;
+ static constexpr size_t kDexCacheFieldCacheSize = 512;
static_assert(IsPowerOfTwo(kDexCacheFieldCacheSize),
"Field dex cache size is not a power of 2.");
// Size of method dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
- static constexpr size_t kDexCacheMethodCacheSize = 1024;
+ static constexpr size_t kDexCacheMethodCacheSize = 512;
static_assert(IsPowerOfTwo(kDexCacheMethodCacheSize),
"Method dex cache size is not a power of 2.");
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index cdba6b204f..b598df3eba 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -233,13 +233,13 @@ static jobject DexFile_createCookieWithDirectBuffer(JNIEnv* env,
if (base_address == nullptr) {
ScopedObjectAccess soa(env);
ThrowWrappedIOException("dexFileBuffer not direct");
- return 0;
+ return nullptr;
}
std::unique_ptr<MemMap> dex_mem_map(AllocateDexMemoryMap(env, start, end));
if (dex_mem_map == nullptr) {
DCHECK(Thread::Current()->IsExceptionPending());
- return 0;
+ return nullptr;
}
size_t length = static_cast<size_t>(end - start);
@@ -255,7 +255,7 @@ static jobject DexFile_createCookieWithArray(JNIEnv* env,
std::unique_ptr<MemMap> dex_mem_map(AllocateDexMemoryMap(env, start, end));
if (dex_mem_map == nullptr) {
DCHECK(Thread::Current()->IsExceptionPending());
- return 0;
+ return nullptr;
}
auto destination = reinterpret_cast<jbyte*>(dex_mem_map.get()->Begin());
@@ -273,7 +273,7 @@ static jobject DexFile_openDexFileNative(JNIEnv* env,
jobjectArray dex_elements) {
ScopedUtfChars sourceName(env, javaSourceName);
if (sourceName.c_str() == nullptr) {
- return 0;
+ return nullptr;
}
Runtime* const runtime = Runtime::Current();
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index f1e267becc..7ac4086362 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -388,7 +388,7 @@ static jobjectArray VMDebug_getInstancesOfClasses(JNIEnv* env,
// as PSS, private/shared dirty/shared data are available via
// /proc/<pid>/smaps.
static void VMDebug_getHeapSpaceStats(JNIEnv* env, jclass, jlongArray data) {
- jlong* arr = reinterpret_cast<jlong*>(env->GetPrimitiveArrayCritical(data, 0));
+ jlong* arr = reinterpret_cast<jlong*>(env->GetPrimitiveArrayCritical(data, nullptr));
if (arr == nullptr || env->GetArrayLength(data) < 9) {
return;
}
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 3227c69305..9b3fd16ac0 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -675,6 +675,13 @@ static void VMRuntime_setDedupeHiddenApiWarnings(JNIEnv* env ATTRIBUTE_UNUSED,
Runtime::Current()->SetDedupeHiddenApiWarnings(dedupe);
}
+static void VMRuntime_setProcessPackageName(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jstring java_package_name) {
+ ScopedUtfChars package_name(env, java_package_name);
+ Runtime::Current()->SetProcessPackageName(package_name.c_str());
+}
+
static JNINativeMethod gMethods[] = {
FAST_NATIVE_METHOD(VMRuntime, addressOf, "(Ljava/lang/Object;)J"),
NATIVE_METHOD(VMRuntime, bootClassPath, "()Ljava/lang/String;"),
@@ -718,6 +725,7 @@ static JNINativeMethod gMethods[] = {
NATIVE_METHOD(VMRuntime, didPruneDalvikCache, "()Z"),
NATIVE_METHOD(VMRuntime, setSystemDaemonThreadPriority, "()V"),
NATIVE_METHOD(VMRuntime, setDedupeHiddenApiWarnings, "(Z)V"),
+ NATIVE_METHOD(VMRuntime, setProcessPackageName, "(Ljava/lang/String;)V"),
};
void register_dalvik_system_VMRuntime(JNIEnv* env) {
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index e54674f72b..4b4d6e332c 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -121,7 +121,7 @@ static jobject Constructor_newInstance0(JNIEnv* env, jobject javaMethod, jobject
static jobject Constructor_newInstanceFromSerialization(JNIEnv* env, jclass unused ATTRIBUTE_UNUSED,
jclass ctorClass, jclass allocClass) {
jmethodID ctor = env->GetMethodID(ctorClass, "<init>", "()V");
- DCHECK(ctor != NULL);
+ DCHECK(ctor != nullptr);
return env->NewObject(allocClass, ctor);
}
diff --git a/runtime/oat.h b/runtime/oat.h
index 69aacebe1c..037c8f9c1a 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,8 +32,8 @@ class InstructionSetFeatures;
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- // Last oat version changed reason: Move MethodInfo into CodeInfo.
- static constexpr uint8_t kOatVersion[] = { '1', '5', '8', '\0' };
+ // Last oat version changed reason: Add stack map fast path for GC.
+ static constexpr uint8_t kOatVersion[] = { '1', '6', '1', '\0' };
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 5f87bf0f99..4ed26facf7 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -550,7 +550,7 @@ class OatDexFile FINAL {
const IndexBssMapping* const method_bss_mapping_ = nullptr;
const IndexBssMapping* const type_bss_mapping_ = nullptr;
const IndexBssMapping* const string_bss_mapping_ = nullptr;
- const uint32_t* const oat_class_offsets_pointer_ = 0u;
+ const uint32_t* const oat_class_offsets_pointer_ = nullptr;
TypeLookupTable lookup_table_;
const DexLayoutSections* const dex_layout_sections_ = nullptr;
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 7383d477bb..a44e5a4b54 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -333,6 +333,9 @@ std::unique_ptr<RuntimeParser> ParsedOptions::MakeParser(bool ignore_unrecognize
.IntoKey(M::UseStderrLogger)
.Define("-Xonly-use-system-oat-files")
.IntoKey(M::OnlyUseSystemOatFiles)
+ .Define("-Xverifier-logging-threshold=_")
+ .WithType<unsigned int>()
+ .IntoKey(M::VerifierLoggingThreshold)
.Ignore({
"-ea", "-da", "-enableassertions", "-disableassertions", "--runtime-arg", "-esa",
"-dsa", "-enablesystemassertions", "-disablesystemassertions", "-Xrs", "-Xint:_",
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index a81c4d0518..facebda953 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -273,7 +273,8 @@ Runtime::Runtime()
pruned_dalvik_cache_(false),
// Initially assume we perceive jank in case the process state is never updated.
process_state_(kProcessStateJankPerceptible),
- zygote_no_threads_(false) {
+ zygote_no_threads_(false),
+ verifier_logging_threshold_ms_(100) {
static_assert(Runtime::kCalleeSaveSize ==
static_cast<uint32_t>(CalleeSaveType::kLastCalleeSaveType), "Unexpected size");
@@ -1438,6 +1439,8 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
}
}
+ verifier_logging_threshold_ms_ = runtime_options.GetOrDefault(Opt::VerifierLoggingThreshold);
+
std::string error_msg;
java_vm_ = JavaVMExt::Create(this, runtime_options, &error_msg);
if (java_vm_.get() == nullptr) {
diff --git a/runtime/runtime.h b/runtime/runtime.h
index f413733804..a98e8a81ed 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -579,6 +579,18 @@ class Runtime {
return hidden_api_access_event_log_rate_;
}
+ const std::string& GetProcessPackageName() const {
+ return process_package_name_;
+ }
+
+ void SetProcessPackageName(const char* package_name) {
+ if (package_name == nullptr) {
+ process_package_name_.clear();
+ } else {
+ process_package_name_ = package_name;
+ }
+ }
+
bool IsDexFileFallbackEnabled() const {
return allow_dex_file_fallback_;
}
@@ -767,6 +779,10 @@ class Runtime {
static constexpr int32_t kUnsetSdkVersion = 0u;
+ uint32_t GetVerifierLoggingThresholdMs() const {
+ return verifier_logging_threshold_ms_;
+ }
+
private:
static void InitPlatformSignalHandlers();
@@ -1027,10 +1043,13 @@ class Runtime {
// when there is a warning. This is only used for testing.
bool always_set_hidden_api_warning_flag_;
- // How often to log hidden API access to the event log. An integer between 0 (never)
- // and 0x10000 (always).
+ // How often to log hidden API access to the event log. An integer between 0
+ // (never) and 0x10000 (always).
uint32_t hidden_api_access_event_log_rate_;
+ // The package of the app running in this process.
+ std::string process_package_name_;
+
// Whether threads should dump their native stack on SIGQUIT.
bool dump_native_stack_on_sig_quit_;
@@ -1073,6 +1092,8 @@ class Runtime {
std::unique_ptr<MemMap> protected_fault_page_;
+ uint32_t verifier_logging_threshold_ms_;
+
DISALLOW_COPY_AND_ASSIGN(Runtime);
};
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 3f9a3229ca..ef21f9f9e0 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -152,5 +152,6 @@ RUNTIME_OPTIONS_KEY (unsigned int, GlobalRefAllocStackTraceLimit, 0) //
RUNTIME_OPTIONS_KEY (Unit, UseStderrLogger)
RUNTIME_OPTIONS_KEY (Unit, OnlyUseSystemOatFiles)
+RUNTIME_OPTIONS_KEY (unsigned int, VerifierLoggingThreshold, 100)
#undef RUNTIME_OPTIONS_KEY
diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc
index cd82284a9a..62dec15c57 100644
--- a/runtime/stack_map.cc
+++ b/runtime/stack_map.cc
@@ -31,82 +31,70 @@ CodeInfo::CodeInfo(const OatQuickMethodHeader* header, DecodeFlags flags)
: CodeInfo(header->GetOptimizedCodeInfoPtr(), flags) {
}
+// Returns true if the decoded table was deduped.
template<typename Accessor>
-ALWAYS_INLINE static void DecodeTable(BitTable<Accessor>& table,
- BitMemoryReader& reader,
- const uint8_t* data) {
+ALWAYS_INLINE static bool DecodeTable(BitTable<Accessor>& table, BitMemoryReader& reader) {
bool is_deduped = reader.ReadBit();
if (is_deduped) {
- // 'data' points to the start of the reader's data.
- uint32_t current_bit_offset = reader.GetBitOffset();
- uint32_t bit_offset_backwards = DecodeVarintBits(reader) - current_bit_offset;
- uint32_t byte_offset_backwards = BitsToBytesRoundUp(bit_offset_backwards);
- BitMemoryReader reader2(data - byte_offset_backwards,
- byte_offset_backwards * kBitsPerByte - bit_offset_backwards);
+ ssize_t bit_offset = reader.NumberOfReadBits() - reader.ReadVarint();
+ BitMemoryReader reader2(reader.data(), bit_offset); // The offset is negative.
table.Decode(reader2);
} else {
table.Decode(reader);
}
+ return is_deduped;
}
void CodeInfo::Decode(const uint8_t* data, DecodeFlags flags) {
- const uint8_t* begin = data;
- frame_size_in_bytes_ = DecodeUnsignedLeb128(&data);
- core_spill_mask_ = DecodeUnsignedLeb128(&data);
- fp_spill_mask_ = DecodeUnsignedLeb128(&data);
- number_of_dex_registers_ = DecodeUnsignedLeb128(&data);
- BitMemoryReader reader(data, /* bit_offset */ 0);
- DecodeTable(stack_maps_, reader, data);
- DecodeTable(inline_infos_, reader, data);
- DecodeTable(method_infos_, reader, data);
- if (flags & DecodeFlags::InlineInfoOnly) {
- return;
- }
- DecodeTable(register_masks_, reader, data);
- DecodeTable(stack_masks_, reader, data);
- DecodeTable(dex_register_masks_, reader, data);
- DecodeTable(dex_register_maps_, reader, data);
- DecodeTable(dex_register_catalog_, reader, data);
- size_in_bits_ = (data - begin) * kBitsPerByte + reader.GetBitOffset();
+ BitMemoryReader reader(data);
+ ForEachHeaderField([this, &reader](auto member_pointer) {
+ this->*member_pointer = reader.ReadVarint();
+ });
+ ForEachBitTableField([this, &reader](auto member_pointer) {
+ DecodeTable(this->*member_pointer, reader);
+ }, flags);
+ size_in_bits_ = reader.NumberOfReadBits();
}
-template<typename Accessor>
-ALWAYS_INLINE static void DedupeTable(BitMemoryWriter<std::vector<uint8_t>>& writer,
- BitMemoryReader& reader,
- CodeInfo::DedupeMap* dedupe_map) {
- bool is_deduped = reader.ReadBit();
- DCHECK(!is_deduped);
- BitTable<Accessor> bit_table(reader);
- BitMemoryRegion region = reader.Tail(bit_table.BitSize());
- auto it = dedupe_map->insert(std::make_pair(region, writer.GetBitOffset() + 1 /* dedupe bit */));
- if (it.second /* new bit table */ || region.size_in_bits() < 32) {
- writer.WriteBit(false); // Is not deduped.
- writer.WriteRegion(region);
- } else {
- writer.WriteBit(true); // Is deduped.
- EncodeVarintBits(writer, writer.GetBitOffset() - it.first->second);
+size_t CodeInfo::Deduper::Dedupe(const uint8_t* code_info_data) {
+ writer_.ByteAlign();
+ size_t deduped_offset = writer_.NumberOfWrittenBits() / kBitsPerByte;
+ BitMemoryReader reader(code_info_data);
+ CodeInfo code_info; // Temporary storage for decoded data.
+ ForEachHeaderField([this, &reader, &code_info](auto member_pointer) {
+ code_info.*member_pointer = reader.ReadVarint();
+ writer_.WriteVarint(code_info.*member_pointer);
+ });
+ ForEachBitTableField([this, &reader, &code_info](auto member_pointer) {
+ bool is_deduped = reader.ReadBit();
+ DCHECK(!is_deduped);
+ size_t bit_table_start = reader.NumberOfReadBits();
+ (code_info.*member_pointer).Decode(reader);
+ BitMemoryRegion region = reader.GetReadRegion().Subregion(bit_table_start);
+ auto it = dedupe_map_.insert(std::make_pair(region, /* placeholder */ 0));
+ if (it.second /* new bit table */ || region.size_in_bits() < 32) {
+ writer_.WriteBit(false); // Is not deduped.
+ it.first->second = writer_.NumberOfWrittenBits();
+ writer_.WriteRegion(region);
+ } else {
+ writer_.WriteBit(true); // Is deduped.
+ size_t bit_offset = writer_.NumberOfWrittenBits();
+ writer_.WriteVarint(bit_offset - it.first->second);
+ }
+ });
+
+ if (kIsDebugBuild) {
+ CodeInfo old_code_info(code_info_data);
+ CodeInfo new_code_info(writer_.data() + deduped_offset);
+ ForEachHeaderField([&old_code_info, &new_code_info](auto member_pointer) {
+ DCHECK_EQ(old_code_info.*member_pointer, new_code_info.*member_pointer);
+ });
+ ForEachBitTableField([&old_code_info, &new_code_info](auto member_pointer) {
+ DCHECK((old_code_info.*member_pointer).Equals(new_code_info.*member_pointer));
+ });
}
-}
-size_t CodeInfo::Dedupe(std::vector<uint8_t>* out, const uint8_t* in, DedupeMap* dedupe_map) {
- // Remember the current offset in the output buffer so that we can return it later.
- const size_t result = out->size();
- // Copy the header which encodes QuickMethodFrameInfo.
- EncodeUnsignedLeb128(out, DecodeUnsignedLeb128(&in));
- EncodeUnsignedLeb128(out, DecodeUnsignedLeb128(&in));
- EncodeUnsignedLeb128(out, DecodeUnsignedLeb128(&in));
- EncodeUnsignedLeb128(out, DecodeUnsignedLeb128(&in));
- BitMemoryReader reader(in, /* bit_offset */ 0);
- BitMemoryWriter<std::vector<uint8_t>> writer(out, /* bit_offset */ out->size() * kBitsPerByte);
- DedupeTable<StackMap>(writer, reader, dedupe_map);
- DedupeTable<InlineInfo>(writer, reader, dedupe_map);
- DedupeTable<MethodInfo>(writer, reader, dedupe_map);
- DedupeTable<RegisterMask>(writer, reader, dedupe_map);
- DedupeTable<MaskInfo>(writer, reader, dedupe_map);
- DedupeTable<MaskInfo>(writer, reader, dedupe_map);
- DedupeTable<DexRegisterMapInfo>(writer, reader, dedupe_map);
- DedupeTable<DexRegisterInfo>(writer, reader, dedupe_map);
- return result;
+ return deduped_offset;
}
BitTable<StackMap>::const_iterator CodeInfo::BinarySearchNativePc(uint32_t packed_pc) const {
@@ -193,33 +181,32 @@ void CodeInfo::DecodeDexRegisterMap(uint32_t stack_map_index,
}
}
-template<typename Accessor>
-static void AddTableSizeStats(const char* table_name,
- const BitTable<Accessor>& table,
- /*out*/ Stats* parent) {
- Stats* table_stats = parent->Child(table_name);
- table_stats->AddBits(table.BitSize());
- table_stats->Child("Header")->AddBits(table.HeaderBitSize());
- const char* const* column_names = GetBitTableColumnNames<Accessor>();
- for (size_t c = 0; c < table.NumColumns(); c++) {
- if (table.NumColumnBits(c) > 0) {
- Stats* column_stats = table_stats->Child(column_names[c]);
- column_stats->AddBits(table.NumRows() * table.NumColumnBits(c), table.NumRows());
+// Decode the CodeInfo while collecting size statistics.
+void CodeInfo::CollectSizeStats(const uint8_t* code_info_data, /*out*/ Stats* parent) {
+ Stats* codeinfo_stats = parent->Child("CodeInfo");
+ BitMemoryReader reader(code_info_data);
+ ForEachHeaderField([&reader](auto) { reader.ReadVarint(); });
+ codeinfo_stats->Child("Header")->AddBits(reader.NumberOfReadBits());
+ CodeInfo code_info; // Temporary storage for decoded tables.
+ ForEachBitTableField([codeinfo_stats, &reader, &code_info](auto member_pointer) {
+ auto& table = code_info.*member_pointer;
+ size_t bit_offset = reader.NumberOfReadBits();
+ bool deduped = DecodeTable(table, reader);
+ if (deduped) {
+ codeinfo_stats->Child("DedupeOffset")->AddBits(reader.NumberOfReadBits() - bit_offset);
+ } else {
+ Stats* table_stats = codeinfo_stats->Child(table.GetName());
+ table_stats->AddBits(reader.NumberOfReadBits() - bit_offset);
+ const char* const* column_names = table.GetColumnNames();
+ for (size_t c = 0; c < table.NumColumns(); c++) {
+ if (table.NumColumnBits(c) > 0) {
+ Stats* column_stats = table_stats->Child(column_names[c]);
+ column_stats->AddBits(table.NumRows() * table.NumColumnBits(c), table.NumRows());
+ }
+ }
}
- }
-}
-
-void CodeInfo::AddSizeStats(/*out*/ Stats* parent) const {
- Stats* stats = parent->Child("CodeInfo");
- stats->AddBytes(Size());
- AddTableSizeStats<StackMap>("StackMaps", stack_maps_, stats);
- AddTableSizeStats<InlineInfo>("InlineInfos", inline_infos_, stats);
- AddTableSizeStats<MethodInfo>("MethodInfo", method_infos_, stats);
- AddTableSizeStats<RegisterMask>("RegisterMasks", register_masks_, stats);
- AddTableSizeStats<MaskInfo>("StackMasks", stack_masks_, stats);
- AddTableSizeStats<MaskInfo>("DexRegisterMasks", dex_register_masks_, stats);
- AddTableSizeStats<DexRegisterMapInfo>("DexRegisterMaps", dex_register_maps_, stats);
- AddTableSizeStats<DexRegisterInfo>("DexRegisterCatalog", dex_register_catalog_, stats);
+ });
+ codeinfo_stats->AddBytes(BitsToBytesRoundUp(reader.NumberOfReadBits()));
}
void DexRegisterMap::Dump(VariableIndentationOutputStream* vios) const {
@@ -235,56 +222,49 @@ void DexRegisterMap::Dump(VariableIndentationOutputStream* vios) const {
}
}
-template<typename Accessor>
-static void DumpTable(VariableIndentationOutputStream* vios,
- const char* table_name,
- const BitTable<Accessor>& table,
- bool verbose,
- bool is_mask = false) {
- if (table.NumRows() != 0) {
- vios->Stream() << table_name << " BitSize=" << table.BitSize();
- vios->Stream() << " Rows=" << table.NumRows() << " Bits={";
- const char* const* column_names = GetBitTableColumnNames<Accessor>();
- for (size_t c = 0; c < table.NumColumns(); c++) {
- vios->Stream() << (c != 0 ? " " : "");
- vios->Stream() << column_names[c] << "=" << table.NumColumnBits(c);
- }
- vios->Stream() << "}\n";
- if (verbose) {
- ScopedIndentation indent1(vios);
- for (size_t r = 0; r < table.NumRows(); r++) {
- vios->Stream() << "[" << std::right << std::setw(3) << r << "]={";
- for (size_t c = 0; c < table.NumColumns(); c++) {
- vios->Stream() << (c != 0 ? " " : "");
- if (is_mask) {
- BitMemoryRegion bits = table.GetBitMemoryRegion(r, c);
- for (size_t b = 0, e = bits.size_in_bits(); b < e; b++) {
- vios->Stream() << bits.LoadBit(e - b - 1);
- }
- } else {
- vios->Stream() << std::right << std::setw(8) << static_cast<int32_t>(table.Get(r, c));
- }
- }
- vios->Stream() << "}\n";
- }
- }
- }
-}
-
void CodeInfo::Dump(VariableIndentationOutputStream* vios,
uint32_t code_offset,
bool verbose,
InstructionSet instruction_set) const {
- vios->Stream() << "CodeInfo\n";
+ vios->Stream() << "CodeInfo BitSize=" << size_in_bits_
+ << " FrameSize:" << packed_frame_size_ * kStackAlignment
+ << " CoreSpillMask:" << std::hex << core_spill_mask_
+ << " FpSpillMask:" << std::hex << fp_spill_mask_
+ << " NumberOfDexRegisters:" << std::dec << number_of_dex_registers_
+ << "\n";
ScopedIndentation indent1(vios);
- DumpTable<StackMap>(vios, "StackMaps", stack_maps_, verbose);
- DumpTable<InlineInfo>(vios, "InlineInfos", inline_infos_, verbose);
- DumpTable<MethodInfo>(vios, "MethodInfo", method_infos_, verbose);
- DumpTable<RegisterMask>(vios, "RegisterMasks", register_masks_, verbose);
- DumpTable<MaskInfo>(vios, "StackMasks", stack_masks_, verbose, true /* is_mask */);
- DumpTable<MaskInfo>(vios, "DexRegisterMasks", dex_register_masks_, verbose, true /* is_mask */);
- DumpTable<DexRegisterMapInfo>(vios, "DexRegisterMaps", dex_register_maps_, verbose);
- DumpTable<DexRegisterInfo>(vios, "DexRegisterCatalog", dex_register_catalog_, verbose);
+ ForEachBitTableField([this, &vios, verbose](auto member_pointer) {
+ const auto& table = this->*member_pointer;
+ if (table.NumRows() != 0) {
+ vios->Stream() << table.GetName() << " BitSize=" << table.DataBitSize();
+ vios->Stream() << " Rows=" << table.NumRows() << " Bits={";
+ const char* const* column_names = table.GetColumnNames();
+ for (size_t c = 0; c < table.NumColumns(); c++) {
+ vios->Stream() << (c != 0 ? " " : "");
+ vios->Stream() << column_names[c] << "=" << table.NumColumnBits(c);
+ }
+ vios->Stream() << "}\n";
+ if (verbose) {
+ ScopedIndentation indent1(vios);
+ for (size_t r = 0; r < table.NumRows(); r++) {
+ vios->Stream() << "[" << std::right << std::setw(3) << r << "]={";
+ for (size_t c = 0; c < table.NumColumns(); c++) {
+ vios->Stream() << (c != 0 ? " " : "");
+ if (&table == static_cast<const void*>(&stack_masks_) ||
+ &table == static_cast<const void*>(&dex_register_masks_)) {
+ BitMemoryRegion bits = table.GetBitMemoryRegion(r, c);
+ for (size_t b = 0, e = bits.size_in_bits(); b < e; b++) {
+ vios->Stream() << bits.LoadBit(e - b - 1);
+ }
+ } else {
+ vios->Stream() << std::right << std::setw(8) << static_cast<int32_t>(table.Get(r, c));
+ }
+ }
+ vios->Stream() << "}\n";
+ }
+ }
+ }
+ });
// Display stack maps along with (live) Dex register maps.
if (verbose) {
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 8bfae7c39f..5f44286089 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -128,7 +128,7 @@ class StackMap : public BitTableAccessor<8> {
OSR = 1,
Debug = 2,
};
- BIT_TABLE_HEADER()
+ BIT_TABLE_HEADER(StackMap)
BIT_TABLE_COLUMN(0, Kind)
BIT_TABLE_COLUMN(1, PackedNativePc)
BIT_TABLE_COLUMN(2, DexPc)
@@ -174,7 +174,7 @@ class StackMap : public BitTableAccessor<8> {
*/
class InlineInfo : public BitTableAccessor<6> {
public:
- BIT_TABLE_HEADER()
+ BIT_TABLE_HEADER(InlineInfo)
BIT_TABLE_COLUMN(0, IsLast) // Determines if there are further rows for further depths.
BIT_TABLE_COLUMN(1, DexPc)
BIT_TABLE_COLUMN(2, MethodInfoIndex)
@@ -201,21 +201,27 @@ class InlineInfo : public BitTableAccessor<6> {
const StackMap& stack_map) const;
};
-class MaskInfo : public BitTableAccessor<1> {
+class StackMask : public BitTableAccessor<1> {
public:
- BIT_TABLE_HEADER()
+ BIT_TABLE_HEADER(StackMask)
+ BIT_TABLE_COLUMN(0, Mask)
+};
+
+class DexRegisterMask : public BitTableAccessor<1> {
+ public:
+ BIT_TABLE_HEADER(DexRegisterMask)
BIT_TABLE_COLUMN(0, Mask)
};
class DexRegisterMapInfo : public BitTableAccessor<1> {
public:
- BIT_TABLE_HEADER()
+ BIT_TABLE_HEADER(DexRegisterMapInfo)
BIT_TABLE_COLUMN(0, CatalogueIndex)
};
class DexRegisterInfo : public BitTableAccessor<2> {
public:
- BIT_TABLE_HEADER()
+ BIT_TABLE_HEADER(DexRegisterInfo)
BIT_TABLE_COLUMN(0, Kind)
BIT_TABLE_COLUMN(1, PackedValue)
@@ -246,7 +252,7 @@ class DexRegisterInfo : public BitTableAccessor<2> {
// therefore it is worth encoding the mask as value+shift.
class RegisterMask : public BitTableAccessor<2> {
public:
- BIT_TABLE_HEADER()
+ BIT_TABLE_HEADER(RegisterMask)
BIT_TABLE_COLUMN(0, Value)
BIT_TABLE_COLUMN(1, Shift)
@@ -259,7 +265,7 @@ class RegisterMask : public BitTableAccessor<2> {
// Separating them greatly improves dedup efficiency of the other tables.
class MethodInfo : public BitTableAccessor<1> {
public:
- BIT_TABLE_HEADER()
+ BIT_TABLE_HEADER(MethodInfo)
BIT_TABLE_COLUMN(0, MethodIndex)
};
@@ -269,22 +275,37 @@ class MethodInfo : public BitTableAccessor<1> {
*/
class CodeInfo {
public:
+ class Deduper {
+ public:
+ explicit Deduper(std::vector<uint8_t>* output) : writer_(output) {
+ DCHECK_EQ(output->size(), 0u);
+ }
+
+ // Copy CodeInfo into output while de-duplicating the internal bit tables.
+ // It returns the byte offset of the copied CodeInfo within the output.
+ size_t Dedupe(const uint8_t* code_info);
+
+ private:
+ BitMemoryWriter<std::vector<uint8_t>> writer_;
+
+ // Deduplicate at the BitTable level. The value is the bit offset within the output.
+ std::map<BitMemoryRegion, uint32_t, BitMemoryRegion::Less> dedupe_map_;
+ };
+
enum DecodeFlags {
- Default = 0,
+ AllTables = 0,
+ // Limits the decoding only to the data needed by GC.
+ GcMasksOnly = 1,
// Limits the decoding only to the main stack map table and inline info table.
// This is sufficient for many use cases and makes the header decoding faster.
- InlineInfoOnly = 1,
+ InlineInfoOnly = 2,
};
- explicit CodeInfo(const uint8_t* data, DecodeFlags flags = DecodeFlags::Default) {
+ explicit CodeInfo(const uint8_t* data, DecodeFlags flags = AllTables) {
Decode(reinterpret_cast<const uint8_t*>(data), flags);
}
- explicit CodeInfo(MemoryRegion region) : CodeInfo(region.begin()) {
- DCHECK_EQ(Size(), region.size());
- }
-
- explicit CodeInfo(const OatQuickMethodHeader* header, DecodeFlags flags = DecodeFlags::Default);
+ explicit CodeInfo(const OatQuickMethodHeader* header, DecodeFlags flags = AllTables);
size_t Size() const {
return BitsToBytesRoundUp(size_in_bits_);
@@ -413,26 +434,19 @@ class CodeInfo {
InstructionSet instruction_set) const;
// Accumulate code info size statistics into the given Stats tree.
- void AddSizeStats(/*out*/ Stats* parent) const;
+ static void CollectSizeStats(const uint8_t* code_info, /*out*/ Stats* parent);
ALWAYS_INLINE static QuickMethodFrameInfo DecodeFrameInfo(const uint8_t* data) {
+ BitMemoryReader reader(data);
return QuickMethodFrameInfo(
- DecodeUnsignedLeb128(&data),
- DecodeUnsignedLeb128(&data),
- DecodeUnsignedLeb128(&data));
+ reader.ReadVarint() * kStackAlignment, // Decode packed_frame_size_ and unpack.
+ reader.ReadVarint(), // core_spill_mask_.
+ reader.ReadVarint()); // fp_spill_mask_.
}
- typedef std::map<BitMemoryRegion, uint32_t, BitMemoryRegion::Less> DedupeMap;
-
- // Copy CodeInfo data while de-duplicating the internal bit tables.
- // The 'out' vector must be reused between Dedupe calls (it does not have to be empty).
- // The 'dedupe_map' stores the bit offsets of bit tables within the 'out' vector.
- // It returns the byte offset of the copied CodeInfo within the 'out' vector.
- static size_t Dedupe(std::vector<uint8_t>* out,
- const uint8_t* in,
- /*inout*/ DedupeMap* dedupe_map);
-
private:
+ CodeInfo() {}
+
// Returns lower bound (first stack map which has a pc greater than or equal to the desired one).
// It ignores catch stack maps at the end (it is the same as if they had maximum pc value).
BitTable<StackMap>::const_iterator BinarySearchNativePc(uint32_t packed_pc) const;
@@ -444,16 +458,44 @@ class CodeInfo {
void Decode(const uint8_t* data, DecodeFlags flags);
- uint32_t frame_size_in_bytes_;
+ // Invokes the callback with the member pointer of each header field.
+ template<typename Callback>
+ ALWAYS_INLINE static void ForEachHeaderField(Callback callback) {
+ callback(&CodeInfo::packed_frame_size_);
+ callback(&CodeInfo::core_spill_mask_);
+ callback(&CodeInfo::fp_spill_mask_);
+ callback(&CodeInfo::number_of_dex_registers_);
+ }
+
+ // Invokes the callback with the member pointer of each BitTable field.
+ template<typename Callback>
+ ALWAYS_INLINE static void ForEachBitTableField(Callback callback, DecodeFlags flags = AllTables) {
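+ // The calls below follow the encoded table order; GcMasksOnly and InlineInfoOnly decode only a prefix of it.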
+ callback(&CodeInfo::stack_maps_);
+ callback(&CodeInfo::register_masks_);
+ callback(&CodeInfo::stack_masks_);
+ if (flags & DecodeFlags::GcMasksOnly) {
+ return;
+ }
+ callback(&CodeInfo::inline_infos_);
+ callback(&CodeInfo::method_infos_);
+ if (flags & DecodeFlags::InlineInfoOnly) {
+ return;
+ }
+ callback(&CodeInfo::dex_register_masks_);
+ callback(&CodeInfo::dex_register_maps_);
+ callback(&CodeInfo::dex_register_catalog_);
+ }
+
+ uint32_t packed_frame_size_; // Frame size in kStackAlignment units.
uint32_t core_spill_mask_;
uint32_t fp_spill_mask_;
uint32_t number_of_dex_registers_;
BitTable<StackMap> stack_maps_;
+ BitTable<RegisterMask> register_masks_;
+ BitTable<StackMask> stack_masks_;
BitTable<InlineInfo> inline_infos_;
BitTable<MethodInfo> method_infos_;
- BitTable<RegisterMask> register_masks_;
- BitTable<MaskInfo> stack_masks_;
- BitTable<MaskInfo> dex_register_masks_;
+ BitTable<DexRegisterMask> dex_register_masks_;
BitTable<DexRegisterMapInfo> dex_register_maps_;
BitTable<DexRegisterInfo> dex_register_catalog_;
uint32_t size_in_bits_ = 0;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 18dc0e8c45..df7f19d118 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -3223,8 +3223,8 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivial)
QUICK_ENTRY_POINT_INFO(pCheckInstanceOf)
QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage)
- QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccess)
- QUICK_ENTRY_POINT_INFO(pInitializeType)
+ QUICK_ENTRY_POINT_INFO(pResolveTypeAndVerifyAccess)
+ QUICK_ENTRY_POINT_INFO(pResolveType)
QUICK_ENTRY_POINT_INFO(pResolveString)
QUICK_ENTRY_POINT_INFO(pSet8Instance)
QUICK_ENTRY_POINT_INFO(pSet8Static)
@@ -3604,7 +3604,9 @@ class ReferenceMapVisitor : public StackVisitor {
StackReference<mirror::Object>* vreg_base = reinterpret_cast<StackReference<mirror::Object>*>(
reinterpret_cast<uintptr_t>(cur_quick_frame));
uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
- CodeInfo code_info(method_header);
+ CodeInfo code_info(method_header, kPrecise
+ ? CodeInfo::DecodeFlags::AllTables // We will need dex register maps.
+ : CodeInfo::DecodeFlags::GcMasksOnly);
StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
DCHECK(map.IsValid());
@@ -3621,7 +3623,7 @@ class ReferenceMapVisitor : public StackVisitor {
vreg_info.VisitStack(&new_ref, i, this);
if (ref != new_ref) {
ref_addr->Assign(new_ref);
- }
+ }
}
}
}
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 01b6bf8f15..a1b8938eaa 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -439,7 +439,7 @@ MethodVerifier::FailureData MethodVerifier::VerifyMethod(Thread* self,
}
if (kTimeVerifyMethod) {
uint64_t duration_ns = NanoTime() - start_ns;
- if (duration_ns > MsToNs(100)) {
+ if (duration_ns > MsToNs(Runtime::Current()->GetVerifierLoggingThresholdMs())) {
LOG(WARNING) << "Verification of " << dex_file->PrettyMethod(method_idx)
<< " took " << PrettyDuration(duration_ns)
<< (IsLargeMethod(verifier.CodeItem()) ? " (large method)" : "");
diff --git a/test/008-exceptions/multidex.jpp b/test/008-exceptions/multidex.jpp
deleted file mode 100644
index a3746f5149..0000000000
--- a/test/008-exceptions/multidex.jpp
+++ /dev/null
@@ -1,27 +0,0 @@
-BadError:
- @@com.android.jack.annotations.ForceInMainDex
- class BadError
-BadInit:
- @@com.android.jack.annotations.ForceInMainDex
- class BadInit
-BadErrorNoStringInit:
- @@com.android.jack.annotations.ForceInMainDex
- class BadErrorNoStringInit
-BadInitNoStringInit:
- @@com.android.jack.annotations.ForceInMainDex
- class BadInitNoStringInit
-BadSuperClass:
- @@com.android.jack.annotations.ForceInMainDex
- class BadSuperClass
-DerivedFromBadSuperClass:
- @@com.android.jack.annotations.ForceInMainDex
- class DerivedFromBadSuperClass
-Main:
- @@com.android.jack.annotations.ForceInMainDex
- class Main
-MultiDexBadInit:
- @@com.android.jack.annotations.ForceInMainDex
- class MultiDexBadInit
-MultiDexBadInitWrapper1:
- @@com.android.jack.annotations.ForceInMainDex
- class MultiDexBadInitWrapper1
diff --git a/test/160-read-barrier-stress/src/Main.java b/test/160-read-barrier-stress/src/Main.java
index 7e130cef94..04880c5c4b 100644
--- a/test/160-read-barrier-stress/src/Main.java
+++ b/test/160-read-barrier-stress/src/Main.java
@@ -14,8 +14,11 @@
* limitations under the License.
*/
+import java.lang.reflect.Field;
+import sun.misc.Unsafe;
+
public class Main {
- public static void main(String[] args) {
+ public static void main(String[] args) throws Exception {
// Initialize local variables for comparison.
Object f0000 = manyFields.testField0000;
Object f1024 = manyFields.testField1024;
@@ -44,6 +47,16 @@ public class Main {
testString2 = "testString2";
testString3 = "testString3";
}
+ // Initialize Unsafe.
+ Unsafe unsafe = getUnsafe();
+ long f0000Offset =
+ unsafe.objectFieldOffset(ManyFields.class.getField("testField0000"));
+ long f1024Offset =
+ unsafe.objectFieldOffset(ManyFields.class.getField("testField1024"));
+ long f4444Offset =
+ unsafe.objectFieldOffset(ManyFields.class.getField("testField4444"));
+ long f4999Offset =
+ unsafe.objectFieldOffset(ManyFields.class.getField("testField4999"));
// Continually check reads from `manyFields` and `largeArray` while allocating
// over 64MiB memory (with heap size limited to 16MiB), ensuring we run GC and
@@ -74,7 +87,17 @@ public class Main {
assertSameObject(testString2, "testString2");
assertSameObject(testString3, "testString3");
}
- // TODO: Stress GC roots (const-string/const-class, kBssEntry/kReferrersClass).
+ // TODO: Stress GC roots (const-class, kBssEntry/kReferrersClass).
+ // Test Unsafe.getObject().
+ assertSameObject(f0000, unsafe.getObject(mf, f0000Offset));
+ assertSameObject(f1024, unsafe.getObject(mf, f1024Offset));
+ assertSameObject(f4444, unsafe.getObject(mf, f4444Offset));
+ assertSameObject(f4999, unsafe.getObject(mf, f4999Offset));
+ // Test Unsafe.compareAndSwapObject().
+ assertEqual(false, unsafe.compareAndSwapObject(mf, f4444Offset, f1024, f4444));
+ assertEqual(true, unsafe.compareAndSwapObject(mf, f1024Offset, f1024, f4444));
+ assertEqual(true, unsafe.compareAndSwapObject(mf, f1024Offset, f4444, f1024));
+ assertEqual(false, unsafe.compareAndSwapObject(mf, f1024Offset, f4444, f1024));
}
}
@@ -84,6 +107,19 @@ public class Main {
}
}
+ public static void assertEqual(boolean expected, boolean actual) {
+ if (expected != actual) {
+ throw new Error("Expected " + expected +", got " + actual);
+ }
+ }
+
+ public static Unsafe getUnsafe() throws Exception {
+ Class<?> unsafeClass = Class.forName("sun.misc.Unsafe");
+ Field f = unsafeClass.getDeclaredField("theUnsafe");
+ f.setAccessible(true);
+ return (Unsafe) f.get(null);
+ }
+
public static void allocateAtLeast1KiB() {
// Give GC more work by allocating Object arrays.
memory[allocationIndex] = new Object[1024 / 4];
diff --git a/test/162-method-resolution/multidex.jpp b/test/162-method-resolution/multidex.jpp
deleted file mode 100644
index 5722f7f1d8..0000000000
--- a/test/162-method-resolution/multidex.jpp
+++ /dev/null
@@ -1,127 +0,0 @@
-Test1Base:
- @@com.android.jack.annotations.ForceInMainDex
- class Test1Base
-Test1Derived:
- @@com.android.jack.annotations.ForceInMainDex
- class Test1Derived
-Test1User2:
- @@com.android.jack.annotations.ForceInMainDex
- class Test1User2
-
-Test2Base:
- @@com.android.jack.annotations.ForceInMainDex
- class Test2Base
-Test2Derived:
- @@com.android.jack.annotations.ForceInMainDex
- class Test2Derived
-Test2Interface:
- @@com.android.jack.annotations.ForceInMainDex
- class Test2Interface
-Test2User:
- @@com.android.jack.annotations.ForceInMainDex
- class Test2User
-Test2User2:
- @@com.android.jack.annotations.ForceInMainDex
- class Test2User2
-
-Test3Base:
- @@com.android.jack.annotations.ForceInMainDex
- class Test3Base
-Test3Derived:
- @@com.android.jack.annotations.ForceInMainDex
- class Test3Derived
-Test3Interface:
- @@com.android.jack.annotations.ForceInMainDex
- class Test3Interface
-
-Test4Interface:
- @@com.android.jack.annotations.ForceInMainDex
- class Test4Interface
-Test4Derived:
- @@com.android.jack.annotations.ForceInMainDex
- class Test4Derived
-Test4User:
- @@com.android.jack.annotations.ForceInMainDex
- class Test4User
-
-Test5Interface:
- @@com.android.jack.annotations.ForceInMainDex
- class Test5Interface
-Test5Base:
- @@com.android.jack.annotations.ForceInMainDex
- class Test5Base
-Test5Derived:
- @@com.android.jack.annotations.ForceInMainDex
- class Test5Derived
-Test5User:
- @@com.android.jack.annotations.ForceInMainDex
- class Test5User
-Test5User2:
- @@com.android.jack.annotations.ForceInMainDex
- class Test5User2
-
-Test6Interface:
- @@com.android.jack.annotations.ForceInMainDex
- class Test6Interface
-Test6Derived:
- @@com.android.jack.annotations.ForceInMainDex
- class Test6Derived
-Test6User:
- @@com.android.jack.annotations.ForceInMainDex
- class Test6User
-Test6User2:
- @@com.android.jack.annotations.ForceInMainDex
- class Test6User2
-
-Test7Base:
- @@com.android.jack.annotations.ForceInMainDex
- class Test7Base
-Test7Interface:
- @@com.android.jack.annotations.ForceInMainDex
- class Test7Interface
-Test7Derived:
- @@com.android.jack.annotations.ForceInMainDex
- class Test7Derived
-Test7User:
- @@com.android.jack.annotations.ForceInMainDex
- class Test7User
-
-Test8Base:
- @@com.android.jack.annotations.ForceInMainDex
- class Test8Base
-Test8Derived:
- @@com.android.jack.annotations.ForceInMainDex
- class Test8Derived
-Test8User:
- @@com.android.jack.annotations.ForceInMainDex
- class Test8User
-Test8User2:
- @@com.android.jack.annotations.ForceInMainDex
- class Test8User2
-
-Test9Base:
- @@com.android.jack.annotations.ForceInMainDex
- class Test9Base
-Test9Derived:
- @@com.android.jack.annotations.ForceInMainDex
- class Test9Derived
-Test9User:
- @@com.android.jack.annotations.ForceInMainDex
- class Test9User
-Test9User2:
- @@com.android.jack.annotations.ForceInMainDex
- class Test9User2
-
-Test10Base:
- @@com.android.jack.annotations.ForceInMainDex
- class Test10Base
-Test10Interface:
- @@com.android.jack.annotations.ForceInMainDex
- class Test10Interface
-Test10User:
- @@com.android.jack.annotations.ForceInMainDex
- class Test10User
-
-Main:
- @@com.android.jack.annotations.ForceInMainDex
- class Main
diff --git a/test/462-checker-inlining-dex-files/multidex.jpp b/test/462-checker-inlining-dex-files/multidex.jpp
deleted file mode 100644
index ae554566cb..0000000000
--- a/test/462-checker-inlining-dex-files/multidex.jpp
+++ /dev/null
@@ -1,8 +0,0 @@
-Main:
- @@com.android.jack.annotations.ForceInMainDex
- class Main
-
-AAA:
- @@com.android.jack.annotations.ForceInMainDex
- class AAA
-
diff --git a/test/556-invoke-super/multidex.jpp b/test/556-invoke-super/multidex.jpp
deleted file mode 100644
index fe018019e3..0000000000
--- a/test/556-invoke-super/multidex.jpp
+++ /dev/null
@@ -1,4 +0,0 @@
-Main:
- @@com.android.jack.annotations.ForceInMainDex
- class Main*
-
diff --git a/test/563-checker-fakestring/smali/TestCase.smali b/test/563-checker-fakestring/smali/TestCase.smali
index 9d10bd77dd..0fe39ee229 100644
--- a/test/563-checker-fakestring/smali/TestCase.smali
+++ b/test/563-checker-fakestring/smali/TestCase.smali
@@ -305,3 +305,35 @@
return-object v0
.end method
+
+## CHECK-START: java.lang.String TestCase.loopAndStringInitAndPhi(byte[], boolean) register (after)
+## CHECK: NewInstance
+## CHECK-NOT: NewInstance
+## CHECK-DAG: <<Invoke1:l\d+>> InvokeStaticOrDirect method_name:java.lang.String.<init>
+## CHECK-DAG: <<Invoke2:l\d+>> InvokeStaticOrDirect method_name:java.lang.String.<init>
+## CHECK-DAG: <<Phi:l\d+>> Phi [<<Invoke2>>,<<Invoke1>>]
+## CHECK-DAG: Return [<<Phi>>]
+.method public static loopAndStringInitAndPhi([BZ)Ljava/lang/String;
+ .registers 4
+
+ if-nez p1, :allocate_other
+ new-instance v0, Ljava/lang/String;
+
+ # Loop
+ :loop_header
+ if-eqz p1, :loop_exit
+ goto :loop_header
+
+ :loop_exit
+ const-string v1, "UTF8"
+ invoke-direct {v0, p0, v1}, Ljava/lang/String;-><init>([BLjava/lang/String;)V
+ goto :exit
+
+ :allocate_other
+ const-string v1, "UTF8"
+ new-instance v0, Ljava/lang/String;
+ invoke-direct {v0, p0, v1}, Ljava/lang/String;-><init>([BLjava/lang/String;)V
+ :exit
+ return-object v0
+
+.end method
diff --git a/test/563-checker-fakestring/src/Main.java b/test/563-checker-fakestring/src/Main.java
index 3639d59878..df9e9dc074 100644
--- a/test/563-checker-fakestring/src/Main.java
+++ b/test/563-checker-fakestring/src/Main.java
@@ -113,7 +113,6 @@ public class Main {
result = (String) m.invoke(null, new Object[] { testData, false });
assertEqual(testString, result);
}
-
{
Method m = c.getMethod(
"deoptimizeNewInstanceAfterLoop", int[].class, byte[].class, int.class);
@@ -127,6 +126,13 @@ public class Main {
}
}
}
+ {
+ Method m = c.getMethod("loopAndStringInitAndPhi", byte[].class, boolean.class);
+ String result = (String) m.invoke(null, new Object[] { testData, true });
+ assertEqual(testString, result);
+ result = (String) m.invoke(null, new Object[] { testData, false });
+ assertEqual(testString, result);
+ }
}
public static boolean doThrow = false;
diff --git a/test/569-checker-pattern-replacement/multidex.jpp b/test/569-checker-pattern-replacement/multidex.jpp
deleted file mode 100644
index cfc8ad1fc9..0000000000
--- a/test/569-checker-pattern-replacement/multidex.jpp
+++ /dev/null
@@ -1,8 +0,0 @@
-Main:
- @@com.android.jack.annotations.ForceInMainDex
- class Main
-
-BaseInMainDex:
- @@com.android.jack.annotations.ForceInMainDex
- class BaseInMainDex
-
diff --git a/test/616-cha-interface-default/multidex.jpp b/test/616-cha-interface-default/multidex.jpp
deleted file mode 100644
index b0d200ea38..0000000000
--- a/test/616-cha-interface-default/multidex.jpp
+++ /dev/null
@@ -1,3 +0,0 @@
-Main:
- @@com.android.jack.annotations.ForceInMainDex
- class Main
diff --git a/test/616-cha-proxy-method-inline/multidex.jpp b/test/616-cha-proxy-method-inline/multidex.jpp
deleted file mode 100644
index b0d200ea38..0000000000
--- a/test/616-cha-proxy-method-inline/multidex.jpp
+++ /dev/null
@@ -1,3 +0,0 @@
-Main:
- @@com.android.jack.annotations.ForceInMainDex
- class Main
diff --git a/test/626-const-class-linking/multidex.jpp b/test/626-const-class-linking/multidex.jpp
deleted file mode 100644
index c7a66488c0..0000000000
--- a/test/626-const-class-linking/multidex.jpp
+++ /dev/null
@@ -1,27 +0,0 @@
-ClassPair:
- @@com.android.jack.annotations.ForceInMainDex
- class ClassPair
-DefiningLoader:
- @@com.android.jack.annotations.ForceInMainDex
- class DefiningLoader
-DelegatingLoader:
- @@com.android.jack.annotations.ForceInMainDex
- class DelegatingLoader
-Helper1:
- @@com.android.jack.annotations.ForceInMainDex
- class Helper1
-Main:
- @@com.android.jack.annotations.ForceInMainDex
- class Main
-MisbehavingLoader:
- @@com.android.jack.annotations.ForceInMainDex
- class MisbehavingLoader
-RacyLoader:
- @@com.android.jack.annotations.ForceInMainDex
- class RacyLoader
-RacyMisbehavingHelper:
- @@com.android.jack.annotations.ForceInMainDex
- class RacyMisbehavingHelper
-RacyMisbehavingLoader:
- @@com.android.jack.annotations.ForceInMainDex
- class RacyMisbehavingLoader
diff --git a/test/635-checker-arm64-volatile-load-cc/src/Main.java b/test/635-checker-arm64-volatile-load-cc/src/Main.java
index 6a26e940ac..89fad4c202 100644
--- a/test/635-checker-arm64-volatile-load-cc/src/Main.java
+++ b/test/635-checker-arm64-volatile-load-cc/src/Main.java
@@ -255,9 +255,9 @@ public class Main {
/// CHECK-START-ARM64: void Main.testStaticVolatileFieldGetWithLargeOffset() disassembly (after)
/// CHECK: StaticFieldGet
- /// CHECK: mov x17, #<<Offset:0x[0-9a-f]{4}>>
- /// CHECK: add x16, {{x\d+}}, x17
- /// CHECK: ldar {{w\d+}}, [x16]
+ /// CHECK: mov <<Kind:x|w>><<Temp1:\d+>>, #<<Offset:0x[0-9a-f]{4}>>
+ /// CHECK: add <<Kind>><<Temp2:\d+>>, <<Kind>>{{\d+}}, <<Kind>><<Temp1>>
+ /// CHECK: ldar {{w\d+}}, [x<<Temp2>>]
static void testStaticVolatileFieldGetWithLargeOffset() {
// The offset of this static field cannot be encoded as an immediate on ARM64.
Object s = s999;
@@ -265,9 +265,9 @@ public class Main {
/// CHECK-START-ARM64: void Main.testInstanceVolatileFieldGetWithLargeOffset() disassembly (after)
/// CHECK: InstanceFieldGet
- /// CHECK: mov x17, #<<Offset:0x[0-9a-f]{4}>>
- /// CHECK: add x16, {{x\d+}}, x17
- /// CHECK: ldar {{w\d+}}, [x16]
+ /// CHECK: mov <<Kind:x|w>><<Temp1:\d+>>, #<<Offset:0x[0-9a-f]{4}>>
+ /// CHECK: add <<Kind>><<Temp2:\d+>>, <<Kind>>{{\d+}}, <<Kind>><<Temp1>>
+ /// CHECK: ldar {{w\d+}}, [x<<Temp2>>]
void testInstanceVolatileFieldGetWithLargeOffset() {
// The offset of this instance field cannot be encoded as an immediate on ARM64.
Object i = i1029;
diff --git a/test/638-checker-inline-caches/multidex.jpp b/test/638-checker-inline-caches/multidex.jpp
deleted file mode 100644
index 69a2cc1ff1..0000000000
--- a/test/638-checker-inline-caches/multidex.jpp
+++ /dev/null
@@ -1,12 +0,0 @@
-Main:
- @@com.android.jack.annotations.ForceInMainDex
- class Main
-Super:
- @@com.android.jack.annotations.ForceInMainDex
- class Super
-SubA:
- @@com.android.jack.annotations.ForceInMainDex
- class SubA
-SubB
- @@com.android.jack.annotations.ForceInMainDex
- class SubB
diff --git a/test/674-hiddenapi/hiddenapi.cc b/test/674-hiddenapi/hiddenapi.cc
index 04c3fbf03a..96754c3076 100644
--- a/test/674-hiddenapi/hiddenapi.cc
+++ b/test/674-hiddenapi/hiddenapi.cc
@@ -63,8 +63,8 @@ extern "C" JNIEXPORT void JNICALL Java_Main_appendToBootClassLoader(
static jobject NewInstance(JNIEnv* env, jclass klass) {
jmethodID constructor = env->GetMethodID(klass, "<init>", "()V");
- if (constructor == NULL) {
- return NULL;
+ if (constructor == nullptr) {
+ return nullptr;
}
return env->NewObject(klass, constructor);
}
@@ -74,7 +74,7 @@ extern "C" JNIEXPORT jboolean JNICALL Java_JNI_canDiscoverField(
ScopedUtfChars utf_name(env, name);
jfieldID field = is_static ? env->GetStaticFieldID(klass, utf_name.c_str(), "I")
: env->GetFieldID(klass, utf_name.c_str(), "I");
- if (field == NULL) {
+ if (field == nullptr) {
env->ExceptionClear();
return JNI_FALSE;
}
@@ -87,7 +87,7 @@ extern "C" JNIEXPORT jboolean JNICALL Java_JNI_canGetField(
ScopedUtfChars utf_name(env, name);
jfieldID field = is_static ? env->GetStaticFieldID(klass, utf_name.c_str(), "I")
: env->GetFieldID(klass, utf_name.c_str(), "I");
- if (field == NULL) {
+ if (field == nullptr) {
env->ExceptionClear();
return JNI_FALSE;
}
@@ -95,7 +95,7 @@ extern "C" JNIEXPORT jboolean JNICALL Java_JNI_canGetField(
env->GetStaticIntField(klass, field);
} else {
jobject obj = NewInstance(env, klass);
- if (obj == NULL) {
+ if (obj == nullptr) {
env->ExceptionDescribe();
env->ExceptionClear();
return JNI_FALSE;
@@ -117,7 +117,7 @@ extern "C" JNIEXPORT jboolean JNICALL Java_JNI_canSetField(
ScopedUtfChars utf_name(env, name);
jfieldID field = is_static ? env->GetStaticFieldID(klass, utf_name.c_str(), "I")
: env->GetFieldID(klass, utf_name.c_str(), "I");
- if (field == NULL) {
+ if (field == nullptr) {
env->ExceptionClear();
return JNI_FALSE;
}
@@ -125,7 +125,7 @@ extern "C" JNIEXPORT jboolean JNICALL Java_JNI_canSetField(
env->SetStaticIntField(klass, field, 42);
} else {
jobject obj = NewInstance(env, klass);
- if (obj == NULL) {
+ if (obj == nullptr) {
env->ExceptionDescribe();
env->ExceptionClear();
return JNI_FALSE;
@@ -147,7 +147,7 @@ extern "C" JNIEXPORT jboolean JNICALL Java_JNI_canDiscoverMethod(
ScopedUtfChars utf_name(env, name);
jmethodID method = is_static ? env->GetStaticMethodID(klass, utf_name.c_str(), "()I")
: env->GetMethodID(klass, utf_name.c_str(), "()I");
- if (method == NULL) {
+ if (method == nullptr) {
env->ExceptionClear();
return JNI_FALSE;
}
@@ -160,7 +160,7 @@ extern "C" JNIEXPORT jboolean JNICALL Java_JNI_canInvokeMethodA(
ScopedUtfChars utf_name(env, name);
jmethodID method = is_static ? env->GetStaticMethodID(klass, utf_name.c_str(), "()I")
: env->GetMethodID(klass, utf_name.c_str(), "()I");
- if (method == NULL) {
+ if (method == nullptr) {
env->ExceptionClear();
return JNI_FALSE;
}
@@ -169,7 +169,7 @@ extern "C" JNIEXPORT jboolean JNICALL Java_JNI_canInvokeMethodA(
env->CallStaticIntMethodA(klass, method, nullptr);
} else {
jobject obj = NewInstance(env, klass);
- if (obj == NULL) {
+ if (obj == nullptr) {
env->ExceptionDescribe();
env->ExceptionClear();
return JNI_FALSE;
@@ -191,7 +191,7 @@ extern "C" JNIEXPORT jboolean JNICALL Java_JNI_canInvokeMethodV(
ScopedUtfChars utf_name(env, name);
jmethodID method = is_static ? env->GetStaticMethodID(klass, utf_name.c_str(), "()I")
: env->GetMethodID(klass, utf_name.c_str(), "()I");
- if (method == NULL) {
+ if (method == nullptr) {
env->ExceptionClear();
return JNI_FALSE;
}
@@ -200,7 +200,7 @@ extern "C" JNIEXPORT jboolean JNICALL Java_JNI_canInvokeMethodV(
env->CallStaticIntMethod(klass, method);
} else {
jobject obj = NewInstance(env, klass);
- if (obj == NULL) {
+ if (obj == nullptr) {
env->ExceptionDescribe();
env->ExceptionClear();
return JNI_FALSE;
@@ -224,7 +224,7 @@ extern "C" JNIEXPORT jboolean JNICALL Java_JNI_canDiscoverConstructor(
JNIEnv* env, jclass, jclass klass, jstring args) {
ScopedUtfChars utf_args(env, args);
jmethodID constructor = env->GetMethodID(klass, "<init>", utf_args.c_str());
- if (constructor == NULL) {
+ if (constructor == nullptr) {
env->ExceptionClear();
return JNI_FALSE;
}
@@ -236,7 +236,7 @@ extern "C" JNIEXPORT jboolean JNICALL Java_JNI_canInvokeConstructorA(
JNIEnv* env, jclass, jclass klass, jstring args) {
ScopedUtfChars utf_args(env, args);
jmethodID constructor = env->GetMethodID(klass, "<init>", utf_args.c_str());
- if (constructor == NULL) {
+ if (constructor == nullptr) {
env->ExceptionClear();
return JNI_FALSE;
}
@@ -261,7 +261,7 @@ extern "C" JNIEXPORT jboolean JNICALL Java_JNI_canInvokeConstructorV(
JNIEnv* env, jclass, jclass klass, jstring args) {
ScopedUtfChars utf_args(env, args);
jmethodID constructor = env->GetMethodID(klass, "<init>", utf_args.c_str());
- if (constructor == NULL) {
+ if (constructor == nullptr) {
env->ExceptionClear();
return JNI_FALSE;
}
diff --git a/test/683-clinit-inline-static-invoke/expected.txt b/test/683-clinit-inline-static-invoke/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/683-clinit-inline-static-invoke/expected.txt
diff --git a/test/683-clinit-inline-static-invoke/info.txt b/test/683-clinit-inline-static-invoke/info.txt
new file mode 100644
index 0000000000..32e5cdcb99
--- /dev/null
+++ b/test/683-clinit-inline-static-invoke/info.txt
@@ -0,0 +1,3 @@
+Regression test for a bug where the class initialization check for an inlined
+call to a static method used a type index from the wrong dex file because the
+current dex file does not have a TypeId for it. This was likely to crash.
diff --git a/test/683-clinit-inline-static-invoke/src-multidex/MyTimeZone.java b/test/683-clinit-inline-static-invoke/src-multidex/MyTimeZone.java
new file mode 100644
index 0000000000..b74b310a45
--- /dev/null
+++ b/test/683-clinit-inline-static-invoke/src-multidex/MyTimeZone.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import android.icu.util.TimeZone;
+
+public abstract class MyTimeZone extends TimeZone {
+ // Reference to MyTimeZone.getDefaultTimeZoneType() shall resolve
+ // to TimeZone.getDefaultTimeZoneType() which should be easily inlined.
+}
diff --git a/test/683-clinit-inline-static-invoke/src/Main.java b/test/683-clinit-inline-static-invoke/src/Main.java
new file mode 100644
index 0000000000..b4ccfaa95b
--- /dev/null
+++ b/test/683-clinit-inline-static-invoke/src/Main.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ // The following is a simple static field getter that can be inlined, referenced
+ // through a subclass with the declaring class having no TypeId in current DexFile.
+ // When we inline this getter, we're left with HLoadClass+HClinitCheck which cannot
+ // be merged back to the InvokeStaticOrDirect for implicit class init check.
+ // The declaring class is in the boot image, so the LoadClass can load it using the
+ // .data.bimg.rel.ro section. However, the ClinitCheck entrypoint was previously
+ // taking a type index of the declaring class and since we did not have a valid
+ // TypeId in the current DexFile, we erroneously provided the type index from the
+ // declaring DexFile and that caused a crash. This was fixed by changing the
+ // ClinitCheck entrypoint to take the Class reference from LoadClass.
+ int dummy = MyTimeZone.getDefaultTimeZoneType();
+ }
+}
diff --git a/test/913-heaps/expected.txt b/test/913-heaps/expected.txt
index 01d374bebd..065b854a6a 100644
--- a/test/913-heaps/expected.txt
+++ b/test/913-heaps/expected.txt
@@ -1,9 +1,9 @@
---
true true
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=136, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=8,location= 31])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=136, length=-1]
-root@root --(thread)--> 3000@0 [size=136, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
1001@0 --(superclass)--> 1000@0 [size=123456780000, length=-1]
1002@0 --(interface)--> 2001@0 [size=123456780004, length=-1]
1002@0 --(superclass)--> 1001@0 [size=123456780001, length=-1]
@@ -44,14 +44,14 @@ root@root --(thread)--> 3000@0 [size=136, length=-1]
---
root@root --(jni-global)--> 1@1000 [size=16, length=-1]
root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 1@1000 [size=16, length=-1]
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=136, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=10,location= 8])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 8])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=136, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
root@root --(thread)--> 1@1000 [size=16, length=-1]
-root@root --(thread)--> 3000@0 [size=136, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
1001@0 --(superclass)--> 1000@0 [size=123456780005, length=-1]
1002@0 --(interface)--> 2001@0 [size=123456780009, length=-1]
1002@0 --(superclass)--> 1001@0 [size=123456780006, length=-1]
@@ -90,18 +90,18 @@ root@root --(thread)--> 3000@0 [size=136, length=-1]
5@1002 --(field@9)--> 6@1000 [size=16, length=-1]
6@1000 --(class)--> 1000@0 [size=123456780005, length=-1]
---
-root@root --(thread)--> 3000@0 [size=136, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
---
3@1001 --(class)--> 1001@0 [size=123456780011, length=-1]
---
-root@root --(thread)--> 3000@0 [size=136, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
---
3@1001 --(class)--> 1001@0 [size=123456780016, length=-1]
---
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=136, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=8,location= 31])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=136, length=-1]
-root@root --(thread)--> 3000@0 [size=136, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
---
1001@0 --(superclass)--> 1000@0 [size=123456780020, length=-1]
3@1001 --(class)--> 1001@0 [size=123456780021, length=-1]
@@ -110,14 +110,14 @@ root@root --(thread)--> 3000@0 [size=136, length=-1]
---
root@root --(jni-global)--> 1@1000 [size=16, length=-1]
root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 1@1000 [size=16, length=-1]
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=136, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=10,location= 8])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 8])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=136, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
root@root --(thread)--> 1@1000 [size=16, length=-1]
-root@root --(thread)--> 3000@0 [size=136, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
---
1001@0 --(superclass)--> 1000@0 [size=123456780025, length=-1]
3@1001 --(class)--> 1001@0 [size=123456780026, length=-1]
@@ -198,10 +198,10 @@ root@root --(thread)--> 1@1000 [size=16, length=-1]
---
---
---- untagged objects
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=136, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=8,location= 31])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=136, length=-1]
-root@root --(thread)--> 3000@0 [size=136, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
1001@0 --(superclass)--> 1000@0 [size=123456780050, length=-1]
1002@0 --(interface)--> 2001@0 [size=123456780054, length=-1]
1002@0 --(superclass)--> 1001@0 [size=123456780051, length=-1]
@@ -242,14 +242,14 @@ root@root --(thread)--> 3000@0 [size=136, length=-1]
---
root@root --(jni-global)--> 1@1000 [size=16, length=-1]
root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 1@1000 [size=16, length=-1]
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=136, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=10,location= 8])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 8])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=136, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
root@root --(thread)--> 1@1000 [size=16, length=-1]
-root@root --(thread)--> 3000@0 [size=136, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
1001@0 --(superclass)--> 1000@0 [size=123456780055, length=-1]
1002@0 --(interface)--> 2001@0 [size=123456780059, length=-1]
1002@0 --(superclass)--> 1001@0 [size=123456780056, length=-1]
@@ -289,9 +289,9 @@ root@root --(thread)--> 3000@0 [size=136, length=-1]
6@1000 --(class)--> 1000@0 [size=123456780055, length=-1]
---
---- tagged classes
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=136, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=136, length=-1]
-root@root --(thread)--> 3000@0 [size=136, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
1001@0 --(superclass)--> 1000@0 [size=123456780060, length=-1]
1002@0 --(interface)--> 2001@0 [size=123456780064, length=-1]
1002@0 --(superclass)--> 1001@0 [size=123456780061, length=-1]
@@ -316,9 +316,9 @@ root@root --(thread)--> 3000@0 [size=136, length=-1]
5@1002 --(field@8)--> 500@0 [size=20, length=2]
6@1000 --(class)--> 1000@0 [size=123456780060, length=-1]
---
-root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=136, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=136, length=-1]
-root@root --(thread)--> 3000@0 [size=136, length=-1]
+root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=132, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=132, length=-1]
+root@root --(thread)--> 3000@0 [size=132, length=-1]
1001@0 --(superclass)--> 1000@0 [size=123456780065, length=-1]
1002@0 --(interface)--> 2001@0 [size=123456780069, length=-1]
1002@0 --(superclass)--> 1001@0 [size=123456780066, length=-1]
diff --git a/test/knownfailures.json b/test/knownfailures.json
index 15c1198b8b..7322a35884 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -548,6 +548,18 @@
"bug": "b/33650497"
},
{
+ "tests": "1946-list-descriptors",
+ "description": "ASAN+interp-ac/switch interpreter means this is too slow to finish in the timeout",
+ "variant": "target & interp-ac",
+ "env_vars": {"SANITIZE_TARGET": "address"}
+ },
+ {
+ "tests": "1946-list-descriptors",
+ "description": "ASAN+interp-ac/switch interpreter means this is too slow to finish in the timeout",
+ "variant": "host & interp-ac",
+ "env_vars": {"SANITIZE_HOST": "address"}
+ },
+ {
"tests": "202-thread-oome",
"description": "ASAN aborts when large thread stacks are requested.",
"variant": "host",
@@ -666,6 +678,11 @@
"description": ["Requires zip, which isn't available on device"]
},
{
+ "tests": ["683-clinit-inline-static-invoke"],
+ "variant": "jvm",
+ "description": ["Uses android-specific boot image class."]
+ },
+ {
"tests": ["1941-dispose-stress", "522-checker-regression-monitor-exit"],
"variant": "jvm",
"bug": "b/73888836",
@@ -1016,15 +1033,15 @@
},
{
"tests": ["712-varhandle-invocations"],
- "variant": "interpreter & gcstress",
+ "variant": "gcstress",
"bug": "b/111630237",
"description": ["Test timing out under gcstress possibly due to slower unwinding by libbacktrace"]
},
{
- "tests": ["712-varhandle-invocations", "624-checker-stringops"],
+ "tests": ["624-checker-stringops"],
"variant": "optimizing & gcstress | speed-profile & gcstress",
"bug": "b/111545159",
- "description": ["These tests seem to expose some error with our gc when run in these configurations"]
+ "description": ["Seem to expose some error with our gc when run in these configurations"]
},
{
"tests": ["021-string2"],
diff --git a/tools/Android.mk b/tools/Android.mk
index 9ecf0cd7ed..e90f5f5b45 100644
--- a/tools/Android.mk
+++ b/tools/Android.mk
@@ -32,3 +32,5 @@ LOCAL_MODULE := art-script
LOCAL_SRC_FILES := art
LOCAL_MODULE_STEM := art
include $(BUILD_PREBUILT)
+
+include $(LOCAL_PATH)/class2greylist/test/Android.mk
diff --git a/tools/art b/tools/art
index 62df7eb328..aebf5a6778 100644
--- a/tools/art
+++ b/tools/art
@@ -359,7 +359,7 @@ if [ "$ANDROID_DATA" = "/data" ] || [ "$ANDROID_DATA" = "" ]; then
fi
if [ "$PERF" != "" ]; then
- LAUNCH_WRAPPER="perf record -g -o $ANDROID_DATA/perf.data -e cycles:u $LAUNCH_WRAPPER"
+ LAUNCH_WRAPPER="perf record -g --call-graph dwarf -F 10000 -o $ANDROID_DATA/perf.data -e cycles:u $LAUNCH_WRAPPER"
EXTRA_OPTIONS+=(-Xcompiler-option --generate-debug-info)
fi
diff --git a/tools/art_verifier/Android.bp b/tools/art_verifier/Android.bp
new file mode 100644
index 0000000000..afd52fbcaa
--- /dev/null
+++ b/tools/art_verifier/Android.bp
@@ -0,0 +1,48 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+art_cc_defaults {
+ name: "art_verifier-defaults",
+ defaults: ["art_defaults"],
+ host_supported: true,
+ srcs: [
+ "art_verifier.cc",
+ ],
+ header_libs: [
+ "art_cmdlineparser_headers",
+ ],
+ static_libs: art_static_dependencies + [
+ "libart",
+ "libartbase",
+ "libdexfile",
+ "libprofile",
+ ],
+ target: {
+ android: {
+ static_libs: [
+ "libtombstoned_client_static",
+ ],
+ },
+ darwin: {
+ enabled: false,
+ },
+ },
+}
+
+art_cc_binary {
+ name: "art_verifier",
+ defaults: ["art_verifier-defaults"],
+}
diff --git a/tools/art_verifier/art_verifier.cc b/tools/art_verifier/art_verifier.cc
new file mode 100644
index 0000000000..fc62410889
--- /dev/null
+++ b/tools/art_verifier/art_verifier.cc
@@ -0,0 +1,267 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <string>
+#include <vector>
+
+#include "android-base/logging.h"
+
+#include "base/logging.h"
+#include "base/os.h"
+#include "class_linker-inl.h"
+#include "dex/art_dex_file_loader.h"
+#include "dex/class_accessor-inl.h"
+#include "dex/dex_file-inl.h"
+#include "interpreter/unstarted_runtime.h"
+#include "mirror/class-inl.h"
+#include "mirror/dex_cache-inl.h"
+#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
+#include "verifier/method_verifier.h"
+#include "well_known_classes.h"
+
+#include <sys/stat.h>
+#include "cmdline.h"
+
+namespace art {
+
+namespace {
+
+bool LoadDexFile(const std::string& dex_filename,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files) {
+ const ArtDexFileLoader dex_file_loader;
+ std::string error_msg;
+ if (!dex_file_loader.Open(dex_filename.c_str(),
+ dex_filename.c_str(),
+ /* verify */ true,
+ /* verify_checksum */ true,
+ &error_msg,
+ dex_files)) {
+ LOG(ERROR) << error_msg;
+ return false;
+ }
+ return true;
+}
+
+jobject Install(Runtime* runtime,
+ std::vector<std::unique_ptr<const DexFile>>& in,
+ std::vector<const DexFile*>* out)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ Thread* self = Thread::Current();
+ CHECK(self != nullptr);
+
+ // Need well-known-classes.
+ WellKnownClasses::Init(self->GetJniEnv());
+ // Need a class loader. Fake that we're a compiler.
+ // Note: this will run initializers through the unstarted runtime, so make sure it's
+ // initialized.
+ interpreter::UnstartedRuntime::Initialize();
+
+ for (std::unique_ptr<const DexFile>& dex_file : in) {
+ out->push_back(dex_file.release());
+ }
+
+ ClassLinker* class_linker = runtime->GetClassLinker();
+
+ jobject class_loader = class_linker->CreatePathClassLoader(self, *out);
+
+ // Need to register dex files to get a working dex cache.
+ for (const DexFile* dex_file : *out) {
+ ObjPtr<mirror::DexCache> dex_cache = class_linker->RegisterDexFile(
+ *dex_file, self->DecodeJObject(class_loader)->AsClassLoader());
+ CHECK(dex_cache != nullptr);
+ }
+
+ return class_loader;
+}
+
+struct MethodVerifierArgs : public CmdlineArgs {
+ protected:
+ using Base = CmdlineArgs;
+
+ virtual ParseStatus ParseCustom(const StringPiece& option,
+ std::string* error_msg) OVERRIDE {
+ {
+ ParseStatus base_parse = Base::ParseCustom(option, error_msg);
+ if (base_parse != kParseUnknownArgument) {
+ return base_parse;
+ }
+ }
+
+ if (option.starts_with("--dex-file=")) {
+ dex_filename_ = option.substr(strlen("--dex-file=")).data();
+ } else if (option == "--dex-file-verifier") {
+ dex_file_verifier_ = true;
+ } else if (option == "--verbose") {
+ method_verifier_verbose_ = true;
+ } else if (option == "--verbose-debug") {
+ method_verifier_verbose_debug_ = true;
+ } else if (option.starts_with("--repetitions=")) {
+ char* end;
+ repetitions_ = strtoul(option.substr(strlen("--repetitions=")).data(), &end, 10);
+ } else {
+ return kParseUnknownArgument;
+ }
+
+ return kParseOk;
+ }
+
+ virtual ParseStatus ParseChecks(std::string* error_msg) OVERRIDE {
+ // Perform the parent checks.
+ ParseStatus parent_checks = Base::ParseChecks(error_msg);
+ if (parent_checks != kParseOk) {
+ return parent_checks;
+ }
+
+ // Perform our own checks.
+ if (dex_filename_ == nullptr) {
+ *error_msg = "--dex-file not set";
+ return kParseError;
+ }
+
+ return kParseOk;
+ }
+
+ virtual std::string GetUsage() const {
+ std::string usage;
+
+ usage +=
+ "Usage: method_verifier_cmd [options] ...\n"
+ // Dex file is required.
+ " --dex-file=<file.dex>: specifies an input dex file.\n"
+ " Example: --dex-file=app.apk\n"
+ " --dex-file-verifier: only run dex file verifier.\n"
+ " --verbose: use verbose verifier mode.\n"
+ " --verbose-debug: use verbose verifier debug mode.\n"
+ " --repetitions=<count>: repeat the verification count times.\n"
+ "\n";
+
+ usage += Base::GetUsage();
+
+ return usage;
+ }
+
+ public:
+ const char* dex_filename_ = nullptr;
+
+ bool dex_file_verifier_ = false;
+
+ bool method_verifier_verbose_ = false;
+ bool method_verifier_verbose_debug_ = false;
+
+ size_t repetitions_ = 0u;
+};
+
+struct MethodVerifierMain : public CmdlineMain<MethodVerifierArgs> {
+ bool NeedsRuntime() OVERRIDE {
+ return true;
+ }
+
+ bool ExecuteWithoutRuntime() OVERRIDE {
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+ }
+
+ bool ExecuteWithRuntime(Runtime* runtime) OVERRIDE {
+ CHECK(args_ != nullptr);
+
+ const size_t dex_reps = args_->dex_file_verifier_
+ // If we're focused on the dex file verifier, use the
+ // repetitions parameter.
+ ? std::max(static_cast<size_t>(1u), args_->repetitions_)
+ // Otherwise just load the dex files once.
+ : 1;
+
+ std::vector<std::unique_ptr<const DexFile>> unique_dex_files;
+ for (size_t i = 0; i != dex_reps; ++i) {
+ if (args_->dex_file_verifier_ && args_->repetitions_ != 0) {
+ LOG(INFO) << "Repetition " << (i + 1);
+ }
+ unique_dex_files.clear();
+ if (!LoadDexFile(args_->dex_filename_, &unique_dex_files)) {
+ return false;
+ }
+ }
+ if (args_->dex_file_verifier_) {
+ // We're done here.
+ return true;
+ }
+
+ ScopedObjectAccess soa(Thread::Current());
+ std::vector<const DexFile*> dex_files;
+ jobject class_loader = Install(runtime, unique_dex_files, &dex_files);
+ CHECK(class_loader != nullptr);
+
+ StackHandleScope<2> scope(soa.Self());
+ Handle<mirror::ClassLoader> h_loader = scope.NewHandle(
+ soa.Decode<mirror::ClassLoader>(class_loader));
+ MutableHandle<mirror::Class> h_klass(scope.NewHandle<mirror::Class>(nullptr));
+
+ if (args_->method_verifier_verbose_) {
+ gLogVerbosity.verifier = true;
+ }
+ if (args_->method_verifier_verbose_debug_) {
+ gLogVerbosity.verifier_debug = true;
+ }
+
+ const size_t verifier_reps = std::max(static_cast<size_t>(1u), args_->repetitions_);
+
+ ClassLinker* class_linker = runtime->GetClassLinker();
+ for (size_t i = 0; i != verifier_reps; ++i) {
+ if (args_->repetitions_ != 0) {
+ LOG(INFO) << "Repetition " << (i + 1);
+ }
+ for (const DexFile* dex_file : dex_files) {
+ for (ClassAccessor accessor : dex_file->GetClasses()) {
+ const char* descriptor = accessor.GetDescriptor();
+ h_klass.Assign(class_linker->FindClass(soa.Self(), descriptor, h_loader));
+ if (h_klass == nullptr || h_klass->IsErroneous()) {
+ if (args_->repetitions_ == 0) {
+ LOG(ERROR) << "Warning: could not load " << descriptor;
+ }
+ soa.Self()->ClearException();
+ continue;
+ }
+ std::string error_msg;
+ verifier::FailureKind res =
+ verifier::MethodVerifier::VerifyClass(soa.Self(),
+ h_klass.Get(),
+ runtime->GetCompilerCallbacks(),
+ true,
+ verifier::HardFailLogMode::kLogWarning,
+ &error_msg);
+ if (args_->repetitions_ == 0) {
+ LOG(INFO) << descriptor << ": " << res << " " << error_msg;
+ }
+ }
+ }
+ }
+
+ return true;
+ }
+};
+
+} // namespace
+
+} // namespace art
+
+int main(int argc, char** argv) {
+ // Output all logging to stderr.
+ android::base::SetLogger(android::base::StderrLogger);
+
+ art::MethodVerifierMain main;
+ return main.Main(argc, argv);
+}
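Note on the "--repetitions=" handling in ParseCustom above: it calls strtoul but never inspects the returned end pointer, so a malformed value silently parses as 0. The following is only a hypothetical hardened variant, shown to illustrate strtoul's end-pointer contract; ParseRepetitions is not part of this change.

    #include <cstdlib>
    #include <string>

    // Returns true and stores the value only if `value` is a complete decimal number.
    static bool ParseRepetitions(const std::string& value, size_t* out) {
      char* end = nullptr;
      unsigned long parsed = strtoul(value.c_str(), &end, 10);
      // strtoul leaves `end` at the first unconsumed character, so an empty string or
      // trailing junk such as "12x" is detectable here (overflow via errno is ignored).
      if (end == value.c_str() || *end != '\0') {
        return false;
      }
      *out = static_cast<size_t>(parsed);
      return true;
    }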
diff --git a/tools/class2greylist/Android.bp b/tools/class2greylist/Android.bp
index 7b1233bb85..1e3cdff59c 100644
--- a/tools/class2greylist/Android.bp
+++ b/tools/class2greylist/Android.bp
@@ -20,6 +20,7 @@ java_library_host {
static_libs: [
"commons-cli-1.2",
"apache-bcel",
+ "guava",
],
}
diff --git a/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java b/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java
index 66857525aa..c5c8ef02d7 100644
--- a/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java
+++ b/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java
@@ -16,6 +16,8 @@
package com.android.class2greylist;
+import com.google.common.annotations.VisibleForTesting;
+
import org.apache.bcel.Const;
import org.apache.bcel.classfile.AnnotationEntry;
import org.apache.bcel.classfile.DescendingVisitor;
@@ -27,6 +29,8 @@ import org.apache.bcel.classfile.JavaClass;
import org.apache.bcel.classfile.Method;
import java.util.Locale;
+import java.util.Set;
+import java.util.function.Predicate;
/**
* Visits a JavaClass instance and pulls out all members annotated with a
@@ -44,13 +48,46 @@ public class AnnotationVisitor extends EmptyVisitor {
private final JavaClass mClass;
private final String mAnnotationType;
+ private final Predicate<Member> mMemberFilter;
private final Status mStatus;
private final DescendingVisitor mDescendingVisitor;
- public AnnotationVisitor(JavaClass clazz, String annotation, Status d) {
+ /**
+ * Represents a member of a class file (a field or method).
+ */
+ @VisibleForTesting
+ public static class Member {
+
+ /**
+ * Signature of this member.
+ */
+ public final String signature;
+ /**
+ * Indicates if this is a synthetic bridge method.
+ */
+ public final boolean bridge;
+
+ public Member(String signature, boolean bridge) {
+ this.signature = signature;
+ this.bridge = bridge;
+ }
+ }
+
+ public AnnotationVisitor(
+ JavaClass clazz, String annotation, Set<String> publicApis, Status status) {
+ this(clazz,
+ annotation,
+ member -> !(member.bridge && publicApis.contains(member.signature)),
+ status);
+ }
+
+ @VisibleForTesting
+ public AnnotationVisitor(
+ JavaClass clazz, String annotation, Predicate<Member> memberFilter, Status status) {
mClass = clazz;
mAnnotationType = annotation;
- mStatus = d;
+ mMemberFilter = memberFilter;
+ mStatus = status;
mDescendingVisitor = new DescendingVisitor(clazz, this);
}
@@ -81,14 +118,19 @@ public class AnnotationVisitor extends EmptyVisitor {
mStatus.debug("Visit member %s : %s", member.getName(), member.getSignature());
for (AnnotationEntry a : member.getAnnotationEntries()) {
if (mAnnotationType.equals(a.getAnnotationType())) {
- mStatus.debug("Method has annotation %s", mAnnotationType);
+ mStatus.debug("Member has annotation %s", mAnnotationType);
+ boolean bridge = (member.getAccessFlags() & Const.ACC_BRIDGE) != 0;
+ if (bridge) {
+ mStatus.debug("Member is a bridge", mAnnotationType);
+ }
String signature = String.format(Locale.US, signatureFormatString,
getClassDescriptor(definingClass), member.getName(), member.getSignature());
for (ElementValuePair property : a.getElementValuePairs()) {
switch (property.getNameString()) {
case EXPECTED_SIGNATURE:
String expected = property.getValue().stringifyValue();
- if (!signature.equals(expected)) {
+ // Don't enforce for bridge methods; they're generated so won't match.
+ if (!bridge && !signature.equals(expected)) {
error(definingClass, member,
"Expected signature does not match generated:\n"
+ "Expected: %s\n"
@@ -97,7 +139,9 @@ public class AnnotationVisitor extends EmptyVisitor {
break;
}
}
- mStatus.greylistEntry(signature);
+ if (mMemberFilter.test(new Member(signature, bridge))) {
+ mStatus.greylistEntry(signature);
+ }
}
}
}
diff --git a/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java b/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java
index 3e9e320b5b..abc9421e65 100644
--- a/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java
+++ b/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java
@@ -16,6 +16,9 @@
package com.android.class2greylist;
+import com.google.common.collect.Sets;
+import com.google.common.io.Files;
+
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
@@ -23,9 +26,12 @@ import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
-import org.apache.commons.cli.PatternOptionBuilder;
+import java.io.File;
import java.io.IOException;
+import java.nio.charset.Charset;
+import java.util.Collections;
+import java.util.Set;
/**
* Build time tool for extracting a list of members from jar files that have the @UsedByApps
@@ -38,6 +44,11 @@ public class Class2Greylist {
public static void main(String[] args) {
Options options = new Options();
options.addOption(OptionBuilder
+ .withLongOpt("public-api-list")
+ .hasArgs(1)
+ .withDescription("Public API list file. Used to de-dupe bridge methods.")
+ .create("p"));
+ options.addOption(OptionBuilder
.withLongOpt("debug")
.hasArgs(0)
.withDescription("Enable debug")
@@ -61,6 +72,7 @@ public class Class2Greylist {
if (cmd.hasOption('h')) {
help(options);
}
+ String publicApiFilename = cmd.getOptionValue('p', null);
String[] jarFiles = cmd.getArgs();
if (jarFiles.length == 0) {
@@ -70,12 +82,26 @@ public class Class2Greylist {
Status status = new Status(cmd.hasOption('d'));
+ Set<String> publicApis;
+ if (publicApiFilename != null) {
+ try {
+ publicApis = Sets.newHashSet(
+ Files.readLines(new File(publicApiFilename), Charset.forName("UTF-8")));
+ } catch (IOException e) {
+ status.error(e);
+ System.exit(1);
+ return;
+ }
+ } else {
+ publicApis = Collections.emptySet();
+ }
+
for (String jarFile : jarFiles) {
status.debug("Processing jar file %s", jarFile);
try {
JarReader reader = new JarReader(status, jarFile);
- reader.stream().forEach(clazz -> new AnnotationVisitor(
- clazz, ANNOTATION_TYPE, status).visit());
+ reader.stream().forEach(clazz -> new AnnotationVisitor(clazz, ANNOTATION_TYPE,
+ publicApis, status).visit());
reader.close();
} catch (IOException e) {
status.error(e);
diff --git a/tools/class2greylist/test/src/com/android/javac/AnnotationVisitorTest.java b/tools/class2greylist/test/src/com/android/javac/AnnotationVisitorTest.java
index 2d9721803c..ff9c265a25 100644
--- a/tools/class2greylist/test/src/com/android/javac/AnnotationVisitorTest.java
+++ b/tools/class2greylist/test/src/com/android/javac/AnnotationVisitorTest.java
@@ -19,34 +19,42 @@ package com.android.javac;
import static com.google.common.truth.Truth.assertThat;
import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
-import static org.mockito.MockitoAnnotations.initMocks;
+import static org.mockito.Mockito.withSettings;
-import com.android.class2greylist.Status;
import com.android.class2greylist.AnnotationVisitor;
+import com.android.class2greylist.Status;
import com.google.common.base.Joiner;
+import com.google.common.collect.Sets;
import org.junit.Before;
+import org.junit.Rule;
import org.junit.Test;
+import org.junit.rules.TestName;
import org.mockito.ArgumentCaptor;
-import org.mockito.Mock;
import java.io.IOException;
+import java.util.Set;
public class AnnotationVisitorTest {
private static final String ANNOTATION = "Lannotation/Anno;";
+ @Rule
+ public TestName mTestName = new TestName();
+
private Javac mJavac;
- @Mock
private Status mStatus;
@Before
public void setup() throws IOException {
- initMocks(this);
+ System.out.println(String.format("\n============== STARTING TEST: %s ==============\n",
+ mTestName.getMethodName()));
+ mStatus = mock(Status.class, withSettings().verboseLogging());
mJavac = new Javac();
mJavac.addSource("annotation.Anno", Joiner.on('\n').join(
"package annotation;",
@@ -75,7 +83,7 @@ public class AnnotationVisitorTest {
"}"));
assertThat(mJavac.compile()).isTrue();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, mStatus)
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true, mStatus)
.visit();
assertNoErrors();
@@ -95,7 +103,7 @@ public class AnnotationVisitorTest {
"}"));
assertThat(mJavac.compile()).isTrue();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, mStatus)
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true, mStatus)
.visit();
assertNoErrors();
@@ -115,7 +123,7 @@ public class AnnotationVisitorTest {
"}"));
assertThat(mJavac.compile()).isTrue();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, mStatus)
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true, mStatus)
.visit();
assertNoErrors();
@@ -135,7 +143,7 @@ public class AnnotationVisitorTest {
"}"));
assertThat(mJavac.compile()).isTrue();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, mStatus)
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true, mStatus)
.visit();
assertNoErrors();
@@ -155,7 +163,7 @@ public class AnnotationVisitorTest {
"}"));
assertThat(mJavac.compile()).isTrue();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, mStatus)
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true, mStatus)
.visit();
verify(mStatus, times(1)).error(any(String.class));
@@ -174,7 +182,7 @@ public class AnnotationVisitorTest {
"}"));
assertThat(mJavac.compile()).isTrue();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class$Inner"), ANNOTATION,
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class$Inner"), ANNOTATION, x -> true,
mStatus).visit();
assertNoErrors();
@@ -192,11 +200,165 @@ public class AnnotationVisitorTest {
"}"));
assertThat(mJavac.compile()).isTrue();
- new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, mStatus)
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true, mStatus)
.visit();
assertNoErrors();
verify(mStatus, never()).greylistEntry(any(String.class));
}
+ @Test
+ public void testMethodArgGenerics() throws IOException {
+ mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+ "package a.b;",
+ "import annotation.Anno;",
+ "public class Class<T extends String> {",
+ " @Anno(expectedSignature=\"La/b/Class;->method(Ljava/lang/String;)V\")",
+ " public void method(T arg) {}",
+ "}"));
+ assertThat(mJavac.compile()).isTrue();
+
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true, mStatus)
+ .visit();
+
+ assertNoErrors();
+ ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
+ verify(mStatus, times(1)).greylistEntry(greylist.capture());
+ assertThat(greylist.getValue()).isEqualTo("La/b/Class;->method(Ljava/lang/String;)V");
+ }
+
+ @Test
+ public void testOverrideMethodWithBridge() throws IOException {
+ mJavac.addSource("a.b.Base", Joiner.on('\n').join(
+ "package a.b;",
+ "abstract class Base<T> {",
+ " protected abstract void method(T arg);",
+ "}"));
+
+ mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+ "package a.b;",
+ "import annotation.Anno;",
+ "public class Class<T extends String> extends Base<T> {",
+ " @Override",
+ " @Anno(expectedSignature=\"La/b/Class;->method(Ljava/lang/String;)V\")",
+ " public void method(T arg) {}",
+ "}"));
+ assertThat(mJavac.compile()).isTrue();
+
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Base"), ANNOTATION, x -> true, mStatus)
+ .visit();
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true, mStatus)
+ .visit();
+
+ assertNoErrors();
+ ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
+ // A bridge method is generated for the above, so we expect 2 greylist entries.
+ verify(mStatus, times(2)).greylistEntry(greylist.capture());
+ assertThat(greylist.getAllValues()).containsExactly(
+ "La/b/Class;->method(Ljava/lang/Object;)V",
+ "La/b/Class;->method(Ljava/lang/String;)V");
+ }
+
+ @Test
+ public void testOverridePublicMethodWithBridge() throws IOException {
+ mJavac.addSource("a.b.Base", Joiner.on('\n').join(
+ "package a.b;",
+ "public abstract class Base<T> {",
+ " public void method(T arg) {}",
+ "}"));
+
+ mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+ "package a.b;",
+ "import annotation.Anno;",
+ "public class Class<T extends String> extends Base<T> {",
+ " @Override",
+ " @Anno(expectedSignature=\"La/b/Class;->method(Ljava/lang/String;)V\")",
+ " public void method(T arg) {}",
+ "}"));
+ assertThat(mJavac.compile()).isTrue();
+
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Base"), ANNOTATION, x -> true, mStatus)
+ .visit();
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true, mStatus)
+ .visit();
+
+ assertNoErrors();
+ ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
+ // A bridge method is generated for the above, so we expect 2 greylist entries.
+ verify(mStatus, times(2)).greylistEntry(greylist.capture());
+ assertThat(greylist.getAllValues()).containsExactly(
+ "La/b/Class;->method(Ljava/lang/Object;)V",
+ "La/b/Class;->method(Ljava/lang/String;)V");
+ }
+
+ @Test
+ public void testBridgeMethodsFromInterface() throws IOException {
+ mJavac.addSource("a.b.Interface", Joiner.on('\n').join(
+ "package a.b;",
+ "public interface Interface {",
+ " public void method(Object arg);",
+ "}"));
+
+ mJavac.addSource("a.b.Base", Joiner.on('\n').join(
+ "package a.b;",
+ "import annotation.Anno;",
+ "class Base {",
+ " @Anno(expectedSignature=\"La/b/Base;->method(Ljava/lang/Object;)V\")",
+ " public void method(Object arg) {}",
+ "}"));
+
+ mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+ "package a.b;",
+ "public class Class extends Base implements Interface {",
+ "}"));
+ assertThat(mJavac.compile()).isTrue();
+
+ new AnnotationVisitor(
+ mJavac.getCompiledClass("a.b.Interface"), ANNOTATION, x -> true, mStatus).visit();
+ new AnnotationVisitor(
+ mJavac.getCompiledClass("a.b.Base"), ANNOTATION, x -> true, mStatus).visit();
+ new AnnotationVisitor(
+ mJavac.getCompiledClass("a.b.Class"), ANNOTATION, x -> true, mStatus).visit();
+
+ assertNoErrors();
+ ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
+ // A bridge method is generated for the above, so we expect 2 greylist entries.
+ verify(mStatus, times(2)).greylistEntry(greylist.capture());
+ assertThat(greylist.getAllValues()).containsExactly(
+ "La/b/Class;->method(Ljava/lang/Object;)V",
+ "La/b/Base;->method(Ljava/lang/Object;)V");
+ }
+
+ @Test
+ public void testPublicBridgeExcluded() throws IOException {
+ mJavac.addSource("a.b.Base", Joiner.on('\n').join(
+ "package a.b;",
+ "public abstract class Base<T> {",
+ " public void method(T arg) {}",
+ "}"));
+
+ mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+ "package a.b;",
+ "import annotation.Anno;",
+ "public class Class<T extends String> extends Base<T> {",
+ " @Override",
+ " @Anno",
+ " public void method(T arg) {}",
+ "}"));
+ assertThat(mJavac.compile()).isTrue();
+
+ Set<String> publicApis = Sets.newHashSet(
+ "La/b/Base;->method(Ljava/lang/Object;)V",
+ "La/b/Class;->method(Ljava/lang/Object;)V");
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Base"), ANNOTATION, publicApis,
+ mStatus).visit();
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, publicApis,
+ mStatus).visit();
+
+ assertNoErrors();
+ ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
+ // The bridge method generated for the above is a public API, so it should be excluded.
+ verify(mStatus, times(1)).greylistEntry(greylist.capture());
+ assertThat(greylist.getValue()).isEqualTo("La/b/Class;->method(Ljava/lang/String;)V");
+ }
}
diff --git a/tools/hiddenapi/hiddenapi.cc b/tools/hiddenapi/hiddenapi.cc
index 0381381bbf..bf8a1b755e 100644
--- a/tools/hiddenapi/hiddenapi.cc
+++ b/tools/hiddenapi/hiddenapi.cc
@@ -26,6 +26,7 @@
#include "base/os.h"
#include "base/unix_file/fd_file.h"
#include "dex/art_dex_file_loader.h"
+#include "dex/class_accessor-inl.h"
#include "dex/dex_file-inl.h"
#include "dex/hidden_api_access_flags.h"
@@ -75,7 +76,9 @@ NO_RETURN static void Usage(const char* fmt, ...) {
UsageError("");
UsageError(" Command \"list\": dump lists of public and private API");
UsageError(" --boot-dex=<filename>: dex file which belongs to boot class path");
- UsageError(" --stub-dex=<filename>: dex/apk file which belongs to SDK API stubs");
+ UsageError(" --stub-classpath=<filenames>: colon-separated list of dex/apk files");
+ UsageError(" which form API stubs of boot class path. Multiple classpaths can");
+ UsageError(" be specified");
UsageError("");
UsageError(" --out-public=<filename>: output file for a list of all public APIs");
UsageError(" --out-private=<filename>: output file for a list of all private APIs");
@@ -89,42 +92,33 @@ static bool Contains(const std::vector<E>& vec, const E& elem) {
return std::find(vec.begin(), vec.end(), elem) != vec.end();
}
-class DexClass {
+class DexClass : public ClassAccessor {
public:
- DexClass(const DexFile& dex_file, uint32_t idx)
- : dex_file_(dex_file), class_def_(dex_file.GetClassDef(idx)) {}
+ explicit DexClass(const ClassAccessor& accessor) : ClassAccessor(accessor) {}
- const DexFile& GetDexFile() const { return dex_file_; }
- const uint8_t* GetData() const { return dex_file_.GetClassData(class_def_); }
+ const uint8_t* GetData() const { return dex_file_.GetClassData(GetClassDef()); }
- const dex::TypeIndex GetClassIndex() const { return class_def_.class_idx_; }
- const dex::TypeIndex GetSuperclassIndex() const { return class_def_.superclass_idx_; }
+ const dex::TypeIndex GetSuperclassIndex() const { return GetClassDef().superclass_idx_; }
bool HasSuperclass() const { return dex_file_.IsTypeIndexValid(GetSuperclassIndex()); }
- std::string GetDescriptor() const { return dex_file_.GetClassDescriptor(class_def_); }
-
std::string GetSuperclassDescriptor() const {
- if (HasSuperclass()) {
- return dex_file_.StringByTypeIdx(GetSuperclassIndex());
- } else {
- return "";
- }
+ return HasSuperclass() ? dex_file_.StringByTypeIdx(GetSuperclassIndex()) : "";
}
std::set<std::string> GetInterfaceDescriptors() const {
std::set<std::string> list;
- const DexFile::TypeList* ifaces = dex_file_.GetInterfacesList(class_def_);
+ const DexFile::TypeList* ifaces = dex_file_.GetInterfacesList(GetClassDef());
for (uint32_t i = 0; ifaces != nullptr && i < ifaces->Size(); ++i) {
list.insert(dex_file_.StringByTypeIdx(ifaces->GetTypeItem(i).type_idx_));
}
return list;
}
- inline bool IsVisible() const { return HasAccessFlags(kAccPublic); }
+ inline bool IsPublic() const { return HasAccessFlags(kAccPublic); }
inline bool Equals(const DexClass& other) const {
- bool equals = GetDescriptor() == other.GetDescriptor();
+ bool equals = strcmp(GetDescriptor(), other.GetDescriptor()) == 0;
if (equals) {
// TODO(dbrazdil): Check that methods/fields match as well once b/111116543 is fixed.
CHECK_EQ(GetAccessFlags(), other.GetAccessFlags());
@@ -135,39 +129,40 @@ class DexClass {
}
private:
- uint32_t GetAccessFlags() const { return class_def_.access_flags_; }
+ uint32_t GetAccessFlags() const { return GetClassDef().access_flags_; }
bool HasAccessFlags(uint32_t mask) const { return (GetAccessFlags() & mask) == mask; }
-
- const DexFile& dex_file_;
- const DexFile::ClassDef& class_def_;
};
class DexMember {
public:
- DexMember(const DexClass& klass, const ClassDataItemIterator& it)
- : klass_(klass), it_(it) {
- DCHECK_EQ(IsMethod() ? GetMethodId().class_idx_ : GetFieldId().class_idx_,
- klass_.GetClassIndex());
+ DexMember(const DexClass& klass, const ClassAccessor::Field& item)
+ : klass_(klass), item_(item), is_method_(false) {
+ DCHECK_EQ(GetFieldId().class_idx_, klass.GetClassIdx());
+ }
+
+ DexMember(const DexClass& klass, const ClassAccessor::Method& item)
+ : klass_(klass), item_(item), is_method_(true) {
+ DCHECK_EQ(GetMethodId().class_idx_, klass.GetClassIdx());
}
inline const DexClass& GetDeclaringClass() const { return klass_; }
// Sets hidden bits in access flags and writes them back into the DEX in memory.
- // Note that this will not update the cached data of ClassDataItemIterator
+ // Note that this will not update the cached data of the class accessor
// until it iterates over this item again and therefore will fail a CHECK if
// it is called multiple times on the same DexMember.
- void SetHidden(HiddenApiAccessFlags::ApiList value) {
- const uint32_t old_flags = it_.GetRawMemberAccessFlags();
+ void SetHidden(HiddenApiAccessFlags::ApiList value) const {
+ const uint32_t old_flags = item_.GetRawAccessFlags();
const uint32_t new_flags = HiddenApiAccessFlags::EncodeForDex(old_flags, value);
CHECK_EQ(UnsignedLeb128Size(new_flags), UnsignedLeb128Size(old_flags));
// Locate the LEB128-encoded access flags in class data.
// `ptr` initially points to the next ClassData item. We iterate backwards
// until we hit the terminating byte of the previous Leb128 value.
- const uint8_t* ptr = it_.DataPointer();
+ const uint8_t* ptr = item_.GetDataPointer();
if (IsMethod()) {
ptr = ReverseSearchUnsignedLeb128(ptr);
- DCHECK_EQ(DecodeUnsignedLeb128WithoutMovingCursor(ptr), it_.GetMethodCodeItemOffset());
+ DCHECK_EQ(DecodeUnsignedLeb128WithoutMovingCursor(ptr), GetMethod().GetCodeItemOffset());
}
ptr = ReverseSearchUnsignedLeb128(ptr);
DCHECK_EQ(DecodeUnsignedLeb128WithoutMovingCursor(ptr), old_flags);
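The backward search above relies on a property of unsigned LEB128: every byte of a value except the last has its top bit (0x80) set, so the first byte seen when walking backwards with the top bit clear is the terminating byte of the preceding value. A minimal sketch of the idea follows; the helper name is made up and this is not ART's base/leb128.h implementation (it assumes a complete value precedes `end` and does no bounds checking).

    #include <cstdint>

    // Returns a pointer to the first byte of the unsigned LEB128 value that ends
    // immediately before `end`.
    static const uint8_t* ReverseSearchUleb128Sketch(const uint8_t* end) {
      const uint8_t* ptr = end - 1;          // Terminating byte of the value (0x80 clear).
      while ((*(ptr - 1) & 0x80) != 0) {     // Step back over continuation bytes.
        --ptr;
      }
      return ptr;                            // First byte of the value.
    }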
@@ -176,23 +171,23 @@ class DexMember {
UpdateUnsignedLeb128(const_cast<uint8_t*>(ptr), new_flags);
}
- inline bool IsMethod() const { return it_.IsAtMethod(); }
- inline bool IsVirtualMethod() const { return it_.IsAtVirtualMethod(); }
+ inline bool IsMethod() const { return is_method_; }
+ inline bool IsVirtualMethod() const { return IsMethod() && !GetMethod().IsStaticOrDirect(); }
+ inline bool IsConstructor() const { return IsMethod() && HasAccessFlags(kAccConstructor); }
- // Returns true if the member is public/protected and is in a public class.
- inline bool IsVisible() const {
- return GetDeclaringClass().IsVisible() &&
- (HasAccessFlags(kAccPublic) || HasAccessFlags(kAccProtected));
+ inline bool IsPublicOrProtected() const {
+ return HasAccessFlags(kAccPublic) || HasAccessFlags(kAccProtected);
}
// Constructs a string with a unique signature of this class member.
std::string GetApiEntry() const {
std::stringstream ss;
- ss << klass_.GetDescriptor() << "->" << GetName() << (IsMethod() ? "" : ":") << GetSignature();
+ ss << klass_.GetDescriptor() << "->" << GetName() << (IsMethod() ? "" : ":")
+ << GetSignature();
return ss.str();
}
- inline bool operator==(const DexMember& other) {
+ inline bool operator==(const DexMember& other) const {
// These need to match if they should resolve to one another.
bool equals = IsMethod() == other.IsMethod() &&
GetName() == other.GetName() &&
@@ -207,31 +202,37 @@ class DexMember {
}
private:
- inline uint32_t GetAccessFlags() const { return it_.GetMemberAccessFlags(); }
+ inline uint32_t GetAccessFlags() const { return item_.GetAccessFlags(); }
inline uint32_t HasAccessFlags(uint32_t mask) const { return (GetAccessFlags() & mask) == mask; }
inline std::string GetName() const {
- return IsMethod() ? klass_.GetDexFile().GetMethodName(GetMethodId())
- : klass_.GetDexFile().GetFieldName(GetFieldId());
+ return IsMethod() ? item_.GetDexFile().GetMethodName(GetMethodId())
+ : item_.GetDexFile().GetFieldName(GetFieldId());
}
inline std::string GetSignature() const {
- return IsMethod() ? klass_.GetDexFile().GetMethodSignature(GetMethodId()).ToString()
- : klass_.GetDexFile().GetFieldTypeDescriptor(GetFieldId());
+ return IsMethod() ? item_.GetDexFile().GetMethodSignature(GetMethodId()).ToString()
+ : item_.GetDexFile().GetFieldTypeDescriptor(GetFieldId());
+ }
+
+ inline const ClassAccessor::Method& GetMethod() const {
+ DCHECK(IsMethod());
+ return down_cast<const ClassAccessor::Method&>(item_);
}
inline const DexFile::MethodId& GetMethodId() const {
DCHECK(IsMethod());
- return klass_.GetDexFile().GetMethodId(it_.GetMemberIndex());
+ return item_.GetDexFile().GetMethodId(item_.GetIndex());
}
inline const DexFile::FieldId& GetFieldId() const {
DCHECK(!IsMethod());
- return klass_.GetDexFile().GetFieldId(it_.GetMemberIndex());
+ return item_.GetDexFile().GetFieldId(item_.GetIndex());
}
const DexClass& klass_;
- const ClassDataItemIterator& it_;
+ const ClassAccessor::BaseItem& item_;
+ const bool is_method_;
};
class ClassPath FINAL {
@@ -243,22 +244,20 @@ class ClassPath FINAL {
template<typename Fn>
void ForEachDexClass(Fn fn) {
for (auto& dex_file : dex_files_) {
- for (uint32_t class_idx = 0; class_idx < dex_file->NumClassDefs(); ++class_idx) {
- DexClass klass(*dex_file, class_idx);
- fn(klass);
+ for (ClassAccessor accessor : dex_file->GetClasses()) {
+ fn(DexClass(accessor));
}
}
}
template<typename Fn>
void ForEachDexMember(Fn fn) {
- ForEachDexClass([&fn](DexClass& klass) {
- const uint8_t* klass_data = klass.GetData();
- if (klass_data != nullptr) {
- for (ClassDataItemIterator it(klass.GetDexFile(), klass_data); it.HasNext(); it.Next()) {
- DexMember member(klass, it);
- fn(member);
- }
+ ForEachDexClass([&fn](const DexClass& klass) {
+ for (const ClassAccessor::Field& field : klass.GetFields()) {
+ fn(DexMember(klass, field));
+ }
+ for (const ClassAccessor::Method& method : klass.GetMethods()) {
+ fn(DexMember(klass, method));
}
});
}
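For reference, this is the iteration idiom the patch migrates to throughout (hiddenapi, veridex, and the tests): ClassAccessor exposes fields and methods as separate ranges instead of the single mixed stream of ClassDataItemIterator. A small sketch using only calls that already appear in this change; the function itself is illustrative only.

    #include "dex/class_accessor-inl.h"
    #include "dex/dex_file.h"

    // Counts the methods declared in a dex file using the accessor API.
    static size_t CountMethods(const art::DexFile& dex_file) {
      size_t count = 0;
      for (art::ClassAccessor accessor : dex_file.GetClasses()) {
        for (const art::ClassAccessor::Method& method : accessor.GetMethods()) {
          static_cast<void>(method.GetIndex());  // Index into the dex file's method ids.
          ++count;
        }
      }
      return count;
    }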
@@ -344,6 +343,24 @@ class HierarchyClass FINAL {
return ForEachResolvableMember_Impl(other, fn) != ResolutionResult::kNotFound;
}
+ // Returns true if this class contains at least one member matching `other`.
+ bool HasMatchingMember(const DexMember& other) {
+ return ForEachMatchingMember(
+ other, [](const DexMember&) { return true; }) != ResolutionResult::kNotFound;
+ }
+
+ // Recursively iterates over all subclasses of this class and invokes `fn`
+ // on each one. If `fn` returns false for a particular subclass, exploring its
+ // subclasses is skipped.
+ template<typename Fn>
+ void ForEachSubClass(Fn fn) {
+ for (HierarchyClass* subclass : extended_by_) {
+ if (fn(subclass)) {
+ subclass->ForEachSubClass(fn);
+ }
+ }
+ }
+
private:
// Result of resolution which takes into account whether the member was found
// for the first time or not. This is just a performance optimization to prevent
@@ -397,16 +414,18 @@ class HierarchyClass FINAL {
template<typename Fn>
ResolutionResult ForEachMatchingMember(const DexMember& other, Fn fn) {
ResolutionResult found = ResolutionResult::kNotFound;
+ auto compare_member = [&](const DexMember& member) {
+ if (member == other) {
+ found = Accumulate(found, fn(member) ? ResolutionResult::kFoundNew
+ : ResolutionResult::kFoundOld);
+ }
+ };
for (const DexClass& dex_class : dex_classes_) {
- const uint8_t* data = dex_class.GetData();
- if (data != nullptr) {
- for (ClassDataItemIterator it(dex_class.GetDexFile(), data); it.HasNext(); it.Next()) {
- DexMember member(dex_class, it);
- if (member == other) {
- found = Accumulate(found, fn(member) ? ResolutionResult::kFoundNew
- : ResolutionResult::kFoundOld);
- }
- }
+ for (const ClassAccessor::Field& field : dex_class.GetFields()) {
+ compare_member(DexMember(dex_class, field));
+ }
+ for (const ClassAccessor::Method& method : dex_class.GetMethods()) {
+ compare_member(DexMember(dex_class, method));
}
}
return found;
@@ -438,7 +457,7 @@ class HierarchyClass FINAL {
class Hierarchy FINAL {
public:
- explicit Hierarchy(ClassPath& class_path) : class_path_(class_path) {
+ explicit Hierarchy(ClassPath& classpath) : classpath_(classpath) {
BuildClassHierarchy();
}
@@ -454,6 +473,48 @@ class Hierarchy FINAL {
return (klass != nullptr) && klass->ForEachResolvableMember(other, fn);
}
+ // Returns true if `member`, which belongs to this classpath, is visible to
+ // code in child class loaders.
+ bool IsMemberVisible(const DexMember& member) {
+ if (!member.IsPublicOrProtected()) {
+ // Member is private or package-private. Cannot be visible.
+ return false;
+ } else if (member.GetDeclaringClass().IsPublic()) {
+ // Member is public or protected, and class is public. It must be visible.
+ return true;
+ } else if (member.IsConstructor()) {
+ // Member is public or protected constructor and class is not public.
+ // Must be hidden because it cannot be implicitly exposed by a subclass.
+ return false;
+ } else {
+ // Member is public or protected method, but class is not public. Check if
+ // it is exposed through a public subclass.
+ // Example code (`foo` exposed by ClassB):
+ // class ClassA { public void foo() { ... } }
+ // public class ClassB extends ClassA {}
+ HierarchyClass* klass = FindClass(member.GetDeclaringClass().GetDescriptor());
+ CHECK(klass != nullptr);
+ bool visible = false;
+ klass->ForEachSubClass([&visible, &member](HierarchyClass* subclass) {
+ if (subclass->HasMatchingMember(member)) {
+ // There is a member which matches `member` in `subclass`, either
+ // a virtual method overriding `member` or a field overshadowing
+ // `member`. In either case, `member` remains hidden.
+ CHECK(member.IsVirtualMethod() || !member.IsMethod());
+ return false; // do not explore deeper
+ } else if (subclass->GetOneDexClass().IsPublic()) {
+ // `subclass` inherits and exposes `member`.
+ visible = true;
+ return false; // do not explore deeper
+ } else {
+ // `subclass` inherits `member` but does not expose it.
+ return true; // explore deeper
+ }
+ });
+ return visible;
+ }
+ }
+
private:
HierarchyClass* FindClass(const std::string& descriptor) {
auto it = classes_.find(descriptor);
@@ -467,7 +528,7 @@ class Hierarchy FINAL {
void BuildClassHierarchy() {
// Create one HierarchyClass entry in `classes_` per class descriptor
// and add all DexClass objects with the same descriptor to that entry.
- class_path_.ForEachDexClass([this](DexClass& klass) {
+ classpath_.ForEachDexClass([this](const DexClass& klass) {
classes_[klass.GetDescriptor()].AddDexClass(klass);
});
@@ -494,7 +555,7 @@ class Hierarchy FINAL {
}
}
- ClassPath& class_path_;
+ ClassPath& classpath_;
std::map<std::string, HierarchyClass> classes_;
};
@@ -547,8 +608,9 @@ class HiddenApi FINAL {
const StringPiece option(argv[i]);
if (option.starts_with("--boot-dex=")) {
boot_dex_paths_.push_back(option.substr(strlen("--boot-dex=")).ToString());
- } else if (option.starts_with("--stub-dex=")) {
- stub_dex_paths_.push_back(option.substr(strlen("--stub-dex=")).ToString());
+ } else if (option.starts_with("--stub-classpath=")) {
+ stub_classpaths_.push_back(android::base::Split(
+ option.substr(strlen("--stub-classpath=")).ToString(), ":"));
} else if (option.starts_with("--out-public=")) {
out_public_path_ = option.substr(strlen("--out-public=")).ToString();
} else if (option.starts_with("--out-private=")) {
@@ -578,19 +640,15 @@ class HiddenApi FINAL {
OpenApiFile(blacklist_path_, api_list, HiddenApiAccessFlags::kBlacklist);
// Open all dex files.
- ClassPath boot_class_path(boot_dex_paths_, /* open_writable */ true);
+ ClassPath boot_classpath(boot_dex_paths_, /* open_writable */ true);
// Set access flags of all members.
- boot_class_path.ForEachDexMember([&api_list](DexMember& boot_member) {
+ boot_classpath.ForEachDexMember([&api_list](const DexMember& boot_member) {
auto it = api_list.find(boot_member.GetApiEntry());
- if (it == api_list.end()) {
- boot_member.SetHidden(HiddenApiAccessFlags::kWhitelist);
- } else {
- boot_member.SetHidden(it->second);
- }
+ boot_member.SetHidden(it == api_list.end() ? HiddenApiAccessFlags::kWhitelist : it->second);
});
- boot_class_path.UpdateDexChecksums();
+ boot_classpath.UpdateDexChecksums();
}
void OpenApiFile(const std::string& path,
@@ -614,7 +672,7 @@ class HiddenApi FINAL {
void ListApi() {
if (boot_dex_paths_.empty()) {
Usage("No boot DEX files specified");
- } else if (stub_dex_paths_.empty()) {
+ } else if (stub_classpaths_.empty()) {
Usage("No stub DEX files specified");
} else if (out_public_path_.empty()) {
Usage("No public API output path specified");
@@ -630,39 +688,43 @@ class HiddenApi FINAL {
std::set<std::string> unresolved;
// Open all dex files.
- ClassPath stub_class_path(stub_dex_paths_, /* open_writable */ false);
- ClassPath boot_class_path(boot_dex_paths_, /* open_writable */ false);
- Hierarchy boot_hierarchy(boot_class_path);
+ ClassPath boot_classpath(boot_dex_paths_, /* open_writable */ false);
+ Hierarchy boot_hierarchy(boot_classpath);
// Mark all boot dex members private.
- boot_class_path.ForEachDexMember([&boot_members](DexMember& boot_member) {
+ boot_classpath.ForEachDexMember([&boot_members](const DexMember& boot_member) {
boot_members[boot_member.GetApiEntry()] = false;
});
// Resolve each SDK dex member against the framework and mark it white.
- stub_class_path.ForEachDexMember(
- [&boot_hierarchy, &boot_members, &unresolved](DexMember& stub_member) {
- if (!stub_member.IsVisible()) {
- // Typically fake constructors and inner-class `this` fields.
- return;
- }
- bool resolved = boot_hierarchy.ForEachResolvableMember(
- stub_member,
- [&boot_members](DexMember& boot_member) {
- std::string entry = boot_member.GetApiEntry();
- auto it = boot_members.find(entry);
- CHECK(it != boot_members.end());
- if (it->second) {
- return false; // has been marked before
- } else {
- it->second = true;
- return true; // marked for the first time
- }
- });
- if (!resolved) {
- unresolved.insert(stub_member.GetApiEntry());
- }
- });
+ for (const std::vector<std::string>& stub_classpath_dex : stub_classpaths_) {
+ ClassPath stub_classpath(stub_classpath_dex, /* open_writable */ false);
+ Hierarchy stub_hierarchy(stub_classpath);
+ stub_classpath.ForEachDexMember(
+ [&stub_hierarchy, &boot_hierarchy, &boot_members, &unresolved](
+ const DexMember& stub_member) {
+ if (!stub_hierarchy.IsMemberVisible(stub_member)) {
+ // Typically fake constructors and inner-class `this` fields.
+ return;
+ }
+ bool resolved = boot_hierarchy.ForEachResolvableMember(
+ stub_member,
+ [&boot_members](const DexMember& boot_member) {
+ std::string entry = boot_member.GetApiEntry();
+ auto it = boot_members.find(entry);
+ CHECK(it != boot_members.end());
+ if (it->second) {
+ return false; // has been marked before
+ } else {
+ it->second = true;
+ return true; // marked for the first time
+ }
+ });
+ if (!resolved) {
+ unresolved.insert(stub_member.GetApiEntry());
+ }
+ });
+ }
// Print errors.
for (const std::string& str : unresolved) {
@@ -685,7 +747,10 @@ class HiddenApi FINAL {
// Paths to DEX files which should be processed.
std::vector<std::string> boot_dex_paths_;
- std::vector<std::string> stub_dex_paths_;
+
+ // Set of public API stub classpaths. Each classpath is formed by a list
+ // of DEX/APK files in the order they appear on the classpath.
+ std::vector<std::vector<std::string>> stub_classpaths_;
// Paths to text files which contain the lists of API members.
std::string light_greylist_path_;
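The new --stub-classpath option described in the usage text takes a colon-separated list, which ParseArgs turns into one entry of stub_classpaths_ via android::base::Split. A small illustration of that split; the file names here are made up.

    #include <android-base/strings.h>
    #include <string>
    #include <vector>

    // "core-stubs.dex:framework-stubs.apk" -> {"core-stubs.dex", "framework-stubs.apk"}
    static std::vector<std::string> SplitClasspath(const std::string& value) {
      return android::base::Split(value, ":");
    }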
diff --git a/tools/hiddenapi/hiddenapi_test.cc b/tools/hiddenapi/hiddenapi_test.cc
index aa87f21e7f..b50f684f09 100644
--- a/tools/hiddenapi/hiddenapi_test.cc
+++ b/tools/hiddenapi/hiddenapi_test.cc
@@ -20,6 +20,7 @@
#include "base/zip_archive.h"
#include "common_runtime_test.h"
#include "dex/art_dex_file_loader.h"
+#include "dex/class_accessor-inl.h"
#include "dex/dex_file-inl.h"
#include "exec_utils.h"
@@ -114,40 +115,27 @@ class HiddenApiTest : public CommonRuntimeTest {
}
const DexFile::ClassDef& FindClass(const char* desc, const DexFile& dex_file) {
- for (uint32_t i = 0; i < dex_file.NumClassDefs(); ++i) {
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(i);
- if (strcmp(desc, dex_file.GetClassDescriptor(class_def)) == 0) {
- return class_def;
- }
- }
- LOG(FATAL) << "Could not find class " << desc;
- UNREACHABLE();
+ const DexFile::TypeId* type_id = dex_file.FindTypeId(desc);
+ CHECK(type_id != nullptr) << "Could not find class " << desc;
+ const DexFile::ClassDef* found = dex_file.FindClassDef(dex_file.GetIndexForTypeId(*type_id));
+ CHECK(found != nullptr) << "Could not find class " << desc;
+ return *found;
}
HiddenApiAccessFlags::ApiList GetFieldHiddenFlags(const char* name,
uint32_t expected_visibility,
const DexFile::ClassDef& class_def,
const DexFile& dex_file) {
- const uint8_t* class_data = dex_file.GetClassData(class_def);
- if (class_data == nullptr) {
- LOG(FATAL) << "Class " << dex_file.GetClassDescriptor(class_def) << " has no data";
- UNREACHABLE();
- }
+ ClassAccessor accessor(dex_file, class_def);
+ CHECK(accessor.HasClassData()) << "Class " << accessor.GetDescriptor() << " has no data";
- for (ClassDataItemIterator it(dex_file, class_data); it.HasNext(); it.Next()) {
- if (it.IsAtMethod()) {
- break;
- }
- const DexFile::FieldId& fid = dex_file.GetFieldId(it.GetMemberIndex());
+ for (const ClassAccessor::Field& field : accessor.GetFields()) {
+ const DexFile::FieldId& fid = dex_file.GetFieldId(field.GetIndex());
if (strcmp(name, dex_file.GetFieldName(fid)) == 0) {
- uint32_t actual_visibility = it.GetFieldAccessFlags() & kAccVisibilityFlags;
- if (actual_visibility != expected_visibility) {
- LOG(FATAL) << "Field " << name << " in class " << dex_file.GetClassDescriptor(class_def)
- << " does not have the expected visibility flags (" << expected_visibility
- << " != " << actual_visibility << ")";
- UNREACHABLE();
- }
- return it.DecodeHiddenAccessFlags();
+ const uint32_t actual_visibility = field.GetAccessFlags() & kAccVisibilityFlags;
+ CHECK_EQ(actual_visibility, expected_visibility)
+ << "Field " << name << " in class " << accessor.GetDescriptor();
+ return field.DecodeHiddenAccessFlags();
}
}
@@ -161,31 +149,18 @@ class HiddenApiTest : public CommonRuntimeTest {
bool expected_native,
const DexFile::ClassDef& class_def,
const DexFile& dex_file) {
- const uint8_t* class_data = dex_file.GetClassData(class_def);
- if (class_data == nullptr) {
- LOG(FATAL) << "Class " << dex_file.GetClassDescriptor(class_def) << " has no data";
- UNREACHABLE();
- }
+ ClassAccessor accessor(dex_file, class_def);
+ CHECK(accessor.HasClassData()) << "Class " << accessor.GetDescriptor() << " has no data";
- for (ClassDataItemIterator it(dex_file, class_data); it.HasNext(); it.Next()) {
- if (!it.IsAtMethod()) {
- continue;
- }
- const DexFile::MethodId& mid = dex_file.GetMethodId(it.GetMemberIndex());
+ for (const ClassAccessor::Method& method : accessor.GetMethods()) {
+ const DexFile::MethodId& mid = dex_file.GetMethodId(method.GetIndex());
if (strcmp(name, dex_file.GetMethodName(mid)) == 0) {
- if (expected_native != it.MemberIsNative()) {
- LOG(FATAL) << "Expected native=" << expected_native << " for method " << name
- << " in class " << dex_file.GetClassDescriptor(class_def);
- UNREACHABLE();
- }
- uint32_t actual_visibility = it.GetMethodAccessFlags() & kAccVisibilityFlags;
- if (actual_visibility != expected_visibility) {
- LOG(FATAL) << "Method " << name << " in class " << dex_file.GetClassDescriptor(class_def)
- << " does not have the expected visibility flags (" << expected_visibility
- << " != " << actual_visibility << ")";
- UNREACHABLE();
- }
- return it.DecodeHiddenAccessFlags();
+ CHECK_EQ(expected_native, method.MemberIsNative())
+ << "Method " << name << " in class " << accessor.GetDescriptor();
+ const uint32_t actual_visibility = method.GetAccessFlags() & kAccVisibilityFlags;
+ CHECK_EQ(actual_visibility, expected_visibility)
+ << "Method " << name << " in class " << accessor.GetDescriptor();
+ return method.DecodeHiddenAccessFlags();
}
}
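The rewritten FindClass helper above replaces the linear scan over class defs with the dex file's type-id lookup. The same chain can be wrapped as a reusable sketch (names here are illustrative; the calls are the ones used in the hunk above).

    #include "dex/dex_file.h"

    // Returns the class_def for `descriptor`, or nullptr if the dex file does not define it.
    static const art::DexFile::ClassDef* FindClassDefByDescriptor(const art::DexFile& dex_file,
                                                                  const char* descriptor) {
      const art::DexFile::TypeId* type_id = dex_file.FindTypeId(descriptor);
      if (type_id == nullptr) {
        return nullptr;
      }
      return dex_file.FindClassDef(dex_file.GetIndexForTypeId(*type_id));
    }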
diff --git a/tools/jfuzz/jfuzz.cc b/tools/jfuzz/jfuzz.cc
index 60c62752ef..a97a99ce4b 100644
--- a/tools/jfuzz/jfuzz.cc
+++ b/tools/jfuzz/jfuzz.cc
@@ -1302,7 +1302,7 @@ class JFuzz {
int32_t main(int32_t argc, char** argv) {
// Time-based seed.
struct timeval tp;
- gettimeofday(&tp, NULL);
+ gettimeofday(&tp, nullptr);
// Defaults.
uint32_t seed = (tp.tv_sec * 1000000 + tp.tv_usec);
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 60aac3cdb5..264217ead6 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -214,5 +214,15 @@
description: "java.io.IOException: Error writing ASN.1 encoding",
result: EXEC_FAILED,
names: ["libcore.javax.crypto.spec.AlgorithmParametersTestGCM#testEncoding"]
+},
+{
+ description: "Tests fail because mockito can not read android.os.Build$VERSION",
+ result: EXEC_FAILED,
+ bug: 111704422,
+ names: ["libcore.java.lang.ThreadTest#testUncaughtExceptionPreHandler_calledBeforeDefaultHandler",
+ "libcore.java.lang.ThreadTest#testUncaughtExceptionPreHandler_noDefaultHandler",
+ "libcore.javax.crypto.CipherInputStreamTest#testCloseTwice",
+ "libcore.libcore.io.BlockGuardOsTest#test_android_getaddrinfo_networkPolicy",
+ "libcore.libcore.io.BlockGuardOsTest#test_checkNewMethodsInPosix"]
}
]
diff --git a/tools/libcore_gcstress_debug_failures.txt b/tools/libcore_gcstress_debug_failures.txt
index 06f6339d76..942a4e0fc6 100644
--- a/tools/libcore_gcstress_debug_failures.txt
+++ b/tools/libcore_gcstress_debug_failures.txt
@@ -9,13 +9,14 @@
result: EXEC_FAILED,
modes: [device],
names: ["jsr166.CompletableFutureTest#testCompleteOnTimeout_completed",
+ "jsr166.CompletableFutureTest#testDelayedExecutor",
"libcore.libcore.icu.TransliteratorTest#testAll",
"libcore.libcore.icu.RelativeDateTimeFormatterTest#test_bug25821045",
"libcore.libcore.icu.RelativeDateTimeFormatterTest#test_bug25883157",
"libcore.java.lang.ref.ReferenceQueueTest#testRemoveWithDelayedResultAndTimeout",
- "libcore.java.lang.ref.ReferenceQueueTest#testRemoveWithDelayedResultAndNoTimeout",
"libcore.java.util.TimeZoneTest#testSetDefaultDeadlock",
"libcore.javax.crypto.CipherBasicsTest#testBasicEncryption",
+ "org.apache.harmony.tests.java.text.MessageFormatTest#test_parseLjava_lang_String",
"org.apache.harmony.tests.java.util.TimerTest#testThrowingTaskKillsTimerThread"]
},
{
diff --git a/tools/libcore_gcstress_failures.txt b/tools/libcore_gcstress_failures.txt
index e48f3f7672..6840f9ebec 100644
--- a/tools/libcore_gcstress_failures.txt
+++ b/tools/libcore_gcstress_failures.txt
@@ -30,7 +30,9 @@
names: ["libcore.java.lang.StringTest#testFastPathString_wellFormedUtf8Sequence",
"org.apache.harmony.tests.java.lang.ref.ReferenceQueueTest#test_remove",
"org.apache.harmony.tests.java.util.TimerTest#testOverdueTaskExecutesImmediately",
- "org.apache.harmony.tests.java.util.WeakHashMapTest#test_keySet_hasNext"]
+ "org.apache.harmony.tests.java.util.WeakHashMapTest#test_keySet_hasNext",
+ "libcore.java.text.DecimalFormatTest#testCurrencySymbolSpacing",
+ "libcore.java.text.SimpleDateFormatTest#testLocales"]
},
{
description: "GC crash",
diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh
index aff009abb6..240d63c6d3 100755
--- a/tools/run-libcore-tests.sh
+++ b/tools/run-libcore-tests.sh
@@ -156,7 +156,11 @@ fi
# Increase the timeout, as vogar cannot set individual test
# timeout when being asked to run packages, and some tests go above
# the default timeout.
-vogar_args="$vogar_args --timeout 480"
+if $gcstress && $debug && $device_mode; then
+ vogar_args="$vogar_args --timeout 960"
+else
+ vogar_args="$vogar_args --timeout 480"
+fi
# set the toolchain to use.
vogar_args="$vogar_args --toolchain d8 --language CUR"
diff --git a/tools/teardown-buildbot-device.sh b/tools/teardown-buildbot-device.sh
index be68b9f490..6634fb4bb7 100755
--- a/tools/teardown-buildbot-device.sh
+++ b/tools/teardown-buildbot-device.sh
@@ -42,15 +42,14 @@ if [[ -n "$ART_TEST_CHROOT" ]]; then
# that started this process.
for_all_chroot_process() {
local action=$1
- for link in $(adb shell ls -d "/proc/*/root"); do
- local root=$(adb shell readlink "$link")
- if [[ "x$root" = "x$ART_TEST_CHROOT" ]]; then
- local dir=$(dirname "$link")
- local pid=$(basename "$dir")
- local cmdline=$(adb shell cat "$dir"/cmdline | tr '\000' ' ')
- $action "$pid" "$cmdline"
- fi
- done
+ adb shell ls -ld "/proc/*/root" \
+ | sed -n -e "s,^.* \\(/proc/.*/root\\) -> $ART_TEST_CHROOT\$,\\1,p" \
+ | while read link; do
+ local dir=$(dirname "$link")
+ local pid=$(basename "$dir")
+ local cmdline=$(adb shell cat "$dir"/cmdline | tr '\000' ' ')
+ $action "$pid" "$cmdline"
+ done
}
# display_process PID CMDLINE
diff --git a/tools/veridex/flow_analysis.cc b/tools/veridex/flow_analysis.cc
index d4f7e5f91d..f5eb4ea67d 100644
--- a/tools/veridex/flow_analysis.cc
+++ b/tools/veridex/flow_analysis.cc
@@ -17,6 +17,7 @@
#include "flow_analysis.h"
#include "dex/bytecode_utils.h"
+#include "dex/class_accessor-inl.h"
#include "dex/code_item_accessors-inl.h"
#include "dex/dex_instruction-inl.h"
#include "dex/dex_file-inl.h"
@@ -26,6 +27,13 @@
namespace art {
+VeriFlowAnalysis::VeriFlowAnalysis(VeridexResolver* resolver,
+ const ClassAccessor::Method& method)
+ : resolver_(resolver),
+ method_id_(method.GetIndex()),
+ code_item_accessor_(method.GetInstructionsAndData()),
+ dex_registers_(code_item_accessor_.InsnsSizeInCodeUnits()),
+ instruction_infos_(code_item_accessor_.InsnsSizeInCodeUnits()) {}
void VeriFlowAnalysis::SetAsBranchTarget(uint32_t dex_pc) {
if (dex_registers_[dex_pc] == nullptr) {
diff --git a/tools/veridex/flow_analysis.h b/tools/veridex/flow_analysis.h
index fc093600c3..9c86024711 100644
--- a/tools/veridex/flow_analysis.h
+++ b/tools/veridex/flow_analysis.h
@@ -17,6 +17,7 @@
#ifndef ART_TOOLS_VERIDEX_FLOW_ANALYSIS_H_
#define ART_TOOLS_VERIDEX_FLOW_ANALYSIS_H_
+#include "dex/class_accessor.h"
#include "dex/code_item_accessors.h"
#include "dex/dex_file_reference.h"
#include "dex/method_reference.h"
@@ -108,12 +109,7 @@ struct InstructionInfo {
class VeriFlowAnalysis {
public:
- VeriFlowAnalysis(VeridexResolver* resolver, const ClassDataItemIterator& it)
- : resolver_(resolver),
- method_id_(it.GetMemberIndex()),
- code_item_accessor_(resolver->GetDexFile(), it.GetMethodCodeItem()),
- dex_registers_(code_item_accessor_.InsnsSizeInCodeUnits()),
- instruction_infos_(code_item_accessor_.InsnsSizeInCodeUnits()) {}
+ VeriFlowAnalysis(VeridexResolver* resolver, const ClassAccessor::Method& method);
void Run();
@@ -189,8 +185,8 @@ struct ReflectAccessInfo {
// Collects all reflection uses.
class FlowAnalysisCollector : public VeriFlowAnalysis {
public:
- FlowAnalysisCollector(VeridexResolver* resolver, const ClassDataItemIterator& it)
- : VeriFlowAnalysis(resolver, it) {}
+ FlowAnalysisCollector(VeridexResolver* resolver, const ClassAccessor::Method& method)
+ : VeriFlowAnalysis(resolver, method) {}
const std::vector<ReflectAccessInfo>& GetUses() const {
return uses_;
@@ -208,9 +204,9 @@ class FlowAnalysisCollector : public VeriFlowAnalysis {
class FlowAnalysisSubstitutor : public VeriFlowAnalysis {
public:
FlowAnalysisSubstitutor(VeridexResolver* resolver,
- const ClassDataItemIterator& it,
+ const ClassAccessor::Method& method,
const std::map<MethodReference, std::vector<ReflectAccessInfo>>& accesses)
- : VeriFlowAnalysis(resolver, it), accesses_(accesses) {}
+ : VeriFlowAnalysis(resolver, method), accesses_(accesses) {}
const std::vector<ReflectAccessInfo>& GetUses() const {
return uses_;
diff --git a/tools/veridex/hidden_api_finder.cc b/tools/veridex/hidden_api_finder.cc
index 8c6139f75a..4eba10e764 100644
--- a/tools/veridex/hidden_api_finder.cc
+++ b/tools/veridex/hidden_api_finder.cc
@@ -16,6 +16,7 @@
#include "hidden_api_finder.h"
+#include "dex/class_accessor-inl.h"
#include "dex/code_item_accessors-inl.h"
#include "dex/dex_instruction-inl.h"
#include "dex/dex_file.h"
@@ -62,23 +63,9 @@ void HiddenApiFinder::CollectAccesses(VeridexResolver* resolver) {
}
// Note: we collect strings constants only referenced in code items as the string table
// contains other kind of strings (eg types).
- size_t class_def_count = dex_file.NumClassDefs();
- for (size_t class_def_index = 0; class_def_index < class_def_count; ++class_def_index) {
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
- const uint8_t* class_data = dex_file.GetClassData(class_def);
- if (class_data == nullptr) {
- // Empty class.
- continue;
- }
- ClassDataItemIterator it(dex_file, class_data);
- it.SkipAllFields();
- for (; it.HasNextMethod(); it.Next()) {
- const DexFile::CodeItem* code_item = it.GetMethodCodeItem();
- if (code_item == nullptr) {
- continue;
- }
- CodeItemDataAccessor code_item_accessor(dex_file, code_item);
- for (const DexInstructionPcPair& inst : code_item_accessor) {
+ for (ClassAccessor accessor : dex_file.GetClasses()) {
+ for (const ClassAccessor::Method& method : accessor.GetMethods()) {
+ for (const DexInstructionPcPair& inst : method.GetInstructions()) {
switch (inst->Opcode()) {
case Instruction::CONST_STRING: {
dex::StringIndex string_index(inst->VRegB_21c());
@@ -103,8 +90,7 @@ void HiddenApiFinder::CollectAccesses(VeridexResolver* resolver) {
// We only keep track of the location for strings, as these will be the
// field/method names the user is interested in.
strings_.insert(name);
- reflection_locations_[name].push_back(
- MethodReference(&dex_file, it.GetMemberIndex()));
+ reflection_locations_[name].push_back(method.GetReference());
}
}
break;
@@ -114,8 +100,7 @@ void HiddenApiFinder::CollectAccesses(VeridexResolver* resolver) {
case Instruction::INVOKE_STATIC:
case Instruction::INVOKE_SUPER:
case Instruction::INVOKE_VIRTUAL: {
- CheckMethod(
- inst->VRegB_35c(), resolver, MethodReference(&dex_file, it.GetMemberIndex()));
+ CheckMethod(inst->VRegB_35c(), resolver, method.GetReference());
break;
}
@@ -124,8 +109,7 @@ void HiddenApiFinder::CollectAccesses(VeridexResolver* resolver) {
case Instruction::INVOKE_STATIC_RANGE:
case Instruction::INVOKE_SUPER_RANGE:
case Instruction::INVOKE_VIRTUAL_RANGE: {
- CheckMethod(
- inst->VRegB_3rc(), resolver, MethodReference(&dex_file, it.GetMemberIndex()));
+ CheckMethod(inst->VRegB_3rc(), resolver, method.GetReference());
break;
}
@@ -136,8 +120,7 @@ void HiddenApiFinder::CollectAccesses(VeridexResolver* resolver) {
case Instruction::IGET_BYTE:
case Instruction::IGET_CHAR:
case Instruction::IGET_SHORT: {
- CheckField(
- inst->VRegC_22c(), resolver, MethodReference(&dex_file, it.GetMemberIndex()));
+ CheckField(inst->VRegC_22c(), resolver, method.GetReference());
break;
}
@@ -148,8 +131,7 @@ void HiddenApiFinder::CollectAccesses(VeridexResolver* resolver) {
case Instruction::IPUT_BYTE:
case Instruction::IPUT_CHAR:
case Instruction::IPUT_SHORT: {
- CheckField(
- inst->VRegC_22c(), resolver, MethodReference(&dex_file, it.GetMemberIndex()));
+ CheckField(inst->VRegC_22c(), resolver, method.GetReference());
break;
}
@@ -160,8 +142,7 @@ void HiddenApiFinder::CollectAccesses(VeridexResolver* resolver) {
case Instruction::SGET_BYTE:
case Instruction::SGET_CHAR:
case Instruction::SGET_SHORT: {
- CheckField(
- inst->VRegB_21c(), resolver, MethodReference(&dex_file, it.GetMemberIndex()));
+ CheckField(inst->VRegB_21c(), resolver, method.GetReference());
break;
}
@@ -172,8 +153,7 @@ void HiddenApiFinder::CollectAccesses(VeridexResolver* resolver) {
case Instruction::SPUT_BYTE:
case Instruction::SPUT_CHAR:
case Instruction::SPUT_SHORT: {
- CheckField(
- inst->VRegB_21c(), resolver, MethodReference(&dex_file, it.GetMemberIndex()));
+ CheckField(inst->VRegB_21c(), resolver, method.GetReference());
break;
}
diff --git a/tools/veridex/precise_hidden_api_finder.cc b/tools/veridex/precise_hidden_api_finder.cc
index 89754c2cdf..445221e8f7 100644
--- a/tools/veridex/precise_hidden_api_finder.cc
+++ b/tools/veridex/precise_hidden_api_finder.cc
@@ -16,6 +16,7 @@
#include "precise_hidden_api_finder.h"
+#include "dex/class_accessor-inl.h"
#include "dex/code_item_accessors-inl.h"
#include "dex/dex_instruction-inl.h"
#include "dex/dex_file.h"
@@ -31,25 +32,13 @@ namespace art {
void PreciseHiddenApiFinder::RunInternal(
const std::vector<std::unique_ptr<VeridexResolver>>& resolvers,
- const std::function<void(VeridexResolver*, const ClassDataItemIterator&)>& action) {
+ const std::function<void(VeridexResolver*, const ClassAccessor::Method&)>& action) {
for (const std::unique_ptr<VeridexResolver>& resolver : resolvers) {
- const DexFile& dex_file = resolver->GetDexFile();
- size_t class_def_count = dex_file.NumClassDefs();
- for (size_t class_def_index = 0; class_def_index < class_def_count; ++class_def_index) {
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
- const uint8_t* class_data = dex_file.GetClassData(class_def);
- if (class_data == nullptr) {
- // Empty class.
- continue;
- }
- ClassDataItemIterator it(dex_file, class_data);
- it.SkipAllFields();
- for (; it.HasNextMethod(); it.Next()) {
- const DexFile::CodeItem* code_item = it.GetMethodCodeItem();
- if (code_item == nullptr) {
- continue;
+ for (ClassAccessor accessor : resolver->GetDexFile().GetClasses()) {
+ for (const ClassAccessor::Method& method : accessor.GetMethods()) {
+ if (method.GetCodeItem() != nullptr) {
+ action(resolver.get(), method);
}
- action(resolver.get(), it);
}
}
}
@@ -68,10 +57,10 @@ void PreciseHiddenApiFinder::AddUsesAt(const std::vector<ReflectAccessInfo>& acc
void PreciseHiddenApiFinder::Run(const std::vector<std::unique_ptr<VeridexResolver>>& resolvers) {
// Collect reflection uses.
- RunInternal(resolvers, [this] (VeridexResolver* resolver, const ClassDataItemIterator& it) {
- FlowAnalysisCollector collector(resolver, it);
+ RunInternal(resolvers, [this] (VeridexResolver* resolver, const ClassAccessor::Method& method) {
+ FlowAnalysisCollector collector(resolver, method);
collector.Run();
- AddUsesAt(collector.GetUses(), MethodReference(&resolver->GetDexFile(), it.GetMemberIndex()));
+ AddUsesAt(collector.GetUses(), method.GetReference());
});
// For non-final reflection uses, do a limited fixed point calculation over the code to try
@@ -84,11 +73,11 @@ void PreciseHiddenApiFinder::Run(const std::vector<std::unique_ptr<VeridexResolv
std::map<MethodReference, std::vector<ReflectAccessInfo>> current_uses
= std::move(abstract_uses_);
RunInternal(resolvers,
- [this, current_uses] (VeridexResolver* resolver, const ClassDataItemIterator& it) {
- FlowAnalysisSubstitutor substitutor(resolver, it, current_uses);
+ [this, current_uses] (VeridexResolver* resolver,
+ const ClassAccessor::Method& method) {
+ FlowAnalysisSubstitutor substitutor(resolver, method, current_uses);
substitutor.Run();
- AddUsesAt(substitutor.GetUses(),
- MethodReference(&resolver->GetDexFile(), it.GetMemberIndex()));
+ AddUsesAt(substitutor.GetUses(), method.GetReference());
});
}
}
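The "limited fixed point calculation" mentioned in the comment above repeatedly substitutes abstract reflection uses until a pass discovers nothing new. The actual driver loop sits outside these hunks, so the following is only a generic skeleton of that pattern, not the tool's code.

    #include <cstddef>

    // Re-runs `pass` over `state` until the amount of collected information stops growing.
    template <typename State, typename Pass>
    void RunToFixedPoint(State* state, Pass pass) {
      bool changed = true;
      while (changed) {
        const size_t before = state->size();
        pass(state);
        changed = state->size() != before;
      }
    }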
diff --git a/tools/veridex/precise_hidden_api_finder.h b/tools/veridex/precise_hidden_api_finder.h
index 1c4d0ae84e..8c5126cf1b 100644
--- a/tools/veridex/precise_hidden_api_finder.h
+++ b/tools/veridex/precise_hidden_api_finder.h
@@ -48,7 +48,7 @@ class PreciseHiddenApiFinder {
// Run over all methods of all dex files, and call `action` on each.
void RunInternal(
const std::vector<std::unique_ptr<VeridexResolver>>& resolvers,
- const std::function<void(VeridexResolver*, const ClassDataItemIterator&)>& action);
+ const std::function<void(VeridexResolver*, const ClassAccessor::Method&)>& action);
// Add uses found in method `ref`.
void AddUsesAt(const std::vector<ReflectAccessInfo>& accesses, MethodReference ref);
diff --git a/tools/veridex/resolver.cc b/tools/veridex/resolver.cc
index 9113039b04..56729fffd0 100644
--- a/tools/veridex/resolver.cc
+++ b/tools/veridex/resolver.cc
@@ -16,6 +16,7 @@
#include "resolver.h"
+#include "dex/class_accessor-inl.h"
#include "dex/dex_file-inl.h"
#include "dex/primitive.h"
#include "hidden_api.h"
@@ -24,34 +25,22 @@
namespace art {
void VeridexResolver::Run() {
- size_t class_def_count = dex_file_.NumClassDefs();
- for (size_t class_def_index = 0; class_def_index < class_def_count; ++class_def_index) {
- const DexFile::ClassDef& class_def = dex_file_.GetClassDef(class_def_index);
- std::string name(dex_file_.StringByTypeIdx(class_def.class_idx_));
+ for (ClassAccessor accessor : dex_file_.GetClasses()) {
+ std::string name(accessor.GetDescriptor());
auto existing = type_map_.find(name);
+ const uint32_t type_idx = accessor.GetClassIdx().index_;
if (existing != type_map_.end()) {
// Class already exists, cache it and move on.
- type_infos_[class_def.class_idx_.index_] = *existing->second;
+ type_infos_[type_idx] = *existing->second;
continue;
}
- type_infos_[class_def.class_idx_.index_] = VeriClass(Primitive::Type::kPrimNot, 0, &class_def);
- type_map_[name] = &(type_infos_[class_def.class_idx_.index_]);
-
- const uint8_t* class_data = dex_file_.GetClassData(class_def);
- if (class_data == nullptr) {
- // Empty class.
- continue;
- }
-
- ClassDataItemIterator it(dex_file_, class_data);
- for (; it.HasNextStaticField(); it.Next()) {
- field_infos_[it.GetMemberIndex()] = it.DataPointer();
+ type_infos_[type_idx] = VeriClass(Primitive::Type::kPrimNot, 0, &accessor.GetClassDef());
+ type_map_[name] = &type_infos_[type_idx];
+ for (const ClassAccessor::Field& field : accessor.GetFields()) {
+ field_infos_[field.GetIndex()] = field.GetDataPointer();
}
- for (; it.HasNextInstanceField(); it.Next()) {
- field_infos_[it.GetMemberIndex()] = it.DataPointer();
- }
- for (; it.HasNextMethod(); it.Next()) {
- method_infos_[it.GetMemberIndex()] = it.DataPointer();
+ for (const ClassAccessor::Method& method : accessor.GetMethods()) {
+ method_infos_[method.GetIndex()] = method.GetDataPointer();
}
}
}
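
Note (illustrative, not part of the change): VeridexResolver::Run above folds the old static-field, instance-field, and method loops into two range-fors. The sketch below shows that indexing pattern with hypothetical stand-alone maps instead of the resolver's field_infos_/method_infos_ members; only calls already used in this patch are assumed.

// Sketch only: populate member-index -> class-data-pointer maps for one dex file.
#include <cstdint>
#include <map>
#include "dex/class_accessor-inl.h"
#include "dex/dex_file-inl.h"

namespace art {

void IndexMembers(const DexFile& dex_file,
                  std::map<uint32_t, const uint8_t*>* fields,    // field idx -> data pointer
                  std::map<uint32_t, const uint8_t*>* methods) { // method idx -> data pointer
  for (ClassAccessor accessor : dex_file.GetClasses()) {
    // As implied by the change above, GetFields() covers both the static and
    // instance fields that the old iterator handled in two separate loops.
    for (const ClassAccessor::Field& field : accessor.GetFields()) {
      (*fields)[field.GetIndex()] = field.GetDataPointer();
    }
    for (const ClassAccessor::Method& method : accessor.GetMethods()) {
      (*methods)[method.GetIndex()] = method.GetDataPointer();
    }
  }
}

}  // namespace art
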
@@ -148,18 +137,14 @@ VeriMethod VeridexResolver::LookupMethodIn(const VeriClass& kls,
// Look at methods declared in `kls`.
const DexFile& other_dex_file = resolver->dex_file_;
- const uint8_t* class_data = other_dex_file.GetClassData(*kls.GetClassDef());
- if (class_data != nullptr) {
- ClassDataItemIterator it(other_dex_file, class_data);
- it.SkipAllFields();
- for (; it.HasNextMethod(); it.Next()) {
- const DexFile::MethodId& other_method_id = other_dex_file.GetMethodId(it.GetMemberIndex());
- if (HasSameNameAndSignature(other_dex_file,
- other_method_id,
- method_name,
- method_signature)) {
- return it.DataPointer();
- }
+ ClassAccessor other_dex_accessor(other_dex_file, *kls.GetClassDef());
+ for (const ClassAccessor::Method& method : other_dex_accessor.GetMethods()) {
+ const DexFile::MethodId& other_method_id = other_dex_file.GetMethodId(method.GetIndex());
+ if (HasSameNameAndSignature(other_dex_file,
+ other_method_id,
+ method_name,
+ method_signature)) {
+ return method.GetDataPointer();
}
}
@@ -207,17 +192,14 @@ VeriField VeridexResolver::LookupFieldIn(const VeriClass& kls,
// Look at fields declared in `kls`.
const DexFile& other_dex_file = resolver->dex_file_;
- const uint8_t* class_data = other_dex_file.GetClassData(*kls.GetClassDef());
- if (class_data != nullptr) {
- ClassDataItemIterator it(other_dex_file, class_data);
- for (; it.HasNextStaticField() || it.HasNextInstanceField(); it.Next()) {
- const DexFile::FieldId& other_field_id = other_dex_file.GetFieldId(it.GetMemberIndex());
- if (HasSameNameAndType(other_dex_file,
- other_field_id,
- field_name,
- field_type)) {
- return it.DataPointer();
- }
+ ClassAccessor other_dex_accessor(other_dex_file, *kls.GetClassDef());
+ for (const ClassAccessor::Field& field : other_dex_accessor.GetFields()) {
+ const DexFile::FieldId& other_field_id = other_dex_file.GetFieldId(field.GetIndex());
+ if (HasSameNameAndType(other_dex_file,
+ other_field_id,
+ field_name,
+ field_type)) {
+ return field.GetDataPointer();
}
}
@@ -260,18 +242,13 @@ VeriMethod VeridexResolver::LookupDeclaredMethodIn(const VeriClass& kls,
}
VeridexResolver* resolver = GetResolverOf(kls);
const DexFile& other_dex_file = resolver->dex_file_;
- const uint8_t* class_data = other_dex_file.GetClassData(*kls.GetClassDef());
- if (class_data != nullptr) {
- ClassDataItemIterator it(other_dex_file, class_data);
- it.SkipAllFields();
- for (; it.HasNextMethod(); it.Next()) {
- const DexFile::MethodId& other_method_id = other_dex_file.GetMethodId(it.GetMemberIndex());
- if (HasSameNameAndSignature(other_dex_file,
- other_method_id,
- method_name,
- type)) {
- return it.DataPointer();
- }
+ ClassAccessor other_dex_accessor(other_dex_file, *kls.GetClassDef());
+ for (const ClassAccessor::Method& method : other_dex_accessor.GetMethods()) {
+ if (HasSameNameAndSignature(other_dex_file,
+ other_dex_file.GetMethodId(method.GetIndex()),
+ method_name,
+ type)) {
+ return method.GetDataPointer();
}
}
return nullptr;
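
Note (illustrative, not part of the change): the Lookup*In rewrites above share one shape: construct a ClassAccessor from a DexFile and a ClassDef, walk the members, and return the member's data pointer on a match. The sketch below follows that shape; FindMethodByName is hypothetical and DexFile::GetMethodName() is assumed to be available (the patch itself compares via HasSameNameAndSignature).

// Sketch only: find a directly declared method by name within one class definition.
#include <cstring>
#include "dex/class_accessor-inl.h"
#include "dex/dex_file-inl.h"

namespace art {

const uint8_t* FindMethodByName(const DexFile& dex_file,
                                const DexFile::ClassDef& class_def,
                                const char* method_name) {
  // Same constructor the Lookup*In helpers use: ClassAccessor(dex_file, class_def).
  ClassAccessor accessor(dex_file, class_def);
  for (const ClassAccessor::Method& method : accessor.GetMethods()) {
    const DexFile::MethodId& method_id = dex_file.GetMethodId(method.GetIndex());
    if (strcmp(dex_file.GetMethodName(method_id), method_name) == 0) {
      // Same value the resolver caches as a VeriMethod.
      return method.GetDataPointer();
    }
  }
  return nullptr;
}

}  // namespace art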