summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--build/Android.gtest.mk2
-rw-r--r--build/art.go2
-rw-r--r--cmdline/cmdline_types.h18
-rw-r--r--cmdline/detail/cmdline_parse_argument_detail.h6
-rw-r--r--cmdline/token_range.h4
-rw-r--r--compiler/Android.bp6
-rw-r--r--compiler/dex/verified_method.cc29
-rw-r--r--compiler/driver/compiler_driver.cc2
-rw-r--r--compiler/image_writer.cc7
-rw-r--r--compiler/linker/mips/relative_patcher_mips32r6_test.cc27
-rw-r--r--compiler/linker/mips/relative_patcher_mips_test.cc26
-rw-r--r--compiler/linker/mips64/relative_patcher_mips64.cc111
-rw-r--r--compiler/linker/mips64/relative_patcher_mips64.h50
-rw-r--r--compiler/linker/mips64/relative_patcher_mips64_test.cc127
-rw-r--r--compiler/linker/relative_patcher.cc7
-rw-r--r--compiler/optimizing/code_generator.cc8
-rw-r--r--compiler/optimizing/code_generator.h13
-rw-r--r--compiler/optimizing/code_generator_arm.cc68
-rw-r--r--compiler/optimizing/code_generator_arm.h14
-rw-r--r--compiler/optimizing/code_generator_arm64.cc65
-rw-r--r--compiler/optimizing/code_generator_arm64.h17
-rw-r--r--compiler/optimizing/code_generator_arm_vixl.cc129
-rw-r--r--compiler/optimizing/code_generator_arm_vixl.h2
-rw-r--r--compiler/optimizing/code_generator_mips.cc37
-rw-r--r--compiler/optimizing/code_generator_mips64.cc228
-rw-r--r--compiler/optimizing/code_generator_mips64.h50
-rw-r--r--compiler/optimizing/code_generator_utils.h8
-rw-r--r--compiler/optimizing/code_generator_x86.cc58
-rw-r--r--compiler/optimizing/code_generator_x86.h8
-rw-r--r--compiler/optimizing/code_generator_x86_64.cc72
-rw-r--r--compiler/optimizing/code_generator_x86_64.h9
-rw-r--r--compiler/optimizing/inliner.cc2
-rw-r--r--compiler/optimizing/instruction_simplifier.cc12
-rw-r--r--compiler/optimizing/intrinsics_arm_vixl.cc51
-rw-r--r--compiler/optimizing/nodes.cc33
-rw-r--r--compiler/optimizing/nodes.h20
-rw-r--r--compiler/optimizing/optimizing_cfi_test.cc27
-rw-r--r--compiler/optimizing/optimizing_cfi_test_expected.inc12
-rw-r--r--compiler/optimizing/optimizing_compiler.cc149
-rw-r--r--compiler/optimizing/sharpening.cc26
-rw-r--r--compiler/optimizing/sharpening.h7
-rw-r--r--compiler/utils/arm/assembler_arm_vixl.cc9
-rw-r--r--compiler/utils/arm/jni_macro_assembler_arm_vixl.cc27
-rw-r--r--compiler/utils/assembler_test_base.h12
-rw-r--r--compiler/utils/mips64/assembler_mips64.cc301
-rw-r--r--compiler/utils/mips64/assembler_mips64.h126
-rw-r--r--compiler/utils/mips64/assembler_mips64_test.cc330
-rw-r--r--dex2oat/dex2oat.cc38
-rw-r--r--disassembler/disassembler_mips.cc37
-rw-r--r--oatdump/oatdump.cc16
-rw-r--r--oatdump/oatdump_test.cc4
-rw-r--r--patchoat/patchoat.cc12
-rw-r--r--profman/profman.cc4
-rw-r--r--runtime/Android.bp1
-rw-r--r--runtime/arch/arm/instruction_set_features_arm.cc4
-rw-r--r--runtime/arch/arm64/instruction_set_features_arm64.cc4
-rw-r--r--runtime/arch/instruction_set_features.cc4
-rw-r--r--runtime/arch/mips/instruction_set_features_mips.cc4
-rw-r--r--runtime/arch/mips64/instruction_set_features_mips64.cc4
-rw-r--r--runtime/arch/x86/instruction_set_features_x86.cc4
-rw-r--r--runtime/cha.cc7
-rw-r--r--runtime/class_linker-inl.h26
-rw-r--r--runtime/class_linker_test.cc6
-rw-r--r--runtime/class_table-inl.h13
-rw-r--r--runtime/class_table.cc24
-rw-r--r--runtime/class_table.h7
-rw-r--r--runtime/class_table_test.cc163
-rw-r--r--runtime/elf_file.cc4
-rw-r--r--runtime/gc/collector/concurrent_copying.cc5
-rw-r--r--runtime/gc/reference_processor.cc39
-rw-r--r--runtime/gc/reference_processor.h7
-rw-r--r--runtime/gc/space/image_space.cc6
-rw-r--r--runtime/interpreter/interpreter_switch_impl.cc95
-rw-r--r--runtime/jit/jit_code_cache.cc82
-rw-r--r--runtime/jit/profile_saver.cc4
-rw-r--r--runtime/mirror/dex_cache-inl.h2
-rw-r--r--runtime/mirror/string-inl.h5
-rw-r--r--runtime/native/java_lang_ref_Reference.cc7
-rw-r--r--runtime/oat_file_assistant.cc7
-rw-r--r--runtime/oat_file_assistant_test.cc5
-rw-r--r--runtime/openjdkjvmti/OpenjdkJvmTi.cc2
-rw-r--r--runtime/openjdkjvmti/ti_method.cc59
-rw-r--r--runtime/openjdkjvmti/ti_method.h5
-rw-r--r--runtime/openjdkjvmti/ti_stack.cc10
-rw-r--r--runtime/runtime.cc8
-rw-r--r--runtime/thread.cc82
-rw-r--r--runtime/utils.cc153
-rw-r--r--runtime/utils.h66
-rw-r--r--runtime/utils_test.cc52
-rw-r--r--runtime/verifier/method_verifier_test.cc4
-rw-r--r--runtime/verifier/verifier_deps.cc11
-rw-r--r--test/021-string2/expected.txt4
-rw-r--r--test/021-string2/src/Main.java25
-rw-r--r--test/153-reference-stress/expected.txt1
-rw-r--r--test/153-reference-stress/info.txt1
-rw-r--r--test/153-reference-stress/src/Main.java73
-rw-r--r--test/616-cha-regression-proxy-method/expected.txt1
-rw-r--r--test/616-cha-regression-proxy-method/info.txt1
-rw-r--r--test/616-cha-regression-proxy-method/src/Main.java131
-rw-r--r--test/616-cha/src/Main.java9
-rw-r--r--test/630-safecast-array/expected.txt0
-rw-r--r--test/630-safecast-array/info.txt3
-rw-r--r--test/630-safecast-array/smali/Main.smali33
-rw-r--r--test/911-get-stack-trace/expected.txt358
-rw-r--r--test/911-get-stack-trace/src/Main.java15
-rw-r--r--test/911-get-stack-trace/stack_trace.cc75
-rw-r--r--test/Android.run-test.mk2
107 files changed, 2951 insertions, 1237 deletions
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 1691dbb3bc..d59d8f690c 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -86,6 +86,7 @@ ART_GTEST_dex2oat_environment_tests_DEX_DEPS := Main MainStripped MultiDex Multi
ART_GTEST_atomic_method_ref_map_test_DEX_DEPS := Interfaces
ART_GTEST_class_linker_test_DEX_DEPS := Interfaces MethodTypes MultiDex MyClass Nested Statics StaticsFromCode
+ART_GTEST_class_table_test_DEX_DEPS := XandY
ART_GTEST_compiler_driver_test_DEX_DEPS := AbstractMethod StaticLeafMethods ProfileTestMultiDex
ART_GTEST_dex_cache_test_DEX_DEPS := Main Packages MethodTypes
ART_GTEST_dex_file_test_DEX_DEPS := GetMethodSignature Main Nested
@@ -598,6 +599,7 @@ ART_TEST_TARGET_VALGRIND_GTEST$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_VALGRIND_GTEST_RULES :=
ART_GTEST_TARGET_ANDROID_ROOT :=
ART_GTEST_class_linker_test_DEX_DEPS :=
+ART_GTEST_class_table_test_DEX_DEPS :=
ART_GTEST_compiler_driver_test_DEX_DEPS :=
ART_GTEST_dex_file_test_DEX_DEPS :=
ART_GTEST_exception_test_DEX_DEPS :=
diff --git a/build/art.go b/build/art.go
index ccaa11dfe7..0af1767fdd 100644
--- a/build/art.go
+++ b/build/art.go
@@ -259,7 +259,7 @@ func artDefaultsFactory() (blueprint.Module, []interface{}) {
}
func artLibrary() (blueprint.Module, []interface{}) {
- library, _ := cc.NewLibrary(android.HostAndDeviceSupported, true, true)
+ library, _ := cc.NewLibrary(android.HostAndDeviceSupported)
module, props := library.Init()
props = installCodegenCustomizer(module, props, true)
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index 156ca9ef3e..e41d9bde59 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -22,6 +22,8 @@
#include "detail/cmdline_debug_detail.h"
#include "cmdline_type_parser.h"
+#include "android-base/strings.h"
+
// Includes for the types that are being specialized
#include <string>
#include "base/logging.h"
@@ -447,7 +449,7 @@ struct ParseStringList {
}
std::string Join() const {
- return art::Join(list_, Separator);
+ return android::base::Join(list_, Separator);
}
static ParseStringList<Separator> Split(const std::string& str) {
@@ -709,43 +711,43 @@ struct CmdlineType<ProfileSaverOptions> : CmdlineTypeParser<ProfileSaverOptions>
// The rest of these options are always the wildcard from '-Xps-*'
std::string suffix = RemovePrefix(option);
- if (StartsWith(option, "min-save-period-ms:")) {
+ if (android::base::StartsWith(option, "min-save-period-ms:")) {
CmdlineType<unsigned int> type_parser;
return ParseInto(existing,
&ProfileSaverOptions::min_save_period_ms_,
type_parser.Parse(suffix));
}
- if (StartsWith(option, "save-resolved-classes-delay-ms:")) {
+ if (android::base::StartsWith(option, "save-resolved-classes-delay-ms:")) {
CmdlineType<unsigned int> type_parser;
return ParseInto(existing,
&ProfileSaverOptions::save_resolved_classes_delay_ms_,
type_parser.Parse(suffix));
}
- if (StartsWith(option, "startup-method-samples:")) {
+ if (android::base::StartsWith(option, "startup-method-samples:")) {
CmdlineType<unsigned int> type_parser;
return ParseInto(existing,
&ProfileSaverOptions::startup_method_samples_,
type_parser.Parse(suffix));
}
- if (StartsWith(option, "min-methods-to-save:")) {
+ if (android::base::StartsWith(option, "min-methods-to-save:")) {
CmdlineType<unsigned int> type_parser;
return ParseInto(existing,
&ProfileSaverOptions::min_methods_to_save_,
type_parser.Parse(suffix));
}
- if (StartsWith(option, "min-classes-to-save:")) {
+ if (android::base::StartsWith(option, "min-classes-to-save:")) {
CmdlineType<unsigned int> type_parser;
return ParseInto(existing,
&ProfileSaverOptions::min_classes_to_save_,
type_parser.Parse(suffix));
}
- if (StartsWith(option, "min-notification-before-wake:")) {
+ if (android::base::StartsWith(option, "min-notification-before-wake:")) {
CmdlineType<unsigned int> type_parser;
return ParseInto(existing,
&ProfileSaverOptions::min_notification_before_wake_,
type_parser.Parse(suffix));
}
- if (StartsWith(option, "max-notification-before-wake:")) {
+ if (android::base::StartsWith(option, "max-notification-before-wake:")) {
CmdlineType<unsigned int> type_parser;
return ParseInto(existing,
&ProfileSaverOptions::max_notification_before_wake_,
diff --git a/cmdline/detail/cmdline_parse_argument_detail.h b/cmdline/detail/cmdline_parse_argument_detail.h
index 14eac30aa1..da03c2198f 100644
--- a/cmdline/detail/cmdline_parse_argument_detail.h
+++ b/cmdline/detail/cmdline_parse_argument_detail.h
@@ -25,6 +25,8 @@
#include <numeric>
#include <memory>
+#include "android-base/strings.h"
+
#include "cmdline_parse_result.h"
#include "cmdline_types.h"
#include "token_range.h"
@@ -399,7 +401,7 @@ namespace art {
allowed_values.push_back(name);
}
- std::string allowed_values_flat = Join(allowed_values, ',');
+ std::string allowed_values_flat = android::base::Join(allowed_values, ',');
return CmdlineResult(CmdlineResult::kFailure,
"Argument value '" + argument + "' does not match any of known valid"
"values: {" + allowed_values_flat + "}");
@@ -426,7 +428,7 @@ namespace art {
allowed_values.push_back(arg_name);
}
- std::string allowed_values_flat = Join(allowed_values, ',');
+ std::string allowed_values_flat = android::base::Join(allowed_values, ',');
return CmdlineResult(CmdlineResult::kFailure,
"Argument value '" + argument + "' does not match any of known valid"
"values: {" + allowed_values_flat + "}");
diff --git a/cmdline/token_range.h b/cmdline/token_range.h
index 335806795a..c22d6c8959 100644
--- a/cmdline/token_range.h
+++ b/cmdline/token_range.h
@@ -23,6 +23,8 @@
#include <algorithm>
#include <memory>
+#include "android-base/strings.h"
+
namespace art {
// A range of tokens to make token matching algorithms easier.
//
@@ -374,7 +376,7 @@ struct TokenRange {
// e.g. ["hello", "world"].join('$') == "hello$world"
std::string Join(char separator) const {
TokenList tmp(begin(), end());
- return art::Join(tmp, separator);
+ return android::base::Join(tmp, separator);
// TODO: Join should probably take an offset or iterators
}
diff --git a/compiler/Android.bp b/compiler/Android.bp
index db55ea0ef7..2eb6fba674 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -145,6 +145,7 @@ art_cc_defaults {
mips64: {
srcs: [
"jni/quick/mips64/calling_convention_mips64.cc",
+ "linker/mips64/relative_patcher_mips64.cc",
"optimizing/code_generator_mips64.cc",
"optimizing/intrinsics_mips64.cc",
"utils/mips64/assembler_mips64.cc",
@@ -381,6 +382,11 @@ art_cc_test {
"linker/mips/relative_patcher_mips32r6_test.cc",
],
},
+ mips64: {
+ srcs: [
+ "linker/mips64/relative_patcher_mips64_test.cc",
+ ],
+ },
x86: {
srcs: [
"linker/x86/relative_patcher_x86_test.cc",
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 1bdace9284..a5979cc26e 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -218,35 +218,18 @@ void VerifiedMethod::GenerateSafeCastSet(verifier::MethodVerifier* method_verifi
for (; inst < end; inst = inst->Next()) {
Instruction::Code code = inst->Opcode();
- if ((code == Instruction::CHECK_CAST) || (code == Instruction::APUT_OBJECT)) {
+ if (code == Instruction::CHECK_CAST) {
uint32_t dex_pc = inst->GetDexPc(code_item->insns_);
if (!method_verifier->GetInstructionFlags(dex_pc).IsVisited()) {
// Do not attempt to quicken this instruction, it's unreachable anyway.
continue;
}
const verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
- bool is_safe_cast = false;
- if (code == Instruction::CHECK_CAST) {
- const verifier::RegType& reg_type(line->GetRegisterType(method_verifier,
- inst->VRegA_21c()));
- const verifier::RegType& cast_type =
- method_verifier->ResolveCheckedClass(dex::TypeIndex(inst->VRegB_21c()));
- is_safe_cast = cast_type.IsStrictlyAssignableFrom(reg_type, method_verifier);
- } else {
- const verifier::RegType& array_type(line->GetRegisterType(method_verifier,
- inst->VRegB_23x()));
- // We only know its safe to assign to an array if the array type is precise. For example,
- // an Object[] can have any type of object stored in it, but it may also be assigned a
- // String[] in which case the stores need to be of Strings.
- if (array_type.IsPreciseReference()) {
- const verifier::RegType& value_type(line->GetRegisterType(method_verifier,
- inst->VRegA_23x()));
- const verifier::RegType& component_type = method_verifier->GetRegTypeCache()
- ->GetComponentType(array_type, method_verifier->GetClassLoader());
- is_safe_cast = component_type.IsStrictlyAssignableFrom(value_type, method_verifier);
- }
- }
- if (is_safe_cast) {
+ const verifier::RegType& reg_type(line->GetRegisterType(method_verifier,
+ inst->VRegA_21c()));
+ const verifier::RegType& cast_type =
+ method_verifier->ResolveCheckedClass(dex::TypeIndex(inst->VRegB_21c()));
+ if (cast_type.IsStrictlyAssignableFrom(reg_type, method_verifier)) {
// Verify ordering for push_back() to the sorted vector.
DCHECK(safe_cast_set_.empty() || safe_cast_set_.back() < dex_pc);
safe_cast_set_.push_back(dex_pc);
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index e62bdb5530..1b1de78d90 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -299,7 +299,7 @@ CompilerDriver::CompilerDriver(
dump_passes_(dump_passes),
timings_logger_(timer),
compiler_context_(nullptr),
- support_boot_image_fixup_(instruction_set != kMips64),
+ support_boot_image_fixup_(true),
dex_files_for_oat_file_(nullptr),
compiled_method_storage_(swap_fd),
profile_compilation_info_(profile_compilation_info),
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index a47e711c28..b22ca470b5 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -803,6 +803,13 @@ bool ImageWriter::PruneAppImageClassInternal(
result = result || PruneAppImageClassInternal(klass->GetSuperClass(),
&my_early_exit,
visited);
+ // Remove the class if the dex file is not in the set of dex files. This happens for classes that
+ // are from uses library if there is no profile. b/30688277
+ mirror::DexCache* dex_cache = klass->GetDexCache();
+ if (dex_cache != nullptr) {
+ result = result ||
+ dex_file_oat_index_map_.find(dex_cache->GetDexFile()) == dex_file_oat_index_map_.end();
+ }
// Erase the element we stored earlier since we are exiting the function.
auto it = visited->find(klass);
DCHECK(it != visited->end());
diff --git a/compiler/linker/mips/relative_patcher_mips32r6_test.cc b/compiler/linker/mips/relative_patcher_mips32r6_test.cc
index a16aaca545..4f9a3a050a 100644
--- a/compiler/linker/mips/relative_patcher_mips32r6_test.cc
+++ b/compiler/linker/mips/relative_patcher_mips32r6_test.cc
@@ -29,10 +29,10 @@ class Mips32r6RelativePatcherTest : public RelativePatcherTest {
Mips32r6RelativePatcherTest() : RelativePatcherTest(kMips, "mips32r6") {}
protected:
- static const uint8_t UnpatchedPcRelativeRawCode[];
- static const uint32_t LiteralOffset;
- static const uint32_t AnchorOffset;
- static const ArrayRef<const uint8_t> UnpatchedPcRelativeCode;
+ static const uint8_t kUnpatchedPcRelativeRawCode[];
+ static const uint32_t kLiteralOffset;
+ static const uint32_t kAnchorOffset;
+ static const ArrayRef<const uint8_t> kUnpatchedPcRelativeCode;
uint32_t GetMethodOffset(uint32_t method_idx) {
auto result = method_offset_map_.FindMethodOffset(MethodRef(method_idx));
@@ -45,24 +45,25 @@ class Mips32r6RelativePatcherTest : public RelativePatcherTest {
void TestStringReference(uint32_t string_offset);
};
-const uint8_t Mips32r6RelativePatcherTest::UnpatchedPcRelativeRawCode[] = {
+const uint8_t Mips32r6RelativePatcherTest::kUnpatchedPcRelativeRawCode[] = {
0x34, 0x12, 0x5E, 0xEE, // auipc s2, high(diff); placeholder = 0x1234
0x78, 0x56, 0x52, 0x26, // addiu s2, s2, low(diff); placeholder = 0x5678
};
-const uint32_t Mips32r6RelativePatcherTest::LiteralOffset = 0; // At auipc (where patching starts).
-const uint32_t Mips32r6RelativePatcherTest::AnchorOffset = 0; // At auipc (where PC+0 points).
-const ArrayRef<const uint8_t> Mips32r6RelativePatcherTest::UnpatchedPcRelativeCode(
- UnpatchedPcRelativeRawCode);
+const uint32_t Mips32r6RelativePatcherTest::kLiteralOffset = 0; // At auipc (where
+ // patching starts).
+const uint32_t Mips32r6RelativePatcherTest::kAnchorOffset = 0; // At auipc (where PC+0 points).
+const ArrayRef<const uint8_t> Mips32r6RelativePatcherTest::kUnpatchedPcRelativeCode(
+ kUnpatchedPcRelativeRawCode);
void Mips32r6RelativePatcherTest::CheckPcRelativePatch(const ArrayRef<const LinkerPatch>& patches,
uint32_t target_offset) {
- AddCompiledMethod(MethodRef(1u), UnpatchedPcRelativeCode, ArrayRef<const LinkerPatch>(patches));
+ AddCompiledMethod(MethodRef(1u), kUnpatchedPcRelativeCode, ArrayRef<const LinkerPatch>(patches));
Link();
auto result = method_offset_map_.FindMethodOffset(MethodRef(1u));
ASSERT_TRUE(result.first);
- uint32_t diff = target_offset - (result.second + AnchorOffset);
+ uint32_t diff = target_offset - (result.second + kAnchorOffset);
if (patches[0].GetType() == LinkerPatch::Type::kDexCacheArray) {
diff += kDexCacheArrayLwOffset;
}
@@ -79,7 +80,7 @@ void Mips32r6RelativePatcherTest::TestDexCacheReference(uint32_t dex_cache_array
uint32_t element_offset) {
dex_cache_arrays_begin_ = dex_cache_arrays_begin;
LinkerPatch patches[] = {
- LinkerPatch::DexCacheArrayPatch(LiteralOffset, nullptr, AnchorOffset, element_offset)
+ LinkerPatch::DexCacheArrayPatch(kLiteralOffset, nullptr, kAnchorOffset, element_offset)
};
CheckPcRelativePatch(ArrayRef<const LinkerPatch>(patches),
dex_cache_arrays_begin_ + element_offset);
@@ -89,7 +90,7 @@ void Mips32r6RelativePatcherTest::TestStringReference(uint32_t string_offset) {
constexpr uint32_t kStringIndex = 1u;
string_index_to_offset_map_.Put(kStringIndex, string_offset);
LinkerPatch patches[] = {
- LinkerPatch::RelativeStringPatch(LiteralOffset, nullptr, AnchorOffset, kStringIndex)
+ LinkerPatch::RelativeStringPatch(kLiteralOffset, nullptr, kAnchorOffset, kStringIndex)
};
CheckPcRelativePatch(ArrayRef<const LinkerPatch>(patches), string_offset);
}
diff --git a/compiler/linker/mips/relative_patcher_mips_test.cc b/compiler/linker/mips/relative_patcher_mips_test.cc
index 335ce2e476..faeb92a315 100644
--- a/compiler/linker/mips/relative_patcher_mips_test.cc
+++ b/compiler/linker/mips/relative_patcher_mips_test.cc
@@ -29,10 +29,10 @@ class MipsRelativePatcherTest : public RelativePatcherTest {
MipsRelativePatcherTest() : RelativePatcherTest(kMips, "mips32r2") {}
protected:
- static const uint8_t UnpatchedPcRelativeRawCode[];
- static const uint32_t LiteralOffset;
- static const uint32_t AnchorOffset;
- static const ArrayRef<const uint8_t> UnpatchedPcRelativeCode;
+ static const uint8_t kUnpatchedPcRelativeRawCode[];
+ static const uint32_t kLiteralOffset;
+ static const uint32_t kAnchorOffset;
+ static const ArrayRef<const uint8_t> kUnpatchedPcRelativeCode;
uint32_t GetMethodOffset(uint32_t method_idx) {
auto result = method_offset_map_.FindMethodOffset(MethodRef(method_idx));
@@ -45,26 +45,26 @@ class MipsRelativePatcherTest : public RelativePatcherTest {
void TestStringReference(uint32_t string_offset);
};
-const uint8_t MipsRelativePatcherTest::UnpatchedPcRelativeRawCode[] = {
+const uint8_t MipsRelativePatcherTest::kUnpatchedPcRelativeRawCode[] = {
0x00, 0x00, 0x10, 0x04, // nal
0x34, 0x12, 0x12, 0x3C, // lui s2, high(diff); placeholder = 0x1234
0x78, 0x56, 0x52, 0x36, // ori s2, s2, low(diff); placeholder = 0x5678
0x21, 0x90, 0x5F, 0x02, // addu s2, s2, ra
};
-const uint32_t MipsRelativePatcherTest::LiteralOffset = 4; // At lui (where patching starts).
-const uint32_t MipsRelativePatcherTest::AnchorOffset = 8; // At ori (where PC+0 points).
-const ArrayRef<const uint8_t> MipsRelativePatcherTest::UnpatchedPcRelativeCode(
- UnpatchedPcRelativeRawCode);
+const uint32_t MipsRelativePatcherTest::kLiteralOffset = 4; // At lui (where patching starts).
+const uint32_t MipsRelativePatcherTest::kAnchorOffset = 8; // At ori (where PC+0 points).
+const ArrayRef<const uint8_t> MipsRelativePatcherTest::kUnpatchedPcRelativeCode(
+ kUnpatchedPcRelativeRawCode);
void MipsRelativePatcherTest::CheckPcRelativePatch(const ArrayRef<const LinkerPatch>& patches,
uint32_t target_offset) {
- AddCompiledMethod(MethodRef(1u), UnpatchedPcRelativeCode, ArrayRef<const LinkerPatch>(patches));
+ AddCompiledMethod(MethodRef(1u), kUnpatchedPcRelativeCode, ArrayRef<const LinkerPatch>(patches));
Link();
auto result = method_offset_map_.FindMethodOffset(MethodRef(1u));
ASSERT_TRUE(result.first);
- uint32_t diff = target_offset - (result.second + AnchorOffset);
+ uint32_t diff = target_offset - (result.second + kAnchorOffset);
if (patches[0].GetType() == LinkerPatch::Type::kDexCacheArray) {
diff += kDexCacheArrayLwOffset;
}
@@ -82,7 +82,7 @@ void MipsRelativePatcherTest::TestDexCacheReference(uint32_t dex_cache_arrays_be
uint32_t element_offset) {
dex_cache_arrays_begin_ = dex_cache_arrays_begin;
LinkerPatch patches[] = {
- LinkerPatch::DexCacheArrayPatch(LiteralOffset, nullptr, AnchorOffset, element_offset)
+ LinkerPatch::DexCacheArrayPatch(kLiteralOffset, nullptr, kAnchorOffset, element_offset)
};
CheckPcRelativePatch(ArrayRef<const LinkerPatch>(patches),
dex_cache_arrays_begin_ + element_offset);
@@ -92,7 +92,7 @@ void MipsRelativePatcherTest::TestStringReference(uint32_t string_offset) {
constexpr uint32_t kStringIndex = 1u;
string_index_to_offset_map_.Put(kStringIndex, string_offset);
LinkerPatch patches[] = {
- LinkerPatch::RelativeStringPatch(LiteralOffset, nullptr, AnchorOffset, kStringIndex)
+ LinkerPatch::RelativeStringPatch(kLiteralOffset, nullptr, kAnchorOffset, kStringIndex)
};
CheckPcRelativePatch(ArrayRef<const LinkerPatch>(patches), string_offset);
}
diff --git a/compiler/linker/mips64/relative_patcher_mips64.cc b/compiler/linker/mips64/relative_patcher_mips64.cc
new file mode 100644
index 0000000000..c47971635b
--- /dev/null
+++ b/compiler/linker/mips64/relative_patcher_mips64.cc
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "linker/mips64/relative_patcher_mips64.h"
+
+#include "compiled_method.h"
+
+namespace art {
+namespace linker {
+
+uint32_t Mips64RelativePatcher::ReserveSpace(
+ uint32_t offset,
+ const CompiledMethod* compiled_method ATTRIBUTE_UNUSED,
+ MethodReference method_ref ATTRIBUTE_UNUSED) {
+ return offset; // No space reserved; no limit on relative call distance.
+}
+
+uint32_t Mips64RelativePatcher::ReserveSpaceEnd(uint32_t offset) {
+ return offset; // No space reserved; no limit on relative call distance.
+}
+
+uint32_t Mips64RelativePatcher::WriteThunks(OutputStream* out ATTRIBUTE_UNUSED, uint32_t offset) {
+ return offset; // No thunks added; no limit on relative call distance.
+}
+
+void Mips64RelativePatcher::PatchCall(std::vector<uint8_t>* code,
+ uint32_t literal_offset,
+ uint32_t patch_offset,
+ uint32_t target_offset) {
+ // Basic sanity checks.
+ DCHECK_GE(code->size(), 8u);
+ DCHECK_LE(literal_offset, code->size() - 8u);
+ // auipc reg, offset_high
+ DCHECK_EQ((*code)[literal_offset + 0], 0x34);
+ DCHECK_EQ((*code)[literal_offset + 1], 0x12);
+ DCHECK_EQ(((*code)[literal_offset + 2] & 0x1F), 0x1E);
+ DCHECK_EQ(((*code)[literal_offset + 3] & 0xFC), 0xEC);
+ // jialc reg, offset_low
+ DCHECK_EQ((*code)[literal_offset + 4], 0x78);
+ DCHECK_EQ((*code)[literal_offset + 5], 0x56);
+ DCHECK_EQ(((*code)[literal_offset + 6] & 0xE0), 0x00);
+ DCHECK_EQ((*code)[literal_offset + 7], 0xF8);
+
+ // Apply patch.
+ uint32_t diff = target_offset - patch_offset;
+ // Note that a combination of auipc with an instruction that adds a sign-extended
+ // 16-bit immediate operand (e.g. jialc) provides a PC-relative range of
+ // PC-0x80000000 to PC+0x7FFF7FFF on MIPS64, that is, short of 2GB on one end
+ // by 32KB.
+ diff += (diff & 0x8000) << 1; // Account for sign extension in jialc.
+
+ // auipc reg, offset_high
+ (*code)[literal_offset + 0] = static_cast<uint8_t>(diff >> 16);
+ (*code)[literal_offset + 1] = static_cast<uint8_t>(diff >> 24);
+ // jialc reg, offset_low
+ (*code)[literal_offset + 4] = static_cast<uint8_t>(diff >> 0);
+ (*code)[literal_offset + 5] = static_cast<uint8_t>(diff >> 8);
+}
+
+void Mips64RelativePatcher::PatchPcRelativeReference(std::vector<uint8_t>* code,
+ const LinkerPatch& patch,
+ uint32_t patch_offset,
+ uint32_t target_offset) {
+ uint32_t anchor_literal_offset = patch.PcInsnOffset();
+ uint32_t literal_offset = patch.LiteralOffset();
+
+ // Basic sanity checks.
+ DCHECK_GE(code->size(), 8u);
+ DCHECK_LE(literal_offset, code->size() - 8u);
+ DCHECK_EQ(literal_offset, anchor_literal_offset);
+ // auipc reg, offset_high
+ DCHECK_EQ((*code)[literal_offset + 0], 0x34);
+ DCHECK_EQ((*code)[literal_offset + 1], 0x12);
+ DCHECK_EQ(((*code)[literal_offset + 2] & 0x1F), 0x1E);
+ DCHECK_EQ(((*code)[literal_offset + 3] & 0xFC), 0xEC);
+ // instr reg(s), offset_low
+ DCHECK_EQ((*code)[literal_offset + 4], 0x78);
+ DCHECK_EQ((*code)[literal_offset + 5], 0x56);
+
+ // Apply patch.
+ uint32_t anchor_offset = patch_offset - literal_offset + anchor_literal_offset;
+ uint32_t diff = target_offset - anchor_offset;
+ // Note that a combination of auipc with an instruction that adds a sign-extended
+ // 16-bit immediate operand (e.g. ld) provides a PC-relative range of
+ // PC-0x80000000 to PC+0x7FFF7FFF on MIPS64, that is, short of 2GB on one end
+ // by 32KB.
+ diff += (diff & 0x8000) << 1; // Account for sign extension in instruction following auipc.
+
+ // auipc reg, offset_high
+ (*code)[literal_offset + 0] = static_cast<uint8_t>(diff >> 16);
+ (*code)[literal_offset + 1] = static_cast<uint8_t>(diff >> 24);
+ // instr reg(s), offset_low
+ (*code)[literal_offset + 4] = static_cast<uint8_t>(diff >> 0);
+ (*code)[literal_offset + 5] = static_cast<uint8_t>(diff >> 8);
+}
+
+} // namespace linker
+} // namespace art
diff --git a/compiler/linker/mips64/relative_patcher_mips64.h b/compiler/linker/mips64/relative_patcher_mips64.h
new file mode 100644
index 0000000000..8ef8cebe2f
--- /dev/null
+++ b/compiler/linker/mips64/relative_patcher_mips64.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_LINKER_MIPS64_RELATIVE_PATCHER_MIPS64_H_
+#define ART_COMPILER_LINKER_MIPS64_RELATIVE_PATCHER_MIPS64_H_
+
+#include "linker/relative_patcher.h"
+
+namespace art {
+namespace linker {
+
+class Mips64RelativePatcher FINAL : public RelativePatcher {
+ public:
+ explicit Mips64RelativePatcher() {}
+
+ uint32_t ReserveSpace(uint32_t offset,
+ const CompiledMethod* compiled_method,
+ MethodReference method_ref) OVERRIDE;
+ uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE;
+ uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE;
+ void PatchCall(std::vector<uint8_t>* code,
+ uint32_t literal_offset,
+ uint32_t patch_offset,
+ uint32_t target_offset) OVERRIDE;
+ void PatchPcRelativeReference(std::vector<uint8_t>* code,
+ const LinkerPatch& patch,
+ uint32_t patch_offset,
+ uint32_t target_offset) OVERRIDE;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Mips64RelativePatcher);
+};
+
+} // namespace linker
+} // namespace art
+
+#endif // ART_COMPILER_LINKER_MIPS64_RELATIVE_PATCHER_MIPS64_H_
diff --git a/compiler/linker/mips64/relative_patcher_mips64_test.cc b/compiler/linker/mips64/relative_patcher_mips64_test.cc
new file mode 100644
index 0000000000..9e37f6bbea
--- /dev/null
+++ b/compiler/linker/mips64/relative_patcher_mips64_test.cc
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "linker/relative_patcher_test.h"
+#include "linker/mips64/relative_patcher_mips64.h"
+
+namespace art {
+namespace linker {
+
+class Mips64RelativePatcherTest : public RelativePatcherTest {
+ public:
+ Mips64RelativePatcherTest() : RelativePatcherTest(kMips64, "default") {}
+
+ protected:
+ static const uint8_t kUnpatchedPcRelativeRawCode[];
+ static const uint8_t kUnpatchedPcRelativeCallRawCode[];
+ static const uint32_t kLiteralOffset;
+ static const uint32_t kAnchorOffset;
+ static const ArrayRef<const uint8_t> kUnpatchedPcRelativeCode;
+ static const ArrayRef<const uint8_t> kUnpatchedPcRelativeCallCode;
+
+ uint32_t GetMethodOffset(uint32_t method_idx) {
+ auto result = method_offset_map_.FindMethodOffset(MethodRef(method_idx));
+ CHECK(result.first);
+ return result.second;
+ }
+
+ void CheckPcRelativePatch(const ArrayRef<const LinkerPatch>& patches, uint32_t target_offset);
+ void TestDexCacheReference(uint32_t dex_cache_arrays_begin, uint32_t element_offset);
+ void TestStringReference(uint32_t string_offset);
+};
+
+const uint8_t Mips64RelativePatcherTest::kUnpatchedPcRelativeRawCode[] = {
+ 0x34, 0x12, 0x5E, 0xEE, // auipc s2, high(diff); placeholder = 0x1234
+ 0x78, 0x56, 0x52, 0x26, // addiu s2, s2, low(diff); placeholder = 0x5678
+};
+const uint8_t Mips64RelativePatcherTest::kUnpatchedPcRelativeCallRawCode[] = {
+ 0x34, 0x12, 0x3E, 0xEC, // auipc at, high(diff); placeholder = 0x1234
+ 0x78, 0x56, 0x01, 0xF8, // jialc at, low(diff); placeholder = 0x5678
+};
+const uint32_t Mips64RelativePatcherTest::kLiteralOffset = 0; // At auipc (where patching starts).
+const uint32_t Mips64RelativePatcherTest::kAnchorOffset = 0; // At auipc (where PC+0 points).
+const ArrayRef<const uint8_t> Mips64RelativePatcherTest::kUnpatchedPcRelativeCode(
+ kUnpatchedPcRelativeRawCode);
+const ArrayRef<const uint8_t> Mips64RelativePatcherTest::kUnpatchedPcRelativeCallCode(
+ kUnpatchedPcRelativeCallRawCode);
+
+void Mips64RelativePatcherTest::CheckPcRelativePatch(const ArrayRef<const LinkerPatch>& patches,
+ uint32_t target_offset) {
+ AddCompiledMethod(MethodRef(1u), kUnpatchedPcRelativeCode, ArrayRef<const LinkerPatch>(patches));
+ Link();
+
+ auto result = method_offset_map_.FindMethodOffset(MethodRef(1u));
+ ASSERT_TRUE(result.first);
+
+ uint32_t diff = target_offset - (result.second + kAnchorOffset);
+ diff += (diff & 0x8000) << 1; // Account for sign extension in instruction following auipc.
+
+ const uint8_t expected_code[] = {
+ static_cast<uint8_t>(diff >> 16), static_cast<uint8_t>(diff >> 24), 0x5E, 0xEE,
+ static_cast<uint8_t>(diff), static_cast<uint8_t>(diff >> 8), 0x52, 0x26,
+ };
+ EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
+}
+
+void Mips64RelativePatcherTest::TestDexCacheReference(uint32_t dex_cache_arrays_begin,
+ uint32_t element_offset) {
+ dex_cache_arrays_begin_ = dex_cache_arrays_begin;
+ LinkerPatch patches[] = {
+ LinkerPatch::DexCacheArrayPatch(kLiteralOffset, nullptr, kAnchorOffset, element_offset)
+ };
+ CheckPcRelativePatch(ArrayRef<const LinkerPatch>(patches),
+ dex_cache_arrays_begin_ + element_offset);
+}
+
+TEST_F(Mips64RelativePatcherTest, DexCacheReference) {
+ TestDexCacheReference(/* dex_cache_arrays_begin */ 0x12345678, /* element_offset */ 0x1234);
+}
+
+TEST_F(Mips64RelativePatcherTest, CallOther) {
+ LinkerPatch method1_patches[] = {
+ LinkerPatch::RelativeCodePatch(kLiteralOffset, nullptr, 2u),
+ };
+ AddCompiledMethod(MethodRef(1u),
+ kUnpatchedPcRelativeCallCode,
+ ArrayRef<const LinkerPatch>(method1_patches));
+ LinkerPatch method2_patches[] = {
+ LinkerPatch::RelativeCodePatch(kLiteralOffset, nullptr, 1u),
+ };
+ AddCompiledMethod(MethodRef(2u),
+ kUnpatchedPcRelativeCallCode,
+ ArrayRef<const LinkerPatch>(method2_patches));
+ Link();
+
+ uint32_t method1_offset = GetMethodOffset(1u);
+ uint32_t method2_offset = GetMethodOffset(2u);
+ uint32_t diff_after = method2_offset - (method1_offset + kAnchorOffset /* PC adjustment */);
+ diff_after += (diff_after & 0x8000) << 1; // Account for sign extension in jialc.
+ static const uint8_t method1_expected_code[] = {
+ static_cast<uint8_t>(diff_after >> 16), static_cast<uint8_t>(diff_after >> 24), 0x3E, 0xEC,
+ static_cast<uint8_t>(diff_after), static_cast<uint8_t>(diff_after >> 8), 0x01, 0xF8,
+ };
+ EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(method1_expected_code)));
+ uint32_t diff_before = method1_offset - (method2_offset + kAnchorOffset /* PC adjustment */);
+ diff_before += (diff_before & 0x8000) << 1; // Account for sign extension in jialc.
+ static const uint8_t method2_expected_code[] = {
+ static_cast<uint8_t>(diff_before >> 16), static_cast<uint8_t>(diff_before >> 24), 0x3E, 0xEC,
+ static_cast<uint8_t>(diff_before), static_cast<uint8_t>(diff_before >> 8), 0x01, 0xF8,
+ };
+ EXPECT_TRUE(CheckLinkedMethod(MethodRef(2u), ArrayRef<const uint8_t>(method2_expected_code)));
+}
+
+} // namespace linker
+} // namespace art
diff --git a/compiler/linker/relative_patcher.cc b/compiler/linker/relative_patcher.cc
index 77655947fd..f1538b10cc 100644
--- a/compiler/linker/relative_patcher.cc
+++ b/compiler/linker/relative_patcher.cc
@@ -25,6 +25,9 @@
#ifdef ART_ENABLE_CODEGEN_mips
#include "linker/mips/relative_patcher_mips.h"
#endif
+#ifdef ART_ENABLE_CODEGEN_mips64
+#include "linker/mips64/relative_patcher_mips64.h"
+#endif
#ifdef ART_ENABLE_CODEGEN_x86
#include "linker/x86/relative_patcher_x86.h"
#endif
@@ -103,6 +106,10 @@ std::unique_ptr<RelativePatcher> RelativePatcher::Create(
return std::unique_ptr<RelativePatcher>(
new MipsRelativePatcher(features->AsMipsInstructionSetFeatures()));
#endif
+#ifdef ART_ENABLE_CODEGEN_mips64
+ case kMips64:
+ return std::unique_ptr<RelativePatcher>(new Mips64RelativePatcher());
+#endif
default:
return std::unique_ptr<RelativePatcher>(new RelativePatcherNone);
}
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index fa6a5225e7..402eeee65f 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1402,6 +1402,14 @@ void CodeGenerator::EmitJitRoots(uint8_t* code,
entry.second = index;
++index;
}
+ for (auto& entry : jit_class_roots_) {
+ // Update the `roots` with the class, and replace the address temporarily
+ // stored to the index in the table.
+ uint64_t address = entry.second;
+ roots->Set(index, reinterpret_cast<StackReference<mirror::Class>*>(address)->AsMirrorPtr());
+ entry.second = index;
+ ++index;
+ }
EmitJitRootPatches(code, roots_data);
}
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 4b11e7c699..2e2c3c00af 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -34,6 +34,7 @@
#include "stack_map_stream.h"
#include "string_reference.h"
#include "utils/label.h"
+#include "utils/type_reference.h"
namespace art {
@@ -343,7 +344,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
void BuildStackMaps(MemoryRegion region, const DexFile::CodeItem& code_item);
size_t ComputeStackMapsSize();
size_t GetNumberOfJitRoots() const {
- return jit_string_roots_.size();
+ return jit_string_roots_.size() + jit_class_roots_.size();
}
// Fills the `literals` array with literals collected during code generation.
@@ -611,6 +612,8 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
block_order_(nullptr),
jit_string_roots_(StringReferenceValueComparator(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ jit_class_roots_(TypeReferenceValueComparator(),
+ graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
disasm_info_(nullptr),
stats_(stats),
graph_(graph),
@@ -681,6 +684,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
virtual void EmitJitRootPatches(uint8_t* code ATTRIBUTE_UNUSED,
const uint8_t* roots_data ATTRIBUTE_UNUSED) {
DCHECK_EQ(jit_string_roots_.size(), 0u);
+ DCHECK_EQ(jit_class_roots_.size(), 0u);
}
// Frame size required for this method.
@@ -711,7 +715,12 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
// Maps a StringReference (dex_file, string_index) to the index in the literal table.
// Entries are initially added with a 0 index, and `EmitJitRoots` will compute all the
// indices.
- ArenaSafeMap<StringReference, size_t, StringReferenceValueComparator> jit_string_roots_;
+ ArenaSafeMap<StringReference, uint32_t, StringReferenceValueComparator> jit_string_roots_;
+
+ // Maps a ClassReference (dex_file, type_index) to the index in the literal table.
+ // Entries are initially added with a pointer in the handle zone, and `EmitJitRoots`
+ // will compute all the indices.
+ ArenaSafeMap<TypeReference, uint64_t, TypeReferenceValueComparator> jit_class_roots_;
DisassemblyInformation* disasm_info_;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index ed6eef1b55..8104613d3f 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1216,7 +1216,9 @@ CodeGeneratorARM::CodeGeneratorARM(HGraph* graph,
boot_image_address_patches_(std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(StringReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ jit_class_patches_(TypeReferenceValueComparator(),
+ graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
// Always save the LR register to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(LR));
}
@@ -5712,8 +5714,7 @@ HLoadClass::LoadKind CodeGeneratorARM::GetSupportedLoadClassKind(
break;
case HLoadClass::LoadKind::kBootImageAddress:
break;
- case HLoadClass::LoadKind::kDexCacheAddress:
- DCHECK(Runtime::Current()->UseJitCompilation());
+ case HLoadClass::LoadKind::kJitTableAddress:
break;
case HLoadClass::LoadKind::kDexCachePcRelative:
DCHECK(!Runtime::Current()->UseJitCompilation());
@@ -5814,22 +5815,12 @@ void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
__ LoadLiteral(out, codegen_->DeduplicateBootImageAddressLiteral(address));
break;
}
- case HLoadClass::LoadKind::kDexCacheAddress: {
- DCHECK_NE(cls->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
- // 16-bit LDR immediate has a 5-bit offset multiplied by the size and that gives
- // a 128B range. To try and reduce the number of literals if we load multiple types,
- // simply split the dex cache address to a 128B aligned base loaded from a literal
- // and the remaining offset embedded in the load.
- static_assert(sizeof(GcRoot<mirror::Class>) == 4u, "Expected GC root to be 4 bytes.");
- DCHECK_ALIGNED(cls->GetAddress(), 4u);
- constexpr size_t offset_bits = /* encoded bits */ 5 + /* scale */ 2;
- uint32_t base_address = address & ~MaxInt<uint32_t>(offset_bits);
- uint32_t offset = address & MaxInt<uint32_t>(offset_bits);
- __ LoadLiteral(out, codegen_->DeduplicateDexCacheAddressLiteral(base_address));
- // /* GcRoot<mirror::Class> */ out = *(base_address + offset)
- GenerateGcRootFieldLoad(cls, out_loc, out, offset, read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
+ case HLoadClass::LoadKind::kJitTableAddress: {
+ __ LoadLiteral(out, codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(),
+ cls->GetTypeIndex(),
+ cls->GetAddress()));
+ // /* GcRoot<mirror::Class> */ out = *out
+ GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
break;
}
case HLoadClass::LoadKind::kDexCachePcRelative: {
@@ -7379,10 +7370,6 @@ Literal* CodeGeneratorARM::DeduplicateBootImageAddressLiteral(uint32_t address)
return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address), map);
}
-Literal* CodeGeneratorARM::DeduplicateDexCacheAddressLiteral(uint32_t address) {
- return DeduplicateUint32Literal(address, &uint32_literals_);
-}
-
Literal* CodeGeneratorARM::DeduplicateJitStringLiteral(const DexFile& dex_file,
dex::StringIndex string_index) {
jit_string_roots_.Overwrite(StringReference(&dex_file, string_index), /* placeholder */ 0u);
@@ -7391,6 +7378,15 @@ Literal* CodeGeneratorARM::DeduplicateJitStringLiteral(const DexFile& dex_file,
[this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
}
+Literal* CodeGeneratorARM::DeduplicateJitClassLiteral(const DexFile& dex_file,
+ dex::TypeIndex type_index,
+ uint64_t address) {
+ jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index), address);
+ return jit_class_patches_.GetOrCreate(
+ TypeReference(&dex_file, type_index),
+ [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
+}
+
template <LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
inline void CodeGeneratorARM::EmitPcRelativeLinkerPatches(
const ArenaDeque<PcRelativePatchInfo>& infos,
@@ -7707,18 +7703,28 @@ void InstructionCodeGeneratorARM::VisitClassTableGet(HClassTableGet* instruction
}
}
+static void PatchJitRootUse(uint8_t* code,
+ const uint8_t* roots_data,
+ Literal* literal,
+ uint64_t index_in_table) {
+ DCHECK(literal->GetLabel()->IsBound());
+ uint32_t literal_offset = literal->GetLabel()->Position();
+ uintptr_t address =
+ reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
+ uint8_t* data = code + literal_offset;
+ reinterpret_cast<uint32_t*>(data)[0] = dchecked_integral_cast<uint32_t>(address);
+}
+
void CodeGeneratorARM::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
for (const auto& entry : jit_string_patches_) {
const auto& it = jit_string_roots_.find(entry.first);
DCHECK(it != jit_string_roots_.end());
- size_t index_in_table = it->second;
- Literal* literal = entry.second;
- DCHECK(literal->GetLabel()->IsBound());
- uint32_t literal_offset = literal->GetLabel()->Position();
- uintptr_t address =
- reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
- uint8_t* data = code + literal_offset;
- reinterpret_cast<uint32_t*>(data)[0] = dchecked_integral_cast<uint32_t>(address);
+ PatchJitRootUse(code, roots_data, entry.second, it->second);
+ }
+ for (const auto& entry : jit_class_patches_) {
+ const auto& it = jit_class_roots_.find(entry.first);
+ DCHECK(it != jit_class_roots_.end());
+ PatchJitRootUse(code, roots_data, entry.second, it->second);
}
}
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 8230512825..605169deed 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -489,8 +489,10 @@ class CodeGeneratorARM : public CodeGenerator {
dex::StringIndex string_index);
Literal* DeduplicateBootImageTypeLiteral(const DexFile& dex_file, dex::TypeIndex type_index);
Literal* DeduplicateBootImageAddressLiteral(uint32_t address);
- Literal* DeduplicateDexCacheAddressLiteral(uint32_t address);
Literal* DeduplicateJitStringLiteral(const DexFile& dex_file, dex::StringIndex string_index);
+ Literal* DeduplicateJitClassLiteral(const DexFile& dex_file,
+ dex::TypeIndex type_index,
+ uint64_t address);
void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
@@ -599,9 +601,9 @@ class CodeGeneratorARM : public CodeGenerator {
using StringToLiteralMap = ArenaSafeMap<StringReference,
Literal*,
StringReferenceValueComparator>;
- using BootTypeToLiteralMap = ArenaSafeMap<TypeReference,
- Literal*,
- TypeReferenceValueComparator>;
+ using TypeToLiteralMap = ArenaSafeMap<TypeReference,
+ Literal*,
+ TypeReferenceValueComparator>;
Literal* DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map);
Literal* DeduplicateMethodLiteral(MethodReference target_method, MethodToLiteralMap* map);
@@ -638,7 +640,7 @@ class CodeGeneratorARM : public CodeGenerator {
// PC-relative String patch info; type depends on configuration (app .bss or boot image PIC).
ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
// Deduplication map for boot type literals for kBootImageLinkTimeAddress.
- BootTypeToLiteralMap boot_image_type_patches_;
+ TypeToLiteralMap boot_image_type_patches_;
// PC-relative type patch info.
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
// Deduplication map for patchable boot image addresses.
@@ -646,6 +648,8 @@ class CodeGeneratorARM : public CodeGenerator {
// Patches for string literals in JIT compiled code.
StringToLiteralMap jit_string_patches_;
+ // Patches for class literals in JIT compiled code.
+ TypeToLiteralMap jit_class_patches_;
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARM);
};
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 6eebd69a04..5cff303e2e 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1162,7 +1162,9 @@ CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph,
boot_image_address_patches_(std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(StringReferenceValueComparator(),
- graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ jit_class_patches_(TypeReferenceValueComparator(),
+ graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
// Save the link register (containing the return address) to mimic Quick.
AddAllocatedRegister(LocationFrom(lr));
}
@@ -4169,11 +4171,6 @@ vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateBootImageAddres
return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address), map);
}
-vixl::aarch64::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateDexCacheAddressLiteral(
- uint64_t address) {
- return DeduplicateUint64Literal(address);
-}
-
vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitStringLiteral(
const DexFile& dex_file, dex::StringIndex string_index) {
jit_string_roots_.Overwrite(StringReference(&dex_file, string_index), /* placeholder */ 0u);
@@ -4182,6 +4179,14 @@ vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitStringLitera
[this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
}
+vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitClassLiteral(
+ const DexFile& dex_file, dex::TypeIndex type_index, uint64_t address) {
+ jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index), address);
+ return jit_class_patches_.GetOrCreate(
+ TypeReference(&dex_file, type_index),
+ [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
+}
+
void CodeGeneratorARM64::EmitAdrpPlaceholder(vixl::aarch64::Label* fixup_label,
vixl::aarch64::Register reg) {
DCHECK(reg.IsX());
@@ -4359,7 +4364,7 @@ HLoadClass::LoadKind CodeGeneratorARM64::GetSupportedLoadClassKind(
break;
case HLoadClass::LoadKind::kBootImageAddress:
break;
- case HLoadClass::LoadKind::kDexCacheAddress:
+ case HLoadClass::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
case HLoadClass::LoadKind::kDexCachePcRelative:
@@ -4452,26 +4457,16 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
__ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(cls->GetAddress()));
break;
}
- case HLoadClass::LoadKind::kDexCacheAddress: {
- DCHECK_NE(cls->GetAddress(), 0u);
- // LDR immediate has a 12-bit offset multiplied by the size and for 32-bit loads
- // that gives a 16KiB range. To try and reduce the number of literals if we load
- // multiple types, simply split the dex cache address to a 16KiB aligned base
- // loaded from a literal and the remaining offset embedded in the load.
- static_assert(sizeof(GcRoot<mirror::Class>) == 4u, "Expected GC root to be 4 bytes.");
- DCHECK_ALIGNED(cls->GetAddress(), 4u);
- constexpr size_t offset_bits = /* encoded bits */ 12 + /* scale */ 2;
- uint64_t base_address = cls->GetAddress() & ~MaxInt<uint64_t>(offset_bits);
- uint32_t offset = cls->GetAddress() & MaxInt<uint64_t>(offset_bits);
- __ Ldr(out.X(), codegen_->DeduplicateDexCacheAddressLiteral(base_address));
- // /* GcRoot<mirror::Class> */ out = *(base_address + offset)
+ case HLoadClass::LoadKind::kJitTableAddress: {
+ __ Ldr(out, codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(),
+ cls->GetTypeIndex(),
+ cls->GetAddress()));
GenerateGcRootFieldLoad(cls,
out_loc,
out.X(),
- offset,
+ /* offset */ 0,
/* fixup_label */ nullptr,
- read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
+ kCompilerReadBarrierOption);
break;
}
case HLoadClass::LoadKind::kDexCachePcRelative: {
@@ -5782,17 +5777,27 @@ void InstructionCodeGeneratorARM64::VisitClassTableGet(HClassTableGet* instructi
}
}
+static void PatchJitRootUse(uint8_t* code,
+ const uint8_t* roots_data,
+ vixl::aarch64::Literal<uint32_t>* literal,
+ uint64_t index_in_table) {
+ uint32_t literal_offset = literal->GetOffset();
+ uintptr_t address =
+ reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
+ uint8_t* data = code + literal_offset;
+ reinterpret_cast<uint32_t*>(data)[0] = dchecked_integral_cast<uint32_t>(address);
+}
+
void CodeGeneratorARM64::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
for (const auto& entry : jit_string_patches_) {
const auto& it = jit_string_roots_.find(entry.first);
DCHECK(it != jit_string_roots_.end());
- size_t index_in_table = it->second;
- vixl::aarch64::Literal<uint32_t>* literal = entry.second;
- uint32_t literal_offset = literal->GetOffset();
- uintptr_t address =
- reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
- uint8_t* data = code + literal_offset;
- reinterpret_cast<uint32_t*>(data)[0] = dchecked_integral_cast<uint32_t>(address);
+ PatchJitRootUse(code, roots_data, entry.second, it->second);
+ }
+ for (const auto& entry : jit_class_patches_) {
+ const auto& it = jit_class_roots_.find(entry.first);
+ DCHECK(it != jit_class_roots_.end());
+ PatchJitRootUse(code, roots_data, entry.second, it->second);
}
}
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 868c8b07ed..85b6f9faf5 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -566,9 +566,11 @@ class CodeGeneratorARM64 : public CodeGenerator {
vixl::aarch64::Literal<uint32_t>* DeduplicateBootImageTypeLiteral(const DexFile& dex_file,
dex::TypeIndex type_index);
vixl::aarch64::Literal<uint32_t>* DeduplicateBootImageAddressLiteral(uint64_t address);
- vixl::aarch64::Literal<uint64_t>* DeduplicateDexCacheAddressLiteral(uint64_t address);
vixl::aarch64::Literal<uint32_t>* DeduplicateJitStringLiteral(const DexFile& dex_file,
dex::StringIndex string_index);
+ vixl::aarch64::Literal<uint32_t>* DeduplicateJitClassLiteral(const DexFile& dex_file,
+ dex::TypeIndex type_index,
+ uint64_t address);
void EmitAdrpPlaceholder(vixl::aarch64::Label* fixup_label, vixl::aarch64::Register reg);
void EmitAddPlaceholder(vixl::aarch64::Label* fixup_label,
@@ -682,9 +684,9 @@ class CodeGeneratorARM64 : public CodeGenerator {
using StringToLiteralMap = ArenaSafeMap<StringReference,
vixl::aarch64::Literal<uint32_t>*,
StringReferenceValueComparator>;
- using BootTypeToLiteralMap = ArenaSafeMap<TypeReference,
- vixl::aarch64::Literal<uint32_t>*,
- TypeReferenceValueComparator>;
+ using TypeToLiteralMap = ArenaSafeMap<TypeReference,
+ vixl::aarch64::Literal<uint32_t>*,
+ TypeReferenceValueComparator>;
vixl::aarch64::Literal<uint32_t>* DeduplicateUint32Literal(uint32_t value,
Uint32ToLiteralMap* map);
@@ -733,8 +735,7 @@ class CodeGeneratorARM64 : public CodeGenerator {
// Deduplication map for 32-bit literals, used for non-patchable boot image addresses.
Uint32ToLiteralMap uint32_literals_;
- // Deduplication map for 64-bit literals, used for non-patchable method address, method code
- // or string dex cache address.
+ // Deduplication map for 64-bit literals, used for non-patchable method address or method code.
Uint64ToLiteralMap uint64_literals_;
// Method patch info, map MethodReference to a literal for method address and method code.
MethodToLiteralMap method_patches_;
@@ -749,7 +750,7 @@ class CodeGeneratorARM64 : public CodeGenerator {
// PC-relative String patch info; type depends on configuration (app .bss or boot image PIC).
ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
// Deduplication map for boot type literals for kBootImageLinkTimeAddress.
- BootTypeToLiteralMap boot_image_type_patches_;
+ TypeToLiteralMap boot_image_type_patches_;
// PC-relative type patch info.
ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
// Deduplication map for patchable boot image addresses.
@@ -757,6 +758,8 @@ class CodeGeneratorARM64 : public CodeGenerator {
// Patches for string literals in JIT compiled code.
StringToLiteralMap jit_string_patches_;
+ // Patches for class literals in JIT compiled code.
+ TypeToLiteralMap jit_class_patches_;
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARM64);
};
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 3a3d2a9db1..55f3c3ceef 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -57,6 +57,9 @@ using helpers::OutputVRegister;
using helpers::RegisterFrom;
using helpers::SRegisterFrom;
+using vixl::ExactAssemblyScope;
+using vixl::CodeBufferCheckScope;
+
using RegisterList = vixl32::RegisterList;
static bool ExpectedPairLayout(Location location) {
@@ -843,9 +846,9 @@ class ReadBarrierMarkAndUpdateFieldSlowPathARMVIXL : public SlowPathCodeARMVIXL
__ Subs(tmp, tmp, expected);
{
- AssemblerAccurateScope aas(arm_codegen->GetVIXLAssembler(),
- 2 * kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(arm_codegen->GetVIXLAssembler(),
+ 2 * kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ it(ne);
__ clrex(ne);
@@ -1261,9 +1264,9 @@ void JumpTableARMVIXL::EmitTable(CodeGeneratorARMVIXL* codegen) {
// We are about to use the assembler to place literals directly. Make sure we have enough
// underlying code buffer and we have generated a jump table of the right size, using
// codegen->GetVIXLAssembler()->GetBuffer().Align();
- AssemblerAccurateScope aas(codegen->GetVIXLAssembler(),
- num_entries * sizeof(int32_t),
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(codegen->GetVIXLAssembler(),
+ num_entries * sizeof(int32_t),
+ CodeBufferCheckScope::kMaximumSize);
// TODO(VIXL): Check that using lower case bind is fine here.
codegen->GetVIXLAssembler()->bind(&table_start_);
for (uint32_t i = 0; i < num_entries; i++) {
@@ -1377,9 +1380,9 @@ void CodeGeneratorARMVIXL::GenerateFrameEntry() {
vixl32::Register temp = temps.Acquire();
__ Sub(temp, sp, static_cast<int32_t>(GetStackOverflowReservedBytes(kArm)));
// The load must immediately precede RecordPcInfo.
- AssemblerAccurateScope aas(GetVIXLAssembler(),
- vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ ldr(temp, MemOperand(temp));
RecordPcInfo(nullptr, 0);
}
@@ -1637,9 +1640,9 @@ void CodeGeneratorARMVIXL::InvokeRuntime(QuickEntrypointEnum entrypoint,
__ Ldr(lr, MemOperand(tr, GetThreadOffset<kArmPointerSize>(entrypoint).Int32Value()));
// Ensure the pc position is recorded immediately after the `blx` instruction.
// blx in T32 has only 16bit encoding that's why a stricter check for the scope is used.
- AssemblerAccurateScope aas(GetVIXLAssembler(),
- vixl32::k16BitT32InstructionSizeInBytes,
- CodeBufferCheckScope::kExactSize);
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ vixl32::k16BitT32InstructionSizeInBytes,
+ CodeBufferCheckScope::kExactSize);
__ blx(lr);
if (EntrypointRequiresStackMap(entrypoint)) {
RecordPcInfo(instruction, dex_pc, slow_path);
@@ -2082,9 +2085,9 @@ void InstructionCodeGeneratorARMVIXL::HandleCondition(HCondition* cond) {
__ Cmp(InputRegisterAt(cond, 0),
CodeGenerator::GetInt32ValueOf(right.GetConstant()));
}
- AssemblerAccurateScope aas(GetVIXLAssembler(),
- 3 * vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ 3 * vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ ite(ARMCondition(cond->GetCondition()));
__ mov(ARMCondition(cond->GetCondition()), OutputRegister(cond), 1);
__ mov(ARMCondition(cond->GetOppositeCondition()), OutputRegister(cond), 0);
@@ -2370,9 +2373,9 @@ void InstructionCodeGeneratorARMVIXL::VisitInvokeInterface(HInvokeInterface* inv
// Ensure the pc position is recorded immediately after the `ldr` instruction.
{
- AssemblerAccurateScope aas(GetVIXLAssembler(),
- vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
// /* HeapReference<Class> */ temp = receiver->klass_
__ ldr(temp, MemOperand(RegisterFrom(receiver), class_offset));
codegen_->MaybeRecordImplicitNullCheck(invoke);
@@ -2418,7 +2421,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInvokeInterface(HInvokeInterface* inv
{
// Ensure the pc position is recorded immediately after the `blx` instruction.
// blx in T32 has only 16bit encoding that's why a stricter check for the scope is used.
- AssemblerAccurateScope aas(GetVIXLAssembler(),
+ ExactAssemblyScope aas(GetVIXLAssembler(),
vixl32::k16BitT32InstructionSizeInBytes,
CodeBufferCheckScope::kExactSize);
// LR();
@@ -3793,9 +3796,9 @@ void InstructionCodeGeneratorARMVIXL::HandleShift(HBinaryOperation* op) {
// If the shift is > 32 bits, override the high part
__ Subs(temp, o_l, Operand::From(kArmBitsPerWord));
{
- AssemblerAccurateScope guard(GetVIXLAssembler(),
- 2 * vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope guard(GetVIXLAssembler(),
+ 2 * vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ it(pl);
__ lsl(pl, o_h, low, temp);
}
@@ -3812,9 +3815,9 @@ void InstructionCodeGeneratorARMVIXL::HandleShift(HBinaryOperation* op) {
// If the shift is > 32 bits, override the low part
__ Subs(temp, o_h, Operand::From(kArmBitsPerWord));
{
- AssemblerAccurateScope guard(GetVIXLAssembler(),
- 2 * vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope guard(GetVIXLAssembler(),
+ 2 * vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ it(pl);
__ asr(pl, o_l, high, temp);
}
@@ -3829,9 +3832,9 @@ void InstructionCodeGeneratorARMVIXL::HandleShift(HBinaryOperation* op) {
__ Orr(o_l, o_l, temp);
__ Subs(temp, o_h, Operand::From(kArmBitsPerWord));
{
- AssemblerAccurateScope guard(GetVIXLAssembler(),
- 2 * vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope guard(GetVIXLAssembler(),
+ 2 * vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ it(pl);
__ lsr(pl, o_l, high, temp);
}
@@ -3948,9 +3951,9 @@ void InstructionCodeGeneratorARMVIXL::VisitNewInstance(HNewInstance* instruction
GetAssembler()->LoadFromOffset(kLoadWord, temp, tr, QUICK_ENTRY_POINT(pNewEmptyString));
GetAssembler()->LoadFromOffset(kLoadWord, lr, temp, code_offset.Int32Value());
// blx in T32 has only 16bit encoding that's why a stricter check for the scope is used.
- AssemblerAccurateScope aas(GetVIXLAssembler(),
- vixl32::k16BitT32InstructionSizeInBytes,
- CodeBufferCheckScope::kExactSize);
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ vixl32::k16BitT32InstructionSizeInBytes,
+ CodeBufferCheckScope::kExactSize);
__ blx(lr);
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
@@ -4192,9 +4195,9 @@ void InstructionCodeGeneratorARMVIXL::GenerateWideAtomicStore(vixl32::Register a
__ Bind(&fail);
{
// Ensure the pc position is recorded immediately after the `ldrexd` instruction.
- AssemblerAccurateScope aas(GetVIXLAssembler(),
- vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
// We need a load followed by store. (The address used in a STREX instruction must
// be the same as the address in the most recently executed LDREX instruction.)
__ ldrexd(temp1, temp2, MemOperand(addr));
@@ -4715,9 +4718,9 @@ void CodeGeneratorARMVIXL::GenerateImplicitNullCheck(HNullCheck* instruction) {
UseScratchRegisterScope temps(GetVIXLAssembler());
// Ensure the pc position is recorded immediately after the `ldr` instruction.
- AssemblerAccurateScope aas(GetVIXLAssembler(),
- vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ ldr(temps.Acquire(), MemOperand(InputRegisterAt(instruction, 0)));
RecordPcInfo(instruction, instruction->GetDexPc());
}
@@ -5233,9 +5236,9 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) {
{
// Ensure we record the pc position immediately after the `ldr` instruction.
- AssemblerAccurateScope aas(GetVIXLAssembler(),
- vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
// /* HeapReference<Class> */ temp1 = array->klass_
__ ldr(temp1, MemOperand(array, class_offset));
codegen_->MaybeRecordImplicitNullCheck(instruction);
@@ -5384,9 +5387,9 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayLength(HArrayLength* instruction
vixl32::Register obj = InputRegisterAt(instruction, 0);
vixl32::Register out = OutputRegister(instruction);
{
- AssemblerAccurateScope aas(GetVIXLAssembler(),
- vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ ldr(out, MemOperand(obj, offset));
codegen_->MaybeRecordImplicitNullCheck(instruction);
}
@@ -5776,7 +5779,7 @@ HLoadClass::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadClassKind(
case HLoadClass::LoadKind::kBootImageAddress:
// TODO(VIXL): Enable it back when literal pools are fixed in VIXL.
return HLoadClass::LoadKind::kDexCacheViaMethod;
- case HLoadClass::LoadKind::kDexCacheAddress:
+ case HLoadClass::LoadKind::kJitTableAddress:
// TODO(VIXL): Enable it back when literal pools are fixed in VIXL.
return HLoadClass::LoadKind::kDexCacheViaMethod;
case HLoadClass::LoadKind::kDexCachePcRelative:
@@ -5868,7 +5871,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) {
TODO_VIXL32(FATAL);
break;
}
- case HLoadClass::LoadKind::kDexCacheAddress: {
+ case HLoadClass::LoadKind::kJitTableAddress: {
TODO_VIXL32(FATAL);
break;
}
@@ -7351,9 +7354,9 @@ void CodeGeneratorARMVIXL::GenerateStaticOrDirectCall(
relative_call_patches_.emplace_back(*invoke->GetTargetMethod().dex_file,
invoke->GetTargetMethod().dex_method_index);
{
- AssemblerAccurateScope aas(GetVIXLAssembler(),
- vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ bind(&relative_call_patches_.back().label);
// Arbitrarily branch to the BL itself, override at link time.
__ bl(&relative_call_patches_.back().label);
@@ -7365,9 +7368,9 @@ void CodeGeneratorARMVIXL::GenerateStaticOrDirectCall(
// LR()
{
// blx in T32 has only 16bit encoding that's why a stricter check for the scope is used.
- AssemblerAccurateScope aas(GetVIXLAssembler(),
- vixl32::k16BitT32InstructionSizeInBytes,
- CodeBufferCheckScope::kExactSize);
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ vixl32::k16BitT32InstructionSizeInBytes,
+ CodeBufferCheckScope::kExactSize);
__ blx(lr);
}
break;
@@ -7380,9 +7383,9 @@ void CodeGeneratorARMVIXL::GenerateStaticOrDirectCall(
ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize).Int32Value());
{
// blx in T32 has only 16bit encoding that's why a stricter check for the scope is used.
- AssemblerAccurateScope aas(GetVIXLAssembler(),
- vixl32::k16BitT32InstructionSizeInBytes,
- CodeBufferCheckScope::kExactSize);
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ vixl32::k16BitT32InstructionSizeInBytes,
+ CodeBufferCheckScope::kExactSize);
// LR()
__ blx(lr);
}
@@ -7406,9 +7409,9 @@ void CodeGeneratorARMVIXL::GenerateVirtualCall(HInvokeVirtual* invoke, Location
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
{
// Make sure the pc is recorded immediately after the `ldr` instruction.
- AssemblerAccurateScope aas(GetVIXLAssembler(),
- vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
// /* HeapReference<Class> */ temp = receiver->klass_
__ ldr(temp, MemOperand(receiver, class_offset));
MaybeRecordImplicitNullCheck(invoke);
@@ -7433,9 +7436,9 @@ void CodeGeneratorARMVIXL::GenerateVirtualCall(HInvokeVirtual* invoke, Location
// `RecordPcInfo()` immediately following record the correct pc. Use a scope to help guarantee
// that.
// blx in T32 has only 16bit encoding that's why a stricter check for the scope is used.
- AssemblerAccurateScope aas(GetVIXLAssembler(),
- vixl32::k16BitT32InstructionSizeInBytes,
- CodeBufferCheckScope::kExactSize);
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ vixl32::k16BitT32InstructionSizeInBytes,
+ CodeBufferCheckScope::kExactSize);
__ blx(lr);
}
@@ -7702,9 +7705,9 @@ void InstructionCodeGeneratorARMVIXL::VisitClassTableGet(HClassTableGet* instruc
void CodeGeneratorARMVIXL::EmitMovwMovtPlaceholder(
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels,
vixl32::Register out) {
- AssemblerAccurateScope aas(GetVIXLAssembler(),
- 3 * vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(GetVIXLAssembler(),
+ 3 * vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
// TODO(VIXL): Think about using mov instead of movw.
__ bind(&labels->movw_label);
__ movw(out, /* placeholder */ 0u);
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 5ec3da4652..93ea601ed8 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -120,7 +120,7 @@ class JumpTableARMVIXL : public DeletableArenaObject<kArenaAllocSwitchTable> {
bb_addresses_(switch_instr->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
uint32_t num_entries = switch_instr_->GetNumEntries();
for (uint32_t i = 0; i < num_entries; i++) {
- IntLiteral *lit = new IntLiteral(0);
+ IntLiteral *lit = new IntLiteral(0, vixl32::RawLiteral::kManuallyPlaced);
bb_addresses_.emplace_back(lit);
}
}
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index ff48f6642d..456c5c6a92 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -757,6 +757,11 @@ void CodeGeneratorMIPS::GenerateFrameEntry() {
if (RequiresCurrentMethod()) {
__ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
}
+
+ if (GetGraph()->HasShouldDeoptimizeFlag()) {
+ // Initialize should deoptimize flag to 0.
+ __ StoreToOffset(kStoreWord, ZERO, SP, GetStackOffsetOfShouldDeoptimizeFlag());
+ }
}
void CodeGeneratorMIPS::GenerateFrameExit() {
@@ -4689,14 +4694,17 @@ void InstructionCodeGeneratorMIPS::GenConditionalMoveR6(HSelect* select) {
}
}
-void LocationsBuilderMIPS::VisitShouldDeoptimizeFlag(
- HShouldDeoptimizeFlag* flag ATTRIBUTE_UNUSED) {
- // TODO: to be implemented.
+void LocationsBuilderMIPS::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
+ LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary(flag, LocationSummary::kNoCall);
+ locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorMIPS::VisitShouldDeoptimizeFlag(
- HShouldDeoptimizeFlag* flag ATTRIBUTE_UNUSED) {
- // TODO: to be implemented.
+void InstructionCodeGeneratorMIPS::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
+ __ LoadFromOffset(kLoadWord,
+ flag->GetLocations()->Out().AsRegister<Register>(),
+ SP,
+ codegen_->GetStackOffsetOfShouldDeoptimizeFlag());
}
void LocationsBuilderMIPS::VisitSelect(HSelect* select) {
@@ -5251,9 +5259,9 @@ HLoadClass::LoadKind CodeGeneratorMIPS::GetSupportedLoadClassKind(
break;
case HLoadClass::LoadKind::kBootImageAddress:
break;
- case HLoadClass::LoadKind::kDexCacheAddress:
+ case HLoadClass::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
- fallback_load = false;
+ fallback_load = true;
break;
case HLoadClass::LoadKind::kDexCachePcRelative:
DCHECK(!Runtime::Current()->UseJitCompilation());
@@ -5614,17 +5622,8 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) {
codegen_->DeduplicateBootImageAddressLiteral(address));
break;
}
- case HLoadClass::LoadKind::kDexCacheAddress: {
- DCHECK_NE(cls->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
- static_assert(sizeof(GcRoot<mirror::Class>) == 4u, "Expected GC root to be 4 bytes.");
- DCHECK_ALIGNED(cls->GetAddress(), 4u);
- int16_t offset = Low16Bits(address);
- uint32_t base_address = address - offset; // This accounts for offset sign extension.
- __ Lui(out, High16Bits(base_address));
- // /* GcRoot<mirror::Class> */ out = *(base_address + offset)
- GenerateGcRootFieldLoad(cls, out_loc, out, offset);
- generate_null_check = !cls->IsInDexCache();
+ case HLoadClass::LoadKind::kJitTableAddress: {
+ LOG(FATAL) << "Unimplemented";
break;
}
case HLoadClass::LoadKind::kDexCachePcRelative: {
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index b1f9b1db53..44d3759978 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -18,6 +18,7 @@
#include "art_method.h"
#include "code_generator_utils.h"
+#include "compiled_method.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
@@ -399,7 +400,15 @@ CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph,
instruction_visitor_(graph, this),
move_resolver_(graph->GetArena(), this),
assembler_(graph->GetArena()),
- isa_features_(isa_features) {
+ isa_features_(isa_features),
+ uint64_literals_(std::less<uint64_t>(),
+ graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ method_patches_(MethodReferenceComparator(),
+ graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ call_patches_(MethodReferenceComparator(),
+ graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
// Save RA (containing the return address) to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(RA));
}
@@ -510,8 +519,6 @@ void CodeGeneratorMIPS64::GenerateFrameEntry() {
RecordPcInfo(nullptr, 0);
}
- // TODO: anything related to T9/GP/GOT/PIC/.so's?
-
if (HasEmptyFrame()) {
return;
}
@@ -562,13 +569,16 @@ void CodeGeneratorMIPS64::GenerateFrameEntry() {
"kCurrentMethodStackOffset must fit into int16_t");
__ Sd(kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
}
+
+ if (GetGraph()->HasShouldDeoptimizeFlag()) {
+ // Initialize should_deoptimize flag to 0.
+ __ StoreToOffset(kStoreWord, ZERO, SP, GetStackOffsetOfShouldDeoptimizeFlag());
+ }
}
void CodeGeneratorMIPS64::GenerateFrameExit() {
__ cfi().RememberState();
- // TODO: anything related to T9/GP/GOT/PIC/.so's?
-
if (!HasEmptyFrame()) {
// Deallocate the rest of the frame.
@@ -878,6 +888,103 @@ void CodeGeneratorMIPS64::MarkGCCard(GpuRegister object,
}
}
+template <LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
+inline void CodeGeneratorMIPS64::EmitPcRelativeLinkerPatches(
+ const ArenaDeque<PcRelativePatchInfo>& infos,
+ ArenaVector<LinkerPatch>* linker_patches) {
+ for (const PcRelativePatchInfo& info : infos) {
+ const DexFile& dex_file = info.target_dex_file;
+ size_t offset_or_index = info.offset_or_index;
+ DCHECK(info.pc_rel_label.IsBound());
+ uint32_t pc_rel_offset = __ GetLabelLocation(&info.pc_rel_label);
+ linker_patches->push_back(Factory(pc_rel_offset, &dex_file, pc_rel_offset, offset_or_index));
+ }
+}
+
+void CodeGeneratorMIPS64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
+ DCHECK(linker_patches->empty());
+ size_t size =
+ method_patches_.size() +
+ call_patches_.size() +
+ pc_relative_dex_cache_patches_.size() +
+ relative_call_patches_.size();
+ linker_patches->reserve(size);
+ for (const auto& entry : method_patches_) {
+ const MethodReference& target_method = entry.first;
+ Literal* literal = entry.second;
+ DCHECK(literal->GetLabel()->IsBound());
+ uint32_t literal_offset = __ GetLabelLocation(literal->GetLabel());
+ linker_patches->push_back(LinkerPatch::MethodPatch(literal_offset,
+ target_method.dex_file,
+ target_method.dex_method_index));
+ }
+ for (const auto& entry : call_patches_) {
+ const MethodReference& target_method = entry.first;
+ Literal* literal = entry.second;
+ DCHECK(literal->GetLabel()->IsBound());
+ uint32_t literal_offset = __ GetLabelLocation(literal->GetLabel());
+ linker_patches->push_back(LinkerPatch::CodePatch(literal_offset,
+ target_method.dex_file,
+ target_method.dex_method_index));
+ }
+ EmitPcRelativeLinkerPatches<LinkerPatch::DexCacheArrayPatch>(pc_relative_dex_cache_patches_,
+ linker_patches);
+ for (const PcRelativePatchInfo& info : relative_call_patches_) {
+ const DexFile& dex_file = info.target_dex_file;
+ uint32_t method_index = info.offset_or_index;
+ DCHECK(info.pc_rel_label.IsBound());
+ uint32_t pc_rel_offset = __ GetLabelLocation(&info.pc_rel_label);
+ linker_patches->push_back(
+ LinkerPatch::RelativeCodePatch(pc_rel_offset, &dex_file, method_index));
+ }
+}
+
+CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativeDexCacheArrayPatch(
+ const DexFile& dex_file, uint32_t element_offset) {
+ return NewPcRelativePatch(dex_file, element_offset, &pc_relative_dex_cache_patches_);
+}
+
+CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativeCallPatch(
+ const DexFile& dex_file, uint32_t method_index) {
+ return NewPcRelativePatch(dex_file, method_index, &relative_call_patches_);
+}
+
+CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativePatch(
+ const DexFile& dex_file, uint32_t offset_or_index, ArenaDeque<PcRelativePatchInfo>* patches) {
+ patches->emplace_back(dex_file, offset_or_index);
+ return &patches->back();
+}
+
+Literal* CodeGeneratorMIPS64::DeduplicateUint64Literal(uint64_t value) {
+ return uint64_literals_.GetOrCreate(
+ value,
+ [this, value]() { return __ NewLiteral<uint64_t>(value); });
+}
+
+Literal* CodeGeneratorMIPS64::DeduplicateMethodLiteral(MethodReference target_method,
+ MethodToLiteralMap* map) {
+ return map->GetOrCreate(
+ target_method,
+ [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
+}
+
+Literal* CodeGeneratorMIPS64::DeduplicateMethodAddressLiteral(MethodReference target_method) {
+ return DeduplicateMethodLiteral(target_method, &method_patches_);
+}
+
+Literal* CodeGeneratorMIPS64::DeduplicateMethodCodeLiteral(MethodReference target_method) {
+ return DeduplicateMethodLiteral(target_method, &call_patches_);
+}
+
+void CodeGeneratorMIPS64::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info,
+ GpuRegister out) {
+ __ Bind(&info->pc_rel_label);
+ // Add the high half of a 32-bit offset to PC.
+ __ Auipc(out, /* placeholder */ 0x1234);
+ // The immediately following instruction will add the sign-extended low half of the 32-bit
+ // offset to `out` (e.g. ld, jialc, addiu).
+}
+
void CodeGeneratorMIPS64::SetupBlockedRegisters() const {
// ZERO, K0, K1, GP, SP, RA are always reserved and can't be allocated.
blocked_core_registers_[ZERO] = true;
@@ -946,7 +1053,6 @@ void CodeGeneratorMIPS64::InvokeRuntime(QuickEntrypointEnum entrypoint,
uint32_t dex_pc,
SlowPathCode* slow_path) {
ValidateInvokeRuntime(entrypoint, instruction, slow_path);
- // TODO: anything related to T9/GP/GOT/PIC/.so's?
__ LoadFromOffset(kLoadDoubleword,
T9,
TR,
@@ -2636,14 +2742,17 @@ void InstructionCodeGeneratorMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
/* false_target */ nullptr);
}
-void LocationsBuilderMIPS64::VisitShouldDeoptimizeFlag(
- HShouldDeoptimizeFlag* flag ATTRIBUTE_UNUSED) {
- // TODO: to be implemented.
+void LocationsBuilderMIPS64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
+ LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary(flag, LocationSummary::kNoCall);
+ locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorMIPS64::VisitShouldDeoptimizeFlag(
- HShouldDeoptimizeFlag* flag ATTRIBUTE_UNUSED) {
- // TODO: to be implemented.
+void InstructionCodeGeneratorMIPS64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
+ __ LoadFromOffset(kLoadWord,
+ flag->GetLocations()->Out().AsRegister<GpuRegister>(),
+ SP,
+ codegen_->GetStackOffsetOfShouldDeoptimizeFlag());
}
void LocationsBuilderMIPS64::VisitSelect(HSelect* select) {
@@ -2986,39 +3095,33 @@ HLoadClass::LoadKind CodeGeneratorMIPS64::GetSupportedLoadClassKind(
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS64::GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
- switch (desired_dispatch_info.method_load_kind) {
- case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
- case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
- // TODO: Implement these types. For the moment, we fall back to kDexCacheViaMethod.
- return HInvokeStaticOrDirect::DispatchInfo {
- HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
- HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
- 0u,
- 0u
- };
- default:
- break;
- }
- switch (desired_dispatch_info.code_ptr_location) {
- case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
- case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
- // TODO: Implement these types. For the moment, we fall back to kCallArtMethod.
- return HInvokeStaticOrDirect::DispatchInfo {
- desired_dispatch_info.method_load_kind,
- HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
- desired_dispatch_info.method_load_data,
- 0u
- };
- default:
- return desired_dispatch_info;
- }
+ // On MIPS64 we support all dispatch types.
+ return desired_dispatch_info;
}
void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
// All registers are assumed to be correctly set up per the calling convention.
-
Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
- switch (invoke->GetMethodLoadKind()) {
+ HInvokeStaticOrDirect::MethodLoadKind method_load_kind = invoke->GetMethodLoadKind();
+ HInvokeStaticOrDirect::CodePtrLocation code_ptr_location = invoke->GetCodePtrLocation();
+
+ // For better instruction scheduling we load the direct code pointer before the method pointer.
+ switch (code_ptr_location) {
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
+ // T9 = invoke->GetDirectCodePtr();
+ __ LoadLiteral(T9, kLoadDoubleword, DeduplicateUint64Literal(invoke->GetDirectCodePtr()));
+ break;
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
+ // T9 = code address from literal pool with link-time patch.
+ __ LoadLiteral(T9,
+ kLoadUnsignedWord,
+ DeduplicateMethodCodeLiteral(invoke->GetTargetMethod()));
+ break;
+ default:
+ break;
+ }
+
+ switch (method_load_kind) {
case HInvokeStaticOrDirect::MethodLoadKind::kStringInit: {
// temp = thread->string_init_entrypoint
uint32_t offset =
@@ -3033,14 +3136,23 @@ void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invo
callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
break;
case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
- __ LoadConst64(temp.AsRegister<GpuRegister>(), invoke->GetMethodAddress());
+ __ LoadLiteral(temp.AsRegister<GpuRegister>(),
+ kLoadDoubleword,
+ DeduplicateUint64Literal(invoke->GetMethodAddress()));
break;
case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
- case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
- // TODO: Implement these types.
- // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
- LOG(FATAL) << "Unsupported";
- UNREACHABLE();
+ __ LoadLiteral(temp.AsRegister<GpuRegister>(),
+ kLoadUnsignedWord,
+ DeduplicateMethodAddressLiteral(invoke->GetTargetMethod()));
+ break;
+ case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
+ uint32_t offset = invoke->GetDexCacheArrayOffset();
+ CodeGeneratorMIPS64::PcRelativePatchInfo* info =
+ NewPcRelativeDexCacheArrayPatch(invoke->GetDexFile(), offset);
+ EmitPcRelativeAddressPlaceholderHigh(info, AT);
+ __ Ld(temp.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678);
+ break;
+ }
case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
Location current_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
GpuRegister reg = temp.AsRegister<GpuRegister>();
@@ -3071,23 +3183,25 @@ void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invo
}
}
- switch (invoke->GetCodePtrLocation()) {
+ switch (code_ptr_location) {
case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
- __ Jialc(&frame_entry_label_, T9);
+ __ Balc(&frame_entry_label_);
break;
case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
- // LR = invoke->GetDirectCodePtr();
- __ LoadConst64(T9, invoke->GetDirectCodePtr());
- // LR()
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
+ // T9 prepared above for better instruction scheduling.
+ // T9()
__ Jalr(T9);
__ Nop();
break;
- case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
- case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
- // TODO: Implement these types.
- // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
- LOG(FATAL) << "Unsupported";
- UNREACHABLE();
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative: {
+ CodeGeneratorMIPS64::PcRelativePatchInfo* info =
+ NewPcRelativeCallPatch(*invoke->GetTargetMethod().dex_file,
+ invoke->GetTargetMethod().dex_method_index);
+ EmitPcRelativeAddressPlaceholderHigh(info, AT);
+ __ Jialc(AT, /* placeholder */ 0x5678);
+ break;
+ }
case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
// T9 = callee_method->entry_point_from_quick_compiled_code_;
__ LoadFromOffset(kLoadDoubleword,
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 690eccb7d8..067c1f940f 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -279,6 +279,9 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
Mips64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
const Mips64Assembler& GetAssembler() const OVERRIDE { return assembler_; }
+ // Emit linker patches.
+ void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
+
void MarkGCCard(GpuRegister object, GpuRegister value, bool value_can_be_null);
// Register allocation.
@@ -357,7 +360,44 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+ // The PcRelativePatchInfo is used for PC-relative addressing of dex cache arrays,
+ // boot image strings and method calls. The only difference is the interpretation of
+ // the offset_or_index.
+ struct PcRelativePatchInfo {
+ PcRelativePatchInfo(const DexFile& dex_file, uint32_t off_or_idx)
+ : target_dex_file(dex_file), offset_or_index(off_or_idx) { }
+ PcRelativePatchInfo(PcRelativePatchInfo&& other) = default;
+
+ const DexFile& target_dex_file;
+ // Either the dex cache array element offset or the string/type/method index.
+ uint32_t offset_or_index;
+ // Label for the auipc instruction.
+ Mips64Label pc_rel_label;
+ };
+
+ PcRelativePatchInfo* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
+ uint32_t element_offset);
+ PcRelativePatchInfo* NewPcRelativeCallPatch(const DexFile& dex_file,
+ uint32_t method_index);
+
+ void EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info, GpuRegister out);
+
private:
+ using Uint64ToLiteralMap = ArenaSafeMap<uint64_t, Literal*>;
+ using MethodToLiteralMap = ArenaSafeMap<MethodReference, Literal*, MethodReferenceComparator>;
+ Literal* DeduplicateUint64Literal(uint64_t value);
+ Literal* DeduplicateMethodLiteral(MethodReference target_method, MethodToLiteralMap* map);
+ Literal* DeduplicateMethodAddressLiteral(MethodReference target_method);
+ Literal* DeduplicateMethodCodeLiteral(MethodReference target_method);
+
+ PcRelativePatchInfo* NewPcRelativePatch(const DexFile& dex_file,
+ uint32_t offset_or_index,
+ ArenaDeque<PcRelativePatchInfo>* patches);
+
+ template <LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
+ void EmitPcRelativeLinkerPatches(const ArenaDeque<PcRelativePatchInfo>& infos,
+ ArenaVector<LinkerPatch>* linker_patches);
+
// Labels for each block that will be compiled.
Mips64Label* block_labels_; // Indexed by block id.
Mips64Label frame_entry_label_;
@@ -367,6 +407,16 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
Mips64Assembler assembler_;
const Mips64InstructionSetFeatures& isa_features_;
+ // Deduplication map for 64-bit literals, used for non-patchable method address or method code
+ // address.
+ Uint64ToLiteralMap uint64_literals_;
+ // Method patch info, map MethodReference to a literal for method address and method code.
+ MethodToLiteralMap method_patches_;
+ MethodToLiteralMap call_patches_;
+ // PC-relative patch info.
+ ArenaDeque<PcRelativePatchInfo> pc_relative_dex_cache_patches_;
+ ArenaDeque<PcRelativePatchInfo> relative_call_patches_;
+
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorMIPS64);
};
diff --git a/compiler/optimizing/code_generator_utils.h b/compiler/optimizing/code_generator_utils.h
index 7efed8c9ec..a6b41c0588 100644
--- a/compiler/optimizing/code_generator_utils.h
+++ b/compiler/optimizing/code_generator_utils.h
@@ -18,6 +18,8 @@
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_UTILS_H_
#include <cstdint>
+#include <cstdlib>
+#include <limits>
namespace art {
@@ -32,6 +34,12 @@ void CalculateMagicAndShiftForDivRem(int64_t divisor, bool is_long, int64_t* mag
// that it has been previously visited by the InstructionCodeGenerator.
bool IsBooleanValueOrMaterializedCondition(HInstruction* cond_input);
+template <typename T> T AbsOrMin(T value) {
+ return (value == std::numeric_limits<T>::min())
+ ? value
+ : std::abs(value);
+}
+
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_UTILS_H_
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index d6e92ccb81..8612a67c8b 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1013,6 +1013,7 @@ CodeGeneratorX86::CodeGeneratorX86(HGraph* graph,
string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
constant_area_start_(-1),
fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
method_address_offset_(-1) {
@@ -6034,7 +6035,7 @@ HLoadClass::LoadKind CodeGeneratorX86::GetSupportedLoadClassKind(
break;
case HLoadClass::LoadKind::kBootImageAddress:
break;
- case HLoadClass::LoadKind::kDexCacheAddress:
+ case HLoadClass::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
case HLoadClass::LoadKind::kDexCacheViaMethod:
@@ -6073,6 +6074,16 @@ void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) {
locations->SetOut(Location::RequiresRegister());
}
+Label* CodeGeneratorX86::NewJitRootClassPatch(const DexFile& dex_file,
+ dex::TypeIndex dex_index,
+ uint64_t address) {
+ jit_class_roots_.Overwrite(TypeReference(&dex_file, dex_index), address);
+ // Add a patch entry and return the label.
+ jit_class_patches_.emplace_back(dex_file, dex_index.index_);
+ PatchInfo<Label>* info = &jit_class_patches_.back();
+ return &info->label;
+}
+
void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) {
LocationSummary* locations = cls->GetLocations();
if (cls->NeedsAccessCheck()) {
@@ -6124,16 +6135,12 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) {
codegen_->RecordSimplePatch();
break;
}
- case HLoadClass::LoadKind::kDexCacheAddress: {
- DCHECK_NE(cls->GetAddress(), 0u);
- uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
+ case HLoadClass::LoadKind::kJitTableAddress: {
+ Address address = Address::Absolute(CodeGeneratorX86::kDummy32BitOffset);
+ Label* fixup_label = codegen_->NewJitRootClassPatch(
+ cls->GetDexFile(), cls->GetTypeIndex(), cls->GetAddress());
// /* GcRoot<mirror::Class> */ out = *address
- GenerateGcRootFieldLoad(cls,
- out_loc,
- Address::Absolute(address),
- /* fixup_label */ nullptr,
- read_barrier_option);
- generate_null_check = !cls->IsInDexCache();
+ GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, kCompilerReadBarrierOption);
break;
}
case HLoadClass::LoadKind::kDexCachePcRelative: {
@@ -7770,18 +7777,31 @@ void CodeGeneratorX86::MoveFromReturnRegister(Location target, Primitive::Type t
}
}
+void CodeGeneratorX86::PatchJitRootUse(uint8_t* code,
+ const uint8_t* roots_data,
+ const PatchInfo<Label>& info,
+ uint64_t index_in_table) const {
+ uint32_t code_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
+ uintptr_t address =
+ reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
+ typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
+ reinterpret_cast<unaligned_uint32_t*>(code + code_offset)[0] =
+ dchecked_integral_cast<uint32_t>(address);
+}
+
void CodeGeneratorX86::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
for (const PatchInfo<Label>& info : jit_string_patches_) {
- const auto& it = jit_string_roots_.find(StringReference(&info.dex_file,
- dex::StringIndex(info.index)));
+ const auto& it = jit_string_roots_.find(
+ StringReference(&info.dex_file, dex::StringIndex(info.index)));
DCHECK(it != jit_string_roots_.end());
- size_t index_in_table = it->second;
- uint32_t code_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
- uintptr_t address =
- reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
- typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
- reinterpret_cast<unaligned_uint32_t*>(code + code_offset)[0] =
- dchecked_integral_cast<uint32_t>(address);
+ PatchJitRootUse(code, roots_data, info, it->second);
+ }
+
+ for (const PatchInfo<Label>& info : jit_class_patches_) {
+ const auto& it = jit_class_roots_.find(
+ TypeReference(&info.dex_file, dex::TypeIndex(info.index)));
+ DCHECK(it != jit_class_roots_.end());
+ PatchJitRootUse(code, roots_data, info, it->second);
}
}
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 2ae3670bed..c44da97a90 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -416,12 +416,17 @@ class CodeGeneratorX86 : public CodeGenerator {
Label* NewStringBssEntryPatch(HLoadString* load_string);
Label* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file, uint32_t element_offset);
Label* NewJitRootStringPatch(const DexFile& dex_file, dex::StringIndex dex_index);
+ Label* NewJitRootClassPatch(const DexFile& dex_file, dex::TypeIndex dex_index, uint64_t address);
void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;
// Emit linker patches.
void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
+ void PatchJitRootUse(uint8_t* code,
+ const uint8_t* roots_data,
+ const PatchInfo<Label>& info,
+ uint64_t index_in_table) const;
void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
// Emit a write barrier.
@@ -623,6 +628,9 @@ class CodeGeneratorX86 : public CodeGenerator {
// Patches for string root accesses in JIT compiled code.
ArenaDeque<PatchInfo<Label>> jit_string_patches_;
+ // Patches for class root accesses in JIT compiled code.
+ ArenaDeque<PatchInfo<Label>> jit_class_patches_;
+
// Offset to the start of the constant area in the assembled code.
// Used for fixups to the constant area.
int32_t constant_area_start_;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 4474decf59..7dfc736d9c 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1260,7 +1260,8 @@ CodeGeneratorX86_64::CodeGeneratorX86_64(HGraph* graph,
string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
}
@@ -5460,8 +5461,7 @@ HLoadClass::LoadKind CodeGeneratorX86_64::GetSupportedLoadClassKind(
break;
case HLoadClass::LoadKind::kBootImageAddress:
break;
- case HLoadClass::LoadKind::kDexCacheAddress:
- DCHECK(Runtime::Current()->UseJitCompilation());
+ case HLoadClass::LoadKind::kJitTableAddress:
break;
case HLoadClass::LoadKind::kDexCachePcRelative:
DCHECK(!Runtime::Current()->UseJitCompilation());
@@ -5500,6 +5500,16 @@ void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) {
locations->SetOut(Location::RequiresRegister());
}
+Label* CodeGeneratorX86_64::NewJitRootClassPatch(const DexFile& dex_file,
+ dex::TypeIndex dex_index,
+ uint64_t address) {
+ jit_class_roots_.Overwrite(TypeReference(&dex_file, dex_index), address);
+ // Add a patch entry and return the label.
+ jit_class_patches_.emplace_back(dex_file, dex_index.index_);
+ PatchInfo<Label>* info = &jit_class_patches_.back();
+ return &info->label;
+}
+
void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) {
LocationSummary* locations = cls->GetLocations();
if (cls->NeedsAccessCheck()) {
@@ -5543,26 +5553,13 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) {
codegen_->RecordSimplePatch();
break;
}
- case HLoadClass::LoadKind::kDexCacheAddress: {
- DCHECK_NE(cls->GetAddress(), 0u);
+ case HLoadClass::LoadKind::kJitTableAddress: {
+ Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
+ /* no_rip */ true);
+ Label* fixup_label =
+ codegen_->NewJitRootClassPatch(cls->GetDexFile(), cls->GetTypeIndex(), cls->GetAddress());
// /* GcRoot<mirror::Class> */ out = *address
- if (IsUint<32>(cls->GetAddress())) {
- Address address = Address::Absolute(cls->GetAddress(), /* no_rip */ true);
- GenerateGcRootFieldLoad(cls,
- out_loc,
- address,
- /* fixup_label */ nullptr,
- read_barrier_option);
- } else {
- // TODO: Consider using opcode A1, i.e. movl eax, moff32 (with 64-bit address).
- __ movq(out, Immediate(cls->GetAddress()));
- GenerateGcRootFieldLoad(cls,
- out_loc,
- Address(out, 0),
- /* fixup_label */ nullptr,
- read_barrier_option);
- }
- generate_null_check = !cls->IsInDexCache();
+ GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, kCompilerReadBarrierOption);
break;
}
case HLoadClass::LoadKind::kDexCachePcRelative: {
@@ -7127,18 +7124,31 @@ void CodeGeneratorX86_64::MoveInt64ToAddress(const Address& addr_low,
}
}
+void CodeGeneratorX86_64::PatchJitRootUse(uint8_t* code,
+ const uint8_t* roots_data,
+ const PatchInfo<Label>& info,
+ uint64_t index_in_table) const {
+ uint32_t code_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
+ uintptr_t address =
+ reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
+ typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
+ reinterpret_cast<unaligned_uint32_t*>(code + code_offset)[0] =
+ dchecked_integral_cast<uint32_t>(address);
+}
+
void CodeGeneratorX86_64::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
for (const PatchInfo<Label>& info : jit_string_patches_) {
- const auto& it = jit_string_roots_.find(StringReference(&info.dex_file,
- dex::StringIndex(info.index)));
+ const auto& it = jit_string_roots_.find(
+ StringReference(&info.dex_file, dex::StringIndex(info.index)));
DCHECK(it != jit_string_roots_.end());
- size_t index_in_table = it->second;
- uint32_t code_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
- uintptr_t address =
- reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
- typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
- reinterpret_cast<unaligned_uint32_t*>(code + code_offset)[0] =
- dchecked_integral_cast<uint32_t>(address);
+ PatchJitRootUse(code, roots_data, info, it->second);
+ }
+
+ for (const PatchInfo<Label>& info : jit_class_patches_) {
+ const auto& it = jit_class_roots_.find(
+ TypeReference(&info.dex_file, dex::TypeIndex(info.index)));
+ DCHECK(it != jit_class_roots_.end());
+ PatchJitRootUse(code, roots_data, info, it->second);
}
}
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 2f41f73da6..391a23b7ce 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -413,11 +413,17 @@ class CodeGeneratorX86_64 : public CodeGenerator {
Label* NewStringBssEntryPatch(HLoadString* load_string);
Label* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file, uint32_t element_offset);
Label* NewJitRootStringPatch(const DexFile& dex_file, dex::StringIndex dex_index);
+ Label* NewJitRootClassPatch(const DexFile& dex_file, dex::TypeIndex dex_index, uint64_t address);
void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;
void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
+ void PatchJitRootUse(uint8_t* code,
+ const uint8_t* roots_data,
+ const PatchInfo<Label>& info,
+ uint64_t index_in_table) const;
+
void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
const X86_64InstructionSetFeatures& GetInstructionSetFeatures() const {
@@ -608,6 +614,9 @@ class CodeGeneratorX86_64 : public CodeGenerator {
// Patches for string literals in JIT compiled code.
ArenaDeque<PatchInfo<Label>> jit_string_patches_;
+ // Patches for class literals in JIT compiled code.
+ ArenaDeque<PatchInfo<Label>> jit_class_patches_;
+
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorX86_64);
};
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 8d93867230..fe4662abb1 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -1444,7 +1444,7 @@ size_t HInliner::RunOptimizations(HGraph* callee_graph,
// optimization that could lead to a HDeoptimize. The following optimizations do not.
HDeadCodeElimination dce(callee_graph, stats_, "dead_code_elimination$inliner");
HConstantFolding fold(callee_graph, "constant_folding$inliner");
- HSharpening sharpening(callee_graph, codegen_, dex_compilation_unit, compiler_driver_);
+ HSharpening sharpening(callee_graph, codegen_, dex_compilation_unit, compiler_driver_, handles_);
InstructionSimplifier simplify(callee_graph, stats_);
IntrinsicsRecognizer intrinsics(callee_graph, stats_);
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 658b80468e..c615df1f1d 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -1185,6 +1185,18 @@ void InstructionSimplifierVisitor::VisitCondition(HCondition* condition) {
RecordSimplification();
}
+// Return whether x / divisor == x * (1.0f / divisor), for every float x.
+static constexpr bool CanDivideByReciprocalMultiplyFloat(int32_t divisor) {
+ // True, if the most significant bits of divisor are 0.
+ return ((divisor & 0x7fffff) == 0);
+}
+
+// Return whether x / divisor == x * (1.0 / divisor), for every double x.
+static constexpr bool CanDivideByReciprocalMultiplyDouble(int64_t divisor) {
+ // True, if the most significant bits of divisor are 0.
+ return ((divisor & ((UINT64_C(1) << 52) - 1)) == 0);
+}
+
void InstructionSimplifierVisitor::VisitDiv(HDiv* instruction) {
HConstant* input_cst = instruction->GetConstantRight();
HInstruction* input_other = instruction->GetLeastConstantLeft();
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 433dced9d7..95551c8fd9 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -47,6 +47,9 @@ using helpers::SRegisterFrom;
using namespace vixl::aarch32; // NOLINT(build/namespaces)
+using vixl::ExactAssemblyScope;
+using vixl::CodeBufferCheckScope;
+
ArmVIXLAssembler* IntrinsicCodeGeneratorARMVIXL::GetAssembler() {
return codegen_->GetAssembler();
}
@@ -467,9 +470,9 @@ static void GenMinMax(HInvoke* invoke, bool is_min, ArmVIXLAssembler* assembler)
__ Cmp(op1, op2);
{
- AssemblerAccurateScope aas(assembler->GetVIXLAssembler(),
- 3 * kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(assembler->GetVIXLAssembler(),
+ 3 * kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ ite(is_min ? lt : gt);
__ mov(is_min ? lt : gt, out, op1);
@@ -1050,9 +1053,9 @@ static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorARMVIXL*
__ Subs(tmp, tmp, expected);
{
- AssemblerAccurateScope aas(assembler->GetVIXLAssembler(),
- 3 * kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(assembler->GetVIXLAssembler(),
+ 3 * kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ itt(eq);
__ strex(eq, tmp, value, MemOperand(tmp_ptr));
@@ -1066,9 +1069,9 @@ static void GenCas(HInvoke* invoke, Primitive::Type type, CodeGeneratorARMVIXL*
__ Rsbs(out, tmp, 1);
{
- AssemblerAccurateScope aas(assembler->GetVIXLAssembler(),
- 2 * kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(assembler->GetVIXLAssembler(),
+ 2 * kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ it(cc);
__ mov(cc, out, 0);
@@ -1185,9 +1188,9 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringCompareTo(HInvoke* invoke) {
// temp0 = min(len(str), len(arg)).
{
- AssemblerAccurateScope aas(assembler->GetVIXLAssembler(),
- 2 * kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(assembler->GetVIXLAssembler(),
+ 2 * kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ it(gt);
__ mov(gt, temp0, temp1);
@@ -1207,9 +1210,9 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringCompareTo(HInvoke* invoke) {
// This could in theory exceed INT32_MAX, so treat temp0 as unsigned.
__ Lsls(temp3, temp3, 31u); // Extract purely the compression flag.
- AssemblerAccurateScope aas(assembler->GetVIXLAssembler(),
- 2 * kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(assembler->GetVIXLAssembler(),
+ 2 * kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ it(ne);
__ add(ne, temp0, temp0, temp0);
@@ -1324,9 +1327,9 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringCompareTo(HInvoke* invoke) {
__ Mov(temp2, arg);
__ Lsrs(temp3, temp3, 1u); // Continue the move of the compression flag.
{
- AssemblerAccurateScope aas(assembler->GetVIXLAssembler(),
- 3 * kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(assembler->GetVIXLAssembler(),
+ 3 * kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ itt(cs); // Interleave with selection of temp1 and temp2.
__ mov(cs, temp1, arg); // Preserves flags.
__ mov(cs, temp2, str); // Preserves flags.
@@ -1361,9 +1364,9 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringCompareTo(HInvoke* invoke) {
static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
"Expecting 0=compressed, 1=uncompressed");
- AssemblerAccurateScope aas(assembler->GetVIXLAssembler(),
- 2 * kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(assembler->GetVIXLAssembler(),
+ 2 * kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ it(cc);
__ rsb(cc, out, out, 0);
}
@@ -1457,9 +1460,9 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringEquals(HInvoke* invoke) {
// For string compression, calculate the number of bytes to compare (not chars).
// This could in theory exceed INT32_MAX, so treat temp as unsigned.
__ Lsrs(temp, temp, 1u); // Extract length and check compression flag.
- AssemblerAccurateScope aas(assembler->GetVIXLAssembler(),
- 2 * kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope aas(assembler->GetVIXLAssembler(),
+ 2 * kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
__ it(cs); // If uncompressed,
__ add(cs, temp, temp, temp); // double the byte count.
}
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 594255c625..1e946d67b6 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2440,6 +2440,17 @@ std::ostream& operator<<(std::ostream& os, HInvokeStaticOrDirect::ClinitCheckReq
}
}
+// Helper for InstructionDataEquals to fetch the mirror Class out
+// from a kJitTableAddress LoadClass kind.
+// NO_THREAD_SAFETY_ANALYSIS because even though we're accessing
+// mirrors, they are stored in a variable size handle scope which is always
+// visited during a pause. Also, the only caller of this helper
+// only uses the mirror for pointer comparison.
+static inline mirror::Class* AsMirrorInternal(uint64_t address)
+ NO_THREAD_SAFETY_ANALYSIS {
+ return reinterpret_cast<StackReference<mirror::Class>*>(address)->AsMirrorPtr();
+}
+
bool HLoadClass::InstructionDataEquals(const HInstruction* other) const {
const HLoadClass* other_load_class = other->AsLoadClass();
// TODO: To allow GVN for HLoadClass from different dex files, we should compare the type
@@ -2448,16 +2459,14 @@ bool HLoadClass::InstructionDataEquals(const HInstruction* other) const {
GetPackedFields() != other_load_class->GetPackedFields()) {
return false;
}
- LoadKind load_kind = GetLoadKind();
- if (HasAddress(load_kind)) {
- return GetAddress() == other_load_class->GetAddress();
- } else if (HasTypeReference(load_kind)) {
- return IsSameDexFile(GetDexFile(), other_load_class->GetDexFile());
- } else {
- DCHECK(HasDexCacheReference(load_kind)) << load_kind;
- // If the type indexes and dex files are the same, dex cache element offsets
- // must also be the same, so we don't need to compare them.
- return IsSameDexFile(GetDexFile(), other_load_class->GetDexFile());
+ switch (GetLoadKind()) {
+ case LoadKind::kBootImageAddress:
+ return GetAddress() == other_load_class->GetAddress();
+ case LoadKind::kJitTableAddress:
+ return AsMirrorInternal(GetAddress()) == AsMirrorInternal(other_load_class->GetAddress());
+ default:
+ DCHECK(HasTypeReference(GetLoadKind()) || HasDexCacheReference(GetLoadKind()));
+ return IsSameDexFile(GetDexFile(), other_load_class->GetDexFile());
}
}
@@ -2487,8 +2496,8 @@ std::ostream& operator<<(std::ostream& os, HLoadClass::LoadKind rhs) {
return os << "BootImageLinkTimePcRelative";
case HLoadClass::LoadKind::kBootImageAddress:
return os << "BootImageAddress";
- case HLoadClass::LoadKind::kDexCacheAddress:
- return os << "DexCacheAddress";
+ case HLoadClass::LoadKind::kJitTableAddress:
+ return os << "JitTableAddress";
case HLoadClass::LoadKind::kDexCachePcRelative:
return os << "DexCachePcRelative";
case HLoadClass::LoadKind::kDexCacheViaMethod:
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index e3f4d8f035..4a8cfcb158 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -125,6 +125,11 @@ enum GraphAnalysisResult {
kAnalysisSuccess,
};
+template <typename T>
+static inline typename std::make_unsigned<T>::type MakeUnsigned(T x) {
+ return static_cast<typename std::make_unsigned<T>::type>(x);
+}
+
class HInstructionList : public ValueObject {
public:
HInstructionList() : first_instruction_(nullptr), last_instruction_(nullptr) {}
@@ -5493,9 +5498,8 @@ class HLoadClass FINAL : public HInstruction {
// GetIncludePatchInformation().
kBootImageAddress,
- // Load from the resolved types array at an absolute address.
- // Used for classes outside the boot image referenced by JIT-compiled code.
- kDexCacheAddress,
+ // Load from the root table associated with the JIT compiled method.
+ kJitTableAddress,
// Load from resolved types array in the dex cache using a PC-relative load.
// Used for classes outside boot image when we know that we can access
@@ -5588,7 +5592,6 @@ class HLoadClass FINAL : public HInstruction {
NeedsAccessCheck();
}
-
bool CanThrow() const OVERRIDE {
return CanCallRuntime();
}
@@ -5613,7 +5616,9 @@ class HLoadClass FINAL : public HInstruction {
return load_data_.address;
}
- bool NeedsDexCacheOfDeclaringClass() const OVERRIDE { return !IsReferrersClass(); }
+ bool NeedsDexCacheOfDeclaringClass() const OVERRIDE {
+ return !IsReferrersClass();
+ }
static SideEffects SideEffectsForArchRuntimeCalls() {
return SideEffects::CanTriggerGC();
@@ -5672,7 +5677,8 @@ class HLoadClass FINAL : public HInstruction {
}
static bool HasAddress(LoadKind load_kind) {
- return load_kind == LoadKind::kBootImageAddress || load_kind == LoadKind::kDexCacheAddress;
+ return load_kind == LoadKind::kBootImageAddress ||
+ load_kind == LoadKind::kJitTableAddress;
}
static bool HasDexCacheReference(LoadKind load_kind) {
@@ -5691,7 +5697,7 @@ class HLoadClass FINAL : public HInstruction {
union {
uint32_t dex_cache_element_index; // Only for dex cache reference.
- uint64_t address; // Up to 64-bit, needed for kDexCacheAddress on 64-bit targets.
+ uint64_t address; // Up to 64-bit, needed for kJitTableAddress on 64-bit targets.
} load_data_;
ReferenceTypeInfo loaded_class_rti_;
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index 013e110b87..0e02311672 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -24,12 +24,22 @@
#include "optimizing/code_generator.h"
#include "optimizing/optimizing_unit_test.h"
#include "utils/assembler.h"
+#ifdef ART_USE_VIXL_ARM_BACKEND
+#include "utils/arm/assembler_arm_vixl.h"
+#else
#include "utils/arm/assembler_thumb2.h"
+#endif
#include "utils/mips/assembler_mips.h"
#include "utils/mips64/assembler_mips64.h"
#include "optimizing/optimizing_cfi_test_expected.inc"
+#ifdef ART_USE_VIXL_ARM_BACKEND
+namespace vixl32 = vixl::aarch32;
+
+using vixl32::r0;
+#endif
+
namespace art {
// Run the tests only on host.
@@ -158,8 +168,7 @@ class OptimizingCFITest : public CFITest {
TestImpl(isa, #isa, expected_asm, expected_cfi); \
}
-// TODO(VIXL): Support this test for the VIXL backend.
-#if defined(ART_ENABLE_CODEGEN_arm) && !defined(ART_USE_VIXL_ARM_BACKEND)
+#ifdef ART_ENABLE_CODEGEN_arm
TEST_ISA(kThumb2)
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
@@ -178,8 +187,7 @@ TEST_ISA(kMips)
TEST_ISA(kMips64)
#endif
-// TODO(VIXL): Support this test for the VIXL backend.
-#if defined(ART_ENABLE_CODEGEN_arm) && !defined(ART_USE_VIXL_ARM_BACKEND)
+#ifdef ART_ENABLE_CODEGEN_arm
TEST_F(OptimizingCFITest, kThumb2Adjust) {
std::vector<uint8_t> expected_asm(
expected_asm_kThumb2_adjust,
@@ -188,6 +196,16 @@ TEST_F(OptimizingCFITest, kThumb2Adjust) {
expected_cfi_kThumb2_adjust,
expected_cfi_kThumb2_adjust + arraysize(expected_cfi_kThumb2_adjust));
SetUpFrame(kThumb2);
+#ifdef ART_USE_VIXL_ARM_BACKEND
+#define __ down_cast<arm::ArmVIXLAssembler*>(GetCodeGenerator() \
+ ->GetAssembler())->GetVIXLAssembler()->
+ vixl32::Label target;
+ __ CompareAndBranchIfZero(r0, &target);
+ // Push the target out of range of CBZ.
+ for (size_t i = 0; i != 65; ++i) {
+ __ Ldr(r0, vixl32::MemOperand(r0));
+ }
+#else
#define __ down_cast<arm::Thumb2Assembler*>(GetCodeGenerator()->GetAssembler())->
Label target;
__ CompareAndBranchIfZero(arm::R0, &target);
@@ -195,6 +213,7 @@ TEST_F(OptimizingCFITest, kThumb2Adjust) {
for (size_t i = 0; i != 65; ++i) {
__ ldr(arm::R0, arm::Address(arm::R0));
}
+#endif
__ Bind(&target);
#undef __
Finish();
diff --git a/compiler/optimizing/optimizing_cfi_test_expected.inc b/compiler/optimizing/optimizing_cfi_test_expected.inc
index f735dc8cb3..82670c38fe 100644
--- a/compiler/optimizing/optimizing_cfi_test_expected.inc
+++ b/compiler/optimizing/optimizing_cfi_test_expected.inc
@@ -223,8 +223,16 @@ static constexpr uint8_t expected_cfi_kMips64[] = {
// 0x00000040: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kThumb2_adjust[] = {
+#ifdef ART_USE_VIXL_ARM_BACKEND
+ // VIXL emits an extra 2 bytes here for a 32-bit beq as there is no
+ // optimistic 16-bit emit and subsequent fixup for out of reach targets
+ // as with the current assembler.
+ 0x60, 0xB5, 0x2D, 0xED, 0x02, 0x8A, 0x8B, 0xB0, 0x00, 0x28, 0x00, 0xF0,
+ 0x41, 0x80, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68,
+#else
0x60, 0xB5, 0x2D, 0xED, 0x02, 0x8A, 0x8B, 0xB0, 0x00, 0x28,
0x40, 0xD0, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68,
+#endif
0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68,
0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68,
0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68, 0x00, 0x68,
@@ -239,7 +247,11 @@ static constexpr uint8_t expected_asm_kThumb2_adjust[] = {
};
static constexpr uint8_t expected_cfi_kThumb2_adjust[] = {
0x42, 0x0E, 0x0C, 0x85, 0x03, 0x86, 0x02, 0x8E, 0x01, 0x44, 0x0E, 0x14,
+#ifdef ART_USE_VIXL_ARM_BACKEND
+ 0x05, 0x50, 0x05, 0x05, 0x51, 0x04, 0x42, 0x0E, 0x40, 0x02, 0x88, 0x0A,
+#else
0x05, 0x50, 0x05, 0x05, 0x51, 0x04, 0x42, 0x0E, 0x40, 0x02, 0x86, 0x0A,
+#endif
0x42, 0x0E, 0x14, 0x44, 0x0E, 0x0C, 0x06, 0x50, 0x06, 0x51, 0x42, 0x0B,
0x0E, 0x40,
};
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 8ea2b06530..ba7012ab1a 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -22,6 +22,8 @@
#include <stdint.h>
+#include "android-base/strings.h"
+
#ifdef ART_ENABLE_CODEGEN_arm
#include "dex_cache_array_fixups_arm.h"
#endif
@@ -375,7 +377,8 @@ class OptimizingCompiler FINAL : public Compiler {
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache,
ArtMethod* method,
- bool osr) const;
+ bool osr,
+ VariableSizedHandleScope* handles) const;
void MaybeRunInliner(HGraph* graph,
CodeGenerator* codegen,
@@ -495,7 +498,7 @@ static HOptimization* BuildOptimization(
number_of_dex_registers,
/* depth */ 0);
} else if (opt_name == HSharpening::kSharpeningPassName) {
- return new (arena) HSharpening(graph, codegen, dex_compilation_unit, driver);
+ return new (arena) HSharpening(graph, codegen, dex_compilation_unit, driver, handles);
} else if (opt_name == HSelectGenerator::kSelectGeneratorPassName) {
return new (arena) HSelectGenerator(graph, stats);
} else if (opt_name == HInductionVarAnalysis::kInductionPassName) {
@@ -767,7 +770,8 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph,
HInductionVarAnalysis* induction = new (arena) HInductionVarAnalysis(graph);
BoundsCheckElimination* bce = new (arena) BoundsCheckElimination(graph, *side_effects, induction);
HLoopOptimization* loop = new (arena) HLoopOptimization(graph, induction);
- HSharpening* sharpening = new (arena) HSharpening(graph, codegen, dex_compilation_unit, driver);
+ HSharpening* sharpening = new (arena) HSharpening(
+ graph, codegen, dex_compilation_unit, driver, handles);
InstructionSimplifier* simplify2 = new (arena) InstructionSimplifier(
graph, stats, "instruction_simplifier$after_inlining");
InstructionSimplifier* simplify3 = new (arena) InstructionSimplifier(
@@ -866,7 +870,8 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache,
ArtMethod* method,
- bool osr) const {
+ bool osr,
+ VariableSizedHandleScope* handles) const {
MaybeRecordStat(MethodCompilationStat::kAttemptCompilation);
CompilerDriver* compiler_driver = GetCompilerDriver();
InstructionSet instruction_set = compiler_driver->GetInstructionSet();
@@ -976,63 +981,55 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
compiler_driver,
dump_mutex_);
- VLOG(compiler) << "Building " << pass_observer.GetMethodName();
-
{
- ScopedObjectAccess soa(Thread::Current());
- VariableSizedHandleScope handles(soa.Self());
- // Do not hold `mutator_lock_` between optimizations.
- ScopedThreadSuspension sts(soa.Self(), kNative);
-
- {
- PassScope scope(HGraphBuilder::kBuilderPassName, &pass_observer);
- HGraphBuilder builder(graph,
- &dex_compilation_unit,
- &dex_compilation_unit,
- &dex_file,
- *code_item,
- compiler_driver,
- compilation_stats_.get(),
- interpreter_metadata,
- dex_cache,
- &handles);
- GraphAnalysisResult result = builder.BuildGraph();
- if (result != kAnalysisSuccess) {
- switch (result) {
- case kAnalysisSkipped:
- MaybeRecordStat(MethodCompilationStat::kNotCompiledSkipped);
- break;
- case kAnalysisInvalidBytecode:
- MaybeRecordStat(MethodCompilationStat::kNotCompiledInvalidBytecode);
- break;
- case kAnalysisFailThrowCatchLoop:
- MaybeRecordStat(MethodCompilationStat::kNotCompiledThrowCatchLoop);
- break;
- case kAnalysisFailAmbiguousArrayOp:
- MaybeRecordStat(MethodCompilationStat::kNotCompiledAmbiguousArrayOp);
- break;
- case kAnalysisSuccess:
- UNREACHABLE();
- }
- pass_observer.SetGraphInBadState();
- return nullptr;
+ VLOG(compiler) << "Building " << pass_observer.GetMethodName();
+ PassScope scope(HGraphBuilder::kBuilderPassName, &pass_observer);
+ HGraphBuilder builder(graph,
+ &dex_compilation_unit,
+ &dex_compilation_unit,
+ &dex_file,
+ *code_item,
+ compiler_driver,
+ compilation_stats_.get(),
+ interpreter_metadata,
+ dex_cache,
+ handles);
+ GraphAnalysisResult result = builder.BuildGraph();
+ if (result != kAnalysisSuccess) {
+ switch (result) {
+ case kAnalysisSkipped:
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledSkipped);
+ break;
+ case kAnalysisInvalidBytecode:
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledInvalidBytecode);
+ break;
+ case kAnalysisFailThrowCatchLoop:
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledThrowCatchLoop);
+ break;
+ case kAnalysisFailAmbiguousArrayOp:
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledAmbiguousArrayOp);
+ break;
+ case kAnalysisSuccess:
+ UNREACHABLE();
}
+ pass_observer.SetGraphInBadState();
+ return nullptr;
}
+ }
- RunOptimizations(graph,
- codegen.get(),
- compiler_driver,
- dex_compilation_unit,
- &pass_observer,
- &handles);
+ RunOptimizations(graph,
+ codegen.get(),
+ compiler_driver,
+ dex_compilation_unit,
+ &pass_observer,
+ handles);
- RegisterAllocator::Strategy regalloc_strategy =
- compiler_options.GetRegisterAllocationStrategy();
- AllocateRegisters(graph, codegen.get(), &pass_observer, regalloc_strategy);
+ RegisterAllocator::Strategy regalloc_strategy =
+ compiler_options.GetRegisterAllocationStrategy();
+ AllocateRegisters(graph, codegen.get(), &pass_observer, regalloc_strategy);
- codegen->Compile(code_allocator);
- pass_observer.DumpDisassembly();
- }
+ codegen->Compile(code_allocator);
+ pass_observer.DumpDisassembly();
return codegen.release();
}
@@ -1055,19 +1052,27 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
verified_method->GetEncounteredVerificationFailures())) {
ArenaAllocator arena(Runtime::Current()->GetArenaPool());
CodeVectorAllocator code_allocator(&arena);
- std::unique_ptr<CodeGenerator> codegen(
- TryCompile(&arena,
- &code_allocator,
- code_item,
- access_flags,
- invoke_type,
- class_def_idx,
- method_idx,
- jclass_loader,
- dex_file,
- dex_cache,
- nullptr,
- /* osr */ false));
+ std::unique_ptr<CodeGenerator> codegen;
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ VariableSizedHandleScope handles(soa.Self());
+ // Go to native so that we don't block GC during compilation.
+ ScopedThreadSuspension sts(soa.Self(), kNative);
+ codegen.reset(
+ TryCompile(&arena,
+ &code_allocator,
+ code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ jclass_loader,
+ dex_file,
+ dex_cache,
+ nullptr,
+ /* osr */ false,
+ &handles));
+ }
if (codegen.get() != nullptr) {
MaybeRecordStat(MethodCompilationStat::kCompiled);
method = Emit(&arena, &code_allocator, codegen.get(), compiler_driver, code_item);
@@ -1112,7 +1117,8 @@ Compiler* CreateOptimizingCompiler(CompilerDriver* driver) {
bool IsCompilingWithCoreImage() {
const std::string& image = Runtime::Current()->GetImageLocation();
// TODO: This is under-approximating...
- if (EndsWith(image, "core.art") || EndsWith(image, "core-optimizing.art")) {
+ if (android::base::EndsWith(image, "core.art") ||
+ android::base::EndsWith(image, "core-optimizing.art")) {
return true;
}
return false;
@@ -1138,6 +1144,8 @@ bool OptimizingCompiler::JitCompile(Thread* self,
ArenaAllocator arena(Runtime::Current()->GetJitArenaPool());
CodeVectorAllocator code_allocator(&arena);
+ VariableSizedHandleScope handles(self);
+
std::unique_ptr<CodeGenerator> codegen;
{
// Go to native so that we don't block GC during compilation.
@@ -1154,7 +1162,8 @@ bool OptimizingCompiler::JitCompile(Thread* self,
*dex_file,
dex_cache,
method,
- osr));
+ osr,
+ &handles));
if (codegen.get() == nullptr) {
return false;
}
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index daf160a483..91efb80015 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -151,7 +151,7 @@ void HSharpening::ProcessLoadClass(HLoadClass* load_class) {
bool is_in_dex_cache = false;
bool is_in_boot_image = false;
- HLoadClass::LoadKind desired_load_kind;
+ HLoadClass::LoadKind desired_load_kind = static_cast<HLoadClass::LoadKind>(-1);
uint64_t address = 0u; // Class or dex cache element address.
{
ScopedObjectAccess soa(Thread::Current());
@@ -190,18 +190,21 @@ void HSharpening::ProcessLoadClass(HLoadClass* load_class) {
// TODO: Use direct pointers for all non-moving spaces, not just boot image. Bug: 29530787
desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
address = reinterpret_cast64<uint64_t>(klass);
+ } else if (is_in_dex_cache) {
+ desired_load_kind = HLoadClass::LoadKind::kJitTableAddress;
+ // We store in the address field the location of the stack reference maintained
+ // by the handle. We do this now so that the code generation does not need to figure
+ // out which class loader to use.
+ address = reinterpret_cast<uint64_t>(handles_->NewHandle(klass).GetReference());
} else {
- // Note: If the class is not in the dex cache or isn't initialized, the
- // instruction needs environment and will not be inlined across dex files.
- // Within a dex file, the slow-path helper loads the correct class and
- // inlined frames are used correctly for OOM stack trace.
- // TODO: Write a test for this. Bug: 29416588
- desired_load_kind = HLoadClass::LoadKind::kDexCacheAddress;
- void* dex_cache_element_address = &dex_cache->GetResolvedTypes()[type_index.index_];
- address = reinterpret_cast64<uint64_t>(dex_cache_element_address);
+ // Class not loaded yet. This happens when the dex code requesting
+ // this `HLoadClass` hasn't been executed in the interpreter.
+ // Fallback to the dex cache.
+ // TODO(ngeoffray): Generate HDeoptimize instead.
+ desired_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
}
- // AOT app compilation. Check if the class is in the boot image.
} else if (is_in_boot_image && !codegen_->GetCompilerOptions().GetCompilePic()) {
+ // AOT app compilation. Check if the class is in the boot image.
desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
address = reinterpret_cast64<uint64_t>(klass);
} else {
@@ -215,6 +218,7 @@ void HSharpening::ProcessLoadClass(HLoadClass* load_class) {
}
}
}
+ DCHECK_NE(desired_load_kind, static_cast<HLoadClass::LoadKind>(-1));
if (is_in_boot_image) {
load_class->MarkInBootImage();
@@ -245,7 +249,7 @@ void HSharpening::ProcessLoadClass(HLoadClass* load_class) {
load_class->SetLoadKindWithTypeReference(load_kind, dex_file, type_index);
break;
case HLoadClass::LoadKind::kBootImageAddress:
- case HLoadClass::LoadKind::kDexCacheAddress:
+ case HLoadClass::LoadKind::kJitTableAddress:
DCHECK_NE(address, 0u);
load_class->SetLoadKindWithAddress(load_kind, address);
break;
diff --git a/compiler/optimizing/sharpening.h b/compiler/optimizing/sharpening.h
index d35ae66e05..74189549fd 100644
--- a/compiler/optimizing/sharpening.h
+++ b/compiler/optimizing/sharpening.h
@@ -35,11 +35,13 @@ class HSharpening : public HOptimization {
HSharpening(HGraph* graph,
CodeGenerator* codegen,
const DexCompilationUnit& compilation_unit,
- CompilerDriver* compiler_driver)
+ CompilerDriver* compiler_driver,
+ VariableSizedHandleScope* handles)
: HOptimization(graph, kSharpeningPassName),
codegen_(codegen),
compilation_unit_(compilation_unit),
- compiler_driver_(compiler_driver) { }
+ compiler_driver_(compiler_driver),
+ handles_(handles) { }
void Run() OVERRIDE;
@@ -53,6 +55,7 @@ class HSharpening : public HOptimization {
CodeGenerator* codegen_;
const DexCompilationUnit& compilation_unit_;
CompilerDriver* compiler_driver_;
+ VariableSizedHandleScope* handles_;
};
} // namespace art
diff --git a/compiler/utils/arm/assembler_arm_vixl.cc b/compiler/utils/arm/assembler_arm_vixl.cc
index 1614d04a95..76a94e8315 100644
--- a/compiler/utils/arm/assembler_arm_vixl.cc
+++ b/compiler/utils/arm/assembler_arm_vixl.cc
@@ -23,6 +23,9 @@
using namespace vixl::aarch32; // NOLINT(build/namespaces)
+using vixl::ExactAssemblyScope;
+using vixl::CodeBufferCheckScope;
+
namespace art {
namespace arm {
@@ -459,9 +462,9 @@ void ArmVIXLMacroAssembler::B(vixl32::Label* label) {
if (!label->IsBound()) {
// Try to use 16-bit T2 encoding of B instruction.
DCHECK(OutsideITBlock());
- AssemblerAccurateScope ass(this,
- kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope ass(this,
+ kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
b(al, Narrow, label);
AddBranchLabel(label);
return;
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
index 2d026b83f9..4e64f13d55 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
@@ -24,6 +24,9 @@
using namespace vixl::aarch32; // NOLINT(build/namespaces)
namespace vixl32 = vixl::aarch32;
+using vixl::ExactAssemblyScope;
+using vixl::CodeBufferCheckScope;
+
namespace art {
namespace arm {
@@ -455,16 +458,16 @@ void ArmVIXLJNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
if (asm_.ShifterOperandCanHold(ADD, handle_scope_offset.Int32Value(), kCcDontCare)) {
if (!out_reg.Equals(in_reg)) {
- AssemblerAccurateScope guard(asm_.GetVIXLAssembler(),
- 3 * vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope guard(asm_.GetVIXLAssembler(),
+ 3 * vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
___ it(eq, 0xc);
___ mov(eq, out_reg.AsVIXLRegister(), 0);
asm_.AddConstantInIt(out_reg.AsVIXLRegister(), sp, handle_scope_offset.Int32Value(), ne);
} else {
- AssemblerAccurateScope guard(asm_.GetVIXLAssembler(),
- 2 * vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope guard(asm_.GetVIXLAssembler(),
+ 2 * vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
___ it(ne, 0x8);
asm_.AddConstantInIt(out_reg.AsVIXLRegister(), sp, handle_scope_offset.Int32Value(), ne);
}
@@ -493,9 +496,9 @@ void ArmVIXLJNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
___ Cmp(scratch.AsVIXLRegister(), 0);
if (asm_.ShifterOperandCanHold(ADD, handle_scope_offset.Int32Value(), kCcDontCare)) {
- AssemblerAccurateScope guard(asm_.GetVIXLAssembler(),
- 2 * vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope guard(asm_.GetVIXLAssembler(),
+ 2 * vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
___ it(ne, 0x8);
asm_.AddConstantInIt(scratch.AsVIXLRegister(), sp, handle_scope_offset.Int32Value(), ne);
} else {
@@ -586,9 +589,9 @@ void ArmVIXLJNIMacroAssembler::ExceptionPoll(ManagedRegister m_scratch, size_t s
___ Cmp(scratch.AsVIXLRegister(), 0);
{
- AssemblerAccurateScope guard(asm_.GetVIXLAssembler(),
- vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
+ ExactAssemblyScope guard(asm_.GetVIXLAssembler(),
+ vixl32::kMaxInstructionSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
___ b(ne, Narrow, exception_blocks_.back()->Entry());
}
// TODO: think about using CBNZ here.
diff --git a/compiler/utils/assembler_test_base.h b/compiler/utils/assembler_test_base.h
index ac24ee95eb..e7edf96722 100644
--- a/compiler/utils/assembler_test_base.h
+++ b/compiler/utils/assembler_test_base.h
@@ -23,6 +23,8 @@
#include <iterator>
#include <sys/stat.h>
+#include "android-base/strings.h"
+
#include "common_runtime_test.h" // For ScratchFile
#include "utils.h"
@@ -221,7 +223,7 @@ class AssemblerTestInfrastructure {
args.push_back("-o");
args.push_back(to_file);
args.push_back(from_file);
- std::string cmd = Join(args, ' ');
+ std::string cmd = android::base::Join(args, ' ');
args.clear();
args.push_back("/bin/sh");
@@ -257,7 +259,7 @@ class AssemblerTestInfrastructure {
args.push_back(file);
args.push_back(">");
args.push_back(file+".dump");
- std::string cmd = Join(args, ' ');
+ std::string cmd = android::base::Join(args, ' ');
args.clear();
args.push_back("/bin/sh");
@@ -338,7 +340,7 @@ class AssemblerTestInfrastructure {
args.push_back("| sed -n \'/<.data>/,$p\' | sed -e \'s/.*://\'");
args.push_back(">");
args.push_back(file+".dis");
- std::string cmd = Join(args, ' ');
+ std::string cmd = android::base::Join(args, ' ');
args.clear();
args.push_back("/bin/sh");
@@ -500,7 +502,7 @@ class AssemblerTestInfrastructure {
std::string tmp_file = GetTmpnam();
args.push_back(">");
args.push_back(tmp_file);
- std::string sh_args = Join(args, ' ');
+ std::string sh_args = android::base::Join(args, ' ');
args.clear();
args.push_back("/bin/sh");
@@ -541,7 +543,7 @@ class AssemblerTestInfrastructure {
args.push_back("sort");
args.push_back(">");
args.push_back(tmp_file);
- std::string sh_args = Join(args, ' ');
+ std::string sh_args = android::base::Join(args, ' ');
args.clear();
args.push_back("/bin/sh");
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 1a21df939e..84280b9c98 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -35,6 +35,7 @@ void Mips64Assembler::FinalizeCode() {
for (auto& exception_block : exception_blocks_) {
EmitExceptionPoll(&exception_block);
}
+ EmitLiterals();
PromoteBranches();
}
@@ -450,6 +451,21 @@ void Mips64Assembler::Lwu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
EmitI(0x27, rs, rt, imm16);
}
+void Mips64Assembler::Lwpc(GpuRegister rs, uint32_t imm19) {
+ CHECK(IsUint<19>(imm19)) << imm19;
+ EmitI21(0x3B, rs, (0x01 << 19) | imm19);
+}
+
+void Mips64Assembler::Lwupc(GpuRegister rs, uint32_t imm19) {
+ CHECK(IsUint<19>(imm19)) << imm19;
+ EmitI21(0x3B, rs, (0x02 << 19) | imm19);
+}
+
+void Mips64Assembler::Ldpc(GpuRegister rs, uint32_t imm18) {
+ CHECK(IsUint<18>(imm18)) << imm18;
+ EmitI21(0x3B, rs, (0x06 << 18) | imm18);
+}
+
void Mips64Assembler::Lui(GpuRegister rt, uint16_t imm16) {
EmitI(0xf, static_cast<GpuRegister>(0), rt, imm16);
}
@@ -548,6 +564,10 @@ void Mips64Assembler::Bc(uint32_t imm26) {
EmitI26(0x32, imm26);
}
+void Mips64Assembler::Balc(uint32_t imm26) {
+ EmitI26(0x3A, imm26);
+}
+
void Mips64Assembler::Jic(GpuRegister rt, uint16_t imm16) {
EmitI(0x36, static_cast<GpuRegister>(0), rt, imm16);
}
@@ -1064,19 +1084,37 @@ void Mips64Assembler::Branch::InitShortOrLong(Mips64Assembler::Branch::OffsetBit
type_ = (offset_size <= branch_info_[short_type].offset_size) ? short_type : long_type;
}
-void Mips64Assembler::Branch::InitializeType(bool is_call) {
+void Mips64Assembler::Branch::InitializeType(Type initial_type) {
OffsetBits offset_size = GetOffsetSizeNeeded(location_, target_);
- if (is_call) {
- InitShortOrLong(offset_size, kCall, kLongCall);
- } else if (condition_ == kUncond) {
- InitShortOrLong(offset_size, kUncondBranch, kLongUncondBranch);
- } else {
- if (condition_ == kCondEQZ || condition_ == kCondNEZ) {
- // Special case for beqzc/bnezc with longer offset than in other b<cond>c instructions.
- type_ = (offset_size <= kOffset23) ? kCondBranch : kLongCondBranch;
- } else {
- InitShortOrLong(offset_size, kCondBranch, kLongCondBranch);
- }
+ switch (initial_type) {
+ case kLabel:
+ case kLiteral:
+ case kLiteralUnsigned:
+ case kLiteralLong:
+ CHECK(!IsResolved());
+ type_ = initial_type;
+ break;
+ case kCall:
+ InitShortOrLong(offset_size, kCall, kLongCall);
+ break;
+ case kCondBranch:
+ switch (condition_) {
+ case kUncond:
+ InitShortOrLong(offset_size, kUncondBranch, kLongUncondBranch);
+ break;
+ case kCondEQZ:
+ case kCondNEZ:
+ // Special case for beqzc/bnezc with longer offset than in other b<cond>c instructions.
+ type_ = (offset_size <= kOffset23) ? kCondBranch : kLongCondBranch;
+ break;
+ default:
+ InitShortOrLong(offset_size, kCondBranch, kLongCondBranch);
+ break;
+ }
+ break;
+ default:
+ LOG(FATAL) << "Unexpected branch type " << initial_type;
+ UNREACHABLE();
}
old_type_ = type_;
}
@@ -1109,14 +1147,14 @@ bool Mips64Assembler::Branch::IsUncond(BranchCondition condition,
}
}
-Mips64Assembler::Branch::Branch(uint32_t location, uint32_t target)
+Mips64Assembler::Branch::Branch(uint32_t location, uint32_t target, bool is_call)
: old_location_(location),
location_(location),
target_(target),
lhs_reg_(ZERO),
rhs_reg_(ZERO),
condition_(kUncond) {
- InitializeType(false);
+ InitializeType(is_call ? kCall : kCondBranch);
}
Mips64Assembler::Branch::Branch(uint32_t location,
@@ -1164,19 +1202,18 @@ Mips64Assembler::Branch::Branch(uint32_t location,
// Branch condition is always true, make the branch unconditional.
condition_ = kUncond;
}
- InitializeType(false);
+ InitializeType(kCondBranch);
}
-Mips64Assembler::Branch::Branch(uint32_t location, uint32_t target, GpuRegister indirect_reg)
+Mips64Assembler::Branch::Branch(uint32_t location, GpuRegister dest_reg, Type label_or_literal_type)
: old_location_(location),
location_(location),
- target_(target),
- lhs_reg_(indirect_reg),
+ target_(kUnresolved),
+ lhs_reg_(dest_reg),
rhs_reg_(ZERO),
condition_(kUncond) {
- CHECK_NE(indirect_reg, ZERO);
- CHECK_NE(indirect_reg, AT);
- InitializeType(true);
+ CHECK_NE(dest_reg, ZERO);
+ InitializeType(label_or_literal_type);
}
Mips64Assembler::BranchCondition Mips64Assembler::Branch::OppositeCondition(
@@ -1278,11 +1315,23 @@ bool Mips64Assembler::Branch::IsLong() const {
case kUncondBranch:
case kCondBranch:
case kCall:
+ // Near label.
+ case kLabel:
+ // Near literals.
+ case kLiteral:
+ case kLiteralUnsigned:
+ case kLiteralLong:
return false;
// Long branches.
case kLongUncondBranch:
case kLongCondBranch:
case kLongCall:
+ // Far label.
+ case kFarLabel:
+ // Far literals.
+ case kFarLiteral:
+ case kFarLiteralUnsigned:
+ case kFarLiteralLong:
return true;
}
UNREACHABLE();
@@ -1351,6 +1400,20 @@ void Mips64Assembler::Branch::PromoteToLong() {
case kCall:
type_ = kLongCall;
break;
+ // Near label.
+ case kLabel:
+ type_ = kFarLabel;
+ break;
+ // Near literals.
+ case kLiteral:
+ type_ = kFarLiteral;
+ break;
+ case kLiteralUnsigned:
+ type_ = kFarLiteralUnsigned;
+ break;
+ case kLiteralLong:
+ type_ = kFarLiteralLong;
+ break;
default:
// Note: 'type_' is already long.
break;
@@ -1397,7 +1460,15 @@ uint32_t Mips64Assembler::Branch::GetOffset() const {
uint32_t ofs_mask = 0xFFFFFFFF >> (32 - GetOffsetSize());
// Calculate the byte distance between instructions and also account for
// different PC-relative origins.
- uint32_t offset = target_ - GetOffsetLocation() - branch_info_[type_].pc_org * sizeof(uint32_t);
+ uint32_t offset_location = GetOffsetLocation();
+ if (type_ == kLiteralLong) {
+ // Special case for the ldpc instruction, whose address (PC) is rounded down to
+ // a multiple of 8 before adding the offset.
+ // Note, branch promotion has already taken care of aligning `target_` to an
+ // address that's a multiple of 8.
+ offset_location = RoundDown(offset_location, sizeof(uint64_t));
+ }
+ uint32_t offset = target_ - offset_location - branch_info_[type_].pc_org * sizeof(uint32_t);
// Prepare the offset for encoding into the instruction(s).
offset = (offset & ofs_mask) >> branch_info_[type_].offset_shift;
return offset;
@@ -1444,7 +1515,7 @@ void Mips64Assembler::Bind(Mips64Label* label) {
label->BindTo(bound_pc);
}
-uint32_t Mips64Assembler::GetLabelLocation(Mips64Label* label) const {
+uint32_t Mips64Assembler::GetLabelLocation(const Mips64Label* label) const {
CHECK(label->IsBound());
uint32_t target = label->Position();
if (label->prev_branch_id_plus_one_) {
@@ -1500,7 +1571,7 @@ void Mips64Assembler::FinalizeLabeledBranch(Mips64Label* label) {
void Mips64Assembler::Buncond(Mips64Label* label) {
uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
- branches_.emplace_back(buffer_.Size(), target);
+ branches_.emplace_back(buffer_.Size(), target, /* is_call */ false);
FinalizeLabeledBranch(label);
}
@@ -1517,12 +1588,87 @@ void Mips64Assembler::Bcond(Mips64Label* label,
FinalizeLabeledBranch(label);
}
-void Mips64Assembler::Call(Mips64Label* label, GpuRegister indirect_reg) {
+void Mips64Assembler::Call(Mips64Label* label) {
uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
- branches_.emplace_back(buffer_.Size(), target, indirect_reg);
+ branches_.emplace_back(buffer_.Size(), target, /* is_call */ true);
FinalizeLabeledBranch(label);
}
+void Mips64Assembler::LoadLabelAddress(GpuRegister dest_reg, Mips64Label* label) {
+ // Label address loads are treated as pseudo branches since they require very similar handling.
+ DCHECK(!label->IsBound());
+ branches_.emplace_back(buffer_.Size(), dest_reg, Branch::kLabel);
+ FinalizeLabeledBranch(label);
+}
+
+Literal* Mips64Assembler::NewLiteral(size_t size, const uint8_t* data) {
+ // We don't support byte and half-word literals.
+ if (size == 4u) {
+ literals_.emplace_back(size, data);
+ return &literals_.back();
+ } else {
+ DCHECK_EQ(size, 8u);
+ long_literals_.emplace_back(size, data);
+ return &long_literals_.back();
+ }
+}
+
+void Mips64Assembler::LoadLiteral(GpuRegister dest_reg,
+ LoadOperandType load_type,
+ Literal* literal) {
+ // Literal loads are treated as pseudo branches since they require very similar handling.
+ Branch::Type literal_type;
+ switch (load_type) {
+ case kLoadWord:
+ DCHECK_EQ(literal->GetSize(), 4u);
+ literal_type = Branch::kLiteral;
+ break;
+ case kLoadUnsignedWord:
+ DCHECK_EQ(literal->GetSize(), 4u);
+ literal_type = Branch::kLiteralUnsigned;
+ break;
+ case kLoadDoubleword:
+ DCHECK_EQ(literal->GetSize(), 8u);
+ literal_type = Branch::kLiteralLong;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected literal load type " << load_type;
+ UNREACHABLE();
+ }
+ Mips64Label* label = literal->GetLabel();
+ DCHECK(!label->IsBound());
+ branches_.emplace_back(buffer_.Size(), dest_reg, literal_type);
+ FinalizeLabeledBranch(label);
+}
+
+void Mips64Assembler::EmitLiterals() {
+ if (!literals_.empty()) {
+ for (Literal& literal : literals_) {
+ Mips64Label* label = literal.GetLabel();
+ Bind(label);
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ DCHECK_EQ(literal.GetSize(), 4u);
+ for (size_t i = 0, size = literal.GetSize(); i != size; ++i) {
+ buffer_.Emit<uint8_t>(literal.GetData()[i]);
+ }
+ }
+ }
+ if (!long_literals_.empty()) {
+ // Reserve 4 bytes for potential alignment. If after the branch promotion the 64-bit
+ // literals don't end up 8-byte-aligned, they will be moved down 4 bytes.
+ Emit(0); // NOP.
+ for (Literal& literal : long_literals_) {
+ Mips64Label* label = literal.GetLabel();
+ Bind(label);
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ DCHECK_EQ(literal.GetSize(), 8u);
+ for (size_t i = 0, size = literal.GetSize(); i != size; ++i) {
+ buffer_.Emit<uint8_t>(literal.GetData()[i]);
+ }
+ }
+ }
+}
+
void Mips64Assembler::PromoteBranches() {
// Promote short branches to long as necessary.
bool changed;
@@ -1561,6 +1707,35 @@ void Mips64Assembler::PromoteBranches() {
end = branch.GetOldLocation();
}
}
+
+ // Align 64-bit literals by moving them down by 4 bytes if needed.
+ // This will reduce the PC-relative distance, which should be safe for both near and far literals.
+ if (!long_literals_.empty()) {
+ uint32_t first_literal_location = GetLabelLocation(long_literals_.front().GetLabel());
+ size_t lit_size = long_literals_.size() * sizeof(uint64_t);
+ size_t buf_size = buffer_.Size();
+ // 64-bit literals must be at the very end of the buffer.
+ CHECK_EQ(first_literal_location + lit_size, buf_size);
+ if (!IsAligned<sizeof(uint64_t)>(first_literal_location)) {
+ buffer_.Move(first_literal_location - sizeof(uint32_t), first_literal_location, lit_size);
+ // The 4 reserved bytes proved useless, reduce the buffer size.
+ buffer_.Resize(buf_size - sizeof(uint32_t));
+ // Reduce target addresses in literal and address loads by 4 bytes in order for correct
+ // offsets from PC to be generated.
+ for (auto& branch : branches_) {
+ uint32_t target = branch.GetTarget();
+ if (target >= first_literal_location) {
+ branch.Resolve(target - sizeof(uint32_t));
+ }
+ }
+ // If after this we ever call GetLabelLocation() to get the location of a 64-bit literal,
+ // we need to adjust the location of the literal's label as well.
+ for (Literal& literal : long_literals_) {
+ // Bound label's position is negative, hence incrementing it instead of decrementing.
+ literal.GetLabel()->position_ += sizeof(uint32_t);
+ }
+ }
+ }
}
// Note: make sure branch_info_[] and EmitBranch() are kept synchronized.
@@ -1569,11 +1744,23 @@ const Mips64Assembler::Branch::BranchInfo Mips64Assembler::Branch::branch_info_[
{ 1, 0, 1, Mips64Assembler::Branch::kOffset28, 2 }, // kUncondBranch
{ 2, 0, 1, Mips64Assembler::Branch::kOffset18, 2 }, // kCondBranch
// Exception: kOffset23 for beqzc/bnezc
- { 2, 0, 0, Mips64Assembler::Branch::kOffset21, 2 }, // kCall
+ { 1, 0, 1, Mips64Assembler::Branch::kOffset28, 2 }, // kCall
+ // Near label.
+ { 1, 0, 0, Mips64Assembler::Branch::kOffset21, 2 }, // kLabel
+ // Near literals.
+ { 1, 0, 0, Mips64Assembler::Branch::kOffset21, 2 }, // kLiteral
+ { 1, 0, 0, Mips64Assembler::Branch::kOffset21, 2 }, // kLiteralUnsigned
+ { 1, 0, 0, Mips64Assembler::Branch::kOffset21, 3 }, // kLiteralLong
// Long branches.
{ 2, 0, 0, Mips64Assembler::Branch::kOffset32, 0 }, // kLongUncondBranch
{ 3, 1, 0, Mips64Assembler::Branch::kOffset32, 0 }, // kLongCondBranch
- { 3, 0, 0, Mips64Assembler::Branch::kOffset32, 0 }, // kLongCall
+ { 2, 0, 0, Mips64Assembler::Branch::kOffset32, 0 }, // kLongCall
+ // Far label.
+ { 2, 0, 0, Mips64Assembler::Branch::kOffset32, 0 }, // kFarLabel
+ // Far literals.
+ { 2, 0, 0, Mips64Assembler::Branch::kOffset32, 0 }, // kFarLiteral
+ { 2, 0, 0, Mips64Assembler::Branch::kOffset32, 0 }, // kFarLiteralUnsigned
+ { 2, 0, 0, Mips64Assembler::Branch::kOffset32, 0 }, // kFarLiteralLong
};
// Note: make sure branch_info_[] and EmitBranch() are kept synchronized.
@@ -1597,8 +1784,26 @@ void Mips64Assembler::EmitBranch(Mips64Assembler::Branch* branch) {
break;
case Branch::kCall:
CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Balc(offset);
+ break;
+
+ // Near label.
+ case Branch::kLabel:
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
Addiupc(lhs, offset);
- Jialc(lhs, 0);
+ break;
+ // Near literals.
+ case Branch::kLiteral:
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Lwpc(lhs, offset);
+ break;
+ case Branch::kLiteralUnsigned:
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Lwupc(lhs, offset);
+ break;
+ case Branch::kLiteralLong:
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Ldpc(lhs, offset);
break;
// Long branches.
@@ -1616,11 +1821,37 @@ void Mips64Assembler::EmitBranch(Mips64Assembler::Branch* branch) {
Jic(AT, Low16Bits(offset));
break;
case Branch::kLongCall:
- offset += (offset & 0x8000) << 1; // Account for sign extension in daddiu.
+ offset += (offset & 0x8000) << 1; // Account for sign extension in jialc.
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Auipc(AT, High16Bits(offset));
+ Jialc(AT, Low16Bits(offset));
+ break;
+
+ // Far label.
+ case Branch::kFarLabel:
+ offset += (offset & 0x8000) << 1; // Account for sign extension in addiu.
CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
- Auipc(lhs, High16Bits(offset));
- Daddiu(lhs, lhs, Low16Bits(offset));
- Jialc(lhs, 0);
+ Auipc(AT, High16Bits(offset));
+ Addiu(lhs, AT, Low16Bits(offset));
+ break;
+ // Far literals.
+ case Branch::kFarLiteral:
+ offset += (offset & 0x8000) << 1; // Account for sign extension in lw.
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Auipc(AT, High16Bits(offset));
+ Lw(lhs, AT, Low16Bits(offset));
+ break;
+ case Branch::kFarLiteralUnsigned:
+ offset += (offset & 0x8000) << 1; // Account for sign extension in lwu.
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Auipc(AT, High16Bits(offset));
+ Lwu(lhs, AT, Low16Bits(offset));
+ break;
+ case Branch::kFarLiteralLong:
+ offset += (offset & 0x8000) << 1; // Account for sign extension in ld.
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Auipc(AT, High16Bits(offset));
+ Ld(lhs, AT, Low16Bits(offset));
break;
}
CHECK_EQ(overwrite_location_, branch->GetEndLocation());
@@ -1631,8 +1862,8 @@ void Mips64Assembler::Bc(Mips64Label* label) {
Buncond(label);
}
-void Mips64Assembler::Jialc(Mips64Label* label, GpuRegister indirect_reg) {
- Call(label, indirect_reg);
+void Mips64Assembler::Balc(Mips64Label* label) {
+ Call(label);
}
void Mips64Assembler::Bltc(GpuRegister rs, GpuRegister rt, Mips64Label* label) {
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 238cb9d765..08a55ed41f 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -17,9 +17,11 @@
#ifndef ART_COMPILER_UTILS_MIPS64_ASSEMBLER_MIPS64_H_
#define ART_COMPILER_UTILS_MIPS64_ASSEMBLER_MIPS64_H_
+#include <deque>
#include <utility>
#include <vector>
+#include "base/arena_containers.h"
#include "base/enums.h"
#include "base/macros.h"
#include "constants_mips64.h"
@@ -312,6 +314,49 @@ class Mips64Label : public Label {
DISALLOW_COPY_AND_ASSIGN(Mips64Label);
};
+// Assembler literal is a value embedded in code, retrieved using a PC-relative load.
+class Literal {
+ public:
+ static constexpr size_t kMaxSize = 8;
+
+ Literal(uint32_t size, const uint8_t* data)
+ : label_(), size_(size) {
+ DCHECK_LE(size, Literal::kMaxSize);
+ memcpy(data_, data, size);
+ }
+
+ template <typename T>
+ T GetValue() const {
+ DCHECK_EQ(size_, sizeof(T));
+ T value;
+ memcpy(&value, data_, sizeof(T));
+ return value;
+ }
+
+ uint32_t GetSize() const {
+ return size_;
+ }
+
+ const uint8_t* GetData() const {
+ return data_;
+ }
+
+ Mips64Label* GetLabel() {
+ return &label_;
+ }
+
+ const Mips64Label* GetLabel() const {
+ return &label_;
+ }
+
+ private:
+ Mips64Label label_;
+ const uint32_t size_;
+ uint8_t data_[kMaxSize];
+
+ DISALLOW_COPY_AND_ASSIGN(Literal);
+};
+
// Slowpath entered when Thread::Current()->_exception is non-null.
class Mips64ExceptionSlowPath {
public:
@@ -341,6 +386,8 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
: Assembler(arena),
overwriting_(false),
overwrite_location_(0),
+ literals_(arena->Adapter(kArenaAllocAssembler)),
+ long_literals_(arena->Adapter(kArenaAllocAssembler)),
last_position_adjustment_(0),
last_old_position_(0),
last_branch_id_(0) {
@@ -386,18 +433,18 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
void Nor(GpuRegister rd, GpuRegister rs, GpuRegister rt);
void Bitswap(GpuRegister rd, GpuRegister rt);
- void Dbitswap(GpuRegister rd, GpuRegister rt);
+ void Dbitswap(GpuRegister rd, GpuRegister rt); // MIPS64
void Seb(GpuRegister rd, GpuRegister rt);
void Seh(GpuRegister rd, GpuRegister rt);
- void Dsbh(GpuRegister rd, GpuRegister rt);
- void Dshd(GpuRegister rd, GpuRegister rt);
+ void Dsbh(GpuRegister rd, GpuRegister rt); // MIPS64
+ void Dshd(GpuRegister rd, GpuRegister rt); // MIPS64
void Dext(GpuRegister rs, GpuRegister rt, int pos, int size); // MIPS64
void Dinsu(GpuRegister rt, GpuRegister rs, int pos, int size); // MIPS64
void Wsbh(GpuRegister rd, GpuRegister rt);
void Sc(GpuRegister rt, GpuRegister base, int16_t imm9 = 0);
- void Scd(GpuRegister rt, GpuRegister base, int16_t imm9 = 0);
+ void Scd(GpuRegister rt, GpuRegister base, int16_t imm9 = 0); // MIPS64
void Ll(GpuRegister rt, GpuRegister base, int16_t imm9 = 0);
- void Lld(GpuRegister rt, GpuRegister base, int16_t imm9 = 0);
+ void Lld(GpuRegister rt, GpuRegister base, int16_t imm9 = 0); // MIPS64
void Sll(GpuRegister rd, GpuRegister rt, int shamt);
void Srl(GpuRegister rd, GpuRegister rt, int shamt);
@@ -409,7 +456,7 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
void Srav(GpuRegister rd, GpuRegister rt, GpuRegister rs);
void Dsll(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
void Dsrl(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
- void Drotr(GpuRegister rd, GpuRegister rt, int shamt);
+ void Drotr(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
void Dsra(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
void Dsll32(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
void Dsrl32(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
@@ -427,6 +474,9 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
void Lbu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
void Lhu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
void Lwu(GpuRegister rt, GpuRegister rs, uint16_t imm16); // MIPS64
+ void Lwpc(GpuRegister rs, uint32_t imm19);
+ void Lwupc(GpuRegister rs, uint32_t imm19); // MIPS64
+ void Ldpc(GpuRegister rs, uint32_t imm18); // MIPS64
void Lui(GpuRegister rt, uint16_t imm16);
void Dahi(GpuRegister rs, uint16_t imm16); // MIPS64
void Dati(GpuRegister rs, uint16_t imm16); // MIPS64
@@ -445,8 +495,8 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
void Selnez(GpuRegister rd, GpuRegister rs, GpuRegister rt);
void Clz(GpuRegister rd, GpuRegister rs);
void Clo(GpuRegister rd, GpuRegister rs);
- void Dclz(GpuRegister rd, GpuRegister rs);
- void Dclo(GpuRegister rd, GpuRegister rs);
+ void Dclz(GpuRegister rd, GpuRegister rs); // MIPS64
+ void Dclo(GpuRegister rd, GpuRegister rs); // MIPS64
void Jalr(GpuRegister rd, GpuRegister rs);
void Jalr(GpuRegister rs);
@@ -454,6 +504,7 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
void Auipc(GpuRegister rs, uint16_t imm16);
void Addiupc(GpuRegister rs, uint32_t imm19);
void Bc(uint32_t imm26);
+ void Balc(uint32_t imm26);
void Jic(GpuRegister rt, uint16_t imm16);
void Jialc(GpuRegister rt, uint16_t imm16);
void Bltc(GpuRegister rs, GpuRegister rt, uint16_t imm16);
@@ -605,8 +656,26 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
UNREACHABLE();
}
+ // Create a new literal with a given value.
+ // NOTE: Force the template parameter to be explicitly specified.
+ template <typename T>
+ Literal* NewLiteral(typename Identity<T>::type value) {
+ static_assert(std::is_integral<T>::value, "T must be an integral type.");
+ return NewLiteral(sizeof(value), reinterpret_cast<const uint8_t*>(&value));
+ }
+
+ // Load label address using PC-relative loads. To be used with data labels in the literal /
+ // jump table area only and not with regular code labels.
+ void LoadLabelAddress(GpuRegister dest_reg, Mips64Label* label);
+
+ // Create a new literal with the given data.
+ Literal* NewLiteral(size_t size, const uint8_t* data);
+
+ // Load literal using PC-relative loads.
+ void LoadLiteral(GpuRegister dest_reg, LoadOperandType load_type, Literal* literal);
+
void Bc(Mips64Label* label);
- void Jialc(Mips64Label* label, GpuRegister indirect_reg);
+ void Balc(Mips64Label* label);
void Bltc(GpuRegister rs, GpuRegister rt, Mips64Label* label);
void Bltzc(GpuRegister rt, Mips64Label* label);
void Bgtzc(GpuRegister rt, Mips64Label* label);
@@ -756,12 +825,15 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
// Returns the (always-)current location of a label (can be used in class CodeGeneratorMIPS64,
// must be used instead of Mips64Label::GetPosition()).
- uint32_t GetLabelLocation(Mips64Label* label) const;
+ uint32_t GetLabelLocation(const Mips64Label* label) const;
// Get the final position of a label after local fixup based on the old position
// recorded before FinalizeCode().
uint32_t GetAdjustedPosition(uint32_t old_position);
+ // Note that PC-relative literal loads are handled as pseudo branches because they need very
+  // similar relocation and may similarly expand in size to accommodate larger offsets relative
+ // to PC.
enum BranchCondition {
kCondLT,
kCondGE,
@@ -791,10 +863,22 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
kUncondBranch,
kCondBranch,
kCall,
+ // Near label.
+ kLabel,
+ // Near literals.
+ kLiteral,
+ kLiteralUnsigned,
+ kLiteralLong,
// Long branches.
kLongUncondBranch,
kLongCondBranch,
kLongCall,
+ // Far label.
+ kFarLabel,
+ // Far literals.
+ kFarLiteral,
+ kFarLiteralUnsigned,
+ kFarLiteralLong,
};
// Bit sizes of offsets defined as enums to minimize chance of typos.
@@ -830,16 +914,16 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
};
static const BranchInfo branch_info_[/* Type */];
- // Unconditional branch.
- Branch(uint32_t location, uint32_t target);
+ // Unconditional branch or call.
+ Branch(uint32_t location, uint32_t target, bool is_call);
// Conditional branch.
Branch(uint32_t location,
uint32_t target,
BranchCondition condition,
GpuRegister lhs_reg,
- GpuRegister rhs_reg = ZERO);
- // Call (branch and link) that stores the target address in a given register (i.e. T9).
- Branch(uint32_t location, uint32_t target, GpuRegister indirect_reg);
+ GpuRegister rhs_reg);
+ // Label address (in literal area) or literal.
+ Branch(uint32_t location, GpuRegister dest_reg, Type label_or_literal_type);
// Some conditional branches with lhs = rhs are effectively NOPs, while some
// others are effectively unconditional. MIPSR6 conditional branches require lhs != rhs.
@@ -923,7 +1007,7 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
private:
// Completes branch construction by determining and recording its type.
- void InitializeType(bool is_call);
+ void InitializeType(Type initial_type);
// Helper for the above.
void InitShortOrLong(OffsetBits ofs_size, Type short_type, Type long_type);
@@ -932,7 +1016,7 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
uint32_t target_; // Offset into assembler buffer in bytes.
GpuRegister lhs_reg_; // Left-hand side register in conditional branches or
- // indirect call register.
+ // destination register in literals.
GpuRegister rhs_reg_; // Right-hand side register in conditional branches.
BranchCondition condition_; // Condition for conditional branches.
@@ -957,12 +1041,13 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
BranchCondition condition,
GpuRegister lhs,
GpuRegister rhs = ZERO);
- void Call(Mips64Label* label, GpuRegister indirect_reg);
+ void Call(Mips64Label* label);
void FinalizeLabeledBranch(Mips64Label* label);
Branch* GetBranch(uint32_t branch_id);
const Branch* GetBranch(uint32_t branch_id) const;
+ void EmitLiterals();
void PromoteBranches();
void EmitBranch(Branch* branch);
void EmitBranches();
@@ -981,6 +1066,11 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
// The current overwrite location.
uint32_t overwrite_location_;
+ // Use std::deque<> for literal labels to allow insertions at the end
+ // without invalidating pointers and references to existing elements.
+ ArenaDeque<Literal> literals_;
+ ArenaDeque<Literal> long_literals_; // 64-bit literals separated for alignment reasons.
+
// Data for AdjustedPosition(), see the description there.
uint32_t last_position_adjustment_;
uint32_t last_old_position_;
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index ba8f25ea77..f62822d73a 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -576,83 +576,83 @@ TEST_F(AssemblerMIPS64Test, Jalr) {
RepeatRRNoDupes(&mips64::Mips64Assembler::Jalr, "jalr ${reg1}, ${reg2}"), "jalr");
}
-TEST_F(AssemblerMIPS64Test, Jialc) {
+TEST_F(AssemblerMIPS64Test, Balc) {
mips64::Mips64Label label1, label2;
- __ Jialc(&label1, mips64::T9);
+ __ Balc(&label1);
constexpr size_t kAdduCount1 = 63;
for (size_t i = 0; i != kAdduCount1; ++i) {
__ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
}
__ Bind(&label1);
- __ Jialc(&label2, mips64::T9);
+ __ Balc(&label2);
constexpr size_t kAdduCount2 = 64;
for (size_t i = 0; i != kAdduCount2; ++i) {
__ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
}
__ Bind(&label2);
- __ Jialc(&label1, mips64::T9);
+ __ Balc(&label1);
std::string expected =
".set noreorder\n"
- "lapc $t9, 1f\n"
- "jialc $t9, 0\n" +
+ "balc 1f\n" +
RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
"1:\n"
- "lapc $t9, 2f\n"
- "jialc $t9, 0\n" +
+ "balc 2f\n" +
RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
"2:\n"
- "lapc $t9, 1b\n"
- "jialc $t9, 0\n";
- DriverStr(expected, "Jialc");
+ "balc 1b\n";
+ DriverStr(expected, "Balc");
}
-TEST_F(AssemblerMIPS64Test, LongJialc) {
+TEST_F(AssemblerMIPS64Test, LongBalc) {
+ constexpr uint32_t kNopCount1 = (1u << 25) + 1;
+ constexpr uint32_t kNopCount2 = (1u << 25) + 1;
+ constexpr uint32_t kRequiredCapacity = (kNopCount1 + kNopCount2 + 6u) * 4u;
+ ASSERT_LT(__ GetBuffer()->Capacity(), kRequiredCapacity);
+ __ GetBuffer()->ExtendCapacity(kRequiredCapacity);
mips64::Mips64Label label1, label2;
- __ Jialc(&label1, mips64::T9);
- constexpr uint32_t kAdduCount1 = (1u << 18) + 1;
- for (uint32_t i = 0; i != kAdduCount1; ++i) {
- __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
+ __ Balc(&label1);
+ for (uint32_t i = 0; i != kNopCount1; ++i) {
+ __ Nop();
}
__ Bind(&label1);
- __ Jialc(&label2, mips64::T9);
- constexpr uint32_t kAdduCount2 = (1u << 18) + 1;
- for (uint32_t i = 0; i != kAdduCount2; ++i) {
- __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
+ __ Balc(&label2);
+ for (uint32_t i = 0; i != kNopCount2; ++i) {
+ __ Nop();
}
__ Bind(&label2);
- __ Jialc(&label1, mips64::T9);
+ __ Balc(&label1);
- uint32_t offset_forward1 = 3 + kAdduCount1; // 3: account for auipc, daddiu and jic.
+ uint32_t offset_forward1 = 2 + kNopCount1; // 2: account for auipc and jialc.
offset_forward1 <<= 2;
- offset_forward1 += (offset_forward1 & 0x8000) << 1; // Account for sign extension in daddiu.
+ offset_forward1 += (offset_forward1 & 0x8000) << 1; // Account for sign extension in jialc.
- uint32_t offset_forward2 = 3 + kAdduCount2; // 3: account for auipc, daddiu and jic.
+ uint32_t offset_forward2 = 2 + kNopCount2; // 2: account for auipc and jialc.
offset_forward2 <<= 2;
- offset_forward2 += (offset_forward2 & 0x8000) << 1; // Account for sign extension in daddiu.
+ offset_forward2 += (offset_forward2 & 0x8000) << 1; // Account for sign extension in jialc.
- uint32_t offset_back = -(3 + kAdduCount2); // 3: account for auipc, daddiu and jic.
+ uint32_t offset_back = -(2 + kNopCount2); // 2: account for auipc and jialc.
offset_back <<= 2;
- offset_back += (offset_back & 0x8000) << 1; // Account for sign extension in daddiu.
+ offset_back += (offset_back & 0x8000) << 1; // Account for sign extension in jialc.
+ // Note, we're using the ".fill" directive to tell the assembler to generate many NOPs
+ // instead of generating them ourselves in the source code. This saves a few minutes
+ // of test time.
std::ostringstream oss;
oss <<
".set noreorder\n"
- "auipc $t9, 0x" << std::hex << High16Bits(offset_forward1) << "\n"
- "daddiu $t9, 0x" << std::hex << Low16Bits(offset_forward1) << "\n"
- "jialc $t9, 0\n" <<
- RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") <<
+ "auipc $at, 0x" << std::hex << High16Bits(offset_forward1) << "\n"
+ "jialc $at, 0x" << std::hex << Low16Bits(offset_forward1) << "\n"
+ ".fill 0x" << std::hex << kNopCount1 << " , 4, 0\n"
"1:\n"
- "auipc $t9, 0x" << std::hex << High16Bits(offset_forward2) << "\n"
- "daddiu $t9, 0x" << std::hex << Low16Bits(offset_forward2) << "\n"
- "jialc $t9, 0\n" <<
- RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") <<
+ "auipc $at, 0x" << std::hex << High16Bits(offset_forward2) << "\n"
+ "jialc $at, 0x" << std::hex << Low16Bits(offset_forward2) << "\n"
+ ".fill 0x" << std::hex << kNopCount2 << " , 4, 0\n"
"2:\n"
- "auipc $t9, 0x" << std::hex << High16Bits(offset_back) << "\n"
- "daddiu $t9, 0x" << std::hex << Low16Bits(offset_back) << "\n"
- "jialc $t9, 0\n";
+ "auipc $at, 0x" << std::hex << High16Bits(offset_back) << "\n"
+ "jialc $at, 0x" << std::hex << Low16Bits(offset_back) << "\n";
std::string expected = oss.str();
- DriverStr(expected, "LongJialc");
+ DriverStr(expected, "LongBalc");
}
TEST_F(AssemblerMIPS64Test, Bc) {
@@ -827,6 +827,258 @@ TEST_F(AssemblerMIPS64Test, LongBeqc) {
// MISC //
//////////
+TEST_F(AssemblerMIPS64Test, Lwpc) {
+ // Lwpc() takes an unsigned 19-bit immediate, while the GNU assembler needs a signed offset,
+ // hence the sign extension from bit 18 with `imm - ((imm & 0x40000) << 1)`.
+ // The GNU assembler also wants the offset to be a multiple of 4, which it will shift right
+ // by 2 positions when encoding, hence `<< 2` to compensate for that shift.
+ // We capture the value of the immediate with `.set imm, {imm}` because the value is needed
+ // twice for the sign extension, but `{imm}` is substituted only once.
+ const char* code = ".set imm, {imm}\nlw ${reg}, ((imm - ((imm & 0x40000) << 1)) << 2)($pc)";
+ DriverStr(RepeatRIb(&mips64::Mips64Assembler::Lwpc, 19, code), "Lwpc");
+}
+
+TEST_F(AssemblerMIPS64Test, Lwupc) {
+ // The comment for the Lwpc test applies here as well.
+ const char* code = ".set imm, {imm}\nlwu ${reg}, ((imm - ((imm & 0x40000) << 1)) << 2)($pc)";
+ DriverStr(RepeatRIb(&mips64::Mips64Assembler::Lwupc, 19, code), "Lwupc");
+}
+
+TEST_F(AssemblerMIPS64Test, Ldpc) {
+ // The comment for the Lwpc test applies here as well.
+ const char* code = ".set imm, {imm}\nld ${reg}, ((imm - ((imm & 0x20000) << 1)) << 3)($pc)";
+ DriverStr(RepeatRIb(&mips64::Mips64Assembler::Ldpc, 18, code), "Ldpc");
+}
+
+TEST_F(AssemblerMIPS64Test, LoadFarthestNearLabelAddress) {
+ mips64::Mips64Label label;
+ __ LoadLabelAddress(mips64::V0, &label);
+ constexpr uint32_t kAdduCount = 0x3FFDE;
+ for (uint32_t i = 0; i != kAdduCount; ++i) {
+ __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
+ }
+ __ Bind(&label);
+
+ std::string expected =
+ "lapc $v0, 1f\n" +
+ RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
+ "1:\n";
+ DriverStr(expected, "LoadFarthestNearLabelAddress");
+ EXPECT_EQ(__ GetLabelLocation(&label), (1 + kAdduCount) * 4);
+}
+
+TEST_F(AssemblerMIPS64Test, LoadNearestFarLabelAddress) {
+ mips64::Mips64Label label;
+ __ LoadLabelAddress(mips64::V0, &label);
+ constexpr uint32_t kAdduCount = 0x3FFDF;
+ for (uint32_t i = 0; i != kAdduCount; ++i) {
+ __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
+ }
+ __ Bind(&label);
+
+ std::string expected =
+ "1:\n"
+ "auipc $at, %hi(2f - 1b)\n"
+ "addiu $v0, $at, %lo(2f - 1b)\n" +
+ RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
+ "2:\n";
+ DriverStr(expected, "LoadNearestFarLabelAddress");
+ EXPECT_EQ(__ GetLabelLocation(&label), (2 + kAdduCount) * 4);
+}
+
+TEST_F(AssemblerMIPS64Test, LoadFarthestNearLiteral) {
+ mips64::Literal* literal = __ NewLiteral<uint32_t>(0x12345678);
+ __ LoadLiteral(mips64::V0, mips64::kLoadWord, literal);
+ constexpr uint32_t kAdduCount = 0x3FFDE;
+ for (uint32_t i = 0; i != kAdduCount; ++i) {
+ __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
+ }
+
+ std::string expected =
+ "lwpc $v0, 1f\n" +
+ RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
+ "1:\n"
+ ".word 0x12345678\n";
+ DriverStr(expected, "LoadFarthestNearLiteral");
+ EXPECT_EQ(__ GetLabelLocation(literal->GetLabel()), (1 + kAdduCount) * 4);
+}
+
+TEST_F(AssemblerMIPS64Test, LoadNearestFarLiteral) {
+ mips64::Literal* literal = __ NewLiteral<uint32_t>(0x12345678);
+ __ LoadLiteral(mips64::V0, mips64::kLoadWord, literal);
+ constexpr uint32_t kAdduCount = 0x3FFDF;
+ for (uint32_t i = 0; i != kAdduCount; ++i) {
+ __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
+ }
+
+ std::string expected =
+ "1:\n"
+ "auipc $at, %hi(2f - 1b)\n"
+ "lw $v0, %lo(2f - 1b)($at)\n" +
+ RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
+ "2:\n"
+ ".word 0x12345678\n";
+ DriverStr(expected, "LoadNearestFarLiteral");
+ EXPECT_EQ(__ GetLabelLocation(literal->GetLabel()), (2 + kAdduCount) * 4);
+}
+
+TEST_F(AssemblerMIPS64Test, LoadFarthestNearLiteralUnsigned) {
+ mips64::Literal* literal = __ NewLiteral<uint32_t>(0x12345678);
+ __ LoadLiteral(mips64::V0, mips64::kLoadUnsignedWord, literal);
+ constexpr uint32_t kAdduCount = 0x3FFDE;
+ for (uint32_t i = 0; i != kAdduCount; ++i) {
+ __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
+ }
+
+ std::string expected =
+ "lwupc $v0, 1f\n" +
+ RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
+ "1:\n"
+ ".word 0x12345678\n";
+ DriverStr(expected, "LoadFarthestNearLiteralUnsigned");
+ EXPECT_EQ(__ GetLabelLocation(literal->GetLabel()), (1 + kAdduCount) * 4);
+}
+
+TEST_F(AssemblerMIPS64Test, LoadNearestFarLiteralUnsigned) {
+ mips64::Literal* literal = __ NewLiteral<uint32_t>(0x12345678);
+ __ LoadLiteral(mips64::V0, mips64::kLoadUnsignedWord, literal);
+ constexpr uint32_t kAdduCount = 0x3FFDF;
+ for (uint32_t i = 0; i != kAdduCount; ++i) {
+ __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
+ }
+
+ std::string expected =
+ "1:\n"
+ "auipc $at, %hi(2f - 1b)\n"
+ "lwu $v0, %lo(2f - 1b)($at)\n" +
+ RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
+ "2:\n"
+ ".word 0x12345678\n";
+ DriverStr(expected, "LoadNearestFarLiteralUnsigned");
+ EXPECT_EQ(__ GetLabelLocation(literal->GetLabel()), (2 + kAdduCount) * 4);
+}
+
+TEST_F(AssemblerMIPS64Test, LoadFarthestNearLiteralLong) {
+ mips64::Literal* literal = __ NewLiteral<uint64_t>(UINT64_C(0x0123456789ABCDEF));
+ __ LoadLiteral(mips64::V0, mips64::kLoadDoubleword, literal);
+ constexpr uint32_t kAdduCount = 0x3FFDD;
+ for (uint32_t i = 0; i != kAdduCount; ++i) {
+ __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
+ }
+
+ std::string expected =
+ "ldpc $v0, 1f\n" +
+ RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
+ "1:\n"
+ ".dword 0x0123456789ABCDEF\n";
+ DriverStr(expected, "LoadFarthestNearLiteralLong");
+ EXPECT_EQ(__ GetLabelLocation(literal->GetLabel()), (1 + kAdduCount) * 4);
+}
+
+TEST_F(AssemblerMIPS64Test, LoadNearestFarLiteralLong) {
+ mips64::Literal* literal = __ NewLiteral<uint64_t>(UINT64_C(0x0123456789ABCDEF));
+ __ LoadLiteral(mips64::V0, mips64::kLoadDoubleword, literal);
+ constexpr uint32_t kAdduCount = 0x3FFDE;
+ for (uint32_t i = 0; i != kAdduCount; ++i) {
+ __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
+ }
+
+ std::string expected =
+ "1:\n"
+ "auipc $at, %hi(2f - 1b)\n"
+ "ld $v0, %lo(2f - 1b)($at)\n" +
+ RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
+ "2:\n"
+ ".dword 0x0123456789ABCDEF\n";
+ DriverStr(expected, "LoadNearestFarLiteralLong");
+ EXPECT_EQ(__ GetLabelLocation(literal->GetLabel()), (2 + kAdduCount) * 4);
+}
+
+TEST_F(AssemblerMIPS64Test, LongLiteralAlignmentNop) {
+ mips64::Literal* literal1 = __ NewLiteral<uint64_t>(UINT64_C(0x0123456789ABCDEF));
+ mips64::Literal* literal2 = __ NewLiteral<uint64_t>(UINT64_C(0x5555555555555555));
+ mips64::Literal* literal3 = __ NewLiteral<uint64_t>(UINT64_C(0xAAAAAAAAAAAAAAAA));
+ __ LoadLiteral(mips64::A1, mips64::kLoadDoubleword, literal1);
+ __ LoadLiteral(mips64::A2, mips64::kLoadDoubleword, literal2);
+ __ LoadLiteral(mips64::A3, mips64::kLoadDoubleword, literal3);
+ __ LoadLabelAddress(mips64::V0, literal1->GetLabel());
+ __ LoadLabelAddress(mips64::V1, literal2->GetLabel());
+ // A nop will be inserted here before the 64-bit literals.
+
+ std::string expected =
+ "ldpc $a1, 1f\n"
+ // The GNU assembler incorrectly requires the ldpc instruction to be located
+ // at an address that's a multiple of 8. TODO: Remove this workaround if/when
+ // the assembler is fixed.
+ // "ldpc $a2, 2f\n"
+ ".word 0xECD80004\n"
+ "ldpc $a3, 3f\n"
+ "lapc $v0, 1f\n"
+ "lapc $v1, 2f\n"
+ "nop\n"
+ "1:\n"
+ ".dword 0x0123456789ABCDEF\n"
+ "2:\n"
+ ".dword 0x5555555555555555\n"
+ "3:\n"
+ ".dword 0xAAAAAAAAAAAAAAAA\n";
+ DriverStr(expected, "LongLiteralAlignmentNop");
+ EXPECT_EQ(__ GetLabelLocation(literal1->GetLabel()), 6 * 4u);
+ EXPECT_EQ(__ GetLabelLocation(literal2->GetLabel()), 8 * 4u);
+ EXPECT_EQ(__ GetLabelLocation(literal3->GetLabel()), 10 * 4u);
+}
+
+TEST_F(AssemblerMIPS64Test, LongLiteralAlignmentNoNop) {
+ mips64::Literal* literal1 = __ NewLiteral<uint64_t>(UINT64_C(0x0123456789ABCDEF));
+ mips64::Literal* literal2 = __ NewLiteral<uint64_t>(UINT64_C(0x5555555555555555));
+ __ LoadLiteral(mips64::A1, mips64::kLoadDoubleword, literal1);
+ __ LoadLiteral(mips64::A2, mips64::kLoadDoubleword, literal2);
+ __ LoadLabelAddress(mips64::V0, literal1->GetLabel());
+ __ LoadLabelAddress(mips64::V1, literal2->GetLabel());
+
+ std::string expected =
+ "ldpc $a1, 1f\n"
+ // The GNU assembler incorrectly requires the ldpc instruction to be located
+ // at an address that's a multiple of 8. TODO: Remove this workaround if/when
+ // the assembler is fixed.
+ // "ldpc $a2, 2f\n"
+ ".word 0xECD80003\n"
+ "lapc $v0, 1f\n"
+ "lapc $v1, 2f\n"
+ "1:\n"
+ ".dword 0x0123456789ABCDEF\n"
+ "2:\n"
+ ".dword 0x5555555555555555\n";
+ DriverStr(expected, "LongLiteralAlignmentNoNop");
+ EXPECT_EQ(__ GetLabelLocation(literal1->GetLabel()), 4 * 4u);
+ EXPECT_EQ(__ GetLabelLocation(literal2->GetLabel()), 6 * 4u);
+}
+
+TEST_F(AssemblerMIPS64Test, FarLongLiteralAlignmentNop) {
+ mips64::Literal* literal = __ NewLiteral<uint64_t>(UINT64_C(0x0123456789ABCDEF));
+ __ LoadLiteral(mips64::V0, mips64::kLoadDoubleword, literal);
+ __ LoadLabelAddress(mips64::V1, literal->GetLabel());
+ constexpr uint32_t kAdduCount = 0x3FFDF;
+ for (uint32_t i = 0; i != kAdduCount; ++i) {
+ __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
+ }
+ // A nop will be inserted here before the 64-bit literal.
+
+ std::string expected =
+ "1:\n"
+ "auipc $at, %hi(3f - 1b)\n"
+ "ld $v0, %lo(3f - 1b)($at)\n"
+ "2:\n"
+ "auipc $at, %hi(3f - 2b)\n"
+ "addiu $v1, $at, %lo(3f - 2b)\n" +
+ RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
+ "nop\n"
+ "3:\n"
+ ".dword 0x0123456789ABCDEF\n";
+ DriverStr(expected, "FarLongLiteralAlignmentNop");
+ EXPECT_EQ(__ GetLabelLocation(literal->GetLabel()), (5 + kAdduCount) * 4);
+}
+
TEST_F(AssemblerMIPS64Test, Bitswap) {
DriverStr(RepeatRR(&mips64::Mips64Assembler::Bitswap, "bitswap ${reg1}, ${reg2}"), "bitswap");
}
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 8fb40402b7..5a0f0c6e50 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -33,6 +33,8 @@
#include <sys/utsname.h>
#endif
+#include "android-base/strings.h"
+
#include "arch/instruction_set_features.h"
#include "arch/mips/instruction_set_features_mips.h"
#include "art_method-inl.h"
@@ -96,7 +98,7 @@ static std::string CommandLine() {
for (int i = 0; i < original_argc; ++i) {
command.push_back(original_argv[i]);
}
- return Join(command, ' ');
+ return android::base::Join(command, ' ');
}
// A stripped version. Remove some less essential parameters. If we see a "--zip-fd=" parameter, be
@@ -108,7 +110,7 @@ static std::string StrippedCommandLine() {
// Do a pre-pass to look for zip-fd.
bool saw_zip_fd = false;
for (int i = 0; i < original_argc; ++i) {
- if (StartsWith(original_argv[i], "--zip-fd=")) {
+ if (android::base::StartsWith(original_argv[i], "--zip-fd=")) {
saw_zip_fd = true;
break;
}
@@ -123,17 +125,17 @@ static std::string StrippedCommandLine() {
}
// Any instruction-setXXX is dropped.
- if (StartsWith(original_argv[i], "--instruction-set")) {
+ if (android::base::StartsWith(original_argv[i], "--instruction-set")) {
continue;
}
// The boot image is dropped.
- if (StartsWith(original_argv[i], "--boot-image=")) {
+ if (android::base::StartsWith(original_argv[i], "--boot-image=")) {
continue;
}
// The image format is dropped.
- if (StartsWith(original_argv[i], "--image-format=")) {
+ if (android::base::StartsWith(original_argv[i], "--image-format=")) {
continue;
}
@@ -142,11 +144,11 @@ static std::string StrippedCommandLine() {
// However, we prefer to drop this when we saw --zip-fd.
if (saw_zip_fd) {
// Drop anything --zip-X, --dex-X, --oat-X, --swap-X, or --app-image-X
- if (StartsWith(original_argv[i], "--zip-") ||
- StartsWith(original_argv[i], "--dex-") ||
- StartsWith(original_argv[i], "--oat-") ||
- StartsWith(original_argv[i], "--swap-") ||
- StartsWith(original_argv[i], "--app-image-")) {
+ if (android::base::StartsWith(original_argv[i], "--zip-") ||
+ android::base::StartsWith(original_argv[i], "--dex-") ||
+ android::base::StartsWith(original_argv[i], "--oat-") ||
+ android::base::StartsWith(original_argv[i], "--swap-") ||
+ android::base::StartsWith(original_argv[i], "--app-image-")) {
continue;
}
}
@@ -159,7 +161,7 @@ static std::string StrippedCommandLine() {
// It seems only "/system/bin/dex2oat" is left, or not even that. Use a pretty line.
return "Starting dex2oat.";
}
- return Join(command, ' ');
+ return android::base::Join(command, ' ');
}
static void UsageErrorV(const char* fmt, va_list ap) {
@@ -999,7 +1001,7 @@ class Dex2Oat FINAL {
if (last_dex_dot != std::string::npos) {
dex_file = dex_file.substr(0, last_dex_dot);
}
- if (StartsWith(dex_file, "core-")) {
+ if (android::base::StartsWith(dex_file, "core-")) {
infix = dex_file.substr(strlen("core"));
}
}
@@ -1059,7 +1061,7 @@ class Dex2Oat FINAL {
in.insert(last_dot, infix);
}
}
- if (EndsWith(in, ".jar")) {
+ if (android::base::EndsWith(in, ".jar")) {
in = in.substr(0, in.length() - strlen(".jar")) +
(replace_suffix != nullptr ? replace_suffix : "");
}
@@ -1484,7 +1486,7 @@ class Dex2Oat FINAL {
for (const gc::space::ImageSpace* image_space : image_spaces) {
image_filenames.push_back(image_space->GetImageFilename());
}
- std::string image_file_location = Join(image_filenames, ':');
+ std::string image_file_location = android::base::Join(image_filenames, ':');
if (!image_file_location.empty()) {
key_value_store_->Put(OatHeader::kImageLocationKey, image_file_location);
}
@@ -1687,7 +1689,7 @@ class Dex2Oat FINAL {
}
}
- if (StartsWith(dex_location, filter.c_str())) {
+ if (android::base::StartsWith(dex_location, filter.c_str())) {
VLOG(compiler) << "Disabling inlining from " << dex_file->GetLocation();
no_inline_from_dex_files_.push_back(dex_file);
break;
@@ -2362,10 +2364,10 @@ class Dex2Oat FINAL {
RuntimeOptions raw_options;
if (boot_image_filename_.empty()) {
std::string boot_class_path = "-Xbootclasspath:";
- boot_class_path += Join(dex_filenames_, ':');
+ boot_class_path += android::base::Join(dex_filenames_, ':');
raw_options.push_back(std::make_pair(boot_class_path, nullptr));
std::string boot_class_path_locations = "-Xbootclasspath-locations:";
- boot_class_path_locations += Join(dex_locations_, ':');
+ boot_class_path_locations += android::base::Join(dex_locations_, ':');
raw_options.push_back(std::make_pair(boot_class_path_locations, nullptr));
} else {
std::string boot_image_option = "-Ximage:";
@@ -2579,7 +2581,7 @@ class Dex2Oat FINAL {
while (in_stream.good()) {
std::string dot;
std::getline(in_stream, dot);
- if (StartsWith(dot, "#") || dot.empty()) {
+ if (android::base::StartsWith(dot, "#") || dot.empty()) {
continue;
}
if (process != nullptr) {
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index 47873951fe..c82600b109 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -22,6 +22,8 @@
#include "android-base/logging.h"
#include "android-base/stringprintf.h"
+#include "base/bit_utils.h"
+
using android::base::StringPrintf;
namespace art {
@@ -154,6 +156,7 @@ static const MipsInstruction gMipsInstructions[] = {
{ kSpecial3Mask | 0x3f, (31 << kOpcodeShift), "ext", "TSAZ", },
{ kSpecial3Mask | 0x3f, (31 << kOpcodeShift) | 3, "dext", "TSAZ", },
{ kSpecial3Mask | 0x3f, (31 << kOpcodeShift) | 4, "ins", "TSAz", },
+ { kSpecial3Mask | 0x3f, (31 << kOpcodeShift) | 6, "dinsu", "TSFz", },
{ kSpecial3Mask | (0x1f << 21) | (0x1f << 6) | 0x3f,
(31 << kOpcodeShift) | (16 << 6) | 32,
"seb",
@@ -218,8 +221,8 @@ static const MipsInstruction gMipsInstructions[] = {
{ kITypeMask, 12 << kOpcodeShift, "andi", "TSi", },
{ kITypeMask, 13 << kOpcodeShift, "ori", "TSi", },
{ kITypeMask, 14 << kOpcodeShift, "xori", "TSi", },
- { kITypeMask | (0x1f << 21), 15 << kOpcodeShift, "lui", "TI", },
- { kITypeMask, 15 << kOpcodeShift, "aui", "TSI", },
+ { kITypeMask | (0x1f << 21), 15 << kOpcodeShift, "lui", "Ti", },
+ { kITypeMask, 15 << kOpcodeShift, "aui", "TSi", },
{ kITypeMask | (0x3e3 << 16), (17 << kOpcodeShift) | (8 << 21), "bc1f", "cB" },
{ kITypeMask | (0x3e3 << 16), (17 << kOpcodeShift) | (8 << 21) | (1 << 16), "bc1t", "cB" },
@@ -335,6 +338,8 @@ static const MipsInstruction gMipsInstructions[] = {
{ kITypeMask | (0x1f << 16), (59u << kOpcodeShift) | (30 << 16), "auipc", "Si" },
{ kITypeMask | (0x3 << 19), (59u << kOpcodeShift) | (0 << 19), "addiupc", "Sp" },
{ kITypeMask | (0x3 << 19), (59u << kOpcodeShift) | (1 << 19), "lwpc", "So" },
+ { kITypeMask | (0x3 << 19), (59u << kOpcodeShift) | (2 << 19), "lwupc", "So" },
+ { kITypeMask | (0x7 << 18), (59u << kOpcodeShift) | (6 << 18), "ldpc", "S0" },
{ kITypeMask, 61u << kOpcodeShift, "sdc1", "tO", },
{ kITypeMask | (0x1f << 21), 62u << kOpcodeShift, "jialc", "Ti" },
{ kITypeMask | (1 << 21), (62u << kOpcodeShift) | (1 << 21), "bnezc", "Sb" }, // TODO: de-dup?
@@ -468,6 +473,7 @@ size_t DisassemblerMips::Dump(std::ostream& os, const uint8_t* instr_ptr) {
case 'D': args << 'r' << rd; break;
case 'd': args << 'f' << rd; break;
case 'a': args << 'f' << sa; break;
+ case 'F': args << (sa + 32); break; // dinsu position.
case 'f': // Floating point "fmt".
{
size_t fmt = (instruction >> 21) & 0x7; // TODO: other fmts?
@@ -481,9 +487,6 @@ size_t DisassemblerMips::Dump(std::ostream& os, const uint8_t* instr_ptr) {
}
continue; // No ", ".
}
- case 'I': // Upper 16-bit immediate.
- args << reinterpret_cast<void*>((instruction & 0xffff) << 16);
- break;
case 'i': // Sign-extended lower 16-bit immediate.
args << static_cast<int16_t>(instruction & 0xffff);
break;
@@ -512,7 +515,7 @@ size_t DisassemblerMips::Dump(std::ostream& os, const uint8_t* instr_ptr) {
}
}
break;
- case 'o': // 19-bit offset in lwpc.
+ case 'o': // 19-bit offset in lwpc and lwupc.
{
int32_t offset = (instruction & 0x7ffff) - ((instruction & 0x40000) << 1);
offset <<= 2;
@@ -520,6 +523,15 @@ size_t DisassemblerMips::Dump(std::ostream& os, const uint8_t* instr_ptr) {
args << StringPrintf(" ; %+d", offset);
}
break;
+ case '0': // 18-bit offset in ldpc.
+ {
+ int32_t offset = (instruction & 0x3ffff) - ((instruction & 0x20000) << 1);
+ offset <<= 3;
+ uintptr_t ptr = RoundDown(reinterpret_cast<uintptr_t>(instr_ptr), 8);
+ args << FormatInstructionPointer(reinterpret_cast<const uint8_t*>(ptr + offset));
+ args << StringPrintf(" ; %+d", offset);
+ }
+ break;
case 'P': // 26-bit offset in bc and balc.
{
int32_t offset = (instruction & 0x3ffffff) - ((instruction & 0x2000000) << 1);
@@ -541,7 +553,7 @@ size_t DisassemblerMips::Dump(std::ostream& os, const uint8_t* instr_ptr) {
case 'T': args << 'r' << rt; break;
case 't': args << 'f' << rt; break;
case 'Z': args << (rd + 1); break; // sz ([d]ext size).
- case 'z': args << (rd - sa + 1); break; // sz ([d]ins size).
+ case 'z': args << (rd - sa + 1); break; // sz ([d]ins, dinsu size).
}
if (*(args_fmt + 1)) {
args << ", ";
@@ -551,17 +563,14 @@ size_t DisassemblerMips::Dump(std::ostream& os, const uint8_t* instr_ptr) {
}
}
- // TODO: Simplify this once these sequences are simplified in the compiler.
// Special cases for sequences of:
// pc-relative +/- 2GB branch:
// auipc reg, imm
// jic reg, imm
// pc-relative +/- 2GB branch and link:
// auipc reg, imm
- // daddiu reg, reg, imm
- // jialc reg, 0
- if (((op == 0x36 && rs == 0 && rt != 0) || // jic
- (op == 0x19 && rs == rt && rt != 0)) && // daddiu
+ // jialc reg, imm
+ if (((op == 0x36 || op == 0x3E) && rs == 0 && rt != 0) && // ji[al]c
last_ptr_ && (intptr_t)instr_ptr - (intptr_t)last_ptr_ == 4 &&
(last_instr_ & 0xFC1F0000) == 0xEC1E0000 && // auipc
((last_instr_ >> 21) & 0x1F) == rt) {
@@ -569,9 +578,9 @@ size_t DisassemblerMips::Dump(std::ostream& os, const uint8_t* instr_ptr) {
offset -= (offset & 0x8000) << 1;
offset -= 4;
if (op == 0x36) {
- args << " ; b ";
+ args << " ; bc ";
} else {
- args << " ; move r" << rt << ", ";
+ args << " ; balc ";
}
args << FormatInstructionPointer(instr_ptr + (int32_t)offset);
args << StringPrintf(" ; %+d", (int32_t)offset);
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 80c7113175..e4462d8305 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -26,6 +26,8 @@
#include <unordered_set>
#include <vector>
+#include "android-base/strings.h"
+
#include "arch/instruction_set_features.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
@@ -668,6 +670,12 @@ class OatDumper {
}
private:
+ // Adds all of the elements from one container to another.
+ template <typename Dest, typename Src>
+ static void AddAll(Dest& dest, const Src& src) {
+ dest.insert(src.begin(), src.end());
+ }
+
void WalkClass(const DexFile& dex_file, const DexFile::ClassDef& class_def) {
const uint8_t* class_data = dex_file.GetClassData(class_def);
if (class_data == nullptr) { // empty class such as a marker interface?
@@ -2952,7 +2960,7 @@ class IMTDumper {
table_index++;
std::string p_name = ptr2->PrettyMethod(true);
- if (StartsWith(p_name, method.c_str())) {
+ if (android::base::StartsWith(p_name, method.c_str())) {
std::cerr << " Slot "
<< index
<< " ("
@@ -2965,7 +2973,7 @@ class IMTDumper {
}
} else {
std::string p_name = ptr->PrettyMethod(true);
- if (StartsWith(p_name, method.c_str())) {
+ if (android::base::StartsWith(p_name, method.c_str())) {
std::cerr << " Slot " << index << " (1)" << std::endl;
std::cerr << " " << p_name << std::endl;
} else {
@@ -2978,7 +2986,7 @@ class IMTDumper {
for (ArtMethod& iface_method : iface->GetMethods(pointer_size)) {
if (ImTable::GetImtIndex(&iface_method) == index) {
std::string i_name = iface_method.PrettyMethod(true);
- if (StartsWith(i_name, method.c_str())) {
+ if (android::base::StartsWith(i_name, method.c_str())) {
std::cerr << " Slot " << index << " (1)" << std::endl;
std::cerr << " " << p_name << " (" << i_name << ")" << std::endl;
}
@@ -2997,7 +3005,7 @@ class IMTDumper {
while (in_stream.good()) {
std::string dot;
std::getline(in_stream, dot);
- if (StartsWith(dot, "#") || dot.empty()) {
+ if (android::base::StartsWith(dot, "#") || dot.empty()) {
continue;
}
output.push_back(dot);
diff --git a/oatdump/oatdump_test.cc b/oatdump/oatdump_test.cc
index 22db818086..a2eba4514b 100644
--- a/oatdump/oatdump_test.cc
+++ b/oatdump/oatdump_test.cc
@@ -18,6 +18,8 @@
#include <string>
#include <vector>
+#include "android-base/strings.h"
+
#include "common_runtime_test.h"
#include "base/stringprintf.h"
@@ -143,7 +145,7 @@ class OatDumpTest : public CommonRuntimeTest {
}
argv.push_back(nullptr);
UNUSED(execv(argv[0], &argv[0]));
- const std::string command_line(Join(exec_argv, ' '));
+ const std::string command_line(android::base::Join(exec_argv, ' '));
PLOG(ERROR) << "Failed to execv(" << command_line << ")";
// _exit to avoid atexit handlers in child.
_exit(1);
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index cb5a79068a..62d1ddff75 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -24,6 +24,8 @@
#include <string>
#include <vector>
+#include "android-base/strings.h"
+
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/dumpable.h"
@@ -286,8 +288,8 @@ bool PatchOat::Patch(const std::string& image_location,
std::string converted_image_filename = space->GetImageLocation();
std::replace(converted_image_filename.begin() + 1, converted_image_filename.end(), '/', '@');
std::string output_image_filename = output_directory +
- (StartsWith(converted_image_filename, "/") ? "" : "/") +
- converted_image_filename;
+ (android::base::StartsWith(converted_image_filename, "/") ? "" : "/") +
+ converted_image_filename;
std::string output_vdex_filename =
ImageHeader::GetVdexLocationFromImageLocation(output_image_filename);
std::string output_oat_filename =
@@ -343,8 +345,8 @@ bool PatchOat::Patch(const std::string& image_location,
std::string converted_image_filename = space->GetImageLocation();
std::replace(converted_image_filename.begin() + 1, converted_image_filename.end(), '/', '@');
std::string output_image_filename = output_directory +
- (StartsWith(converted_image_filename, "/") ? "" : "/") +
- converted_image_filename;
+ (android::base::StartsWith(converted_image_filename, "/") ? "" : "/") +
+ converted_image_filename;
bool new_oat_out;
std::unique_ptr<File>
output_image_file(CreateOrOpen(output_image_filename.c_str(), &new_oat_out));
@@ -932,7 +934,7 @@ static std::string CommandLine() {
for (int i = 0; i < orig_argc; ++i) {
command.push_back(orig_argv[i]);
}
- return Join(command, ' ');
+ return android::base::Join(command, ' ');
}
static void UsageErrorV(const char* fmt, va_list ap) {
diff --git a/profman/profman.cc b/profman/profman.cc
index bfef834bd9..0b2d172726 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -25,6 +25,8 @@
#include <string>
#include <vector>
+#include "android-base/strings.h"
+
#include "base/dumpable.h"
#include "base/scoped_flock.h"
#include "base/stringpiece.h"
@@ -48,7 +50,7 @@ static std::string CommandLine() {
for (int i = 0; i < original_argc; ++i) {
command.push_back(original_argv[i]);
}
- return Join(command, ' ');
+ return android::base::Join(command, ' ');
}
static constexpr int kInvalidFd = -1;
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 08be5b2979..32ebee24c8 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -517,6 +517,7 @@ art_cc_test {
"base/unix_file/fd_file_test.cc",
"cha_test.cc",
"class_linker_test.cc",
+ "class_table_test.cc",
"compiler_filter_test.cc",
"dex_file_test.cc",
"dex_file_verifier_test.cc",
diff --git a/runtime/arch/arm/instruction_set_features_arm.cc b/runtime/arch/arm/instruction_set_features_arm.cc
index c81a93c368..f264b82448 100644
--- a/runtime/arch/arm/instruction_set_features_arm.cc
+++ b/runtime/arch/arm/instruction_set_features_arm.cc
@@ -24,6 +24,8 @@
#include "signal.h"
#include <fstream>
+#include "android-base/strings.h"
+
#include "base/stringprintf.h"
#include "utils.h" // For Trim.
@@ -271,7 +273,7 @@ ArmInstructionSetFeatures::AddFeaturesFromSplitString(
bool has_atomic_ldrd_strd = has_atomic_ldrd_strd_;
bool has_div = has_div_;
for (auto i = features.begin(); i != features.end(); i++) {
- std::string feature = Trim(*i);
+ std::string feature = android::base::Trim(*i);
if (feature == "div") {
has_div = true;
} else if (feature == "-div") {
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.cc b/runtime/arch/arm64/instruction_set_features_arm64.cc
index 4e7dea3f48..f7b5a7649a 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64.cc
@@ -19,6 +19,8 @@
#include <fstream>
#include <sstream>
+#include "android-base/strings.h"
+
#include "base/stl_util.h"
#include "base/stringprintf.h"
#include "utils.h" // For Trim.
@@ -137,7 +139,7 @@ Arm64InstructionSetFeatures::AddFeaturesFromSplitString(
const bool smp, const std::vector<std::string>& features, std::string* error_msg) const {
bool is_a53 = fix_cortex_a53_835769_;
for (auto i = features.begin(); i != features.end(); i++) {
- std::string feature = Trim(*i);
+ std::string feature = android::base::Trim(*i);
if (feature == "a53") {
is_a53 = true;
} else if (feature == "-a53") {
diff --git a/runtime/arch/instruction_set_features.cc b/runtime/arch/instruction_set_features.cc
index b32391f6b0..db004e7495 100644
--- a/runtime/arch/instruction_set_features.cc
+++ b/runtime/arch/instruction_set_features.cc
@@ -16,6 +16,8 @@
#include "instruction_set_features.h"
+#include "android-base/strings.h"
+
#include "base/casts.h"
#include "utils.h"
@@ -224,7 +226,7 @@ std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::AddFeature
*error_msg = "Unexpected instruction set features after 'default'";
return std::unique_ptr<const InstructionSetFeatures>();
}
- std::string feature = Trim(*it);
+ std::string feature = android::base::Trim(*it);
bool erase = false;
if (feature == "default") {
if (!first) {
diff --git a/runtime/arch/mips/instruction_set_features_mips.cc b/runtime/arch/mips/instruction_set_features_mips.cc
index a95b6f604c..a65c967efd 100644
--- a/runtime/arch/mips/instruction_set_features_mips.cc
+++ b/runtime/arch/mips/instruction_set_features_mips.cc
@@ -19,6 +19,8 @@
#include <fstream>
#include <sstream>
+#include "android-base/strings.h"
+
#include "base/stl_util.h"
#include "base/stringprintf.h"
#include "utils.h" // For Trim.
@@ -210,7 +212,7 @@ MipsInstructionSetFeatures::AddFeaturesFromSplitString(
bool mips_isa_gte2 = mips_isa_gte2_;
bool r6 = r6_;
for (auto i = features.begin(); i != features.end(); i++) {
- std::string feature = Trim(*i);
+ std::string feature = android::base::Trim(*i);
if (feature == "fpu32") {
fpu_32bit = true;
} else if (feature == "-fpu32") {
diff --git a/runtime/arch/mips64/instruction_set_features_mips64.cc b/runtime/arch/mips64/instruction_set_features_mips64.cc
index 490a8d2df3..e564d1eab5 100644
--- a/runtime/arch/mips64/instruction_set_features_mips64.cc
+++ b/runtime/arch/mips64/instruction_set_features_mips64.cc
@@ -19,6 +19,8 @@
#include <fstream>
#include <sstream>
+#include "android-base/strings.h"
+
#include "base/stringprintf.h"
#include "utils.h" // For Trim.
@@ -105,7 +107,7 @@ Mips64InstructionSetFeatures::AddFeaturesFromSplitString(
auto i = features.begin();
if (i != features.end()) {
// We don't have any features.
- std::string feature = Trim(*i);
+ std::string feature = android::base::Trim(*i);
*error_msg = StringPrintf("Unknown instruction set feature: '%s'", feature.c_str());
return nullptr;
}
diff --git a/runtime/arch/x86/instruction_set_features_x86.cc b/runtime/arch/x86/instruction_set_features_x86.cc
index 90b55a97f6..cc102ecedd 100644
--- a/runtime/arch/x86/instruction_set_features_x86.cc
+++ b/runtime/arch/x86/instruction_set_features_x86.cc
@@ -19,6 +19,8 @@
#include <fstream>
#include <sstream>
+#include "android-base/strings.h"
+
#include "arch/x86_64/instruction_set_features_x86_64.h"
#include "base/stringprintf.h"
#include "utils.h" // For Trim.
@@ -293,7 +295,7 @@ std::unique_ptr<const InstructionSetFeatures> X86InstructionSetFeatures::AddFeat
bool has_AVX2 = has_AVX2_;
bool has_POPCNT = has_POPCNT_;
for (auto i = features.begin(); i != features.end(); i++) {
- std::string feature = Trim(*i);
+ std::string feature = android::base::Trim(*i);
if (feature == "ssse3") {
has_SSSE3 = true;
} else if (feature == "-ssse3") {
diff --git a/runtime/cha.cc b/runtime/cha.cc
index be675a82c8..d94b091ebd 100644
--- a/runtime/cha.cc
+++ b/runtime/cha.cc
@@ -100,7 +100,11 @@ class CHAStackVisitor FINAL : public StackVisitor {
bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = GetMethod();
- if (method == nullptr || method->IsRuntimeMethod() || method->IsNative()) {
+ // Avoid types of methods that do not have an oat quick method header.
+ if (method == nullptr ||
+ method->IsRuntimeMethod() ||
+ method->IsNative() ||
+ method->IsProxyMethod()) {
return true;
}
if (GetCurrentQuickFrame() == nullptr) {
@@ -110,6 +114,7 @@ class CHAStackVisitor FINAL : public StackVisitor {
// Method may have multiple versions of compiled code. Check
// the method header to see if it has should_deoptimize flag.
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
+ DCHECK(method_header != nullptr);
if (!method_header->HasShouldDeoptimizeFlag()) {
// This compiled version doesn't have should_deoptimize flag. Skip.
return true;
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 0a65cd11aa..213986ae45 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -25,6 +25,7 @@
#include "mirror/class_loader.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/iftable.h"
+#include "mirror/throwable.h"
#include "mirror/object_array.h"
#include "handle_scope-inl.h"
#include "scoped_thread_state_change-inl.h"
@@ -89,17 +90,28 @@ inline mirror::String* ClassLinker::ResolveString(dex::StringIndex string_idx,
inline mirror::Class* ClassLinker::ResolveType(dex::TypeIndex type_idx, ArtMethod* referrer) {
Thread::PoisonObjectPointersIfDebug();
+ if (kIsDebugBuild) {
+ Thread::Current()->AssertNoPendingException();
+ }
ObjPtr<mirror::Class> resolved_type =
referrer->GetDexCacheResolvedType(type_idx, image_pointer_size_);
if (UNLIKELY(resolved_type == nullptr)) {
- ObjPtr<mirror::Class> declaring_class = referrer->GetDeclaringClass();
StackHandleScope<2> hs(Thread::Current());
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
- const DexFile& dex_file = *dex_cache->GetDexFile();
- resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader);
- // Note: We cannot check here to see whether we added the type to the cache. The type
- // might be an erroneous class, which results in it being hidden from us.
+ // There could be an out of bounds exception from GetDexCacheResolvedType, don't call
+ // ResolveType for this case.
+ if (LIKELY(!hs.Self()->IsExceptionPending())) {
+ ObjPtr<mirror::Class> declaring_class = referrer->GetDeclaringClass();
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
+ const DexFile& dex_file = *dex_cache->GetDexFile();
+ resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader);
+ // Note: We cannot check here to see whether we added the type to the cache. The type
+ // might be an erroneous class, which results in it being hidden from us.
+ } else {
+ // Make sure it's an array out of bounds exception.
+ DCHECK(hs.Self()->GetException()->GetClass()->DescriptorEquals(
+ "Ljava/lang/ArrayIndexOutOfBoundsException;"));
+ }
}
return resolved_type.Ptr();
}
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 862585af92..685677bd80 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -19,6 +19,8 @@
#include <memory>
#include <string>
+#include "android-base/strings.h"
+
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/enums.h"
@@ -800,12 +802,12 @@ TEST_F(ClassLinkerTest, GetDexFiles) {
jobject jclass_loader = LoadDex("Nested");
std::vector<const DexFile*> dex_files(GetDexFiles(jclass_loader));
ASSERT_EQ(dex_files.size(), 1U);
- EXPECT_TRUE(EndsWith(dex_files[0]->GetLocation(), "Nested.jar"));
+ EXPECT_TRUE(android::base::EndsWith(dex_files[0]->GetLocation(), "Nested.jar"));
jobject jclass_loader2 = LoadDex("MultiDex");
std::vector<const DexFile*> dex_files2(GetDexFiles(jclass_loader2));
ASSERT_EQ(dex_files2.size(), 2U);
- EXPECT_TRUE(EndsWith(dex_files2[0]->GetLocation(), "MultiDex.jar"));
+ EXPECT_TRUE(android::base::EndsWith(dex_files2[0]->GetLocation(), "MultiDex.jar"));
}
TEST_F(ClassLinkerTest, FindClassNested) {
diff --git a/runtime/class_table-inl.h b/runtime/class_table-inl.h
index 229cd477dd..dfe8949134 100644
--- a/runtime/class_table-inl.h
+++ b/runtime/class_table-inl.h
@@ -71,6 +71,19 @@ bool ClassTable::Visit(Visitor& visitor) {
return true;
}
+template <typename Visitor>
+bool ClassTable::Visit(const Visitor& visitor) {
+ ReaderMutexLock mu(Thread::Current(), lock_);
+ for (ClassSet& class_set : classes_) {
+ for (TableSlot& table_slot : class_set) {
+ if (!visitor(table_slot.Read())) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
template<ReadBarrierOption kReadBarrierOption>
inline mirror::Class* ClassTable::TableSlot::Read() const {
const uint32_t before = data_.LoadRelaxed();
diff --git a/runtime/class_table.cc b/runtime/class_table.cc
index ec33e5ef80..0f985c6424 100644
--- a/runtime/class_table.cc
+++ b/runtime/class_table.cc
@@ -33,8 +33,9 @@ void ClassTable::FreezeSnapshot() {
bool ClassTable::Contains(ObjPtr<mirror::Class> klass) {
ReaderMutexLock mu(Thread::Current(), lock_);
+ TableSlot slot(klass);
for (ClassSet& class_set : classes_) {
- auto it = class_set.Find(TableSlot(klass));
+ auto it = class_set.Find(slot);
if (it != class_set.end()) {
return it->Read() == klass;
}
@@ -44,8 +45,9 @@ bool ClassTable::Contains(ObjPtr<mirror::Class> klass) {
mirror::Class* ClassTable::LookupByDescriptor(ObjPtr<mirror::Class> klass) {
ReaderMutexLock mu(Thread::Current(), lock_);
+ TableSlot slot(klass);
for (ClassSet& class_set : classes_) {
- auto it = class_set.Find(TableSlot(klass));
+ auto it = class_set.Find(slot);
if (it != class_set.end()) {
return it->Read();
}
@@ -110,8 +112,8 @@ size_t ClassTable::NumNonZygoteClasses(ObjPtr<mirror::ClassLoader> defining_load
}
mirror::Class* ClassTable::Lookup(const char* descriptor, size_t hash) {
- ReaderMutexLock mu(Thread::Current(), lock_);
DescriptorHashPair pair(descriptor, hash);
+ ReaderMutexLock mu(Thread::Current(), lock_);
for (ClassSet& class_set : classes_) {
auto it = class_set.FindWithHash(pair, hash);
if (it != class_set.end()) {
@@ -122,12 +124,14 @@ mirror::Class* ClassTable::Lookup(const char* descriptor, size_t hash) {
}
void ClassTable::Insert(ObjPtr<mirror::Class> klass) {
+ const uint32_t hash = TableSlot::HashDescriptor(klass);
WriterMutexLock mu(Thread::Current(), lock_);
- classes_.back().Insert(TableSlot(klass));
+ classes_.back().InsertWithHash(TableSlot(klass, hash), hash);
}
void ClassTable::InsertWithoutLocks(ObjPtr<mirror::Class> klass) {
- classes_.back().Insert(TableSlot(klass));
+ const uint32_t hash = TableSlot::HashDescriptor(klass);
+ classes_.back().InsertWithHash(TableSlot(klass, hash), hash);
}
void ClassTable::InsertWithHash(ObjPtr<mirror::Class> klass, size_t hash) {
@@ -136,8 +140,8 @@ void ClassTable::InsertWithHash(ObjPtr<mirror::Class> klass, size_t hash) {
}
bool ClassTable::Remove(const char* descriptor) {
- WriterMutexLock mu(Thread::Current(), lock_);
DescriptorHashPair pair(descriptor, ComputeModifiedUtf8Hash(descriptor));
+ WriterMutexLock mu(Thread::Current(), lock_);
for (ClassSet& class_set : classes_) {
auto it = class_set.Find(pair);
if (it != class_set.end()) {
@@ -250,10 +254,12 @@ void ClassTable::ClearStrongRoots() {
strong_roots_.clear();
}
-ClassTable::TableSlot::TableSlot(ObjPtr<mirror::Class> klass) {
+ClassTable::TableSlot::TableSlot(ObjPtr<mirror::Class> klass)
+ : TableSlot(klass, HashDescriptor(klass)) {}
+
+uint32_t ClassTable::TableSlot::HashDescriptor(ObjPtr<mirror::Class> klass) {
std::string temp;
- data_.StoreRelaxed(Encode(klass.Ptr(),
- MaskHash(ComputeModifiedUtf8Hash(klass->GetDescriptor(&temp)))));
+ return ComputeModifiedUtf8Hash(klass->GetDescriptor(&temp));
}
} // namespace art
diff --git a/runtime/class_table.h b/runtime/class_table.h
index 104871ff21..f27d8093ce 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -73,6 +73,9 @@ class ClassTable {
return MaskHash(other) == Hash();
}
+ static uint32_t HashDescriptor(ObjPtr<mirror::Class> klass)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
mirror::Class* Read() const REQUIRES_SHARED(Locks::mutator_lock_);
@@ -174,6 +177,10 @@ class ClassTable {
bool Visit(Visitor& visitor)
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ template <typename Visitor>
+ bool Visit(const Visitor& visitor)
+ REQUIRES(!lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Return the first class that matches the descriptor. Returns null if there are none.
mirror::Class* Lookup(const char* descriptor, size_t hash)
diff --git a/runtime/class_table_test.cc b/runtime/class_table_test.cc
new file mode 100644
index 0000000000..f1248eb00c
--- /dev/null
+++ b/runtime/class_table_test.cc
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "class_table-inl.h"
+
+#include "art_field-inl.h"
+#include "art_method-inl.h"
+#include "class_linker-inl.h"
+#include "common_runtime_test.h"
+#include "dex_file.h"
+#include "gc/accounting/card_table-inl.h"
+#include "gc/heap.h"
+#include "handle_scope-inl.h"
+#include "mirror/class-inl.h"
+#include "obj_ptr.h"
+#include "scoped_thread_state_change-inl.h"
+
+namespace art {
+namespace mirror {
+
+class CollectRootVisitor {
+ public:
+ CollectRootVisitor() {}
+
+ template <class MirrorType>
+ ALWAYS_INLINE void VisitRootIfNonNull(GcRoot<MirrorType>& root) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!root.IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ template <class MirrorType>
+ ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ template <class MirrorType>
+ void VisitRoot(GcRoot<MirrorType>& root) const REQUIRES_SHARED(Locks::mutator_lock_) {
+ VisitRoot(root.AddressWithoutBarrier());
+ }
+
+ template <class MirrorType>
+ void VisitRoot(mirror::CompressedReference<MirrorType>* root) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ roots_.insert(root->AsMirrorPtr());
+ }
+
+ mutable std::set<mirror::Object*> roots_;
+};
+
+
+class ClassTableTest : public CommonRuntimeTest {};
+
+TEST_F(ClassTableTest, ClassTable) {
+ ScopedObjectAccess soa(Thread::Current());
+ jobject jclass_loader = LoadDex("XandY");
+ VariableSizedHandleScope hs(soa.Self());
+ Handle<ClassLoader> class_loader(hs.NewHandle(soa.Decode<ClassLoader>(jclass_loader)));
+ const char* descriptor_x = "LX;";
+ const char* descriptor_y = "LY;";
+ Handle<mirror::Class> h_X(
+ hs.NewHandle(class_linker_->FindClass(soa.Self(), descriptor_x, class_loader)));
+ Handle<mirror::Class> h_Y(
+ hs.NewHandle(class_linker_->FindClass(soa.Self(), descriptor_y, class_loader)));
+ Handle<mirror::Object> obj_X = hs.NewHandle(h_X->AllocObject(soa.Self()));
+ ASSERT_TRUE(obj_X.Get() != nullptr);
+ ClassTable table;
+ EXPECT_EQ(table.NumZygoteClasses(class_loader.Get()), 0u);
+ EXPECT_EQ(table.NumNonZygoteClasses(class_loader.Get()), 0u);
+
+ // Add h_X to the class table.
+ table.Insert(h_X.Get());
+ EXPECT_EQ(table.LookupByDescriptor(h_X.Get()), h_X.Get());
+ EXPECT_EQ(table.Lookup(descriptor_x, ComputeModifiedUtf8Hash(descriptor_x)), h_X.Get());
+ EXPECT_EQ(table.Lookup("NOT_THERE", ComputeModifiedUtf8Hash("NOT_THERE")), nullptr);
+ EXPECT_EQ(table.NumZygoteClasses(class_loader.Get()), 0u);
+ EXPECT_EQ(table.NumNonZygoteClasses(class_loader.Get()), 1u);
+
+ // Create the zygote snapshot and ensure the accounting is correct.
+ table.FreezeSnapshot();
+ EXPECT_EQ(table.NumZygoteClasses(class_loader.Get()), 1u);
+ EXPECT_EQ(table.NumNonZygoteClasses(class_loader.Get()), 0u);
+
+ // Test inserting and related lookup functions.
+ EXPECT_EQ(table.LookupByDescriptor(h_Y.Get()), nullptr);
+ EXPECT_FALSE(table.Contains(h_Y.Get()));
+ table.Insert(h_Y.Get());
+ EXPECT_EQ(table.LookupByDescriptor(h_X.Get()), h_X.Get());
+ EXPECT_EQ(table.LookupByDescriptor(h_Y.Get()), h_Y.Get());
+ EXPECT_TRUE(table.Contains(h_X.Get()));
+ EXPECT_TRUE(table.Contains(h_Y.Get()));
+
+ EXPECT_EQ(table.NumZygoteClasses(class_loader.Get()), 1u);
+ EXPECT_EQ(table.NumNonZygoteClasses(class_loader.Get()), 1u);
+
+ // Test adding / clearing strong roots.
+ EXPECT_TRUE(table.InsertStrongRoot(obj_X.Get()));
+ EXPECT_FALSE(table.InsertStrongRoot(obj_X.Get()));
+ table.ClearStrongRoots();
+ EXPECT_TRUE(table.InsertStrongRoot(obj_X.Get()));
+
+ // Collect all the roots and make sure there is nothing missing.
+ CollectRootVisitor roots;
+ table.VisitRoots(roots);
+ EXPECT_TRUE(roots.roots_.find(h_X.Get()) != roots.roots_.end());
+ EXPECT_TRUE(roots.roots_.find(h_Y.Get()) != roots.roots_.end());
+ EXPECT_TRUE(roots.roots_.find(obj_X.Get()) != roots.roots_.end());
+
+ // Checks that visiting only classes works.
+ std::set<mirror::Class*> classes;
+ table.Visit([&classes](ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_) {
+ classes.insert(klass.Ptr());
+ return true;
+ });
+ EXPECT_TRUE(classes.find(h_X.Get()) != classes.end());
+ EXPECT_TRUE(classes.find(h_Y.Get()) != classes.end());
+ EXPECT_EQ(classes.size(), 2u);
+ classes.clear();
+ table.Visit([&classes](ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_) {
+ classes.insert(klass.Ptr());
+ // Return false to exit the Visit early.
+ return false;
+ });
+ EXPECT_EQ(classes.size(), 1u);
+
+ // Test remove.
+ table.Remove(descriptor_x);
+ EXPECT_FALSE(table.Contains(h_X.Get()));
+
+ // Test that WriteToMemory and ReadFromMemory work.
+ table.Insert(h_X.Get());
+ const size_t count = table.WriteToMemory(nullptr);
+ std::unique_ptr<uint8_t[]> buffer(new uint8_t[count]());
+ ASSERT_EQ(table.WriteToMemory(&buffer[0]), count);
+ ClassTable table2;
+ size_t count2 = table2.ReadFromMemory(&buffer[0]);
+ EXPECT_EQ(count, count2);
+ // Strong roots are not serialized, only classes.
+ EXPECT_TRUE(table2.Contains(h_X.Get()));
+ EXPECT_TRUE(table2.Contains(h_Y.Get()));
+
+ // TODO: Add tests for UpdateClass, InsertOatFile.
+}
+
+} // namespace mirror
+} // namespace art
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 2ea7bb6778..ee0f34002b 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -20,6 +20,8 @@
#include <sys/types.h>
#include <unistd.h>
+#include "android-base/strings.h"
+
#include "arch/instruction_set.h"
#include "base/logging.h"
#include "base/stringprintf.h"
@@ -1451,7 +1453,7 @@ bool ElfFileImpl<ElfTypes>::Strip(File* file, std::string* error_msg) {
section_headers_original_indexes.push_back(0);
continue;
}
- if (StartsWith(name, ".debug")
+ if (android::base::StartsWith(name, ".debug")
|| (strcmp(name, ".strtab") == 0)
|| (strcmp(name, ".symtab") == 0)) {
continue;
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index fbab73f022..b8899137bc 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -1360,9 +1360,10 @@ inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
<< " is_marked=" << IsMarked(to_ref);
}
#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
+ mirror::Object* referent = nullptr;
if (UNLIKELY((to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
- to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr &&
- !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())))) {
+ (referent = to_ref->AsReference()->GetReferent<kWithoutReadBarrier>()) != nullptr &&
+ !IsInToSpace(referent)))) {
// Leave this reference gray in the queue so that GetReferent() will trigger a read barrier. We
// will change it to white later in ReferenceQueue::DequeuePendingReference().
DCHECK(to_ref->AsReference()->GetPendingNext() != nullptr) << "Left unenqueued ref gray " << to_ref;
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 2cde7d5731..081be968eb 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -75,11 +75,10 @@ ObjPtr<mirror::Object> ReferenceProcessor::GetReferent(Thread* self,
MutexLock mu(self, *Locks::reference_processor_lock_);
while ((!kUseReadBarrier && SlowPathEnabled()) ||
(kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
- mirror::HeapReference<mirror::Object>* const referent_addr =
- reference->GetReferentReferenceAddr();
+ ObjPtr<mirror::Object> referent = reference->GetReferent<kWithoutReadBarrier>();
// If the referent became cleared, return it. Don't need barrier since thread roots can't get
// updated until after we leave the function due to holding the mutator lock.
- if (referent_addr->AsMirrorPtr() == nullptr) {
+ if (referent == nullptr) {
return nullptr;
}
// Try to see if the referent is already marked by using the is_marked_callback. We can return
@@ -91,10 +90,15 @@ ObjPtr<mirror::Object> ReferenceProcessor::GetReferent(Thread* self,
// case only black nodes can be safely returned. If the GC is preserving references, the
// mutator could take a white field from a grey or white node and move it somewhere else
// in the heap causing corruption since this field would get swept.
- if (collector_->IsMarkedHeapReference(referent_addr)) {
+ // Use the cached referent instead of calling GetReferent since other threads could call
+ // Reference.clear() after we did the null check resulting in a null pointer being
+ // incorrectly passed to IsMarked. b/33569625
+ ObjPtr<mirror::Object> forwarded_ref = collector_->IsMarked(referent.Ptr());
+ if (forwarded_ref != nullptr) {
+ // Non null means that it is marked.
if (!preserving_references_ ||
(LIKELY(!reference->IsFinalizerReferenceInstance()) && reference->IsUnprocessed())) {
- return referent_addr->AsMirrorPtr();
+ return forwarded_ref;
}
}
}
@@ -265,11 +269,23 @@ void ReferenceProcessor::EnqueueClearedReferences(Thread* self) {
}
}
-bool ReferenceProcessor::MakeCircularListIfUnenqueued(
- ObjPtr<mirror::FinalizerReference> reference) {
+void ReferenceProcessor::ClearReferent(ObjPtr<mirror::Reference> ref) {
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::reference_processor_lock_);
- // Wait untul we are done processing reference.
+ // Need to wait until reference processing is done since IsMarkedHeapReference does not have a
+ // CAS. If we do not wait, it can result in the GC un-clearing references due to race conditions.
+ // This also handles the race where the referent gets cleared after a null check but before
+ // IsMarkedHeapReference is called.
+ WaitUntilDoneProcessingReferences(self);
+ if (Runtime::Current()->IsActiveTransaction()) {
+ ref->ClearReferent<true>();
+ } else {
+ ref->ClearReferent<false>();
+ }
+}
+
+void ReferenceProcessor::WaitUntilDoneProcessingReferences(Thread* self) {
+ // Wait until we are done processing reference.
while ((!kUseReadBarrier && SlowPathEnabled()) ||
(kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
@@ -277,6 +293,13 @@ bool ReferenceProcessor::MakeCircularListIfUnenqueued(
self->CheckEmptyCheckpoint();
condition_.WaitHoldingLocks(self);
}
+}
+
+bool ReferenceProcessor::MakeCircularListIfUnenqueued(
+ ObjPtr<mirror::FinalizerReference> reference) {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, *Locks::reference_processor_lock_);
+ WaitUntilDoneProcessingReferences(self);
// At this point, since the sentinel of the reference is live, it is guaranteed to not be
// enqueued if we just finished processing references. Otherwise, we may be doing the main GC
// phase. Since we are holding the reference processor lock, it guarantees that reference
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
index 759b7e129c..b15544d549 100644
--- a/runtime/gc/reference_processor.h
+++ b/runtime/gc/reference_processor.h
@@ -73,6 +73,9 @@ class ReferenceProcessor {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::reference_processor_lock_,
!Locks::reference_queue_finalizer_references_lock_);
+ void ClearReferent(ObjPtr<mirror::Reference> ref)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::reference_processor_lock_);
private:
bool SlowPathEnabled() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -84,6 +87,10 @@ class ReferenceProcessor {
// referents.
void StartPreservingReferences(Thread* self) REQUIRES(!Locks::reference_processor_lock_);
void StopPreservingReferences(Thread* self) REQUIRES(!Locks::reference_processor_lock_);
+ // Wait until reference processing is done.
+ void WaitUntilDoneProcessingReferences(Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(Locks::reference_processor_lock_);
// Collector which is clearing references, used by the GetReferent to return referents which are
// already marked.
collector::GarbageCollector* collector_ GUARDED_BY(Locks::reference_processor_lock_);
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index c7269440f0..76f3692f41 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -22,6 +22,8 @@
#include <sys/types.h>
#include <unistd.h>
+#include "android-base/strings.h"
+
#include "art_method.h"
#include "base/enums.h"
#include "base/macros.h"
@@ -137,7 +139,7 @@ static bool GenerateImage(const std::string& image_filename,
arg_vector.push_back(compiler_options[i].c_str());
}
- std::string command_line(Join(arg_vector, ' '));
+ std::string command_line(android::base::Join(arg_vector, ' '));
LOG(INFO) << "GenerateImage: " << command_line;
return Exec(arg_vector, error_msg);
}
@@ -257,7 +259,7 @@ static bool RelocateImage(const char* image_location,
argv.push_back(instruction_set_arg);
argv.push_back(base_offset_arg);
- std::string command_line(Join(argv, ' '));
+ std::string command_line(android::base::Join(argv, ' '));
LOG(INFO) << "RelocateImage: " << command_line;
return Exec(argv, error_msg);
}
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 52eacd564f..b0d7fb247a 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -92,6 +92,16 @@ namespace interpreter {
} \
} while (false)
+#define HANDLE_BACKWARD_BRANCH(offset) \
+ do { \
+ if (IsBackwardBranch(offset)) { \
+ HOTNESS_UPDATE(); \
+ /* Record new dex pc early to have consistent suspend point at loop header. */ \
+ shadow_frame.SetDexPC(inst->GetDexPc(insns)); \
+ self->AllowThreadSuspension(); \
+ } \
+ } while (false)
+
template<bool do_access_check, bool transaction_active>
JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register,
@@ -594,55 +604,40 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
PREAMBLE();
int8_t offset = inst->VRegA_10t(inst_data);
BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- self->AllowThreadSuspension();
- }
inst = inst->RelativeAt(offset);
+ HANDLE_BACKWARD_BRANCH(offset);
break;
}
case Instruction::GOTO_16: {
PREAMBLE();
int16_t offset = inst->VRegA_20t();
BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- self->AllowThreadSuspension();
- }
inst = inst->RelativeAt(offset);
+ HANDLE_BACKWARD_BRANCH(offset);
break;
}
case Instruction::GOTO_32: {
PREAMBLE();
int32_t offset = inst->VRegA_30t();
BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- self->AllowThreadSuspension();
- }
inst = inst->RelativeAt(offset);
+ HANDLE_BACKWARD_BRANCH(offset);
break;
}
case Instruction::PACKED_SWITCH: {
PREAMBLE();
int32_t offset = DoPackedSwitch(inst, shadow_frame, inst_data);
BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- self->AllowThreadSuspension();
- }
inst = inst->RelativeAt(offset);
+ HANDLE_BACKWARD_BRANCH(offset);
break;
}
case Instruction::SPARSE_SWITCH: {
PREAMBLE();
int32_t offset = DoSparseSwitch(inst, shadow_frame, inst_data);
BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- self->AllowThreadSuspension();
- }
inst = inst->RelativeAt(offset);
+ HANDLE_BACKWARD_BRANCH(offset);
break;
}
@@ -739,11 +734,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- self->AllowThreadSuspension();
- }
inst = inst->RelativeAt(offset);
+ HANDLE_BACKWARD_BRANCH(offset);
} else {
BRANCH_INSTRUMENTATION(2);
inst = inst->Next_2xx();
@@ -756,11 +748,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- self->AllowThreadSuspension();
- }
inst = inst->RelativeAt(offset);
+ HANDLE_BACKWARD_BRANCH(offset);
} else {
BRANCH_INSTRUMENTATION(2);
inst = inst->Next_2xx();
@@ -773,11 +762,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- self->AllowThreadSuspension();
- }
inst = inst->RelativeAt(offset);
+ HANDLE_BACKWARD_BRANCH(offset);
} else {
BRANCH_INSTRUMENTATION(2);
inst = inst->Next_2xx();
@@ -790,11 +776,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- self->AllowThreadSuspension();
- }
inst = inst->RelativeAt(offset);
+ HANDLE_BACKWARD_BRANCH(offset);
} else {
BRANCH_INSTRUMENTATION(2);
inst = inst->Next_2xx();
@@ -807,11 +790,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- self->AllowThreadSuspension();
- }
inst = inst->RelativeAt(offset);
+ HANDLE_BACKWARD_BRANCH(offset);
} else {
BRANCH_INSTRUMENTATION(2);
inst = inst->Next_2xx();
@@ -824,11 +804,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
int16_t offset = inst->VRegC_22t();
BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- self->AllowThreadSuspension();
- }
inst = inst->RelativeAt(offset);
+ HANDLE_BACKWARD_BRANCH(offset);
} else {
BRANCH_INSTRUMENTATION(2);
inst = inst->Next_2xx();
@@ -840,11 +817,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) == 0) {
int16_t offset = inst->VRegB_21t();
BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- self->AllowThreadSuspension();
- }
inst = inst->RelativeAt(offset);
+ HANDLE_BACKWARD_BRANCH(offset);
} else {
BRANCH_INSTRUMENTATION(2);
inst = inst->Next_2xx();
@@ -856,11 +830,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) != 0) {
int16_t offset = inst->VRegB_21t();
BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- self->AllowThreadSuspension();
- }
inst = inst->RelativeAt(offset);
+ HANDLE_BACKWARD_BRANCH(offset);
} else {
BRANCH_INSTRUMENTATION(2);
inst = inst->Next_2xx();
@@ -872,11 +843,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) < 0) {
int16_t offset = inst->VRegB_21t();
BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- self->AllowThreadSuspension();
- }
inst = inst->RelativeAt(offset);
+ HANDLE_BACKWARD_BRANCH(offset);
} else {
BRANCH_INSTRUMENTATION(2);
inst = inst->Next_2xx();
@@ -888,11 +856,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) >= 0) {
int16_t offset = inst->VRegB_21t();
BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- self->AllowThreadSuspension();
- }
inst = inst->RelativeAt(offset);
+ HANDLE_BACKWARD_BRANCH(offset);
} else {
BRANCH_INSTRUMENTATION(2);
inst = inst->Next_2xx();
@@ -904,11 +869,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) > 0) {
int16_t offset = inst->VRegB_21t();
BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- self->AllowThreadSuspension();
- }
inst = inst->RelativeAt(offset);
+ HANDLE_BACKWARD_BRANCH(offset);
} else {
BRANCH_INSTRUMENTATION(2);
inst = inst->Next_2xx();
@@ -920,11 +882,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) <= 0) {
int16_t offset = inst->VRegB_21t();
BRANCH_INSTRUMENTATION(offset);
- if (IsBackwardBranch(offset)) {
- HOTNESS_UPDATE();
- self->AllowThreadSuspension();
- }
inst = inst->RelativeAt(offset);
+ HANDLE_BACKWARD_BRANCH(offset);
} else {
BRANCH_INSTRUMENTATION(2);
inst = inst->Next_2xx();
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 93f50ad2b1..1b0ad8341b 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -297,10 +297,11 @@ static void FillRootTable(uint8_t* roots_data, Handle<mirror::ObjectArray<mirror
ObjPtr<mirror::Object> object = roots->Get(i);
if (kIsDebugBuild) {
// Ensure the string is strongly interned. b/32995596
- CHECK(object->IsString());
- ObjPtr<mirror::String> str = reinterpret_cast<mirror::String*>(object.Ptr());
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- CHECK(class_linker->GetInternTable()->LookupStrong(Thread::Current(), str) != nullptr);
+ if (object->IsString()) {
+ ObjPtr<mirror::String> str = reinterpret_cast<mirror::String*>(object.Ptr());
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ CHECK(class_linker->GetInternTable()->LookupStrong(Thread::Current(), str) != nullptr);
+ }
}
gc_roots[i] = GcRoot<mirror::Object>(object);
}
@@ -316,6 +317,31 @@ static uint8_t* GetRootTable(const void* code_ptr, uint32_t* number_of_roots = n
return data - ComputeRootTableSize(roots);
}
+// Helper for the GC to process a weak class in a JIT root table.
+static inline void ProcessWeakClass(GcRoot<mirror::Class>* root_ptr, IsMarkedVisitor* visitor)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ // This does not need a read barrier because this is called by GC.
+ mirror::Class* cls = root_ptr->Read<kWithoutReadBarrier>();
+ if (cls != nullptr) {
+ DCHECK((cls->IsClass<kDefaultVerifyFlags, kWithoutReadBarrier>()));
+ // Look at the classloader of the class to know if it has been unloaded.
+ // This does not need a read barrier because this is called by GC.
+ mirror::Object* class_loader =
+ cls->GetClassLoader<kDefaultVerifyFlags, kWithoutReadBarrier>();
+ if (class_loader == nullptr || visitor->IsMarked(class_loader) != nullptr) {
+ // The class loader is live, update the entry if the class has moved.
+ mirror::Class* new_cls = down_cast<mirror::Class*>(visitor->IsMarked(cls));
+ // Note that new_object can be null for CMS and newly allocated objects.
+ if (new_cls != nullptr && new_cls != cls) {
+ *root_ptr = GcRoot<mirror::Class>(new_cls);
+ }
+ } else {
+ // The class loader is not live, clear the entry.
+ *root_ptr = GcRoot<mirror::Class>(nullptr);
+ }
+ }
+}
+
void JitCodeCache::SweepRootTables(IsMarkedVisitor* visitor) {
MutexLock mu(Thread::Current(), lock_);
for (const auto& entry : method_code_map_) {
@@ -325,17 +351,22 @@ void JitCodeCache::SweepRootTables(IsMarkedVisitor* visitor) {
for (uint32_t i = 0; i < number_of_roots; ++i) {
// This does not need a read barrier because this is called by GC.
mirror::Object* object = roots[i].Read<kWithoutReadBarrier>();
- DCHECK(object != nullptr);
- mirror::Object* new_object = visitor->IsMarked(object);
- // We know the string is marked because it's a strongly-interned string that
- // is always alive. The IsMarked implementation of the CMS collector returns
- // null for newly allocated objects, but we know those haven't moved. Therefore,
- // only update the entry if we get a different non-null string.
- // TODO: Do not use IsMarked for j.l.Class, and adjust once we move this method
- // out of the weak access/creation pause. b/32167580
- if (new_object != nullptr && new_object != object) {
- DCHECK(new_object->IsString());
- roots[i] = GcRoot<mirror::Object>(new_object);
+ if (object == nullptr) {
+ // entry got deleted in a previous sweep.
+ } else if (object->IsString<kDefaultVerifyFlags, kWithoutReadBarrier>()) {
+ mirror::Object* new_object = visitor->IsMarked(object);
+ // We know the string is marked because it's a strongly-interned string that
+ // is always alive. The IsMarked implementation of the CMS collector returns
+ // null for newly allocated objects, but we know those haven't moved. Therefore,
+ // only update the entry if we get a different non-null string.
+ // TODO: Do not use IsMarked for j.l.Class, and adjust once we move this method
+ // out of the weak access/creation pause. b/32167580
+ if (new_object != nullptr && new_object != object) {
+ DCHECK(new_object->IsString());
+ roots[i] = GcRoot<mirror::Object>(new_object);
+ }
+ } else {
+ ProcessWeakClass(reinterpret_cast<GcRoot<mirror::Class>*>(&roots[i]), visitor);
}
}
}
@@ -344,26 +375,7 @@ void JitCodeCache::SweepRootTables(IsMarkedVisitor* visitor) {
for (size_t i = 0; i < info->number_of_inline_caches_; ++i) {
InlineCache* cache = &info->cache_[i];
for (size_t j = 0; j < InlineCache::kIndividualCacheSize; ++j) {
- // This does not need a read barrier because this is called by GC.
- mirror::Class* cls = cache->classes_[j].Read<kWithoutReadBarrier>();
- if (cls != nullptr) {
- // Look at the classloader of the class to know if it has been
- // unloaded.
- // This does not need a read barrier because this is called by GC.
- mirror::Object* class_loader =
- cls->GetClassLoader<kDefaultVerifyFlags, kWithoutReadBarrier>();
- if (class_loader == nullptr || visitor->IsMarked(class_loader) != nullptr) {
- // The class loader is live, update the entry if the class has moved.
- mirror::Class* new_cls = down_cast<mirror::Class*>(visitor->IsMarked(cls));
- // Note that new_object can be null for CMS and newly allocated objects.
- if (new_cls != nullptr && new_cls != cls) {
- cache->classes_[j] = GcRoot<mirror::Class>(new_cls);
- }
- } else {
- // The class loader is not live, clear the entry.
- cache->classes_[j] = GcRoot<mirror::Class>(nullptr);
- }
- }
+ ProcessWeakClass(&cache->classes_[j], visitor);
}
}
}
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index 11d601e8c8..025d10ccc0 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -20,6 +20,8 @@
#include <sys/stat.h>
#include <fcntl.h>
+#include "android-base/strings.h"
+
#include "art_method-inl.h"
#include "base/enums.h"
#include "base/systrace.h"
@@ -412,7 +414,7 @@ void ProfileSaver::Start(const ProfileSaverOptions& options,
}
VLOG(profiler) << "Starting profile saver using output file: " << output_filename
- << ". Tracking: " << Join(code_paths_to_profile, ':');
+ << ". Tracking: " << android::base::Join(code_paths_to_profile, ':');
instance_ = new ProfileSaver(options,
output_filename,
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index b54e416bc0..a59bb7b880 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -72,7 +72,7 @@ inline void DexCache::ClearString(dex::StringIndex string_idx) {
inline Class* DexCache::GetResolvedType(dex::TypeIndex type_idx) {
// It is theorized that a load acquire is not required since obtaining the resolved class will
- // always have an address depedency or a lock.
+ // always have an address dependency or a lock.
DCHECK_LT(type_idx.index_, NumResolvedTypes());
return GetResolvedTypes()[type_idx.index_].Read();
}
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index 6870fda075..95516ace9f 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -305,8 +305,11 @@ inline int32_t String::GetHashCode() {
template<typename MemoryType>
bool String::AllASCII(const MemoryType* const chars, const int length) {
+ static_assert(std::is_unsigned<MemoryType>::value, "Expecting unsigned MemoryType");
for (int i = 0; i < length; ++i) {
- if (chars[i] >= 0x80) {
+ // Valid ASCII characters are in range 1..0x7f. Zero is not considered ASCII
+ // because it would complicate the detection of ASCII strings in Modified-UTF8.
+ if ((chars[i] - 1u) >= 0x7fu) {
return false;
}
}
diff --git a/runtime/native/java_lang_ref_Reference.cc b/runtime/native/java_lang_ref_Reference.cc
index bedca109aa..c778068bc4 100644
--- a/runtime/native/java_lang_ref_Reference.cc
+++ b/runtime/native/java_lang_ref_Reference.cc
@@ -33,8 +33,15 @@ static jobject Reference_getReferent(JNIEnv* env, jobject javaThis) {
return soa.AddLocalReference<jobject>(referent);
}
+static void Reference_clearReferent(JNIEnv* env, jobject javaThis) {
+ ScopedFastNativeObjectAccess soa(env);
+ ObjPtr<mirror::Reference> ref = soa.Decode<mirror::Reference>(javaThis);
+ Runtime::Current()->GetHeap()->GetReferenceProcessor()->ClearReferent(ref);
+}
+
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(Reference, getReferent, "!()Ljava/lang/Object;"),
+ NATIVE_METHOD(Reference, clearReferent, "!()V"),
};
void register_java_lang_ref_Reference(JNIEnv* env) {
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 6a62a166a3..7f7b1b5b06 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -19,6 +19,9 @@
#include <sstream>
#include <sys/stat.h>
+
+#include "android-base/strings.h"
+
#include "base/logging.h"
#include "base/stringprintf.h"
#include "compiler_filter.h"
@@ -456,7 +459,7 @@ OatFileAssistant::RelocateOatFile(const std::string* input_file, std::string* er
argv.push_back("--output-oat-file=" + oat_file_name);
argv.push_back("--patched-image-location=" + image_info->location);
- std::string command_line(Join(argv, ' '));
+ std::string command_line(android::base::Join(argv, ' '));
if (!Exec(argv, error_msg)) {
// Manually delete the file. This ensures there is no garbage left over if
// the process unexpectedly died.
@@ -605,7 +608,7 @@ bool OatFileAssistant::Dex2Oat(const std::vector<std::string>& args,
argv.insert(argv.end(), args.begin(), args.end());
- std::string command_line(Join(argv, ' '));
+ std::string command_line(android::base::Join(argv, ' '));
return Exec(argv, error_msg);
}
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index 94c12af199..26dbaab367 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -20,6 +20,7 @@
#include <vector>
#include <sys/param.h>
+#include "android-base/strings.h"
#include <backtrace/BacktraceMap.h>
#include <gtest/gtest.h>
@@ -1057,7 +1058,7 @@ static std::string MakePathRelative(const std::string& target) {
// Reverse again to get the right path order, and join to get the result.
std::reverse(target_path.begin(), target_path.end());
- return Join(target_path, '/');
+ return android::base::Join(target_path, '/');
}
// Case: Non-absolute path to Dex location.
@@ -1134,7 +1135,7 @@ class RaceGenerateTask : public Task {
/*dex_elements*/nullptr,
&oat_file,
&error_msgs);
- CHECK(!dex_files.empty()) << Join(error_msgs, '\n');
+ CHECK(!dex_files.empty()) << android::base::Join(error_msgs, '\n');
CHECK(dex_files[0]->GetOatDexFile() != nullptr) << dex_files[0]->GetLocation();
loaded_oat_file_ = dex_files[0]->GetOatDexFile()->GetOatFile();
CHECK_EQ(loaded_oat_file_, oat_file);
diff --git a/runtime/openjdkjvmti/OpenjdkJvmTi.cc b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
index 1ad3f0814f..5f97b60079 100644
--- a/runtime/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
@@ -700,7 +700,7 @@ class JvmtiFunctions {
jmethodID method,
jint* entry_count_ptr,
jvmtiLineNumberEntry** table_ptr) {
- return ERR(NOT_IMPLEMENTED);
+ return MethodUtil::GetLineNumberTable(env, method, entry_count_ptr, table_ptr);
}
static jvmtiError GetMethodLocation(jvmtiEnv* env,
diff --git a/runtime/openjdkjvmti/ti_method.cc b/runtime/openjdkjvmti/ti_method.cc
index ffa5ac7e32..a0a09239fa 100644
--- a/runtime/openjdkjvmti/ti_method.cc
+++ b/runtime/openjdkjvmti/ti_method.cc
@@ -130,4 +130,63 @@ jvmtiError MethodUtil::GetMethodModifiers(jvmtiEnv* env ATTRIBUTE_UNUSED,
return ERR(NONE);
}
+using LineNumberContext = std::vector<jvmtiLineNumberEntry>;
+
+static bool CollectLineNumbers(void* void_context, const art::DexFile::PositionInfo& entry) {
+ LineNumberContext* context = reinterpret_cast<LineNumberContext*>(void_context);
+ jvmtiLineNumberEntry jvmti_entry = { static_cast<jlocation>(entry.address_),
+ static_cast<jint>(entry.line_) };
+ context->push_back(jvmti_entry);
+ return false; // Collect all, no early exit.
+}
+
+jvmtiError MethodUtil::GetLineNumberTable(jvmtiEnv* env,
+ jmethodID method,
+ jint* entry_count_ptr,
+ jvmtiLineNumberEntry** table_ptr) {
+ if (method == nullptr) {
+ return ERR(NULL_POINTER);
+ }
+ art::ArtMethod* art_method = art::jni::DecodeArtMethod(method);
+ DCHECK(!art_method->IsRuntimeMethod());
+
+ const art::DexFile::CodeItem* code_item;
+ const art::DexFile* dex_file;
+ {
+ art::ScopedObjectAccess soa(art::Thread::Current());
+
+ if (art_method->IsProxyMethod()) {
+ return ERR(ABSENT_INFORMATION);
+ }
+ if (art_method->IsNative()) {
+ return ERR(NATIVE_METHOD);
+ }
+ if (entry_count_ptr == nullptr || table_ptr == nullptr) {
+ return ERR(NULL_POINTER);
+ }
+
+ code_item = art_method->GetCodeItem();
+ dex_file = art_method->GetDexFile();
+ DCHECK(code_item != nullptr) << art_method->PrettyMethod() << " " << dex_file->GetLocation();
+ }
+
+ LineNumberContext context;
+ bool success = dex_file->DecodeDebugPositionInfo(code_item, CollectLineNumbers, &context);
+ if (!success) {
+ return ERR(ABSENT_INFORMATION);
+ }
+
+ unsigned char* data;
+ jlong mem_size = context.size() * sizeof(jvmtiLineNumberEntry);
+ jvmtiError alloc_error = env->Allocate(mem_size, &data);
+ if (alloc_error != ERR(NONE)) {
+ return alloc_error;
+ }
+ *table_ptr = reinterpret_cast<jvmtiLineNumberEntry*>(data);
+ memcpy(*table_ptr, context.data(), mem_size);
+ *entry_count_ptr = static_cast<jint>(context.size());
+
+ return ERR(NONE);
+}
+
} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/ti_method.h b/runtime/openjdkjvmti/ti_method.h
index 43f11f97ec..fb2fbb2b27 100644
--- a/runtime/openjdkjvmti/ti_method.h
+++ b/runtime/openjdkjvmti/ti_method.h
@@ -52,6 +52,11 @@ class MethodUtil {
static jvmtiError GetMethodModifiers(jvmtiEnv* env,
jmethodID method,
jint* modifiers_ptr);
+
+ static jvmtiError GetLineNumberTable(jvmtiEnv* env,
+ jmethodID method,
+ jint* entry_count_ptr,
+ jvmtiLineNumberEntry** table_ptr);
};
} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/ti_stack.cc b/runtime/openjdkjvmti/ti_stack.cc
index 6f8976f03d..579fb50ecc 100644
--- a/runtime/openjdkjvmti/ti_stack.cc
+++ b/runtime/openjdkjvmti/ti_stack.cc
@@ -67,14 +67,10 @@ struct GetStackTraceVisitor : public art::StackVisitor {
m = m->GetInterfaceMethodIfProxy(art::kRuntimePointerSize);
jmethodID id = art::jni::EncodeArtMethod(m);
- art::mirror::DexCache* dex_cache = m->GetDexCache();
- int32_t line_number = -1;
- if (dex_cache != nullptr) { // be tolerant of bad input
- const art::DexFile* dex_file = dex_cache->GetDexFile();
- line_number = art::annotations::GetLineNumFromPC(dex_file, m, GetDexPc(false));
- }
+ uint32_t dex_pc = GetDexPc(false);
+ jlong dex_location = (dex_pc == art::DexFile::kDexNoIndex) ? -1 : static_cast<jlong>(dex_pc);
- jvmtiFrameInfo info = { id, static_cast<jlong>(line_number) };
+ jvmtiFrameInfo info = { id, dex_location };
frames.push_back(info);
if (stop == 1) {
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 14628f0403..59c596170d 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -37,6 +37,8 @@
#include <vector>
#include <fcntl.h>
+#include "android-base/strings.h"
+
#include "JniConstants.h"
#include "ScopedLocalRef.h"
#include "arch/arm/quick_method_frame_info_arm.h"
@@ -869,7 +871,7 @@ static bool OpenDexFilesFromImage(const std::string& image_location,
ImageHeader::GetOatLocationFromImageLocation(image_locations[index].c_str());
// Note: in the multi-image case, the image location may end in ".jar," and not ".art." Handle
// that here.
- if (EndsWith(oat_location, ".jar")) {
+ if (android::base::EndsWith(oat_location, ".jar")) {
oat_location.replace(oat_location.length() - 3, 3, "oat");
}
std::string error_msg;
@@ -1225,7 +1227,7 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
for (const DexFile* dex_file : boot_class_path) {
dex_locations.push_back(dex_file->GetLocation());
}
- boot_class_path_string_ = Join(dex_locations, ':');
+ boot_class_path_string_ = android::base::Join(dex_locations, ':');
}
{
ScopedTrace trace2("AddImageStringsToTable");
@@ -1892,7 +1894,7 @@ void Runtime::RegisterAppInfo(const std::vector<std::string>& code_paths,
}
VLOG(profiler) << "Register app with " << profile_output_filename
- << " " << Join(code_paths, ':');
+ << " " << android::base::Join(code_paths, ':');
if (profile_output_filename.empty()) {
LOG(WARNING) << "JIT profile information will not be recorded: profile filename is empty.";
diff --git a/runtime/thread.cc b/runtime/thread.cc
index bc133d1370..d79bf36380 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -873,6 +873,62 @@ void Thread::SetThreadName(const char* name) {
Dbg::DdmSendThreadNotification(this, CHUNK_TYPE("THNM"));
}
+static void GetThreadStack(pthread_t thread,
+ void** stack_base,
+ size_t* stack_size,
+ size_t* guard_size) {
+#if defined(__APPLE__)
+ *stack_size = pthread_get_stacksize_np(thread);
+ void* stack_addr = pthread_get_stackaddr_np(thread);
+
+ // Check whether stack_addr is the base or end of the stack.
+ // (On Mac OS 10.7, it's the end.)
+ int stack_variable;
+ if (stack_addr > &stack_variable) {
+ *stack_base = reinterpret_cast<uint8_t*>(stack_addr) - *stack_size;
+ } else {
+ *stack_base = stack_addr;
+ }
+
+ // This is wrong, but there doesn't seem to be a way to get the actual value on the Mac.
+ pthread_attr_t attributes;
+ CHECK_PTHREAD_CALL(pthread_attr_init, (&attributes), __FUNCTION__);
+ CHECK_PTHREAD_CALL(pthread_attr_getguardsize, (&attributes, guard_size), __FUNCTION__);
+ CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), __FUNCTION__);
+#else
+ pthread_attr_t attributes;
+ CHECK_PTHREAD_CALL(pthread_getattr_np, (thread, &attributes), __FUNCTION__);
+ CHECK_PTHREAD_CALL(pthread_attr_getstack, (&attributes, stack_base, stack_size), __FUNCTION__);
+ CHECK_PTHREAD_CALL(pthread_attr_getguardsize, (&attributes, guard_size), __FUNCTION__);
+ CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), __FUNCTION__);
+
+#if defined(__GLIBC__)
+ // If we're the main thread, check whether we were run with an unlimited stack. In that case,
+ // glibc will have reported a 2GB stack for our 32-bit process, and our stack overflow detection
+ // will be broken because we'll die long before we get close to 2GB.
+ bool is_main_thread = (::art::GetTid() == getpid());
+ if (is_main_thread) {
+ rlimit stack_limit;
+ if (getrlimit(RLIMIT_STACK, &stack_limit) == -1) {
+ PLOG(FATAL) << "getrlimit(RLIMIT_STACK) failed";
+ }
+ if (stack_limit.rlim_cur == RLIM_INFINITY) {
+ size_t old_stack_size = *stack_size;
+
+ // Use the kernel default limit as our size, and adjust the base to match.
+ *stack_size = 8 * MB;
+ *stack_base = reinterpret_cast<uint8_t*>(*stack_base) + (old_stack_size - *stack_size);
+
+ VLOG(threads) << "Limiting unlimited stack (reported as " << PrettySize(old_stack_size) << ")"
+ << " to " << PrettySize(*stack_size)
+ << " with base " << *stack_base;
+ }
+ }
+#endif
+
+#endif
+}
+
bool Thread::InitStackHwm() {
void* read_stack_base;
size_t read_stack_size;
@@ -1322,6 +1378,32 @@ void Thread::FullSuspendCheck() {
VLOG(threads) << this << " self-reviving";
}
+static std::string GetSchedulerGroupName(pid_t tid) {
+ // /proc/<pid>/cgroup looks like this:
+ // 2:devices:/
+ // 1:cpuacct,cpu:/
+ // We want the third field from the line whose second field contains the "cpu" token.
+ std::string cgroup_file;
+ if (!ReadFileToString(StringPrintf("/proc/self/task/%d/cgroup", tid), &cgroup_file)) {
+ return "";
+ }
+ std::vector<std::string> cgroup_lines;
+ Split(cgroup_file, '\n', &cgroup_lines);
+ for (size_t i = 0; i < cgroup_lines.size(); ++i) {
+ std::vector<std::string> cgroup_fields;
+ Split(cgroup_lines[i], ':', &cgroup_fields);
+ std::vector<std::string> cgroups;
+ Split(cgroup_fields[1], ',', &cgroups);
+ for (size_t j = 0; j < cgroups.size(); ++j) {
+ if (cgroups[j] == "cpu") {
+ return cgroup_fields[2].substr(1); // Skip the leading slash.
+ }
+ }
+ }
+ return "";
+}
+
+
void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
std::string group_name;
int priority;
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 66739a9d2e..4732f59ae1 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -25,6 +25,8 @@
#include <unistd.h>
#include <memory>
+#include "android-base/strings.h"
+
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "dex_file-inl.h"
@@ -139,59 +141,6 @@ std::string GetThreadName(pid_t tid) {
return result;
}
-void GetThreadStack(pthread_t thread, void** stack_base, size_t* stack_size, size_t* guard_size) {
-#if defined(__APPLE__)
- *stack_size = pthread_get_stacksize_np(thread);
- void* stack_addr = pthread_get_stackaddr_np(thread);
-
- // Check whether stack_addr is the base or end of the stack.
- // (On Mac OS 10.7, it's the end.)
- int stack_variable;
- if (stack_addr > &stack_variable) {
- *stack_base = reinterpret_cast<uint8_t*>(stack_addr) - *stack_size;
- } else {
- *stack_base = stack_addr;
- }
-
- // This is wrong, but there doesn't seem to be a way to get the actual value on the Mac.
- pthread_attr_t attributes;
- CHECK_PTHREAD_CALL(pthread_attr_init, (&attributes), __FUNCTION__);
- CHECK_PTHREAD_CALL(pthread_attr_getguardsize, (&attributes, guard_size), __FUNCTION__);
- CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), __FUNCTION__);
-#else
- pthread_attr_t attributes;
- CHECK_PTHREAD_CALL(pthread_getattr_np, (thread, &attributes), __FUNCTION__);
- CHECK_PTHREAD_CALL(pthread_attr_getstack, (&attributes, stack_base, stack_size), __FUNCTION__);
- CHECK_PTHREAD_CALL(pthread_attr_getguardsize, (&attributes, guard_size), __FUNCTION__);
- CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), __FUNCTION__);
-
-#if defined(__GLIBC__)
- // If we're the main thread, check whether we were run with an unlimited stack. In that case,
- // glibc will have reported a 2GB stack for our 32-bit process, and our stack overflow detection
- // will be broken because we'll die long before we get close to 2GB.
- bool is_main_thread = (::art::GetTid() == getpid());
- if (is_main_thread) {
- rlimit stack_limit;
- if (getrlimit(RLIMIT_STACK, &stack_limit) == -1) {
- PLOG(FATAL) << "getrlimit(RLIMIT_STACK) failed";
- }
- if (stack_limit.rlim_cur == RLIM_INFINITY) {
- size_t old_stack_size = *stack_size;
-
- // Use the kernel default limit as our size, and adjust the base to match.
- *stack_size = 8 * MB;
- *stack_base = reinterpret_cast<uint8_t*>(*stack_base) + (old_stack_size - *stack_size);
-
- VLOG(threads) << "Limiting unlimited stack (reported as " << PrettySize(old_stack_size) << ")"
- << " to " << PrettySize(*stack_size)
- << " with base " << *stack_base;
- }
- }
-#endif
-
-#endif
-}
-
bool ReadFileToString(const std::string& file_name, std::string* result) {
File file(file_name, O_RDONLY, false);
if (!file.IsOpened()) {
@@ -411,6 +360,10 @@ std::string PrettySize(int64_t byte_count) {
negative_str, byte_count / kBytesPerUnit[i], kUnitStrings[i]);
}
+static inline constexpr bool NeedsEscaping(uint16_t ch) {
+ return (ch < ' ' || ch > '~');
+}
+
std::string PrintableChar(uint16_t ch) {
std::string result;
result += '\'';
@@ -782,67 +735,6 @@ void Split(const std::string& s, char separator, std::vector<std::string>* resul
}
}
-std::string Trim(const std::string& s) {
- std::string result;
- unsigned int start_index = 0;
- unsigned int end_index = s.size() - 1;
-
- // Skip initial whitespace.
- while (start_index < s.size()) {
- if (!isspace(s[start_index])) {
- break;
- }
- start_index++;
- }
-
- // Skip terminating whitespace.
- while (end_index >= start_index) {
- if (!isspace(s[end_index])) {
- break;
- }
- end_index--;
- }
-
- // All spaces, no beef.
- if (end_index < start_index) {
- return "";
- }
- // Start_index is the first non-space, end_index is the last one.
- return s.substr(start_index, end_index - start_index + 1);
-}
-
-template <typename StringT>
-std::string Join(const std::vector<StringT>& strings, char separator) {
- if (strings.empty()) {
- return "";
- }
-
- std::string result(strings[0]);
- for (size_t i = 1; i < strings.size(); ++i) {
- result += separator;
- result += strings[i];
- }
- return result;
-}
-
-// Explicit instantiations.
-template std::string Join<std::string>(const std::vector<std::string>& strings, char separator);
-template std::string Join<const char*>(const std::vector<const char*>& strings, char separator);
-
-bool StartsWith(const std::string& s, const char* prefix) {
- return s.compare(0, strlen(prefix), prefix) == 0;
-}
-
-bool EndsWith(const std::string& s, const char* suffix) {
- size_t suffix_length = strlen(suffix);
- size_t string_length = s.size();
- if (suffix_length > string_length) {
- return false;
- }
- size_t offset = string_length - suffix_length;
- return s.compare(offset, suffix_length, suffix) == 0;
-}
-
void SetThreadName(const char* thread_name) {
int hasAt = 0;
int hasDot = 0;
@@ -892,31 +784,6 @@ void GetTaskStats(pid_t tid, char* state, int* utime, int* stime, int* task_cpu)
*task_cpu = strtoull(fields[36].c_str(), nullptr, 10);
}
-std::string GetSchedulerGroupName(pid_t tid) {
- // /proc/<pid>/cgroup looks like this:
- // 2:devices:/
- // 1:cpuacct,cpu:/
- // We want the third field from the line whose second field contains the "cpu" token.
- std::string cgroup_file;
- if (!ReadFileToString(StringPrintf("/proc/self/task/%d/cgroup", tid), &cgroup_file)) {
- return "";
- }
- std::vector<std::string> cgroup_lines;
- Split(cgroup_file, '\n', &cgroup_lines);
- for (size_t i = 0; i < cgroup_lines.size(); ++i) {
- std::vector<std::string> cgroup_fields;
- Split(cgroup_lines[i], ':', &cgroup_fields);
- std::vector<std::string> cgroups;
- Split(cgroup_fields[1], ',', &cgroups);
- for (size_t j = 0; j < cgroups.size(); ++j) {
- if (cgroups[j] == "cpu") {
- return cgroup_fields[2].substr(1); // Skip the leading slash.
- }
- }
- }
- return "";
-}
-
const char* GetAndroidRoot() {
const char* android_root = getenv("ANDROID_ROOT");
if (android_root == nullptr) {
@@ -1005,7 +872,9 @@ bool GetDalvikCacheFilename(const char* location, const char* cache_location,
return false;
}
std::string cache_file(&location[1]); // skip leading slash
- if (!EndsWith(location, ".dex") && !EndsWith(location, ".art") && !EndsWith(location, ".oat")) {
+ if (!android::base::EndsWith(location, ".dex") &&
+ !android::base::EndsWith(location, ".art") &&
+ !android::base::EndsWith(location, ".oat")) {
cache_file += "/";
cache_file += DexFile::kClassesDex;
}
@@ -1032,7 +901,7 @@ std::string GetSystemImageFilename(const char* location, const InstructionSet is
}
int ExecAndReturnCode(std::vector<std::string>& arg_vector, std::string* error_msg) {
- const std::string command_line(Join(arg_vector, ' '));
+ const std::string command_line(android::base::Join(arg_vector, ' '));
CHECK_GE(arg_vector.size(), 1U) << command_line;
// Convert the args to char pointers.
@@ -1091,7 +960,7 @@ int ExecAndReturnCode(std::vector<std::string>& arg_vector, std::string* error_m
bool Exec(std::vector<std::string>& arg_vector, std::string* error_msg) {
int status = ExecAndReturnCode(arg_vector, error_msg);
if (status != 0) {
- const std::string command_line(Join(arg_vector, ' '));
+ const std::string command_line(android::base::Join(arg_vector, ' '));
*error_msg = StringPrintf("Failed execv(%s) because non-0 exit status",
command_line.c_str());
return false;
diff --git a/runtime/utils.h b/runtime/utils.h
index 1e9805790a..04e0dded27 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -64,45 +64,12 @@ bool ParseInt(const char* in, T* out) {
return true;
}
-// Return whether x / divisor == x * (1.0f / divisor), for every float x.
-static constexpr bool CanDivideByReciprocalMultiplyFloat(int32_t divisor) {
- // True, if the most significant bits of divisor are 0.
- return ((divisor & 0x7fffff) == 0);
-}
-
-// Return whether x / divisor == x * (1.0 / divisor), for every double x.
-static constexpr bool CanDivideByReciprocalMultiplyDouble(int64_t divisor) {
- // True, if the most significant bits of divisor are 0.
- return ((divisor & ((UINT64_C(1) << 52) - 1)) == 0);
-}
-
static inline uint32_t PointerToLowMemUInt32(const void* p) {
uintptr_t intp = reinterpret_cast<uintptr_t>(p);
DCHECK_LE(intp, 0xFFFFFFFFU);
return intp & 0xFFFFFFFFU;
}
-static inline bool NeedsEscaping(uint16_t ch) {
- return (ch < ' ' || ch > '~');
-}
-
-template <typename T> T SafeAbs(T value) {
- // std::abs has undefined behavior on min limits.
- DCHECK_NE(value, std::numeric_limits<T>::min());
- return std::abs(value);
-}
-
-template <typename T> T AbsOrMin(T value) {
- return (value == std::numeric_limits<T>::min())
- ? value
- : std::abs(value);
-}
-
-template <typename T>
-inline typename std::make_unsigned<T>::type MakeUnsigned(T x) {
- return static_cast<typename std::make_unsigned<T>::type>(x);
-}
-
uint8_t* DecodeBase64(const char* src, size_t* dst_size);
std::string PrintableChar(uint16_t ch);
@@ -111,12 +78,6 @@ std::string PrintableChar(uint16_t ch);
// Java escapes are used for non-ASCII characters.
std::string PrintableString(const char* utf8);
-// Tests whether 's' starts with 'prefix'.
-bool StartsWith(const std::string& s, const char* prefix);
-
-// Tests whether 's' ends with 'suffix'.
-bool EndsWith(const std::string& s, const char* suffix);
-
// Used to implement PrettyClass, PrettyField, PrettyMethod, and PrettyTypeOf,
// one of which is probably more useful to you.
// Returns a human-readable equivalent of 'descriptor'. So "I" would be "int",
@@ -167,27 +128,15 @@ bool PrintFileToLog(const std::string& file_name, LogSeverity level);
// strings. Empty strings will be omitted.
void Split(const std::string& s, char separator, std::vector<std::string>* result);
-// Trims whitespace off both ends of the given string.
-std::string Trim(const std::string& s);
-
-// Joins a vector of strings into a single string, using the given separator.
-template <typename StringT> std::string Join(const std::vector<StringT>& strings, char separator);
-
// Returns the calling thread's tid. (The C libraries don't expose this.)
pid_t GetTid();
// Returns the given thread's name.
std::string GetThreadName(pid_t tid);
-// Returns details of the given thread's stack.
-void GetThreadStack(pthread_t thread, void** stack_base, size_t* stack_size, size_t* guard_size);
-
// Reads data from "/proc/self/task/${tid}/stat".
void GetTaskStats(pid_t tid, char* state, int* utime, int* stime, int* task_cpu);
-// Returns the name of the scheduler group for the given thread the current process, or the empty string.
-std::string GetSchedulerGroupName(pid_t tid);
-
// Sets the name of the current thread. The name may be truncated to an
// implementation-defined limit.
void SetThreadName(const char* thread_name);
@@ -251,15 +200,6 @@ class VoidFunctor {
}
};
-template <typename Vector>
-void Push32(Vector* buf, int32_t data) {
- static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
- buf->push_back(data & 0xff);
- buf->push_back((data >> 8) & 0xff);
- buf->push_back((data >> 16) & 0xff);
- buf->push_back((data >> 24) & 0xff);
-}
-
inline bool TestBitmap(size_t idx, const uint8_t* bitmap) {
return ((bitmap[idx / kBitsPerByte] >> (idx % kBitsPerByte)) & 0x01) != 0;
}
@@ -334,12 +274,6 @@ static T GetRandomNumber(T min, T max) {
return dist(rng);
}
-// All of the elements from one container to another.
-template <typename Dest, typename Src>
-static void AddAll(Dest& dest, const Src& src) {
- dest.insert(src.begin(), src.end());
-}
-
// Return the file size in bytes or -1 if the file does not exists.
int64_t GetFileSizeBytes(const std::string& filename);
diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc
index be4d394464..82d92fc2fc 100644
--- a/runtime/utils_test.cc
+++ b/runtime/utils_test.cc
@@ -273,58 +273,6 @@ TEST_F(UtilsTest, Split) {
EXPECT_EQ(expected, actual);
}
-TEST_F(UtilsTest, Join) {
- std::vector<std::string> strings;
-
- strings.clear();
- EXPECT_EQ("", Join(strings, ':'));
-
- strings.clear();
- strings.push_back("foo");
- EXPECT_EQ("foo", Join(strings, ':'));
-
- strings.clear();
- strings.push_back("");
- strings.push_back("foo");
- EXPECT_EQ(":foo", Join(strings, ':'));
-
- strings.clear();
- strings.push_back("foo");
- strings.push_back("");
- EXPECT_EQ("foo:", Join(strings, ':'));
-
- strings.clear();
- strings.push_back("");
- strings.push_back("foo");
- strings.push_back("");
- EXPECT_EQ(":foo:", Join(strings, ':'));
-
- strings.clear();
- strings.push_back("foo");
- strings.push_back("bar");
- EXPECT_EQ("foo:bar", Join(strings, ':'));
-
- strings.clear();
- strings.push_back("foo");
- strings.push_back("bar");
- strings.push_back("baz");
- EXPECT_EQ("foo:bar:baz", Join(strings, ':'));
-}
-
-TEST_F(UtilsTest, StartsWith) {
- EXPECT_FALSE(StartsWith("foo", "bar"));
- EXPECT_TRUE(StartsWith("foo", "foo"));
- EXPECT_TRUE(StartsWith("food", "foo"));
- EXPECT_FALSE(StartsWith("fo", "foo"));
-}
-
-TEST_F(UtilsTest, EndsWith) {
- EXPECT_FALSE(EndsWith("foo", "bar"));
- EXPECT_TRUE(EndsWith("foo", "foo"));
- EXPECT_TRUE(EndsWith("foofoo", "foo"));
- EXPECT_FALSE(EndsWith("oo", "foo"));
-}
-
TEST_F(UtilsTest, GetDalvikCacheFilename) {
std::string name;
std::string error;
diff --git a/runtime/verifier/method_verifier_test.cc b/runtime/verifier/method_verifier_test.cc
index 52be2df06b..be5c18b9eb 100644
--- a/runtime/verifier/method_verifier_test.cc
+++ b/runtime/verifier/method_verifier_test.cc
@@ -19,6 +19,8 @@
#include <stdio.h>
#include <memory>
+#include "android-base/strings.h"
+
#include "class_linker-inl.h"
#include "common_runtime_test.h"
#include "dex_file.h"
@@ -42,7 +44,7 @@ class MethodVerifierTest : public CommonRuntimeTest {
MethodVerifier::FailureKind failure = MethodVerifier::VerifyClass(
self, klass, nullptr, true, HardFailLogMode::kLogWarning, &error_msg);
- if (StartsWith(descriptor, "Ljava/lang/invoke")) {
+ if (android::base::StartsWith(descriptor, "Ljava/lang/invoke")) {
ASSERT_TRUE(failure == MethodVerifier::kSoftFailure ||
failure == MethodVerifier::kNoFailure) << error_msg;
diff --git a/runtime/verifier/verifier_deps.cc b/runtime/verifier/verifier_deps.cc
index f9bff23054..3af7c013fd 100644
--- a/runtime/verifier/verifier_deps.cc
+++ b/runtime/verifier/verifier_deps.cc
@@ -345,8 +345,15 @@ void VerifierDeps::AddAssignability(const DexFile& dex_file,
// merely on no issues with linking (valid access flags, superclass and
// implemented interfaces). If the class at any point reached the IsResolved
// status, the requirement holds. This is guaranteed by RegTypeCache::ResolveClass.
- DCHECK(destination != nullptr && !destination->IsPrimitive());
- DCHECK(source != nullptr && !source->IsPrimitive());
+ DCHECK(destination != nullptr);
+ DCHECK(source != nullptr);
+
+ if (destination->IsPrimitive() || source->IsPrimitive()) {
+ // Primitive types are trivially non-assignable to anything else.
+ // We do not need to record trivial assignability, as it will
+ // not change across releases.
+ return;
+ }
if (destination == source ||
destination->IsObjectClass() ||
diff --git a/test/021-string2/expected.txt b/test/021-string2/expected.txt
index a9c6eb87bd..f269c7cf3e 100644
--- a/test/021-string2/expected.txt
+++ b/test/021-string2/expected.txt
@@ -1,2 +1,6 @@
Got expected npe
OK
+ true true true true
+ true true true true
+ true true true true
+ true true true true
diff --git a/test/021-string2/src/Main.java b/test/021-string2/src/Main.java
index 51351e1835..df0a3ddf48 100644
--- a/test/021-string2/src/Main.java
+++ b/test/021-string2/src/Main.java
@@ -92,6 +92,31 @@ public class Main {
testCompareToAndEquals();
testIndexOf();
+
+ String s0_0 = "\u0000";
+ String s0_1 = new String(s0_0);
+ String s0_2 = new String(new char[] { '\u0000' });
+ String s0_3 = s0_0 + "";
+ System.out.println(
+ " " + $noinline$equals(s0_0, s0_0) +
+ " " + $noinline$equals(s0_0, s0_1) +
+ " " + $noinline$equals(s0_0, s0_2) +
+ " " + $noinline$equals(s0_0, s0_3));
+ System.out.println(
+ " " + $noinline$equals(s0_1, s0_0) +
+ " " + $noinline$equals(s0_1, s0_1) +
+ " " + $noinline$equals(s0_1, s0_2) +
+ " " + $noinline$equals(s0_1, s0_3));
+ System.out.println(
+ " " + $noinline$equals(s0_2, s0_0) +
+ " " + $noinline$equals(s0_2, s0_1) +
+ " " + $noinline$equals(s0_2, s0_2) +
+ " " + $noinline$equals(s0_2, s0_3));
+ System.out.println(
+ " " + $noinline$equals(s0_3, s0_0) +
+ " " + $noinline$equals(s0_3, s0_1) +
+ " " + $noinline$equals(s0_3, s0_2) +
+ " " + $noinline$equals(s0_3, s0_3));
}
public static void testCompareToAndEquals() {
diff --git a/test/153-reference-stress/expected.txt b/test/153-reference-stress/expected.txt
new file mode 100644
index 0000000000..7ef22e9a43
--- /dev/null
+++ b/test/153-reference-stress/expected.txt
@@ -0,0 +1 @@
+PASS
diff --git a/test/153-reference-stress/info.txt b/test/153-reference-stress/info.txt
new file mode 100644
index 0000000000..6bc00404ab
--- /dev/null
+++ b/test/153-reference-stress/info.txt
@@ -0,0 +1 @@
+Tests java.lang.ref.Reference.get() and GC running in parallel.
diff --git a/test/153-reference-stress/src/Main.java b/test/153-reference-stress/src/Main.java
new file mode 100644
index 0000000000..fc6f9ccb35
--- /dev/null
+++ b/test/153-reference-stress/src/Main.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.ref.WeakReference;
+
+public class Main {
+ static final int numWeakReferences = 16 * 1024;
+ static WeakReference[] weakReferences = new WeakReference[numWeakReferences];
+ static volatile boolean done = false;
+ static Object keepAlive;
+
+ public static void main(String[] args) throws Exception {
+ // Try to call Reference.get repeatedly while the GC is running.
+ Thread gcThread = new GcThread();
+ Thread[] readerThread = new ReaderThread[4];
+ for (int i = 0; i < readerThread.length; ++i) {
+ readerThread[i] = new ReaderThread();
+ }
+ gcThread.start();
+ for (int i = 0; i < readerThread.length; ++i) {
+ readerThread[i].start();
+ }
+ gcThread.join();
+ for (int i = 0; i < readerThread.length; ++i) {
+ readerThread[i].join();
+ }
+ System.out.println("PASS");
+ }
+
+ static class GcThread extends Thread {
+ GcThread() {
+ Object temp = new Object();
+ for (int j = 0; j < weakReferences.length; ++j) {
+ weakReferences[j] = new WeakReference(temp);
+ }
+ }
+ public void run() {
+ for (int i = 0; i < 1000; ++i) {
+ Object o = new Object();
+ for (int j = 0; j < weakReferences.length; ++j) {
+ weakReferences[j] = new WeakReference(o);
+ }
+ }
+ done = true;
+ }
+ }
+
+ static class ReaderThread extends Thread {
+ public void run() {
+ while (!done) {
+ for (int j = 0; j < weakReferences.length; ++j) {
+ keepAlive = weakReferences[j].get();
+ }
+ for (int j = 0; j < weakReferences.length; ++j) {
+ weakReferences[j].clear();
+ }
+ }
+ }
+ }
+}
diff --git a/test/616-cha-regression-proxy-method/expected.txt b/test/616-cha-regression-proxy-method/expected.txt
new file mode 100644
index 0000000000..6a5618ebc6
--- /dev/null
+++ b/test/616-cha-regression-proxy-method/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/616-cha-regression-proxy-method/info.txt b/test/616-cha-regression-proxy-method/info.txt
new file mode 100644
index 0000000000..386a07f398
--- /dev/null
+++ b/test/616-cha-regression-proxy-method/info.txt
@@ -0,0 +1 @@
+Regression test for Class Hierarchy Analysis (CHA) on visiting proxy method frame.
diff --git a/test/616-cha-regression-proxy-method/src/Main.java b/test/616-cha-regression-proxy-method/src/Main.java
new file mode 100644
index 0000000000..19c92be006
--- /dev/null
+++ b/test/616-cha-regression-proxy-method/src/Main.java
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+
+class Main1 {
+ void foo(int i) {
+ if (i != 1) {
+ printError("error1");
+ }
+ }
+
+ void printError(String msg) {
+ System.out.println(msg);
+ }
+}
+
+class Main2 extends Main1 {
+ void foo(int i) {
+ if (i != 2) {
+ printError("error2");
+ }
+ }
+}
+
+class Proxied implements Runnable {
+ public void run() {
+ synchronized(Main.class) {
+ Main.sOtherThreadStarted = true;
+ // Wait for Main2 to be linked and deoptimization is triggered.
+ try {
+ Main.class.wait();
+ } catch (Exception e) {
+ }
+ }
+ }
+}
+
+class MyInvocationHandler implements InvocationHandler {
+ private final Proxied proxied;
+
+ public MyInvocationHandler(Proxied proxied) {
+ this.proxied = proxied;
+ }
+
+ public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
+ return method.invoke(proxied, args);
+ }
+}
+
+public class Main {
+ static Main1 sMain1;
+ static Main1 sMain2;
+ static volatile boolean sOtherThreadStarted;
+
+ // sMain1.foo() will always be Main1.foo() before Main2 is loaded/linked.
+ // So sMain1.foo() can be devirtualized to Main1.foo() and be inlined.
+ // After Dummy.createMain2() which links in Main2, live testOverride() on stack
+ // should be deoptimized.
+ static void testOverride() {
+ sMain1.foo(sMain1.getClass() == Main1.class ? 1 : 2);
+
+ // Wait for the other thread to start.
+ while (!sOtherThreadStarted);
+ // Create an Main2 instance and assign it to sMain2.
+ // sMain1 is kept the same.
+ sMain2 = Dummy.createMain2();
+ // Wake up the other thread.
+ synchronized(Main.class) {
+ Main.class.notify();
+ }
+
+ // There should be a deoptimization here right after Main2 is linked by
+ // calling Dummy.createMain2(), even though sMain1 didn't change.
+ // The behavior here would be different if inline-cache is used, which
+ // doesn't deoptimize since sMain1 still hits the type cache.
+ sMain1.foo(sMain1.getClass() == Main1.class ? 1 : 2);
+ if (sMain2 != null) {
+ sMain2.foo(sMain2.getClass() == Main1.class ? 1 : 2);
+ }
+ }
+
+ // Test scenarios under which CHA-based devirtualization happens,
+ // and class loading that overrides a method can invalidate compiled code.
+ // Also create a proxy method such that a proxy method's frame is visited
+ // during stack walking.
+ public static void main(String[] args) {
+ System.loadLibrary(args[0]);
+ // sMain1 is an instance of Main1. Main2 hasn't been loaded yet.
+ sMain1 = new Main1();
+
+ // Create another thread that calls a proxy method.
+ new Thread() {
+ public void run() {
+ Runnable proxy = (Runnable)Proxy.newProxyInstance(
+ Proxied.class.getClassLoader(),
+ new Class[] { Runnable.class },
+ new MyInvocationHandler(new Proxied()));
+ proxy.run();
+ }
+ }.start();
+
+ ensureJitCompiled(Main.class, "testOverride");
+ // This will create Main2 instance in the middle of testOverride().
+ testOverride();
+ }
+
+ private static native void ensureJitCompiled(Class<?> itf, String method_name);
+}
+
+// Put createMain2() in another class to avoid class loading due to verifier.
+class Dummy {
+ static Main1 createMain2() {
+ return new Main2();
+ }
+}
diff --git a/test/616-cha/src/Main.java b/test/616-cha/src/Main.java
index 787318dd67..b6179449df 100644
--- a/test/616-cha/src/Main.java
+++ b/test/616-cha/src/Main.java
@@ -179,7 +179,7 @@ public class Main {
}
}
- // Test scanerios under which CHA-based devirtualization happens,
+ // Test scenarios under which CHA-based devirtualization happens,
// and class loading that overrides a method can invalidate compiled code.
// Also test pure non-overriding case, which is more for checking generated
// code form.
@@ -206,11 +206,6 @@ public class Main {
// sMain1 is an instance of Main1. Main2 hasn't bee loaded yet.
sMain1 = new Main1();
- // Loop enough to get testOverride() JITed.
- for (int i=0; i<100; i++) {
- testOverride(false, false, false);
- }
-
ensureJitCompiled(Main.class, "testOverride");
testOverride(false, false, true);
@@ -244,7 +239,7 @@ public class Main {
private static native boolean hasSingleImplementation(Class<?> clazz, String method_name);
}
-// Do it in another class to avoid class loading due to verifier.
+// Put createMain2() in another class to avoid class loading due to verifier.
class Dummy {
static Main1 createMain2() {
return new Main2();
diff --git a/test/630-safecast-array/expected.txt b/test/630-safecast-array/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/630-safecast-array/expected.txt
diff --git a/test/630-safecast-array/info.txt b/test/630-safecast-array/info.txt
new file mode 100644
index 0000000000..e10516784e
--- /dev/null
+++ b/test/630-safecast-array/info.txt
@@ -0,0 +1,3 @@
+Regression test for vdex, which used to crash in AddAssignability
+called by the dex2dex compiler, not anticipating arrays of primitive
+type.
diff --git a/test/630-safecast-array/smali/Main.smali b/test/630-safecast-array/smali/Main.smali
new file mode 100644
index 0000000000..a50f37ccbc
--- /dev/null
+++ b/test/630-safecast-array/smali/Main.smali
@@ -0,0 +1,33 @@
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class LMain;
+.super Ljava/lang/Object;
+
+.method public static main([Ljava/lang/String;)V
+.registers 1
+ return-void
+.end method
+
+.method public static testPrimitiveDestination([Ljava/lang/String;)V
+.registers 1
+ check-cast p0, [B
+ return-void
+.end method
+
+.method public static testPrimitiveSource([B)V
+.registers 1
+ check-cast p0, [Ljava/lang/String;
+ return-void
+.end method
diff --git a/test/911-get-stack-trace/expected.txt b/test/911-get-stack-trace/expected.txt
index 20bab7814e..f8c97ce475 100644
--- a/test/911-get-stack-trace/expected.txt
+++ b/test/911-get-stack-trace/expected.txt
@@ -3,206 +3,206 @@
###################
From top
---------
- getStackTrace (Ljava/lang/Thread;II)[Ljava/lang/String;
- print (Ljava/lang/Thread;II)V
- printOrWait (IILMain$ControlData;)V
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- doTest ()V
- main ([Ljava/lang/String;)V
----------
- print (Ljava/lang/Thread;II)V
- printOrWait (IILMain$ControlData;)V
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- doTest ()V
- main ([Ljava/lang/String;)V
----------
- getStackTrace (Ljava/lang/Thread;II)[Ljava/lang/String;
- print (Ljava/lang/Thread;II)V
- printOrWait (IILMain$ControlData;)V
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
----------
- printOrWait (IILMain$ControlData;)V
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
+ getStackTrace (Ljava/lang/Thread;II)[[Ljava/lang/String; -1 -2
+ print (Ljava/lang/Thread;II)V 0 124
+ printOrWait (IILMain$ControlData;)V 6 151
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ doTest ()V 38 34
+ main ([Ljava/lang/String;)V 6 24
+---------
+ print (Ljava/lang/Thread;II)V 0 124
+ printOrWait (IILMain$ControlData;)V 6 151
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ doTest ()V 42 35
+ main ([Ljava/lang/String;)V 6 24
+---------
+ getStackTrace (Ljava/lang/Thread;II)[[Ljava/lang/String; -1 -2
+ print (Ljava/lang/Thread;II)V 0 124
+ printOrWait (IILMain$ControlData;)V 6 151
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
+ bar (IIILMain$ControlData;)J 0 136
+---------
+ printOrWait (IILMain$ControlData;)V 6 151
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
From bottom
---------
- main ([Ljava/lang/String;)V
+ main ([Ljava/lang/String;)V 6 24
---------
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- doTest ()V
- main ([Ljava/lang/String;)V
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ doTest ()V 65 41
+ main ([Ljava/lang/String;)V 6 24
---------
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
################################
### Other thread (suspended) ###
################################
From top
---------
- wait ()V
- printOrWait (IILMain$ControlData;)V
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- run ()V
----------
- printOrWait (IILMain$ControlData;)V
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- run ()V
----------
- wait ()V
- printOrWait (IILMain$ControlData;)V
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
----------
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
+ wait ()V -1 -2
+ printOrWait (IILMain$ControlData;)V 24 157
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ run ()V 4 54
+---------
+ printOrWait (IILMain$ControlData;)V 24 157
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ run ()V 4 54
+---------
+ wait ()V -1 -2
+ printOrWait (IILMain$ControlData;)V 24 157
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+---------
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
From bottom
---------
- run ()V
+ run ()V 4 54
---------
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- run ()V
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ run ()V 4 54
---------
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
###########################
### Other thread (live) ###
###########################
From top
---------
- printOrWait (IILMain$ControlData;)V
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- run ()V
----------
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- run ()V
----------
- printOrWait (IILMain$ControlData;)V
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
----------
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
+ printOrWait (IILMain$ControlData;)V 44 164
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ run ()V 4 88
+---------
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ run ()V 4 88
+---------
+ printOrWait (IILMain$ControlData;)V 44 164
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+---------
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
From bottom
---------
- run ()V
+ run ()V 4 88
---------
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- run ()V
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ run ()V 4 88
---------
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
- foo (IIILMain$ControlData;)I
- baz (IIILMain$ControlData;)Ljava/lang/Object;
- bar (IIILMain$ControlData;)J
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
+ foo (IIILMain$ControlData;)I 0 131
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ bar (IIILMain$ControlData;)J 0 136
diff --git a/test/911-get-stack-trace/src/Main.java b/test/911-get-stack-trace/src/Main.java
index df4501d338..722bee8056 100644
--- a/test/911-get-stack-trace/src/Main.java
+++ b/test/911-get-stack-trace/src/Main.java
@@ -109,13 +109,14 @@ public class Main {
t.join();
}
- public static void print(String[] stack) {
+ public static void print(String[][] stack) {
System.out.println("---------");
- for (int i = 0; i < stack.length; i += 2) {
- System.out.print(' ');
- System.out.print(stack[i]);
- System.out.print(' ');
- System.out.println(stack[i + 1]);
+ for (String[] stackElement : stack) {
+ for (String part : stackElement) {
+ System.out.print(' ');
+ System.out.print(part);
+ }
+ System.out.println();
}
}
@@ -174,5 +175,5 @@ public class Main {
volatile boolean stop = false;
}
- public static native String[] getStackTrace(Thread thread, int start, int max);
+ public static native String[][] getStackTrace(Thread thread, int start, int max);
}
diff --git a/test/911-get-stack-trace/stack_trace.cc b/test/911-get-stack-trace/stack_trace.cc
index e7d9380469..9092f2f7d3 100644
--- a/test/911-get-stack-trace/stack_trace.cc
+++ b/test/911-get-stack-trace/stack_trace.cc
@@ -16,10 +16,13 @@
#include "stack_trace.h"
+#include <inttypes.h>
#include <memory>
#include <stdio.h>
#include "base/logging.h"
+#include "base/macros.h"
+#include "base/stringprintf.h"
#include "jni.h"
#include "openjdkjvmti/jvmti.h"
#include "ScopedLocalRef.h"
@@ -29,6 +32,23 @@
namespace art {
namespace Test911GetStackTrace {
+static jint FindLineNumber(jint line_number_count,
+ jvmtiLineNumberEntry* line_number_table,
+ jlocation location) {
+ if (line_number_table == nullptr) {
+ return -2;
+ }
+
+ jint line_number = -1;
+ for (jint i = 0; i != line_number_count; ++i) {
+ if (line_number_table[i].start_location > location) {
+ return line_number;
+ }
+ line_number = line_number_table[i].line_number;
+ }
+ return line_number;
+}
+
extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getStackTrace(
JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jthread thread, jint start, jint max) {
std::unique_ptr<jvmtiFrameInfo[]> frames(new jvmtiFrameInfo[max]);
@@ -44,8 +64,7 @@ extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getStackTrace(
}
}
- auto callback = [&](jint i) -> jstring {
- size_t method_index = static_cast<size_t>(i) / 2;
+ auto callback = [&](jint method_index) -> jobjectArray {
char* name;
char* sig;
char* gen;
@@ -58,13 +77,47 @@ extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getStackTrace(
return nullptr;
}
}
- jstring callback_result;
- if (i % 2 == 0) {
- callback_result = name == nullptr ? nullptr : env->NewStringUTF(name);
- } else {
- callback_result = sig == nullptr ? nullptr : env->NewStringUTF(sig);
+
+ jint line_number_count;
+ jvmtiLineNumberEntry* line_number_table;
+ {
+ jvmtiError line_result = jvmti_env->GetLineNumberTable(frames[method_index].method,
+ &line_number_count,
+ &line_number_table);
+ if (line_result != JVMTI_ERROR_NONE) {
+ // Accept absent info and native method errors.
+ if (line_result != JVMTI_ERROR_ABSENT_INFORMATION &&
+ line_result != JVMTI_ERROR_NATIVE_METHOD) {
+ char* err;
+ jvmti_env->GetErrorName(line_result, &err);
+ printf("Failure running GetLineNumberTable: %s\n", err);
+ return nullptr;
+ }
+ line_number_table = nullptr;
+ line_number_count = 0;
+ }
}
+ auto inner_callback = [&](jint component_index) -> jstring {
+ switch (component_index) {
+ case 0:
+ return (name == nullptr) ? nullptr : env->NewStringUTF(name);
+ case 1:
+ return (sig == nullptr) ? nullptr : env->NewStringUTF(sig);
+ case 2:
+ return env->NewStringUTF(StringPrintf("%" PRId64, frames[method_index].location).c_str());
+ case 3: {
+ jint line_number = FindLineNumber(line_number_count,
+ line_number_table,
+ frames[method_index].location);
+ return env->NewStringUTF(StringPrintf("%d", line_number).c_str());
+ }
+ }
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+ };
+ jobjectArray inner_array = CreateObjectArray(env, 4, "java/lang/String", inner_callback);
+
if (name != nullptr) {
jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(name));
}
@@ -74,9 +127,13 @@ extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getStackTrace(
if (gen != nullptr) {
jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(gen));
}
- return callback_result;
+ if (line_number_table != nullptr) {
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(line_number_table));
+ }
+
+ return inner_array;
};
- return CreateObjectArray(env, 2 * count, "java/lang/String", callback);
+ return CreateObjectArray(env, count, "[Ljava/lang/String;", callback);
}
// Don't do anything
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 2f1ca6d295..b515130a49 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -514,9 +514,11 @@ TEST_ART_BROKEN_INTERPRETER_RUN_TESTS :=
# Test 906 iterates the heap filtering with different options. No instances should be created
# between those runs to be able to have precise checks.
# Test 902 hits races with the JIT compiler. b/32821077
+# Test 626-const-class-linking can deadlock with JIT. b/33567581
# Test 629 requires compilation.
TEST_ART_BROKEN_JIT_RUN_TESTS := \
137-cfi \
+ 626-const-class-linking \
629-vdex-speed \
902-hello-transformation \
904-object-allocation \