-rw-r--r--  benchmark/string-indexof/info.txt | 1
-rw-r--r--  benchmark/string-indexof/src/StringIndexOfBenchmark.java | 122
-rw-r--r--  cmdline/cmdline.h | 4
-rw-r--r--  cmdline/cmdline_parser.h | 2
-rw-r--r--  cmdline/cmdline_parser_test.cc | 6
-rw-r--r--  cmdline/cmdline_types.h | 2
-rw-r--r--  cmdline/detail/cmdline_parse_argument_detail.h | 4
-rw-r--r--  compiler/jni/jni_compiler_test.cc | 9
-rw-r--r--  compiler/optimizing/bytecode_utils.h | 3
-rw-r--r--  compiler/optimizing/code_generator_arm.cc | 107
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc | 92
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc | 32
-rw-r--r--  compiler/optimizing/code_generator_mips.cc | 21
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc | 21
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 87
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 99
-rw-r--r--  compiler/optimizing/codegen_test.cc | 4
-rw-r--r--  compiler/optimizing/constant_folding.h | 3
-rw-r--r--  compiler/optimizing/constant_folding_test.cc | 8
-rw-r--r--  compiler/optimizing/dead_code_elimination.cc | 20
-rw-r--r--  compiler/optimizing/dead_code_elimination.h | 4
-rw-r--r--  compiler/optimizing/dead_code_elimination_test.cc | 2
-rw-r--r--  compiler/optimizing/inliner.cc | 4
-rw-r--r--  compiler/optimizing/load_store_elimination.cc | 68
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 2
-rw-r--r--  dex2oat/dex2oat_test.cc | 4
-rw-r--r--  dexlayout/dexlayout.cc | 2
-rw-r--r--  dexlayout/dexlayout_test.cc | 10
-rw-r--r--  imgdiag/imgdiag.cc | 2
-rw-r--r--  patchoat/patchoat.cc | 2
-rw-r--r--  runtime/arch/arm/entrypoints_init_arm.cc | 5
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S | 15
-rw-r--r--  runtime/arch/arm64/entrypoints_init_arm64.cc | 5
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S | 15
-rw-r--r--  runtime/arch/mips/entrypoints_init_mips.cc | 7
-rw-r--r--  runtime/arch/mips/quick_entrypoints_mips.S | 15
-rw-r--r--  runtime/arch/mips64/entrypoints_init_mips64.cc | 6
-rw-r--r--  runtime/arch/mips64/quick_entrypoints_mips64.S | 15
-rw-r--r--  runtime/arch/stub_test.cc | 89
-rw-r--r--  runtime/arch/x86/entrypoints_init_x86.cc | 5
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S | 12
-rw-r--r--  runtime/arch/x86_64/entrypoints_init_x86_64.cc | 5
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S | 8
-rw-r--r--  runtime/base/variant_map_test.cc | 4
-rw-r--r--  runtime/check_jni.cc | 43
-rw-r--r--  runtime/class_linker.cc | 37
-rw-r--r--  runtime/common_runtime_test.cc | 12
-rw-r--r--  runtime/debugger.cc | 36
-rw-r--r--  runtime/dex_file_annotations.cc | 5
-rw-r--r--  runtime/dex_file_verifier_test.cc | 2
-rw-r--r--  runtime/entrypoints/quick/quick_cast_entrypoints.cc | 8
-rw-r--r--  runtime/entrypoints/quick/quick_default_externs.h | 2
-rw-r--r--  runtime/entrypoints/quick/quick_entrypoints_list.h | 4
-rw-r--r--  runtime/entrypoints/quick/quick_throw_entrypoints.cc | 8
-rw-r--r--  runtime/entrypoints_order_test.cc | 5
-rw-r--r--  runtime/jni_internal.cc | 38
-rw-r--r--  runtime/jni_internal.h | 28
-rw-r--r--  runtime/native/dalvik_system_VMDebug.cc | 6
-rw-r--r--  runtime/native/java_lang_reflect_Executable.cc | 2
-rw-r--r--  runtime/native_bridge_art_interface.cc | 7
-rw-r--r--  runtime/native_bridge_art_interface.h | 4
-rw-r--r--  runtime/native_stack_dump.cc | 2
-rw-r--r--  runtime/oat_file_assistant_test.cc | 2
-rw-r--r--  runtime/oat_file_manager.cc | 28
-rw-r--r--  runtime/openjdkjvmti/ti_method.cc | 11
-rw-r--r--  runtime/openjdkjvmti/ti_stack.cc | 3
-rw-r--r--  runtime/openjdkjvmti/transform.cc | 2
-rw-r--r--  runtime/openjdkjvmti/transform.h | 2
-rw-r--r--  runtime/plugin.h | 4
-rw-r--r--  runtime/reflection.cc | 15
-rw-r--r--  runtime/reflection_test.cc | 95
-rw-r--r--  runtime/runtime.cc | 5
-rw-r--r--  runtime/scoped_thread_state_change-inl.h | 24
-rw-r--r--  runtime/scoped_thread_state_change.h | 8
-rw-r--r--  runtime/thread.cc | 45
-rw-r--r--  runtime/thread.h | 3
-rw-r--r--  runtime/utf_test.cc | 4
-rw-r--r--  runtime/well_known_classes.cc | 11
-rw-r--r--  test/530-checker-lse/expected.txt | 1
-rw-r--r--  test/530-checker-lse/src/Main.java | 41
-rw-r--r--  test/530-checker-lse2/expected.txt | 8
-rw-r--r--  test/530-checker-lse2/info.txt | 2
-rw-r--r--  test/530-checker-lse2/src/Main.java | 208
-rw-r--r--  test/622-simplifyifs-exception-edges/expected.txt | 0
-rw-r--r--  test/622-simplifyifs-exception-edges/info.txt | 2
-rw-r--r--  test/622-simplifyifs-exception-edges/smali/Test.smali | 76
-rw-r--r--  test/622-simplifyifs-exception-edges/src/Main.java | 43
-rw-r--r--  tools/cpp-define-generator/main.cc | 4
88 files changed, 1144 insertions, 705 deletions
diff --git a/benchmark/string-indexof/info.txt b/benchmark/string-indexof/info.txt
new file mode 100644
index 0000000000..cc042172fb
--- /dev/null
+++ b/benchmark/string-indexof/info.txt
@@ -0,0 +1 @@
+Benchmarks for repeating String.indexOf() instructions in a loop.
diff --git a/benchmark/string-indexof/src/StringIndexOfBenchmark.java b/benchmark/string-indexof/src/StringIndexOfBenchmark.java
new file mode 100644
index 0000000000..481a27a051
--- /dev/null
+++ b/benchmark/string-indexof/src/StringIndexOfBenchmark.java
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class StringIndexOfBenchmark {
+ public static final String string36 = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; // length = 36
+
+ public void timeIndexOf0(int count) {
+ final char c = '0';
+ String s = string36;
+ for (int i = 0; i < count; ++i) {
+ $noinline$indexOf(s, c);
+ }
+ }
+
+ public void timeIndexOf1(int count) {
+ final char c = '1';
+ String s = string36;
+ for (int i = 0; i < count; ++i) {
+ $noinline$indexOf(s, c);
+ }
+ }
+
+ public void timeIndexOf2(int count) {
+ final char c = '2';
+ String s = string36;
+ for (int i = 0; i < count; ++i) {
+ $noinline$indexOf(s, c);
+ }
+ }
+
+ public void timeIndexOf3(int count) {
+ final char c = '3';
+ String s = string36;
+ for (int i = 0; i < count; ++i) {
+ $noinline$indexOf(s, c);
+ }
+ }
+
+ public void timeIndexOf4(int count) {
+ final char c = '4';
+ String s = string36;
+ for (int i = 0; i < count; ++i) {
+ $noinline$indexOf(s, c);
+ }
+ }
+
+ public void timeIndexOf7(int count) {
+ final char c = '7';
+ String s = string36;
+ for (int i = 0; i < count; ++i) {
+ $noinline$indexOf(s, c);
+ }
+ }
+
+ public void timeIndexOf8(int count) {
+ final char c = '8';
+ String s = string36;
+ for (int i = 0; i < count; ++i) {
+ $noinline$indexOf(s, c);
+ }
+ }
+
+ public void timeIndexOfF(int count) {
+ final char c = 'F';
+ String s = string36;
+ for (int i = 0; i < count; ++i) {
+ $noinline$indexOf(s, c);
+ }
+ }
+
+ public void timeIndexOfG(int count) {
+ final char c = 'G';
+ String s = string36;
+ for (int i = 0; i < count; ++i) {
+ $noinline$indexOf(s, c);
+ }
+ }
+
+ public void timeIndexOfV(int count) {
+ final char c = 'V';
+ String s = string36;
+ for (int i = 0; i < count; ++i) {
+ $noinline$indexOf(s, c);
+ }
+ }
+
+ public void timeIndexOfW(int count) {
+ final char c = 'W';
+ String s = string36;
+ for (int i = 0; i < count; ++i) {
+ $noinline$indexOf(s, c);
+ }
+ }
+
+ public void timeIndexOf_(int count) {
+ final char c = '_';
+ String s = string36;
+ for (int i = 0; i < count; ++i) {
+ $noinline$indexOf(s, c);
+ }
+ }
+
+ static int $noinline$indexOf(String s, char c) {
+ if (doThrow) { throw new Error(); }
+ return s.indexOf(c);
+ }
+
+ public static boolean doThrow = false;
+}
diff --git a/cmdline/cmdline.h b/cmdline/cmdline.h
index dec9c83165..6e042c3c27 100644
--- a/cmdline/cmdline.h
+++ b/cmdline/cmdline.h
@@ -234,7 +234,7 @@ struct CmdlineArgs {
// Checks for --boot-image location.
{
std::string boot_image_location = boot_image_location_;
- size_t file_name_idx = boot_image_location.rfind("/");
+ size_t file_name_idx = boot_image_location.rfind('/');
if (file_name_idx == std::string::npos) { // Prevent a InsertIsaDirectory check failure.
*error_msg = "Boot image location must have a / in it";
return false;
@@ -244,7 +244,7 @@ struct CmdlineArgs {
// This prevents a common error "Could not create an image space..." when initing the Runtime.
if (file_name_idx != std::string::npos) {
std::string no_file_name = boot_image_location.substr(0, file_name_idx);
- size_t ancestor_dirs_idx = no_file_name.rfind("/");
+ size_t ancestor_dirs_idx = no_file_name.rfind('/');
std::string parent_dir_name;
if (ancestor_dirs_idx != std::string::npos) {
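The two hunks above, like the matching ones in cmdline_parser.h and cmdline_types.h below, switch single-character searches from the C-string overload of find/rfind to the char overload. A minimal standalone C++ sketch of the two overloads, independent of the ART sources:

    #include <iostream>
    #include <string>

    int main() {
      std::string boot_image_location = "/system/framework/boot.art";
      // Both overloads return the index of the last '/'; the char overload avoids
      // treating the needle as a (one-character) substring to scan for.
      size_t via_string = boot_image_location.rfind("/");
      size_t via_char = boot_image_location.rfind('/');
      std::cout << via_string << " " << via_char << std::endl;  // prints "17 17"
      return 0;
    }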
diff --git a/cmdline/cmdline_parser.h b/cmdline/cmdline_parser.h
index cfc096728f..d82fd488e9 100644
--- a/cmdline/cmdline_parser.h
+++ b/cmdline/cmdline_parser.h
@@ -390,7 +390,7 @@ struct CmdlineParser {
// Unlike regular argument definitions, when a value gets parsed into its
// stronger type, we just throw it away.
- if (ign.find("_") != std::string::npos) { // Does the arg-def have a wildcard?
+ if (ign.find('_') != std::string::npos) { // Does the arg-def have a wildcard?
// pretend this is a string, e.g. -Xjitconfig:<anythinggoeshere>
auto&& builder = Define(ignore_name).template WithType<std::string>().IntoIgnore();
assert(&builder == this);
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index cad51045aa..550e8c4605 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -78,7 +78,7 @@ namespace art {
return memcmp(std::addressof(expected), std::addressof(actual), sizeof(expected)) == 0;
}
- bool UsuallyEquals(const char* expected, std::string actual) {
+ bool UsuallyEquals(const char* expected, const std::string& actual) {
return std::string(expected) == actual;
}
@@ -129,7 +129,7 @@ class CmdlineParserTest : public ::testing::Test {
parser_ = ParsedOptions::MakeParser(false); // do not ignore unrecognized options
}
- static ::testing::AssertionResult IsResultSuccessful(CmdlineResult result) {
+ static ::testing::AssertionResult IsResultSuccessful(const CmdlineResult& result) {
if (result.IsSuccess()) {
return ::testing::AssertionSuccess();
} else {
@@ -138,7 +138,7 @@ class CmdlineParserTest : public ::testing::Test {
}
}
- static ::testing::AssertionResult IsResultFailure(CmdlineResult result,
+ static ::testing::AssertionResult IsResultFailure(const CmdlineResult& result,
CmdlineResult::Status failure_status) {
if (result.IsSuccess()) {
return ::testing::AssertionFailure() << " got success but expected failure: "
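The three signature changes above pass their arguments by const reference instead of by value, as do the similar tweaks in cmdline_parse_argument_detail.h, codegen_test.cc, and constant_folding_test.cc further down. A small self-contained sketch of what the by-value form costs (function names here are illustrative, not the test's):

    #include <string>

    // By value: the std::string argument is copied on every call.
    bool EqualsByValue(const char* expected, std::string actual) {
      return expected == actual;
    }

    // By const reference: binds directly to the caller's string, no copy.
    bool EqualsByRef(const char* expected, const std::string& actual) {
      return expected == actual;
    }

    int main() {
      std::string actual = "value";
      bool ok = EqualsByValue("value", actual) && EqualsByRef("value", actual);
      return ok ? 0 : 1;
    }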
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index a5bb117509..3f55eefa0e 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -696,7 +696,7 @@ struct CmdlineType<ProfileSaverOptions> : CmdlineTypeParser<ProfileSaverOptions>
}
static std::string RemovePrefix(const std::string& source) {
- size_t prefix_idx = source.find(":");
+ size_t prefix_idx = source.find(':');
if (prefix_idx == std::string::npos) {
return "";
diff --git a/cmdline/detail/cmdline_parse_argument_detail.h b/cmdline/detail/cmdline_parse_argument_detail.h
index 84beff59c7..14eac30aa1 100644
--- a/cmdline/detail/cmdline_parse_argument_detail.h
+++ b/cmdline/detail/cmdline_parse_argument_detail.h
@@ -108,7 +108,7 @@ namespace art {
// If this is true, then the wildcard matching later on can still fail, so this is not
// a guarantee that the argument is correct, it's more of a strong hint that the
// user-provided input *probably* was trying to match this argument.
- size_t MaybeMatches(TokenRange token_list) const {
+ size_t MaybeMatches(const TokenRange& token_list) const {
auto best_match = FindClosestMatch(token_list);
return best_match.second;
@@ -118,7 +118,7 @@ namespace art {
//
// Returns the token range that was the closest match and the # of tokens that
// this range was matched up until.
- std::pair<const TokenRange*, size_t> FindClosestMatch(TokenRange token_list) const {
+ std::pair<const TokenRange*, size_t> FindClosestMatch(const TokenRange& token_list) const {
const TokenRange* best_match_ptr = nullptr;
size_t best_match = 0;
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 4960a7343e..a9044a2047 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -2202,8 +2202,7 @@ void JniCompilerTest::NormalNativeImpl() {
"()V",
CURRENT_JNI_WRAPPER(Java_MyClassNatives_normalNative));
- ScopedObjectAccess soa(Thread::Current());
- ArtMethod* method = soa.DecodeMethod(jmethod_);
+ ArtMethod* method = jni::DecodeArtMethod(jmethod_);
ASSERT_TRUE(method != nullptr);
EXPECT_FALSE(method->IsAnnotatedWithCriticalNative());
@@ -2225,8 +2224,7 @@ void JniCompilerTest::FastNativeImpl() {
"()V",
CURRENT_JNI_WRAPPER(Java_MyClassNatives_fastNative));
- ScopedObjectAccess soa(Thread::Current());
- ArtMethod* method = soa.DecodeMethod(jmethod_);
+ ArtMethod* method = jni::DecodeArtMethod(jmethod_);
ASSERT_TRUE(method != nullptr);
EXPECT_FALSE(method->IsAnnotatedWithCriticalNative());
@@ -2255,8 +2253,7 @@ void JniCompilerTest::CriticalNativeImpl() {
UpdateCurrentJni(JniKind::kCritical);
ASSERT_TRUE(IsCurrentJniCritical());
- ScopedObjectAccess soa(Thread::Current());
- ArtMethod* method = soa.DecodeMethod(jmethod_);
+ ArtMethod* method = jni::DecodeArtMethod(jmethod_);
ASSERT_TRUE(method != nullptr);
EXPECT_TRUE(method->IsAnnotatedWithCriticalNative());
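The tests above now decode jmethod_ through jni::DecodeArtMethod instead of going through a ScopedObjectAccess and soa.DecodeMethod. The diffstat shows runtime/jni_internal.h growing by 28 lines, presumably to host that helper. A plausible shape for it, sketched under the assumption that a jmethodID is simply an opaque pointer to the runtime's ArtMethod (not the actual ART definition):

    #include <jni.h>

    namespace art {

    class ArtMethod;  // opaque runtime type

    namespace jni {

    // Sketch: if jmethodID is just a disguised ArtMethod*, decoding needs no
    // thread-state change, which is why the ScopedObjectAccess could be dropped.
    inline ArtMethod* DecodeArtMethod(jmethodID method_id) {
      return reinterpret_cast<ArtMethod*>(method_id);
    }

    }  // namespace jni
    }  // namespace art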
diff --git a/compiler/optimizing/bytecode_utils.h b/compiler/optimizing/bytecode_utils.h
index 6dfffce117..133afa47fe 100644
--- a/compiler/optimizing/bytecode_utils.h
+++ b/compiler/optimizing/bytecode_utils.h
@@ -26,7 +26,8 @@ namespace art {
class CodeItemIterator : public ValueObject {
public:
- CodeItemIterator(const DexFile::CodeItem& code_item, uint32_t start_dex_pc = 0u)
+ explicit CodeItemIterator(const DexFile::CodeItem& code_item) : CodeItemIterator(code_item, 0u) {}
+ CodeItemIterator(const DexFile::CodeItem& code_item, uint32_t start_dex_pc)
: code_ptr_(code_item.insns_ + start_dex_pc),
code_end_(code_item.insns_ + code_item.insns_size_in_code_units_),
dex_pc_(start_dex_pc) {}
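The hunk above splits the defaulted start_dex_pc parameter into an explicit single-argument constructor that delegates to the two-argument one, so the common case stays marked explicit. The same pattern outside of ART, with illustrative names:

    #include <cstdint>

    class InsnIterator {
     public:
      // The one-argument form is explicit and delegates to the general form
      // with a start position of 0, mirroring the CodeItemIterator change.
      explicit InsnIterator(const uint16_t* insns) : InsnIterator(insns, 0u) {}
      InsnIterator(const uint16_t* insns, uint32_t start_pos) : ptr_(insns + start_pos) {}

      uint16_t Current() const { return *ptr_; }

     private:
      const uint16_t* ptr_;
    };

    int main() {
      uint16_t insns[] = {10, 20, 30};
      InsnIterator from_start(insns);    // delegating constructor
      InsnIterator from_two(insns, 2u);  // explicit start position
      return (from_start.Current() == 10 && from_two.Current() == 30) ? 0 : 1;
    }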
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 7cab97d2e5..08d22f84d0 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -489,8 +489,14 @@ class TypeCheckSlowPathARM : public SlowPathCodeARM {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
- : locations->Out();
+ Location arg0, arg1;
+ if (instruction_->IsInstanceOf()) {
+ arg0 = locations->InAt(1);
+ arg1 = locations->Out();
+ } else {
+ arg0 = locations->InAt(0);
+ arg1 = locations->InAt(1);
+ }
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -504,26 +510,26 @@ class TypeCheckSlowPathARM : public SlowPathCodeARM {
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- codegen->EmitParallelMoves(
- locations->InAt(1),
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- Primitive::kPrimNot,
- object_class,
- Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
- Primitive::kPrimNot);
-
+ codegen->EmitParallelMoves(arg0,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimNot,
+ arg1,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
arm_codegen->InvokeRuntime(kQuickInstanceofNonTrivial,
instruction_,
instruction_->GetDexPc(),
this);
- CheckEntrypointTypes<
- kQuickInstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*>();
+ CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
} else {
DCHECK(instruction_->IsCheckCast());
- arm_codegen->InvokeRuntime(kQuickCheckCast, instruction_, instruction_->GetDexPc(), this);
- CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
+ arm_codegen->InvokeRuntime(kQuickCheckInstanceOf,
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
+ CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>();
}
if (!is_fatal_) {
@@ -6297,26 +6303,16 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
case TypeCheckKind::kAbstractClassCheck: {
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
- Label loop, compare_classes;
+ Label loop;
__ Bind(&loop);
// /* HeapReference<Class> */ temp = temp->super_class_
GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc);
- // If the class reference currently in `temp` is not null, jump
- // to the `compare_classes` label to compare it with the checked
- // class.
- __ CompareAndBranchIfNonZero(temp, &compare_classes);
- // Otherwise, jump to the slow path to throw the exception.
- //
- // But before, move back the object's class into `temp` before
- // going into the slow path, as it has been overwritten in the
- // meantime.
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(
- instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
- __ b(type_check_slow_path->GetEntryLabel());
+ // If the class reference currently in `temp` is null, jump to the slow path to throw the
+ // exception.
+ __ CompareAndBranchIfZero(temp, type_check_slow_path->GetEntryLabel());
- __ Bind(&compare_classes);
+ // Otherwise, compare the classes.
__ cmp(temp, ShifterOperand(cls));
__ b(&loop, NE);
break;
@@ -6332,55 +6328,29 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
// /* HeapReference<Class> */ temp = temp->super_class_
GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc);
- // If the class reference currently in `temp` is not null, jump
- // back at the beginning of the loop.
- __ CompareAndBranchIfNonZero(temp, &loop);
- // Otherwise, jump to the slow path to throw the exception.
- //
- // But before, move back the object's class into `temp` before
- // going into the slow path, as it has been overwritten in the
- // meantime.
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(
- instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
- __ b(type_check_slow_path->GetEntryLabel());
+ // If the class reference currently in `temp` is null, jump to the slow path to throw the
+ // exception.
+ __ CompareAndBranchIfZero(temp, type_check_slow_path->GetEntryLabel());
+ // Otherwise, jump to the beginning of the loop.
+ __ b(&loop);
break;
}
case TypeCheckKind::kArrayObjectCheck: {
// Do an exact check.
- Label check_non_primitive_component_type;
__ cmp(temp, ShifterOperand(cls));
__ b(&done, EQ);
// Otherwise, we need to check that the object's class is a non-primitive array.
// /* HeapReference<Class> */ temp = temp->component_type_
GenerateReferenceLoadOneRegister(instruction, temp_loc, component_offset, maybe_temp2_loc);
-
- // If the component type is not null (i.e. the object is indeed
- // an array), jump to label `check_non_primitive_component_type`
- // to further check that this component type is not a primitive
- // type.
- __ CompareAndBranchIfNonZero(temp, &check_non_primitive_component_type);
- // Otherwise, jump to the slow path to throw the exception.
- //
- // But before, move back the object's class into `temp` before
- // going into the slow path, as it has been overwritten in the
- // meantime.
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(
- instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
- __ b(type_check_slow_path->GetEntryLabel());
-
- __ Bind(&check_non_primitive_component_type);
+ // If the component type is null, jump to the slow path to throw the exception.
+ __ CompareAndBranchIfZero(temp, type_check_slow_path->GetEntryLabel());
+      // Otherwise, the object is indeed an array. Further check that this component type is
+      // not a primitive type.
__ LoadFromOffset(kLoadUnsignedHalfword, temp, temp, primitive_offset);
static_assert(Primitive::kPrimNot == 0, "Expected 0 for art::Primitive::kPrimNot");
- __ CompareAndBranchIfZero(temp, &done);
- // Same comment as above regarding `temp` and the slow path.
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(
- instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
- __ b(type_check_slow_path->GetEntryLabel());
+ __ CompareAndBranchIfNonZero(temp, type_check_slow_path->GetEntryLabel());
break;
}
@@ -6396,13 +6366,6 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
// instruction (following the runtime calling convention), which
// might be cluttered by the potential first read barrier
// emission at the beginning of this method.
- //
- // TODO: Introduce a new runtime entry point taking the object
- // to test (instead of its class) as argument, and let it deal
- // with the read barrier issues. This will let us refactor this
- // case of the `switch` code as it was previously (with a direct
- // call to the runtime not using a type checking slow path).
- // This should also be beneficial for the other cases above.
__ b(type_check_slow_path->GetEntryLabel());
break;
}
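This slow-path rewrite (repeated for every code generator below) follows a new calling convention: instead of handing two Class pointers to kQuickCheckCast, the slow path now passes the object itself plus the target class to kQuickCheckInstanceOf, whose signature the CheckEntrypointTypes line pins down as void(mirror::Object*, mirror::Class*). Because the object survives in a register, the slow path no longer has to reload obj->klass_ before jumping there. A self-contained sketch of what an entrypoint with that shape must do, using illustrative types and names rather than the real ART entrypoint (whose body lives in the runtime/entrypoints files listed in the diffstat):

    #include <stdexcept>
    #include <string>

    // Toy stand-ins for mirror::Class / mirror::Object.
    struct Class {
      std::string name;
      const Class* super;
    };

    struct Object {
      const Class* klass;
      // Simplified subtype test: walks the superclass chain only; a real check
      // also has to handle interfaces and array covariance.
      bool InstanceOf(const Class* target) const {
        for (const Class* k = klass; k != nullptr; k = k->super) {
          if (k == target) return true;
        }
        return false;
      }
    };

    // Shape of the new entrypoint: takes the object and the destination class,
    // and only throws when the (non-null) object is not an instance of it.
    void CheckInstanceOfSketch(const Object* obj, const Class* dest) {
      if (obj != nullptr && !obj->InstanceOf(dest)) {
        throw std::runtime_error(obj->klass->name + " cannot be cast to " + dest->name);
      }
    }

    int main() {
      Class object_class{"java.lang.Object", nullptr};
      Class string_class{"java.lang.String", &object_class};
      Object s{&string_class};
      CheckInstanceOfSketch(&s, &object_class);  // fine: String is-a Object
      return 0;
    }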
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index d868984387..f5119df4e9 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -459,9 +459,15 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- Location class_to_check = locations->InAt(1);
- Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
- : locations->Out();
+ Location arg0, arg1;
+ if (instruction_->IsInstanceOf()) {
+ arg0 = locations->InAt(1);
+ arg1 = locations->Out();
+ } else {
+ arg0 = locations->InAt(0);
+ arg1 = locations->InAt(1);
+ }
+
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
@@ -476,21 +482,22 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- codegen->EmitParallelMoves(
- class_to_check, LocationFrom(calling_convention.GetRegisterAt(0)), Primitive::kPrimNot,
- object_class, LocationFrom(calling_convention.GetRegisterAt(1)), Primitive::kPrimNot);
-
+ codegen->EmitParallelMoves(arg0,
+ LocationFrom(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimNot,
+ arg1,
+ LocationFrom(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
arm64_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
- CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t,
- const mirror::Class*, const mirror::Class*>();
+ CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
Primitive::Type ret_type = instruction_->GetType();
Location ret_loc = calling_convention.GetReturnLocation(ret_type);
arm64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
} else {
DCHECK(instruction_->IsCheckCast());
- arm64_codegen->InvokeRuntime(kQuickCheckCast, instruction_, dex_pc, this);
- CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
+ arm64_codegen->InvokeRuntime(kQuickCheckInstanceOf, instruction_, dex_pc, this);
+ CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>();
}
if (!is_fatal_) {
@@ -3594,26 +3601,15 @@ void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
case TypeCheckKind::kAbstractClassCheck: {
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
- vixl::aarch64::Label loop, compare_classes;
+ vixl::aarch64::Label loop;
__ Bind(&loop);
// /* HeapReference<Class> */ temp = temp->super_class_
GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc);
- // If the class reference currently in `temp` is not null, jump
- // to the `compare_classes` label to compare it with the checked
- // class.
- __ Cbnz(temp, &compare_classes);
- // Otherwise, jump to the slow path to throw the exception.
- //
- // But before, move back the object's class into `temp` before
- // going into the slow path, as it has been overwritten in the
- // meantime.
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(
- instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
- __ B(type_check_slow_path->GetEntryLabel());
-
- __ Bind(&compare_classes);
+ // If the class reference currently in `temp` is null, jump to the slow path to throw the
+ // exception.
+ __ Cbz(temp, type_check_slow_path->GetEntryLabel());
+ // Otherwise, compare classes.
__ Cmp(temp, cls);
__ B(ne, &loop);
break;
@@ -3633,20 +3629,12 @@ void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
// back at the beginning of the loop.
__ Cbnz(temp, &loop);
// Otherwise, jump to the slow path to throw the exception.
- //
- // But before, move back the object's class into `temp` before
- // going into the slow path, as it has been overwritten in the
- // meantime.
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(
- instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
__ B(type_check_slow_path->GetEntryLabel());
break;
}
case TypeCheckKind::kArrayObjectCheck: {
// Do an exact check.
- vixl::aarch64::Label check_non_primitive_component_type;
__ Cmp(temp, cls);
__ B(eq, &done);
@@ -3654,30 +3642,13 @@ void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
// /* HeapReference<Class> */ temp = temp->component_type_
GenerateReferenceLoadOneRegister(instruction, temp_loc, component_offset, maybe_temp2_loc);
- // If the component type is not null (i.e. the object is indeed
- // an array), jump to label `check_non_primitive_component_type`
- // to further check that this component type is not a primitive
- // type.
- __ Cbnz(temp, &check_non_primitive_component_type);
- // Otherwise, jump to the slow path to throw the exception.
- //
- // But before, move back the object's class into `temp` before
- // going into the slow path, as it has been overwritten in the
- // meantime.
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(
- instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
- __ B(type_check_slow_path->GetEntryLabel());
-
- __ Bind(&check_non_primitive_component_type);
+ // If the component type is null, jump to the slow path to throw the exception.
+ __ Cbz(temp, type_check_slow_path->GetEntryLabel());
+ // Otherwise, the object is indeed an array. Further check that this component type is not a
+ // primitive type.
__ Ldrh(temp, HeapOperand(temp, primitive_offset));
static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
- __ Cbz(temp, &done);
- // Same comment as above regarding `temp` and the slow path.
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(
- instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
- __ B(type_check_slow_path->GetEntryLabel());
+ __ Cbnz(temp, type_check_slow_path->GetEntryLabel());
break;
}
@@ -3693,13 +3664,6 @@ void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
// instruction (following the runtime calling convention), which
// might be cluttered by the potential first read barrier
// emission at the beginning of this method.
- //
- // TODO: Introduce a new runtime entry point taking the object
- // to test (instead of its class) as argument, and let it deal
- // with the read barrier issues. This will let us refactor this
- // case of the `switch` code as it was previously (with a direct
- // call to the runtime not using a type checking slow path).
- // This should also be beneficial for the other cases above.
__ B(type_check_slow_path->GetEntryLabel());
break;
}
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index e69528e43f..b9814b63e9 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -443,8 +443,14 @@ class TypeCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
- : locations->Out();
+ Location arg0, arg1;
+ if (instruction_->IsInstanceOf()) {
+ arg0 = locations->InAt(1);
+ arg1 = locations->Out();
+ } else {
+ arg0 = locations->InAt(0);
+ arg1 = locations->InAt(1);
+ }
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -458,20 +464,22 @@ class TypeCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConventionARMVIXL calling_convention;
- codegen->EmitParallelMoves(
- locations->InAt(1),
- LocationFrom(calling_convention.GetRegisterAt(0)),
- Primitive::kPrimNot,
- object_class,
- LocationFrom(calling_convention.GetRegisterAt(1)),
- Primitive::kPrimNot);
+ codegen->EmitParallelMoves(arg0,
+ LocationFrom(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimNot,
+ arg1,
+ LocationFrom(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
TODO_VIXL32(FATAL);
} else {
DCHECK(instruction_->IsCheckCast());
- arm_codegen->InvokeRuntime(kQuickCheckCast, instruction_, instruction_->GetDexPc(), this);
- CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
+ arm_codegen->InvokeRuntime(kQuickCheckInstanceOf,
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
+ CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>();
}
if (!is_fatal_) {
@@ -660,7 +668,7 @@ CodeGeneratorARMVIXL::CodeGeneratorARMVIXL(HGraph* graph,
GetVIXLAssembler()->GetScratchVRegisterList()->Combine(d15);
}
-#define __ reinterpret_cast<ArmVIXLAssembler*>(GetAssembler())->GetVIXLAssembler()->
+#define __ reinterpret_cast<ArmVIXLAssembler*>(GetAssembler())->GetVIXLAssembler()-> // NOLINT
void CodeGeneratorARMVIXL::Finalize(CodeAllocator* allocator) {
GetAssembler()->FinalizeCode();
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 12b1ab9abb..6e9fbd2560 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -378,7 +378,14 @@ class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0) : locations->Out();
+ Location arg0, arg1;
+ if (instruction_->IsInstanceOf()) {
+ arg0 = locations->InAt(1);
+ arg1 = locations->Out();
+ } else {
+ arg0 = locations->InAt(0);
+ arg1 = locations->InAt(1);
+ }
uint32_t dex_pc = instruction_->GetDexPc();
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -390,24 +397,22 @@ class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- codegen->EmitParallelMoves(locations->InAt(1),
+ codegen->EmitParallelMoves(arg0,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
Primitive::kPrimNot,
- object_class,
+ arg1,
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimNot);
-
if (instruction_->IsInstanceOf()) {
mips_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
- CheckEntrypointTypes<
- kQuickInstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*>();
+ CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
Primitive::Type ret_type = instruction_->GetType();
Location ret_loc = calling_convention.GetReturnLocation(ret_type);
mips_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
} else {
DCHECK(instruction_->IsCheckCast());
- mips_codegen->InvokeRuntime(kQuickCheckCast, instruction_, dex_pc, this);
- CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
+ mips_codegen->InvokeRuntime(kQuickCheckInstanceOf, instruction_, dex_pc, this);
+ CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>();
}
RestoreLiveRegisters(codegen, locations);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 010bf24232..7598740d3c 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -322,7 +322,15 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0) : locations->Out();
+ Location arg0, arg1;
+ if (instruction_->IsInstanceOf()) {
+ arg0 = locations->InAt(1);
+ arg1 = locations->Out();
+ } else {
+ arg0 = locations->InAt(0);
+ arg1 = locations->InAt(1);
+ }
+
uint32_t dex_pc = instruction_->GetDexPc();
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -334,24 +342,23 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- codegen->EmitParallelMoves(locations->InAt(1),
+ codegen->EmitParallelMoves(arg0,
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
Primitive::kPrimNot,
- object_class,
+ arg1,
Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
Primitive::kPrimNot);
-
if (instruction_->IsInstanceOf()) {
mips64_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
CheckEntrypointTypes<
- kQuickInstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*>();
+ kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
Primitive::Type ret_type = instruction_->GetType();
Location ret_loc = calling_convention.GetReturnLocation(ret_type);
mips64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
} else {
DCHECK(instruction_->IsCheckCast());
- mips64_codegen->InvokeRuntime(kQuickCheckCast, instruction_, dex_pc, this);
- CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
+ mips64_codegen->InvokeRuntime(kQuickCheckInstanceOf, instruction_, dex_pc, this);
+ CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>();
}
RestoreLiveRegisters(codegen, locations);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 2f946e4263..c90227930c 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -312,8 +312,14 @@ class TypeCheckSlowPathX86 : public SlowPathCode {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
- : locations->Out();
+ Location arg0, arg1;
+ if (instruction_->IsInstanceOf()) {
+ arg0 = locations->InAt(1);
+ arg1 = locations->Out();
+ } else {
+ arg0 = locations->InAt(0);
+ arg1 = locations->InAt(1);
+ }
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -327,25 +333,25 @@ class TypeCheckSlowPathX86 : public SlowPathCode {
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- x86_codegen->EmitParallelMoves(
- locations->InAt(1),
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- Primitive::kPrimNot,
- object_class,
- Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
- Primitive::kPrimNot);
-
+ x86_codegen->EmitParallelMoves(arg0,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimNot,
+ arg1,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
x86_codegen->InvokeRuntime(kQuickInstanceofNonTrivial,
instruction_,
instruction_->GetDexPc(),
this);
- CheckEntrypointTypes<
- kQuickInstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*>();
+ CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
} else {
DCHECK(instruction_->IsCheckCast());
- x86_codegen->InvokeRuntime(kQuickCheckCast, instruction_, instruction_->GetDexPc(), this);
- CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
+ x86_codegen->InvokeRuntime(kQuickCheckInstanceOf,
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
+ CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>();
}
if (!is_fatal_) {
@@ -6645,26 +6651,17 @@ void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) {
case TypeCheckKind::kAbstractClassCheck: {
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
- NearLabel loop, compare_classes;
+ NearLabel loop;
__ Bind(&loop);
// /* HeapReference<Class> */ temp = temp->super_class_
GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc);
- // If the class reference currently in `temp` is not null, jump
- // to the `compare_classes` label to compare it with the checked
- // class.
+ // If the class reference currently in `temp` is null, jump to the slow path to throw the
+ // exception.
__ testl(temp, temp);
- __ j(kNotEqual, &compare_classes);
- // Otherwise, jump to the slow path to throw the exception.
- //
- // But before, move back the object's class into `temp` before
- // going into the slow path, as it has been overwritten in the
- // meantime.
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset);
- __ jmp(type_check_slow_path->GetEntryLabel());
+ __ j(kZero, type_check_slow_path->GetEntryLabel());
- __ Bind(&compare_classes);
+      // Otherwise, compare the classes.
if (cls.IsRegister()) {
__ cmpl(temp, cls.AsRegister<Register>());
} else {
@@ -6693,21 +6690,14 @@ void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) {
// If the class reference currently in `temp` is not null, jump
// back at the beginning of the loop.
__ testl(temp, temp);
- __ j(kNotEqual, &loop);
- // Otherwise, jump to the slow path to throw the exception.
- //
- // But before, move back the object's class into `temp` before
- // going into the slow path, as it has been overwritten in the
- // meantime.
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset);
+ __ j(kNotZero, &loop);
+      // Otherwise, jump to the slow path to throw the exception.
__ jmp(type_check_slow_path->GetEntryLabel());
break;
}
case TypeCheckKind::kArrayObjectCheck: {
// Do an exact check.
- NearLabel check_non_primitive_component_type;
if (cls.IsRegister()) {
__ cmpl(temp, cls.AsRegister<Register>());
} else {
@@ -6720,28 +6710,13 @@ void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) {
// /* HeapReference<Class> */ temp = temp->component_type_
GenerateReferenceLoadOneRegister(instruction, temp_loc, component_offset, maybe_temp2_loc);
- // If the component type is not null (i.e. the object is indeed
- // an array), jump to label `check_non_primitive_component_type`
- // to further check that this component type is not a primitive
- // type.
+      // If the component type is null (i.e. the object is not an array), jump to the slow path to
+ // throw the exception. Otherwise proceed with the check.
__ testl(temp, temp);
- __ j(kNotEqual, &check_non_primitive_component_type);
- // Otherwise, jump to the slow path to throw the exception.
- //
- // But before, move back the object's class into `temp` before
- // going into the slow path, as it has been overwritten in the
- // meantime.
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset);
- __ jmp(type_check_slow_path->GetEntryLabel());
+ __ j(kZero, type_check_slow_path->GetEntryLabel());
- __ Bind(&check_non_primitive_component_type);
__ cmpw(Address(temp, primitive_offset), Immediate(Primitive::kPrimNot));
- __ j(kEqual, &done);
- // Same comment as above regarding `temp` and the slow path.
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset);
- __ jmp(type_check_slow_path->GetEntryLabel());
+ __ j(kNotEqual, type_check_slow_path->GetEntryLabel());
break;
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 232c3b3cbb..89b16d3f77 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -332,8 +332,14 @@ class TypeCheckSlowPathX86_64 : public SlowPathCode {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
- : locations->Out();
+ Location arg0, arg1;
+ if (instruction_->IsInstanceOf()) {
+ arg0 = locations->InAt(1);
+ arg1 = locations->Out();
+ } else {
+ arg0 = locations->InAt(0);
+ arg1 = locations->InAt(1);
+ }
uint32_t dex_pc = instruction_->GetDexPc();
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -348,22 +354,19 @@ class TypeCheckSlowPathX86_64 : public SlowPathCode {
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- codegen->EmitParallelMoves(
- locations->InAt(1),
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- Primitive::kPrimNot,
- object_class,
- Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
- Primitive::kPrimNot);
-
+ codegen->EmitParallelMoves(arg0,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimNot,
+ arg1,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimNot);
if (instruction_->IsInstanceOf()) {
x86_64_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
- CheckEntrypointTypes<
- kQuickInstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*>();
+ CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>();
} else {
DCHECK(instruction_->IsCheckCast());
- x86_64_codegen->InvokeRuntime(kQuickCheckCast, instruction_, dex_pc, this);
- CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
+ x86_64_codegen->InvokeRuntime(kQuickCheckInstanceOf, instruction_, dex_pc, this);
+ CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>();
}
if (!is_fatal_) {
@@ -6100,30 +6103,16 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
kEmitCompilerReadBarrier);
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
- NearLabel loop, compare_classes;
+ NearLabel loop;
__ Bind(&loop);
// /* HeapReference<Class> */ temp = temp->super_class_
GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc);
- // If the class reference currently in `temp` is not null, jump
- // to the `compare_classes` label to compare it with the checked
- // class.
+ // If the class reference currently in `temp` is null, jump to the slow path to throw the
+ // exception.
__ testl(temp, temp);
- __ j(kNotEqual, &compare_classes);
- // Otherwise, jump to the slow path to throw the exception.
- //
- // But before, move back the object's class into `temp` before
- // going into the slow path, as it has been overwritten in the
- // meantime.
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(instruction,
- temp_loc,
- obj_loc,
- class_offset,
- kEmitCompilerReadBarrier);
- __ jmp(type_check_slow_path->GetEntryLabel());
-
- __ Bind(&compare_classes);
+      __ j(kZero, type_check_slow_path->GetEntryLabel());
+      // Otherwise, compare the classes.
if (cls.IsRegister()) {
__ cmpl(temp, cls.AsRegister<CpuRegister>());
} else {
@@ -6166,18 +6155,8 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
// If the class reference currently in `temp` is not null, jump
// back at the beginning of the loop.
__ testl(temp, temp);
- __ j(kNotEqual, &loop);
+ __ j(kNotZero, &loop);
// Otherwise, jump to the slow path to throw the exception.
- //
- // But before, move back the object's class into `temp` before
- // going into the slow path, as it has been overwritten in the
- // meantime.
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(instruction,
- temp_loc,
- obj_loc,
- class_offset,
- kEmitCompilerReadBarrier);
__ jmp(type_check_slow_path->GetEntryLabel());
__ Bind(&done);
break;
@@ -6220,31 +6199,10 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
// to further check that this component type is not a primitive
// type.
__ testl(temp, temp);
- __ j(kNotEqual, &check_non_primitive_component_type);
// Otherwise, jump to the slow path to throw the exception.
- //
- // But before, move back the object's class into `temp` before
- // going into the slow path, as it has been overwritten in the
- // meantime.
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(instruction,
- temp_loc,
- obj_loc,
- class_offset,
- kEmitCompilerReadBarrier);
- __ jmp(type_check_slow_path->GetEntryLabel());
-
- __ Bind(&check_non_primitive_component_type);
+ __ j(kZero, type_check_slow_path->GetEntryLabel());
__ cmpw(Address(temp, primitive_offset), Immediate(Primitive::kPrimNot));
- __ j(kEqual, &done);
- // Same comment as above regarding `temp` and the slow path.
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(instruction,
- temp_loc,
- obj_loc,
- class_offset,
- kEmitCompilerReadBarrier);
- __ jmp(type_check_slow_path->GetEntryLabel());
+ __ j(kNotEqual, type_check_slow_path->GetEntryLabel());
__ Bind(&done);
break;
}
@@ -6316,13 +6274,6 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
__ j(kNotZero, &start_loop);
__ Bind(&is_null);
}
-
- // Since we clobbered temp_loc holding the class, we need to reload it.
- GenerateReferenceLoadTwoRegisters(instruction,
- temp_loc,
- obj_loc,
- class_offset,
- kEmitCompilerReadBarrier);
__ jmp(type_check_slow_path->GetEntryLabel());
__ Bind(&done);
break;
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 9ec32df578..ac83bd9b0c 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -259,7 +259,7 @@ static void ValidateGraph(HGraph* graph) {
GraphChecker graph_checker(graph);
graph_checker.Run();
if (!graph_checker.IsValid()) {
- for (auto error : graph_checker.GetErrors()) {
+ for (const auto& error : graph_checker.GetErrors()) {
std::cout << error << std::endl;
}
}
@@ -269,7 +269,7 @@ static void ValidateGraph(HGraph* graph) {
template <typename Expected>
static void RunCodeNoCheck(CodeGenerator* codegen,
HGraph* graph,
- std::function<void(HGraph*)> hook_before_codegen,
+ const std::function<void(HGraph*)>& hook_before_codegen,
bool has_result,
Expected expected) {
SsaLivenessAnalysis liveness(graph, codegen);
diff --git a/compiler/optimizing/constant_folding.h b/compiler/optimizing/constant_folding.h
index e10b1d6b2e..05c6df4a93 100644
--- a/compiler/optimizing/constant_folding.h
+++ b/compiler/optimizing/constant_folding.h
@@ -39,8 +39,7 @@ namespace art {
*/
class HConstantFolding : public HOptimization {
public:
- HConstantFolding(HGraph* graph, const char* name = kConstantFoldingPassName)
- : HOptimization(graph, name) {}
+ HConstantFolding(HGraph* graph, const char* name) : HOptimization(graph, name) {}
void Run() OVERRIDE;
diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc
index d1a2a2649a..5fac3acb8a 100644
--- a/compiler/optimizing/constant_folding_test.cc
+++ b/compiler/optimizing/constant_folding_test.cc
@@ -42,7 +42,7 @@ class ConstantFoldingTest : public CommonCompilerTest {
const std::string& expected_before,
const std::string& expected_after_cf,
const std::string& expected_after_dce,
- std::function<void(HGraph*)> check_after_cf,
+ const std::function<void(HGraph*)>& check_after_cf,
Primitive::Type return_type = Primitive::kPrimInt) {
graph_ = CreateCFG(&allocator_, data, return_type);
TestCodeOnReadyGraph(expected_before,
@@ -54,7 +54,7 @@ class ConstantFoldingTest : public CommonCompilerTest {
void TestCodeOnReadyGraph(const std::string& expected_before,
const std::string& expected_after_cf,
const std::string& expected_after_dce,
- std::function<void(HGraph*)> check_after_cf) {
+ const std::function<void(HGraph*)>& check_after_cf) {
ASSERT_NE(graph_, nullptr);
StringPrettyPrinter printer_before(graph_);
@@ -65,7 +65,7 @@ class ConstantFoldingTest : public CommonCompilerTest {
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegenX86(graph_, *features_x86.get(), CompilerOptions());
- HConstantFolding(graph_).Run();
+ HConstantFolding(graph_, "constant_folding").Run();
GraphChecker graph_checker_cf(graph_);
graph_checker_cf.Run();
ASSERT_TRUE(graph_checker_cf.IsValid());
@@ -77,7 +77,7 @@ class ConstantFoldingTest : public CommonCompilerTest {
check_after_cf(graph_);
- HDeadCodeElimination(graph_).Run();
+ HDeadCodeElimination(graph_, nullptr /* stats */, "dead_code_elimination").Run();
GraphChecker graph_checker_dce(graph_);
graph_checker_dce.Run();
ASSERT_TRUE(graph_checker_dce.IsValid());
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index 9de521ad8d..c31c66a056 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -161,8 +161,21 @@ static HConstant* Evaluate(HCondition* condition, HInstruction* left, HInstructi
// | | |
// B4 B5 B?
//
-// This simplification cannot be applied for loop headers, as they
-// contain a suspend check.
+// Note that individual edges can be redirected (for example B2->B3
+// can be redirected to B2->B5) without applying this optimization
+// to other incoming edges.
+//
+// This simplification cannot be applied to catch blocks, because
+// exception handler edges do not represent normal control flow.
+// Though in theory this could still apply to normal control flow
+// going directly to a catch block, we cannot support it at the
+// moment because the catch Phi's inputs do not correspond to the
+// catch block's predecessors, so we cannot identify which
+// predecessor corresponds to a given statically evaluated input.
+//
+// We do not apply this optimization to loop headers as this could
+// create irreducible loops. We rely on the suspend check in the
+// loop header to prevent the pattern match.
//
// Note that we rely on the dead code elimination to get rid of B3.
bool HDeadCodeElimination::SimplifyIfs() {
@@ -172,7 +185,8 @@ bool HDeadCodeElimination::SimplifyIfs() {
for (HBasicBlock* block : graph_->GetReversePostOrder()) {
HInstruction* last = block->GetLastInstruction();
HInstruction* first = block->GetFirstInstruction();
- if (last->IsIf() &&
+ if (!block->IsCatchBlock() &&
+ last->IsIf() &&
block->HasSinglePhi() &&
block->GetFirstPhi()->HasOnlyOneNonEnvironmentUse()) {
bool has_only_phi_and_if = (last == first) && (last->InputAt(0) == block->GetFirstPhi());
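The condition above is what restricts the rewrite: the block must not be a catch block, must end in an if, and must contain nothing but a single phi feeding that if. In source terms the pattern looks roughly like the following, an illustrative C++ rendering (with made-up function names) of the B1/B2 -> B3 -> B4/B5 diagram in the comment block:

    bool HandleTrue() { return true; }    // plays the role of B4
    bool HandleFalse() { return false; }  // plays the role of B5

    bool Caller(int x, bool flag) {
      // "B3" holds only a Boolean phi (b) and the if that consumes it. The
      // predecessor that contributes the constant true can have its edge
      // redirected straight to the then-branch; the edge carrying `flag`
      // is left alone, matching the per-edge note in the comment block.
      bool b = (x == 0) ? true : flag;
      if (b) {
        return HandleTrue();
      } else {
        return HandleFalse();
      }
    }

    int main() { return Caller(0, false) ? 0 : 1; }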
diff --git a/compiler/optimizing/dead_code_elimination.h b/compiler/optimizing/dead_code_elimination.h
index 58e700deba..84fd890eee 100644
--- a/compiler/optimizing/dead_code_elimination.h
+++ b/compiler/optimizing/dead_code_elimination.h
@@ -29,9 +29,7 @@ namespace art {
*/
class HDeadCodeElimination : public HOptimization {
public:
- HDeadCodeElimination(HGraph* graph,
- OptimizingCompilerStats* stats = nullptr,
- const char* name = kDeadCodeEliminationPassName)
+ HDeadCodeElimination(HGraph* graph, OptimizingCompilerStats* stats, const char* name)
: HOptimization(graph, name, stats) {}
void Run() OVERRIDE;
diff --git a/compiler/optimizing/dead_code_elimination_test.cc b/compiler/optimizing/dead_code_elimination_test.cc
index fe52aacef7..fdd77e7261 100644
--- a/compiler/optimizing/dead_code_elimination_test.cc
+++ b/compiler/optimizing/dead_code_elimination_test.cc
@@ -44,7 +44,7 @@ static void TestCode(const uint16_t* data,
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegenX86(graph, *features_x86.get(), CompilerOptions());
- HDeadCodeElimination(graph).Run();
+ HDeadCodeElimination(graph, nullptr /* stats */, "dead_code_elimination").Run();
GraphChecker graph_checker(graph);
graph_checker.Run();
ASSERT_TRUE(graph_checker.IsValid());
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index cc420b3260..9e816237dd 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -1315,8 +1315,8 @@ size_t HInliner::RunOptimizations(HGraph* callee_graph,
const DexCompilationUnit& dex_compilation_unit) {
// Note: if the outermost_graph_ is being compiled OSR, we should not run any
// optimization that could lead to a HDeoptimize. The following optimizations do not.
- HDeadCodeElimination dce(callee_graph, stats_);
- HConstantFolding fold(callee_graph);
+ HDeadCodeElimination dce(callee_graph, stats_, "dead_code_elimination$inliner");
+ HConstantFolding fold(callee_graph, "constant_folding$inliner");
HSharpening sharpening(callee_graph, codegen_, dex_compilation_unit, compiler_driver_);
InstructionSimplifier simplify(callee_graph, stats_);
IntrinsicsRecognizer intrinsics(callee_graph, stats_);
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 5b2cbf783d..15e605971e 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -33,11 +33,11 @@ class ReferenceInfo : public ArenaObject<kArenaAllocMisc> {
public:
ReferenceInfo(HInstruction* reference, size_t pos) : reference_(reference), position_(pos) {
is_singleton_ = true;
- is_singleton_and_not_returned_ = true;
+ is_singleton_and_non_escaping_ = true;
if (!reference_->IsNewInstance() && !reference_->IsNewArray()) {
// For references not allocated in the method, don't assume anything.
is_singleton_ = false;
- is_singleton_and_not_returned_ = false;
+ is_singleton_and_non_escaping_ = false;
return;
}
@@ -50,7 +50,7 @@ class ReferenceInfo : public ArenaObject<kArenaAllocMisc> {
// BoundType shouldn't normally be necessary for a NewInstance.
// Just be conservative for the uncommon cases.
is_singleton_ = false;
- is_singleton_and_not_returned_ = false;
+ is_singleton_and_non_escaping_ = false;
return;
}
if (user->IsPhi() || user->IsSelect() || user->IsInvoke() ||
@@ -62,21 +62,37 @@ class ReferenceInfo : public ArenaObject<kArenaAllocMisc> {
// reference_ is merged to HPhi/HSelect, passed to a callee, or stored to heap.
// reference_ isn't the only name that can refer to its value anymore.
is_singleton_ = false;
- is_singleton_and_not_returned_ = false;
+ is_singleton_and_non_escaping_ = false;
return;
}
if ((user->IsUnresolvedInstanceFieldGet() && (reference_ == user->InputAt(0))) ||
(user->IsUnresolvedInstanceFieldSet() && (reference_ == user->InputAt(0)))) {
- // The field is accessed in an unresolved way. We mark the object as a singleton to
- // disable load/store optimizations on it.
+ // The field is accessed in an unresolved way. We mark the object as a non-singleton
+ // to disable load/store optimizations on it.
// Note that we could optimize this case and still perform some optimizations until
// we hit the unresolved access, but disabling is the simplest.
is_singleton_ = false;
- is_singleton_and_not_returned_ = false;
+ is_singleton_and_non_escaping_ = false;
return;
}
if (user->IsReturn()) {
- is_singleton_and_not_returned_ = false;
+ is_singleton_and_non_escaping_ = false;
+ }
+ }
+
+ if (!is_singleton_ || !is_singleton_and_non_escaping_) {
+ return;
+ }
+
+ // Look at the environment uses: a use by an HDeoptimize is treated the same as
+ // a return, which escapes at the end of executing the compiled code. We don't
+ // do store elimination for singletons that escape through HDeoptimize.
+ // Other environment uses are fine since LSE is disabled for debuggable code.
+ for (const HUseListNode<HEnvironment*>& use : reference_->GetEnvUses()) {
+ HEnvironment* user = use.GetUser();
+ if (user->GetHolder()->IsDeoptimize()) {
+ is_singleton_and_non_escaping_ = false;
+ break;
}
}
}
@@ -96,17 +112,22 @@ class ReferenceInfo : public ArenaObject<kArenaAllocMisc> {
return is_singleton_;
}
- // Returns true if reference_ is a singleton and not returned to the caller.
+ // Returns true if reference_ is a singleton and not returned to the caller or
+ // used as an environment local of an HDeoptimize instruction.
// The allocation and stores into reference_ may be eliminated for such cases.
- bool IsSingletonAndNotReturned() const {
- return is_singleton_and_not_returned_;
+ bool IsSingletonAndNonEscaping() const {
+ return is_singleton_and_non_escaping_;
}
private:
HInstruction* const reference_;
const size_t position_; // position in HeapLocationCollector's ref_info_array_.
bool is_singleton_; // can only be referred to by a single name in the method.
- bool is_singleton_and_not_returned_; // reference_ is singleton and not returned to caller.
+
+ // reference_ is a singleton and does not escape, either by being returned to
+ // the caller or by being used as an environment local of an HDeoptimize
+ // instruction.
+ bool is_singleton_and_non_escaping_;
DISALLOW_COPY_AND_ASSIGN(ReferenceInfo);
};
@@ -202,8 +223,7 @@ class HeapLocationCollector : public HGraphVisitor {
kArenaAllocLSE),
has_heap_stores_(false),
has_volatile_(false),
- has_monitor_operations_(false),
- may_deoptimize_(false) {}
+ has_monitor_operations_(false) {}
size_t GetNumberOfHeapLocations() const {
return heap_locations_.size();
@@ -236,13 +256,6 @@ class HeapLocationCollector : public HGraphVisitor {
return has_monitor_operations_;
}
- // Returns whether this method may be deoptimized.
- // Currently we don't have meta data support for deoptimizing
- // a method that eliminates allocations/stores.
- bool MayDeoptimize() const {
- return may_deoptimize_;
- }
-
// Find and return the heap location index in heap_locations_.
size_t FindHeapLocationIndex(ReferenceInfo* ref_info,
size_t offset,
@@ -493,10 +506,6 @@ class HeapLocationCollector : public HGraphVisitor {
CreateReferenceInfoForReferenceType(instruction);
}
- void VisitDeoptimize(HDeoptimize* instruction ATTRIBUTE_UNUSED) OVERRIDE {
- may_deoptimize_ = true;
- }
-
void VisitMonitorOperation(HMonitorOperation* monitor ATTRIBUTE_UNUSED) OVERRIDE {
has_monitor_operations_ = true;
}
@@ -508,7 +517,6 @@ class HeapLocationCollector : public HGraphVisitor {
// alias analysis and won't be as effective.
bool has_volatile_; // If there are volatile field accesses.
bool has_monitor_operations_; // If there are monitor operations.
- bool may_deoptimize_; // Only true for HDeoptimize with single-frame deoptimization.
DISALLOW_COPY_AND_ASSIGN(HeapLocationCollector);
};
@@ -671,7 +679,7 @@ class LSEVisitor : public HGraphVisitor {
bool from_all_predecessors = true;
ReferenceInfo* ref_info = heap_location_collector_.GetHeapLocation(i)->GetReferenceInfo();
HInstruction* singleton_ref = nullptr;
- if (ref_info->IsSingletonAndNotReturned()) {
+ if (ref_info->IsSingletonAndNonEscaping()) {
// We do more analysis of liveness when merging heap values for such
// cases since stores into such references may potentially be eliminated.
singleton_ref = ref_info->GetReference();
@@ -844,8 +852,7 @@ class LSEVisitor : public HGraphVisitor {
} else if (index != nullptr) {
// For array element, don't eliminate stores since it can be easily aliased
// with non-constant index.
- } else if (!heap_location_collector_.MayDeoptimize() &&
- ref_info->IsSingletonAndNotReturned()) {
+ } else if (ref_info->IsSingletonAndNonEscaping()) {
// Store into a field of a singleton that's not returned. The value cannot be
// killed due to aliasing/invocation. It can be redundant since future loads can
// directly get the value set by this instruction. The value can still be killed due to
@@ -1019,8 +1026,7 @@ class LSEVisitor : public HGraphVisitor {
// new_instance isn't used for field accesses. No need to process it.
return;
}
- if (!heap_location_collector_.MayDeoptimize() &&
- ref_info->IsSingletonAndNotReturned() &&
+ if (ref_info->IsSingletonAndNonEscaping() &&
!new_instance->IsFinalizable() &&
!new_instance->NeedsAccessCheck()) {
singleton_new_instances_.push_back(new_instance);
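
The escape notion used by load-store elimination changes above: instead of a per-method may_deoptimize_ flag, a singleton counts as escaping if it is returned or captured as an environment local of an HDeoptimize. A self-contained sketch of that rule, with an invented enum standing in for the real HInstruction uses:

    #include <vector>

    // Kinds of uses a freshly allocated reference can have (illustrative only).
    enum class UseKind { kReturn, kDeoptimizeEnvironment, kOtherEnvironment, kFieldAccess };

    // A singleton stays eligible for store elimination only if no use returns it
    // and no HDeoptimize environment captures it; other environment uses are
    // tolerated because LSE is disabled for debuggable code.
    bool IsSingletonAndNonEscaping(const std::vector<UseKind>& uses) {
      for (UseKind use : uses) {
        if (use == UseKind::kReturn || use == UseKind::kDeoptimizeEnvironment) {
          return false;
        }
      }
      return true;
    }
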
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index a4847601f5..6f84cdcc4f 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -758,7 +758,7 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph,
graph, stats, "dead_code_elimination$after_inlining");
HDeadCodeElimination* dce3 = new (arena) HDeadCodeElimination(
graph, stats, "dead_code_elimination$final");
- HConstantFolding* fold1 = new (arena) HConstantFolding(graph);
+ HConstantFolding* fold1 = new (arena) HConstantFolding(graph, "constant_folding");
InstructionSimplifier* simplify1 = new (arena) InstructionSimplifier(graph, stats);
HSelectGenerator* select_generator = new (arena) HSelectGenerator(graph, stats);
HConstantFolding* fold2 = new (arena) HConstantFolding(
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 58dd047081..fa32178796 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -438,9 +438,7 @@ class Dex2oatVeryLargeTest : public Dex2oatTest {
Copy(GetDexSrc1(), dex_location);
- std::vector<std::string> copy(extra_args);
-
- GenerateOdexForTest(dex_location, odex_location, filter, copy);
+ GenerateOdexForTest(dex_location, odex_location, filter, extra_args);
CheckValidity();
ASSERT_TRUE(success_);
diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc
index 2b30a1be08..aa806557c2 100644
--- a/dexlayout/dexlayout.cc
+++ b/dexlayout/dexlayout.cc
@@ -1527,7 +1527,7 @@ static void ProcessDexFile(const char* file_name, const DexFile* dex_file, size_
// Output dex file.
if (options_.output_dex_directory_ != nullptr) {
std::string output_location(options_.output_dex_directory_);
- size_t last_slash = dex_file->GetLocation().rfind("/");
+ size_t last_slash = dex_file->GetLocation().rfind('/');
output_location.append(dex_file->GetLocation().substr(last_slash));
DexWriter::OutputDexFile(*header, output_location.c_str());
}
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
index 89544d7ef4..c7f36be905 100644
--- a/dexlayout/dexlayout_test.cc
+++ b/dexlayout/dexlayout_test.cc
@@ -37,12 +37,12 @@ class DexLayoutTest : public CommonRuntimeTest {
bool FullPlainOutputExec(std::string* error_msg) {
// TODO: dexdump2 -> dexdump ?
ScratchFile dexdump_output;
- std::string dexdump_filename = dexdump_output.GetFilename();
+ const std::string& dexdump_filename = dexdump_output.GetFilename();
std::string dexdump = GetTestAndroidRoot() + "/bin/dexdump2";
EXPECT_TRUE(OS::FileExists(dexdump.c_str())) << dexdump << " should be a valid file path";
ScratchFile dexlayout_output;
- std::string dexlayout_filename = dexlayout_output.GetFilename();
+ const std::string& dexlayout_filename = dexlayout_output.GetFilename();
std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
@@ -70,8 +70,8 @@ class DexLayoutTest : public CommonRuntimeTest {
// Runs DexFileOutput test.
bool DexFileOutputExec(std::string* error_msg) {
ScratchFile tmp_file;
- std::string tmp_name = tmp_file.GetFilename();
- size_t tmp_last_slash = tmp_name.rfind("/");
+ const std::string& tmp_name = tmp_file.GetFilename();
+ size_t tmp_last_slash = tmp_name.rfind('/');
std::string tmp_dir = tmp_name.substr(0, tmp_last_slash + 1);
std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
@@ -84,7 +84,7 @@ class DexLayoutTest : public CommonRuntimeTest {
return false;
}
- size_t dex_file_last_slash = dex_file.rfind("/");
+ size_t dex_file_last_slash = dex_file.rfind('/');
std::string dex_file_name = dex_file.substr(dex_file_last_slash + 1);
std::vector<std::string> unzip_exec_argv =
{ "/usr/bin/unzip", dex_file, "classes.dex", "-d", tmp_dir};
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index d1d127d980..a374686dc5 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -89,7 +89,7 @@ class ImgDiagDumper {
// Return suffix of the file path after the last /. (e.g. /foo/bar -> bar, bar -> bar)
static std::string BaseName(const std::string& str) {
- size_t idx = str.rfind("/");
+ size_t idx = str.rfind('/');
if (idx == std::string::npos) {
return str;
}
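
This hunk, the dexlayout hunks above, and the patchoat hunk below all switch from a one-character string literal to a plain char. The result is identical; the char overload simply looks for a single character instead of running the general substring search. A tiny sketch:

    #include <string>

    // Both forms return the index of the last '/', or std::string::npos if absent.
    size_t LastSlash(const std::string& path) {
      return path.rfind('/');  // same result as path.rfind("/"), marginally cheaper
    }
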
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 7ea5beab37..3c8c1a397c 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -1068,7 +1068,7 @@ static int patchoat_image(TimingLogger& timings,
TimingLogger::ScopedTiming pt("patch image and oat", &timings);
std::string output_directory =
- output_image_filename.substr(0, output_image_filename.find_last_of("/"));
+ output_image_filename.substr(0, output_image_filename.find_last_of('/'));
bool ret = PatchOat::Patch(input_image_location, base_delta, output_directory, isa, &timings);
if (kIsDebugBuild) {
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index cb8edffb94..01b3f349d4 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -30,8 +30,7 @@
namespace art {
// Cast entrypoints.
-extern "C" size_t artIsAssignableFromCode(const mirror::Class* klass,
- const mirror::Class* ref_class);
+extern "C" size_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class);
// Read barrier entrypoints.
// art_quick_read_barrier_mark_regX uses a non-standard calling
@@ -73,7 +72,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// Cast
qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
- qpoints->pCheckCast = art_quick_check_cast;
+ qpoints->pCheckInstanceOf = art_quick_check_instance_of;
// Math
qpoints->pIdivmod = __aeabi_idivmod;
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 0135260de8..550f8c7727 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -764,11 +764,12 @@ ENTRY art_quick_unlock_object_no_inline
END art_quick_unlock_object_no_inline
/*
- * Entry from managed code that calls artIsAssignableFromCode and on failure calls
- * artThrowClassCastException.
+ * Entry from managed code that calls artInstanceOfFromCode and on failure calls
+ * artThrowClassCastExceptionForObject.
*/
- .extern artThrowClassCastException
-ENTRY art_quick_check_cast
+ .extern artInstanceOfFromCode
+ .extern artThrowClassCastExceptionForObject
+ENTRY art_quick_check_instance_of
push {r0-r1, lr} @ save arguments, link register and pad
.cfi_adjust_cfa_offset 12
.cfi_rel_offset r0, 0
@@ -776,7 +777,7 @@ ENTRY art_quick_check_cast
.cfi_rel_offset lr, 8
sub sp, #4
.cfi_adjust_cfa_offset 4
- bl artIsAssignableFromCode
+ bl artInstanceOfFromCode
cbz r0, .Lthrow_class_cast_exception
add sp, #4
.cfi_adjust_cfa_offset -4
@@ -792,9 +793,9 @@ ENTRY art_quick_check_cast
.cfi_restore lr
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r2 @ save all registers as basis for long jump context
mov r2, r9 @ pass Thread::Current
- bl artThrowClassCastException @ (Class*, Class*, Thread*)
+ bl artThrowClassCastExceptionForObject @ (Object*, Class*, Thread*)
bkpt
-END art_quick_check_cast
+END art_quick_check_instance_of
// Restore rReg's value from [sp, #offset] if rReg is not the same as rExclude.
.macro POP_REG_NE rReg, offset, rExclude
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index c2078f02c1..3c77672aac 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -30,8 +30,7 @@
namespace art {
// Cast entrypoints.
-extern "C" size_t artIsAssignableFromCode(const mirror::Class* klass,
- const mirror::Class* ref_class);
+extern "C" size_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class);
// Read barrier entrypoints.
// art_quick_read_barrier_mark_regX uses a non-standard calling
@@ -76,7 +75,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// Cast
qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
- qpoints->pCheckCast = art_quick_check_cast;
+ qpoints->pCheckInstanceOf = art_quick_check_instance_of;
// Math
// TODO null entrypoints not needed for ARM64 - generate inline.
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index d806715ec9..d8ebe262bc 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1294,18 +1294,19 @@ ENTRY art_quick_unlock_object_no_inline
END art_quick_unlock_object_no_inline
/*
- * Entry from managed code that calls artIsAssignableFromCode and on failure calls
- * artThrowClassCastException.
+ * Entry from managed code that calls artInstanceOfFromCode and on failure calls
+ * artThrowClassCastExceptionForObject.
*/
- .extern artThrowClassCastException
-ENTRY art_quick_check_cast
+ .extern artInstanceOfFromCode
+ .extern artThrowClassCastExceptionForObject
+ENTRY art_quick_check_instance_of
// Store arguments and link register
// Stack needs to be 16B aligned on calls.
SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 32
SAVE_REG xLR, 24
// Call runtime code
- bl artIsAssignableFromCode
+ bl artInstanceOfFromCode
// Check for exception
cbz x0, .Lthrow_class_cast_exception
@@ -1324,9 +1325,9 @@ ENTRY art_quick_check_cast
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context
mov x2, xSELF // pass Thread::Current
- bl artThrowClassCastException // (Class*, Class*, Thread*)
+ bl artThrowClassCastExceptionForObject // (Object*, Class*, Thread*)
brk 0 // We should not return here...
-END art_quick_check_cast
+END art_quick_check_instance_of
// Restore xReg's value from [sp, #offset] if xReg is not the same as xExclude.
.macro POP_REG_NE xReg, offset, xExclude
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index e10d4e6a74..e3230f65dd 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -30,8 +30,7 @@
namespace art {
// Cast entrypoints.
-extern "C" size_t artIsAssignableFromCode(const mirror::Class* klass,
- const mirror::Class* ref_class);
+extern "C" size_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class);
// Math entrypoints.
extern int32_t CmpgDouble(double a, double b);
@@ -73,8 +72,8 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// Cast
qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
static_assert(IsDirectEntrypoint(kQuickInstanceofNonTrivial), "Direct C stub not marked direct.");
- qpoints->pCheckCast = art_quick_check_cast;
- static_assert(!IsDirectEntrypoint(kQuickCheckCast), "Non-direct C stub marked direct.");
+ qpoints->pCheckInstanceOf = art_quick_check_instance_of;
+ static_assert(!IsDirectEntrypoint(kQuickCheckInstanceOf), "Non-direct C stub marked direct.");
// DexCache
qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage;
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index c3c188233b..34e34b40ff 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -1171,10 +1171,11 @@ ENTRY art_quick_unlock_object_no_inline
END art_quick_unlock_object_no_inline
/*
- * Entry from managed code that calls artCheckCastFromCode and delivers exception on failure.
+ * Entry from managed code that calls artInstanceOfFromCode and delivers exception on failure.
*/
- .extern artThrowClassCastException
-ENTRY art_quick_check_cast
+ .extern artInstanceOfFromCode
+ .extern artThrowClassCastExceptionForObject
+ENTRY art_quick_check_instance_of
addiu $sp, $sp, -32
.cfi_adjust_cfa_offset 32
sw $gp, 16($sp)
@@ -1183,7 +1184,7 @@ ENTRY art_quick_check_cast
sw $t9, 8($sp)
sw $a1, 4($sp)
sw $a0, 0($sp)
- la $t9, artIsAssignableFromCode
+ la $t9, artInstanceOfFromCode
jalr $t9
addiu $sp, $sp, -16 # reserve argument slots on the stack
addiu $sp, $sp, 16
@@ -1200,10 +1201,10 @@ ENTRY art_quick_check_cast
addiu $sp, $sp, 32
.cfi_adjust_cfa_offset -32
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
- la $t9, artThrowClassCastException
- jalr $zero, $t9 # artThrowClassCastException (Class*, Class*, Thread*)
+ la $t9, artThrowClassCastExceptionForObject
+ jalr $zero, $t9 # artThrowClassCastExceptionForObject (Object*, Class*, Thread*)
move $a2, rSELF # pass Thread::Current
-END art_quick_check_cast
+END art_quick_check_instance_of
/*
* Restore rReg's value from offset($sp) if rReg is not the same as rExclude.
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
index a0379053bc..43b73f127a 100644
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ b/runtime/arch/mips64/entrypoints_init_mips64.cc
@@ -30,8 +30,8 @@
namespace art {
// Cast entrypoints.
-extern "C" size_t artIsAssignableFromCode(const mirror::Class* klass,
- const mirror::Class* ref_class);
+extern "C" size_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class);
+
// Math entrypoints.
extern int32_t CmpgDouble(double a, double b);
extern int32_t CmplDouble(double a, double b);
@@ -64,7 +64,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// Cast
qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
- qpoints->pCheckCast = art_quick_check_cast;
+ qpoints->pCheckInstanceOf = art_quick_check_instance_of;
// Math
qpoints->pCmpgDouble = CmpgDouble;
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index cb2d1c816b..0861d2d73e 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -1256,10 +1256,11 @@ ENTRY_NO_GP art_quick_unlock_object_no_inline
END art_quick_unlock_object_no_inline
/*
- * Entry from managed code that calls artCheckCastFromCode and delivers exception on failure.
+ * Entry from managed code that calls artInstanceOfFromCode and delivers exception on failure.
*/
- .extern artThrowClassCastException
-ENTRY art_quick_check_cast
+ .extern artInstanceOfFromCode
+ .extern artThrowClassCastExceptionForObject
+ENTRY art_quick_check_instance_of
daddiu $sp, $sp, -32
.cfi_adjust_cfa_offset 32
sd $ra, 24($sp)
@@ -1267,7 +1268,7 @@ ENTRY art_quick_check_cast
sd $t9, 16($sp)
sd $a1, 8($sp)
sd $a0, 0($sp)
- jal artIsAssignableFromCode
+ jal artInstanceOfFromCode
.cpreturn # Restore gp from t8 in branch delay slot.
# t8 may be clobbered in artIsAssignableFromCode.
beq $v0, $zero, .Lthrow_class_cast_exception
@@ -1283,10 +1284,10 @@ ENTRY art_quick_check_cast
.cfi_adjust_cfa_offset -32
SETUP_GP
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
- dla $t9, artThrowClassCastException
- jalr $zero, $t9 # artThrowClassCastException (Class*, Class*, Thread*)
+ dla $t9, artThrowClassCastExceptionForObject
+ jalr $zero, $t9 # artThrowClassCastExceptionForObject (Object*, Class*, Thread*)
move $a2, rSELF # pass Thread::Current
-END art_quick_check_cast
+END art_quick_check_instance_of
/*
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index c151f00289..bbf9a8b93c 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -23,6 +23,7 @@
#include "common_runtime_test.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "imt_conflict_table.h"
+#include "jni_internal.h"
#include "linear_alloc.h"
#include "mirror/class-inl.h"
#include "mirror/string-inl.h"
@@ -805,7 +806,7 @@ TEST_F(StubTest, UnlockObject) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
-extern "C" void art_quick_check_cast(void);
+extern "C" void art_quick_check_instance_of(void);
#endif
TEST_F(StubTest, CheckCast) {
@@ -813,65 +814,89 @@ TEST_F(StubTest, CheckCast) {
(defined(__x86_64__) && !defined(__APPLE__))
Thread* self = Thread::Current();
- const uintptr_t art_quick_check_cast = StubTest::GetEntrypoint(self, kQuickCheckCast);
+ const uintptr_t art_quick_check_instance_of =
+ StubTest::GetEntrypoint(self, kQuickCheckInstanceOf);
// Find some classes.
ScopedObjectAccess soa(self);
// garbage is created during ClassLinker::Init
- StackHandleScope<4> hs(soa.Self());
- Handle<mirror::Class> c(
- hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));
- Handle<mirror::Class> c2(
- hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/String;")));
- Handle<mirror::Class> list(
- hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/util/List;")));
- Handle<mirror::Class> array_list(
- hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/util/ArrayList;")));
+ VariableSizedHandleScope hs(soa.Self());
+ Handle<mirror::Class> klass_obj(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
+ Handle<mirror::Class> klass_str(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/String;")));
+ Handle<mirror::Class> klass_list(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/util/List;")));
+ Handle<mirror::Class> klass_cloneable(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Cloneable;")));
+ Handle<mirror::Class> klass_array_list(
+ hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/util/ArrayList;")));
+ Handle<mirror::Object> obj(hs.NewHandle(klass_obj->AllocObject(soa.Self())));
+ Handle<mirror::String> string(hs.NewHandle(
+ mirror::String::AllocFromModifiedUtf8(soa.Self(), "ABCD")));
+ Handle<mirror::Object> array_list(hs.NewHandle(klass_array_list->AllocObject(soa.Self())));
EXPECT_FALSE(self->IsExceptionPending());
- Invoke3(reinterpret_cast<size_t>(c.Get()),
- reinterpret_cast<size_t>(c.Get()),
+ Invoke3(reinterpret_cast<size_t>(obj.Get()),
+ reinterpret_cast<size_t>(klass_obj.Get()),
0U,
- art_quick_check_cast,
+ art_quick_check_instance_of,
self);
EXPECT_FALSE(self->IsExceptionPending());
- Invoke3(reinterpret_cast<size_t>(c2.Get()),
- reinterpret_cast<size_t>(c2.Get()),
+ // Expected true: Test string instance of java.lang.String.
+ Invoke3(reinterpret_cast<size_t>(string.Get()),
+ reinterpret_cast<size_t>(klass_str.Get()),
0U,
- art_quick_check_cast,
+ art_quick_check_instance_of,
self);
EXPECT_FALSE(self->IsExceptionPending());
- Invoke3(reinterpret_cast<size_t>(c.Get()),
- reinterpret_cast<size_t>(c2.Get()),
+ // Expected true: Test string instance of java.lang.Object.
+ Invoke3(reinterpret_cast<size_t>(string.Get()),
+ reinterpret_cast<size_t>(klass_obj.Get()),
+ 0U,
+ art_quick_check_instance_of,
+ self);
+ EXPECT_FALSE(self->IsExceptionPending());
+
+ // Expected false: Test object instance of java.lang.String.
+ Invoke3(reinterpret_cast<size_t>(obj.Get()),
+ reinterpret_cast<size_t>(klass_str.Get()),
+ 0U,
+ art_quick_check_instance_of,
+ self);
+ EXPECT_TRUE(self->IsExceptionPending());
+ self->ClearException();
+
+ Invoke3(reinterpret_cast<size_t>(array_list.Get()),
+ reinterpret_cast<size_t>(klass_list.Get()),
0U,
- art_quick_check_cast,
+ art_quick_check_instance_of,
self);
EXPECT_FALSE(self->IsExceptionPending());
- Invoke3(reinterpret_cast<size_t>(list.Get()),
- reinterpret_cast<size_t>(array_list.Get()),
+ Invoke3(reinterpret_cast<size_t>(array_list.Get()),
+ reinterpret_cast<size_t>(klass_cloneable.Get()),
0U,
- art_quick_check_cast,
+ art_quick_check_instance_of,
self);
EXPECT_FALSE(self->IsExceptionPending());
- Invoke3(reinterpret_cast<size_t>(list.Get()),
- reinterpret_cast<size_t>(c2.Get()),
+ Invoke3(reinterpret_cast<size_t>(string.Get()),
+ reinterpret_cast<size_t>(klass_array_list.Get()),
0U,
- art_quick_check_cast,
+ art_quick_check_instance_of,
self);
EXPECT_TRUE(self->IsExceptionPending());
self->ClearException();
- // TODO: Make the following work. But that would require correct managed frames.
- Invoke3(reinterpret_cast<size_t>(c2.Get()),
- reinterpret_cast<size_t>(c.Get()),
+ Invoke3(reinterpret_cast<size_t>(string.Get()),
+ reinterpret_cast<size_t>(klass_cloneable.Get()),
0U,
- art_quick_check_cast,
+ art_quick_check_instance_of,
self);
EXPECT_TRUE(self->IsExceptionPending());
self->ClearException();
@@ -1990,7 +2015,7 @@ TEST_F(StubTest, DISABLED_IMT) {
ASSERT_NE(nullptr, add_jmethod);
// Get representation.
- ArtMethod* contains_amethod = soa.DecodeMethod(contains_jmethod);
+ ArtMethod* contains_amethod = jni::DecodeArtMethod(contains_jmethod);
// Patch up ArrayList.contains.
if (contains_amethod->GetEntryPointFromQuickCompiledCode() == nullptr) {
@@ -2008,7 +2033,7 @@ TEST_F(StubTest, DISABLED_IMT) {
ASSERT_NE(nullptr, inf_contains_jmethod);
// Get mirror representation.
- ArtMethod* inf_contains = soa.DecodeMethod(inf_contains_jmethod);
+ ArtMethod* inf_contains = jni::DecodeArtMethod(inf_contains_jmethod);
// Object
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index 0a10a3cceb..877df8f7b0 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -27,8 +27,7 @@
namespace art {
// Cast entrypoints.
-extern "C" size_t art_quick_is_assignable(const mirror::Class* klass,
- const mirror::Class* ref_class);
+extern "C" size_t art_quick_is_assignable(mirror::Class* klass, mirror::Class* ref_class);
// Read barrier entrypoints.
// art_quick_read_barrier_mark_regX uses an non-standard calling
@@ -50,7 +49,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// Cast
qpoints->pInstanceofNonTrivial = art_quick_is_assignable;
- qpoints->pCheckCast = art_quick_check_cast;
+ qpoints->pCheckInstanceOf = art_quick_check_instance_of;
// More math.
qpoints->pCos = cos;
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 98739d3cc8..635bfa3c2c 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1361,11 +1361,11 @@ DEFINE_FUNCTION art_quick_is_assignable
ret
END_FUNCTION art_quick_is_assignable
-DEFINE_FUNCTION art_quick_check_cast
+DEFINE_FUNCTION art_quick_check_instance_of
PUSH eax // alignment padding
- PUSH ecx // pass arg2 - obj->klass
- PUSH eax // pass arg1 - checked class
- call SYMBOL(artIsAssignableFromCode) // (Class* klass, Class* ref_klass)
+ PUSH ecx // pass arg2 - checked class
+ PUSH eax // pass arg1 - obj
+ call SYMBOL(artInstanceOfFromCode) // (Object* obj, Class* ref_klass)
testl %eax, %eax
jz 1f // jump forward if not assignable
addl LITERAL(12), %esp // pop arguments
@@ -1385,9 +1385,9 @@ DEFINE_FUNCTION art_quick_check_cast
CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass arg2
PUSH eax // pass arg1
- call SYMBOL(artThrowClassCastException) // (Class* a, Class* b, Thread*)
+ call SYMBOL(artThrowClassCastExceptionForObject) // (Object* src, Class* dest, Thread*)
UNREACHABLE
-END_FUNCTION art_quick_check_cast
+END_FUNCTION art_quick_check_instance_of
// Restore reg's value if reg is not the same as exclude_reg, otherwise just adjust stack.
MACRO2(POP_REG_NE, reg, exclude_reg)
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index 8c425d53d3..59c9dfeb6f 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -30,8 +30,7 @@
namespace art {
// Cast entrypoints.
-extern "C" size_t art_quick_assignable_from_code(const mirror::Class* klass,
- const mirror::Class* ref_class);
+extern "C" size_t art_quick_assignable_from_code(mirror::Class* klass, mirror::Class* ref_class);
// Read barrier entrypoints.
// art_quick_read_barrier_mark_regX uses an non-standard calling
@@ -65,7 +64,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// Cast
qpoints->pInstanceofNonTrivial = art_quick_assignable_from_code;
- qpoints->pCheckCast = art_quick_check_cast;
+ qpoints->pCheckInstanceOf = art_quick_check_instance_of;
// More math.
qpoints->pCos = cos;
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 185e55e114..72a03ebcbe 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1480,14 +1480,14 @@ DEFINE_FUNCTION art_quick_unlock_object_no_inline
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_unlock_object_no_inline
-DEFINE_FUNCTION art_quick_check_cast
+DEFINE_FUNCTION art_quick_check_instance_of
// We could check the super classes here but that is usually already checked in the caller.
PUSH rdi // Save args for exc
PUSH rsi
subq LITERAL(8), %rsp // Alignment padding.
CFI_ADJUST_CFA_OFFSET(8)
SETUP_FP_CALLEE_SAVE_FRAME
- call SYMBOL(artIsAssignableFromCode) // (Class* klass, Class* ref_klass)
+ call SYMBOL(artInstanceOfFromCode) // (Object* obj, Class* ref_klass)
testq %rax, %rax
jz 1f // jump forward if not assignable
RESTORE_FP_CALLEE_SAVE_FRAME
@@ -1506,9 +1506,9 @@ DEFINE_FUNCTION art_quick_check_cast
POP rdi
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context
mov %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call SYMBOL(artThrowClassCastException) // (Class* a, Class* b, Thread*)
+ call SYMBOL(artThrowClassCastExceptionForObject) // (Object* src, Class* dest, Thread*)
UNREACHABLE
-END_FUNCTION art_quick_check_cast
+END_FUNCTION art_quick_check_instance_of
// Restore reg's value if reg is not the same as exclude_reg, otherwise just adjust stack.
diff --git a/runtime/base/variant_map_test.cc b/runtime/base/variant_map_test.cc
index ccb22eb64d..93336e0ac3 100644
--- a/runtime/base/variant_map_test.cc
+++ b/runtime/base/variant_map_test.cc
@@ -107,8 +107,8 @@ TEST(VariantMaps, RuleOfFive) {
fmFilled.Set(FruitMap::Orange, 555.0);
EXPECT_EQ(size_t(2), fmFilled.Size());
- // Test copy constructor
- FruitMap fmEmptyCopy(fmEmpty);
+ // Test copy constructor (NOLINT: the linter suggests using a reference instead)
+ FruitMap fmEmptyCopy(fmEmpty); // NOLINT
EXPECT_EQ(size_t(0), fmEmptyCopy.Size());
// Test copy constructor
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index 5399dc5206..6c27bc61e4 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -282,7 +282,7 @@ class ScopedCheck {
return false;
}
- ArtField* f = CheckFieldID(soa, fid);
+ ArtField* f = CheckFieldID(fid);
if (f == nullptr) {
return false;
}
@@ -313,7 +313,7 @@ class ScopedCheck {
bool CheckMethodAndSig(ScopedObjectAccess& soa, jobject jobj, jclass jc,
jmethodID mid, Primitive::Type type, InvokeType invoke)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* m = CheckMethodID(soa, mid);
+ ArtMethod* m = CheckMethodID(mid);
if (m == nullptr) {
return false;
}
@@ -362,7 +362,7 @@ class ScopedCheck {
bool CheckStaticFieldID(ScopedObjectAccess& soa, jclass java_class, jfieldID fid)
REQUIRES_SHARED(Locks::mutator_lock_) {
ObjPtr<mirror::Class> c = soa.Decode<mirror::Class>(java_class);
- ArtField* f = CheckFieldID(soa, fid);
+ ArtField* f = CheckFieldID(fid);
if (f == nullptr) {
return false;
}
@@ -385,7 +385,7 @@ class ScopedCheck {
*/
bool CheckStaticMethod(ScopedObjectAccess& soa, jclass java_class, jmethodID mid)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* m = CheckMethodID(soa, mid);
+ ArtMethod* m = CheckMethodID(mid);
if (m == nullptr) {
return false;
}
@@ -407,7 +407,7 @@ class ScopedCheck {
*/
bool CheckVirtualMethod(ScopedObjectAccess& soa, jobject java_object, jmethodID mid)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* m = CheckMethodID(soa, mid);
+ ArtMethod* m = CheckMethodID(mid);
if (m == nullptr) {
return false;
}
@@ -577,9 +577,8 @@ class ScopedCheck {
return true;
}
- bool CheckConstructor(ScopedObjectAccess& soa, jmethodID mid)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* method = soa.DecodeMethod(mid);
+ bool CheckConstructor(jmethodID mid) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* method = jni::DecodeArtMethod(mid);
if (method == nullptr) {
AbortF("expected non-null constructor");
return false;
@@ -682,7 +681,7 @@ class ScopedCheck {
if (!is_static && !CheckInstanceFieldID(soa, obj, fid)) {
return false;
}
- ArtField* field = soa.DecodeField(fid);
+ ArtField* field = jni::DecodeArtField(fid);
DCHECK(field != nullptr); // Already checked by Check.
if (is_static != field->IsStatic()) {
AbortF("attempt to access %s field %s: %p",
@@ -844,9 +843,9 @@ class ScopedCheck {
case 'c': // jclass
return CheckInstance(soa, kClass, arg.c, false);
case 'f': // jfieldID
- return CheckFieldID(soa, arg.f) != nullptr;
+ return CheckFieldID(arg.f) != nullptr;
case 'm': // jmethodID
- return CheckMethodID(soa, arg.m) != nullptr;
+ return CheckMethodID(arg.m) != nullptr;
case 'r': // release int
return CheckReleaseMode(arg.r);
case 's': // jstring
@@ -868,7 +867,7 @@ class ScopedCheck {
REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(args_p != nullptr);
VarArgs args(args_p->Clone());
- ArtMethod* m = CheckMethodID(soa, args.GetMethodID());
+ ArtMethod* m = CheckMethodID(args.GetMethodID());
if (m == nullptr) {
return false;
}
@@ -962,7 +961,7 @@ class ScopedCheck {
}
case 'f': { // jfieldID
jfieldID fid = arg.f;
- ArtField* f = soa.DecodeField(fid);
+ ArtField* f = jni::DecodeArtField(fid);
*msg += ArtField::PrettyField(f);
if (!entry) {
StringAppendF(msg, " (%p)", fid);
@@ -971,7 +970,7 @@ class ScopedCheck {
}
case 'm': { // jmethodID
jmethodID mid = arg.m;
- ArtMethod* m = soa.DecodeMethod(mid);
+ ArtMethod* m = jni::DecodeArtMethod(mid);
*msg += ArtMethod::PrettyMethod(m);
if (!entry) {
StringAppendF(msg, " (%p)", mid);
@@ -981,7 +980,7 @@ class ScopedCheck {
case '.': {
const VarArgs* va = arg.va;
VarArgs args(va->Clone());
- ArtMethod* m = soa.DecodeMethod(args.GetMethodID());
+ ArtMethod* m = jni::DecodeArtMethod(args.GetMethodID());
uint32_t len;
const char* shorty = m->GetShorty(&len);
CHECK_GE(len, 1u);
@@ -1147,13 +1146,12 @@ class ScopedCheck {
return true;
}
- ArtField* CheckFieldID(ScopedObjectAccess& soa, jfieldID fid)
- REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtField* CheckFieldID(jfieldID fid) REQUIRES_SHARED(Locks::mutator_lock_) {
if (fid == nullptr) {
AbortF("jfieldID was NULL");
return nullptr;
}
- ArtField* f = soa.DecodeField(fid);
+ ArtField* f = jni::DecodeArtField(fid);
// TODO: Better check here.
if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(f->GetDeclaringClass().Ptr())) {
Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(ERROR));
@@ -1163,13 +1161,12 @@ class ScopedCheck {
return f;
}
- ArtMethod* CheckMethodID(ScopedObjectAccess& soa, jmethodID mid)
- REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* CheckMethodID(jmethodID mid) REQUIRES_SHARED(Locks::mutator_lock_) {
if (mid == nullptr) {
AbortF("jmethodID was NULL");
return nullptr;
}
- ArtMethod* m = soa.DecodeMethod(mid);
+ ArtMethod* m = jni::DecodeArtMethod(mid);
// TODO: Better check here.
if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(m->GetDeclaringClass())) {
Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(ERROR));
@@ -2005,7 +2002,7 @@ class CheckJNI {
VarArgs rest(mid, vargs);
JniValueType args[4] = {{.E = env}, {.c = c}, {.m = mid}, {.va = &rest}};
if (sc.Check(soa, true, "Ecm.", args) && sc.CheckInstantiableNonArray(soa, c) &&
- sc.CheckConstructor(soa, mid)) {
+ sc.CheckConstructor(mid)) {
JniValueType result;
result.L = baseEnv(env)->NewObjectV(env, c, mid, vargs);
if (sc.Check(soa, false, "L", &result)) {
@@ -2029,7 +2026,7 @@ class CheckJNI {
VarArgs rest(mid, vargs);
JniValueType args[4] = {{.E = env}, {.c = c}, {.m = mid}, {.va = &rest}};
if (sc.Check(soa, true, "Ecm.", args) && sc.CheckInstantiableNonArray(soa, c) &&
- sc.CheckConstructor(soa, mid)) {
+ sc.CheckConstructor(mid)) {
JniValueType result;
result.L = baseEnv(env)->NewObjectA(env, c, mid, vargs);
if (sc.Check(soa, false, "L", &result)) {
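
Decoding JNI ids no longer goes through ScopedObjectAccess members: the free helpers declared in jni_internal.h (jni::DecodeArtField, jni::DecodeArtMethod, and the Encode counterparts used elsewhere in this patch) take only the id, so call sites such as the ones above and in the class_linker and debugger hunks below no longer need a soa parameter just for the decode. A minimal sketch of the call-site shape, with lock annotations omitted and the include paths assumed:

    #include "art_field.h"      // assumed header for ArtField
    #include "art_method.h"     // assumed header for ArtMethod
    #include "jni_internal.h"   // declares the jni::DecodeArtField / jni::DecodeArtMethod helpers

    namespace art {
    // Previously these decodes required a ScopedObjectAccess purely to call
    // soa.DecodeField(fid) / soa.DecodeMethod(mid).
    void ExampleCallSite(jfieldID fid, jmethodID mid) {
      ArtField* f = jni::DecodeArtField(fid);
      ArtMethod* m = jni::DecodeArtMethod(mid);
      (void)f;
      (void)m;
    }
    }  // namespace art
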
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index e7e5be7b75..6d45dad28f 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -63,6 +63,7 @@
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jit/offline_profiling_info.h"
+#include "jni_internal.h"
#include "leb128.h"
#include "linear_alloc.h"
#include "mirror/class.h"
@@ -1124,13 +1125,12 @@ bool ClassLinker::IsBootClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
class_loader->GetClass();
}
-static mirror::String* GetDexPathListElementName(ScopedObjectAccessUnchecked& soa,
- ObjPtr<mirror::Object> element)
+static mirror::String* GetDexPathListElementName(ObjPtr<mirror::Object> element)
REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* const dex_file_field =
- soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
+ jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
ArtField* const dex_file_name_field =
- soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_fileName);
+ jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_fileName);
DCHECK(dex_file_field != nullptr);
DCHECK(dex_file_name_field != nullptr);
DCHECK(element != nullptr);
@@ -1154,9 +1154,9 @@ static bool FlattenPathClassLoader(ObjPtr<mirror::ClassLoader> class_loader,
DCHECK(error_msg != nullptr);
ScopedObjectAccessUnchecked soa(Thread::Current());
ArtField* const dex_path_list_field =
- soa.DecodeField(WellKnownClasses::dalvik_system_BaseDexClassLoader_pathList);
+ jni::DecodeArtField(WellKnownClasses::dalvik_system_BaseDexClassLoader_pathList);
ArtField* const dex_elements_field =
- soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements);
+ jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList_dexElements);
CHECK(dex_path_list_field != nullptr);
CHECK(dex_elements_field != nullptr);
while (!ClassLinker::IsBootClassLoader(soa, class_loader)) {
@@ -1183,7 +1183,7 @@ static bool FlattenPathClassLoader(ObjPtr<mirror::ClassLoader> class_loader,
*error_msg = StringPrintf("Null dex element at index %d", i);
return false;
}
- ObjPtr<mirror::String> const name = GetDexPathListElementName(soa, element);
+ ObjPtr<mirror::String> const name = GetDexPathListElementName(element);
if (name == nullptr) {
*error_msg = StringPrintf("Null name for dex element at index %d", i);
return false;
@@ -1733,7 +1733,7 @@ bool ClassLinker::AddImageSpace(
ObjPtr<mirror::Object> element = elements->GetWithoutChecks(i);
if (element != nullptr) {
// If we are somewhere in the middle of the array, there may be nulls at the end.
- loader_dex_file_names.push_back(GetDexPathListElementName(soa, element));
+ loader_dex_file_names.push_back(GetDexPathListElementName(element));
}
}
// Ignore the number of image dex files since we are adding those to the class loader anyways.
@@ -2425,16 +2425,17 @@ bool ClassLinker::FindClassInBaseDexClassLoader(ScopedObjectAccessAlreadyRunnabl
// Handle as if this is the child PathClassLoader.
// The class loader is a PathClassLoader which inherits from BaseDexClassLoader.
// We need to get the DexPathList and loop through it.
- ArtField* const cookie_field = soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie);
+ ArtField* const cookie_field =
+ jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_cookie);
ArtField* const dex_file_field =
- soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
+ jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
ObjPtr<mirror::Object> dex_path_list =
- soa.DecodeField(WellKnownClasses::dalvik_system_BaseDexClassLoader_pathList)->
- GetObject(class_loader.Get());
+ jni::DecodeArtField(WellKnownClasses::dalvik_system_BaseDexClassLoader_pathList)->
+ GetObject(class_loader.Get());
if (dex_path_list != nullptr && dex_file_field != nullptr && cookie_field != nullptr) {
// DexPathList has an array dexElements of Elements[] which each contain a dex file.
ObjPtr<mirror::Object> dex_elements_obj =
- soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements)->
+ jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList_dexElements)->
GetObject(dex_path_list);
// Loop through each dalvik.system.DexPathList$Element's dalvik.system.DexFile and look
// at the mCookie which is a DexFile vector.
@@ -8137,7 +8138,7 @@ jobject ClassLinker::CreatePathClassLoader(Thread* self,
StackHandleScope<11> hs(self);
ArtField* dex_elements_field =
- soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements);
+ jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList_dexElements);
Handle<mirror::Class> dex_elements_class(hs.NewHandle(dex_elements_field->GetType<true>()));
DCHECK(dex_elements_class.Get() != nullptr);
@@ -8150,13 +8151,13 @@ jobject ClassLinker::CreatePathClassLoader(Thread* self,
hs.NewHandle(dex_elements_class->GetComponentType());
ArtField* element_file_field =
- soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
+ jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
DCHECK_EQ(h_dex_element_class.Get(), element_file_field->GetDeclaringClass());
- ArtField* cookie_field = soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie);
+ ArtField* cookie_field = jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_cookie);
DCHECK_EQ(cookie_field->GetDeclaringClass(), element_file_field->GetType<false>());
- ArtField* file_name_field = soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_fileName);
+ ArtField* file_name_field = jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_fileName);
DCHECK_EQ(file_name_field->GetDeclaringClass(), element_file_field->GetType<false>());
// Fill the elements array.
@@ -8206,7 +8207,7 @@ jobject ClassLinker::CreatePathClassLoader(Thread* self,
DCHECK(h_path_class_loader.Get() != nullptr);
// Set DexPathList.
ArtField* path_list_field =
- soa.DecodeField(WellKnownClasses::dalvik_system_BaseDexClassLoader_pathList);
+ jni::DecodeArtField(WellKnownClasses::dalvik_system_BaseDexClassLoader_pathList);
DCHECK(path_list_field != nullptr);
path_list_field->SetObject<false>(h_path_class_loader.Get(), h_dex_path_list.Get());
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 3409938c50..8226e6049e 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -520,17 +520,17 @@ std::vector<const DexFile*> CommonRuntimeTestImpl::GetDexFiles(jobject jclass_lo
// The class loader is a PathClassLoader which inherits from BaseDexClassLoader.
// We need to get the DexPathList and loop through it.
- ArtField* cookie_field = soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie);
+ ArtField* cookie_field = jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_cookie);
ArtField* dex_file_field =
- soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
+ jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
ObjPtr<mirror::Object> dex_path_list =
- soa.DecodeField(WellKnownClasses::dalvik_system_BaseDexClassLoader_pathList)->
- GetObject(class_loader.Get());
+ jni::DecodeArtField(WellKnownClasses::dalvik_system_BaseDexClassLoader_pathList)->
+ GetObject(class_loader.Get());
if (dex_path_list != nullptr && dex_file_field != nullptr && cookie_field != nullptr) {
// DexPathList has an array dexElements of Elements[] which each contain a dex file.
ObjPtr<mirror::Object> dex_elements_obj =
- soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements)->
- GetObject(dex_path_list);
+ jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList_dexElements)->
+ GetObject(dex_path_list);
// Loop through each dalvik.system.DexPathList$Element's dalvik.system.DexFile and look
// at the mCookie which is a DexFile vector.
if (dex_elements_obj != nullptr) {
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 1da888e4b7..dc2ae2e215 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -39,6 +39,7 @@
#include "handle_scope.h"
#include "jdwp/jdwp_priv.h"
#include "jdwp/object_registry.h"
+#include "jni_internal.h"
#include "jvalue-inl.h"
#include "mirror/class.h"
#include "mirror/class-inl.h"
@@ -2007,7 +2008,7 @@ JDWP::JdwpError Dbg::GetThreadName(JDWP::ObjectId thread_id, std::string* name)
mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id, &error);
CHECK(thread_object != nullptr) << error;
ArtField* java_lang_Thread_name_field =
- soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
+ jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name);
ObjPtr<mirror::String> s(java_lang_Thread_name_field->GetObject(thread_object)->AsString());
if (s != nullptr) {
*name = s->ToModifiedUtf8();
@@ -2032,7 +2033,7 @@ JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* p
} else if (error == JDWP::ERR_NONE) {
ObjPtr<mirror::Class> c = soa.Decode<mirror::Class>(WellKnownClasses::java_lang_Thread);
CHECK(c != nullptr);
- ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_group);
+ ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group);
CHECK(f != nullptr);
ObjPtr<mirror::Object> group = f->GetObject(thread_object);
CHECK(group != nullptr);
@@ -2074,7 +2075,7 @@ JDWP::JdwpError Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id, JDWP::Ex
return error;
}
ScopedAssertNoThreadSuspension ants("Debugger: GetThreadGroupName");
- ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
+ ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_name);
CHECK(f != nullptr);
ObjPtr<mirror::String> s = f->GetObject(thread_group)->AsString();
@@ -2093,7 +2094,7 @@ JDWP::JdwpError Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id, JDWP::
ObjPtr<mirror::Object> parent;
{
ScopedAssertNoThreadSuspension ants("Debugger: GetThreadGroupParent");
- ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_parent);
+ ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_parent);
CHECK(f != nullptr);
parent = f->GetObject(thread_group);
}
@@ -2102,13 +2103,13 @@ JDWP::JdwpError Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id, JDWP::
return JDWP::ERR_NONE;
}
-static void GetChildThreadGroups(ScopedObjectAccessUnchecked& soa, mirror::Object* thread_group,
+static void GetChildThreadGroups(mirror::Object* thread_group,
std::vector<JDWP::ObjectId>* child_thread_group_ids)
REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(thread_group != nullptr);
// Get the int "ngroups" count of this thread group...
- ArtField* ngroups_field = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_ngroups);
+ ArtField* ngroups_field = jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_ngroups);
CHECK(ngroups_field != nullptr);
const int32_t size = ngroups_field->GetInt(thread_group);
if (size == 0) {
@@ -2116,7 +2117,7 @@ static void GetChildThreadGroups(ScopedObjectAccessUnchecked& soa, mirror::Objec
}
// Get the ThreadGroup[] "groups" out of this thread group...
- ArtField* groups_field = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_groups);
+ ArtField* groups_field = jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_groups);
ObjPtr<mirror::Object> groups_array = groups_field->GetObject(thread_group);
CHECK(groups_array != nullptr);
@@ -2154,7 +2155,7 @@ JDWP::JdwpError Dbg::GetThreadGroupChildren(JDWP::ObjectId thread_group_id,
// Add child thread groups.
{
std::vector<JDWP::ObjectId> child_thread_groups_ids;
- GetChildThreadGroups(soa, thread_group, &child_thread_groups_ids);
+ GetChildThreadGroups(thread_group, &child_thread_groups_ids);
expandBufAdd4BE(pReply, child_thread_groups_ids.size());
for (JDWP::ObjectId child_thread_group_id : child_thread_groups_ids) {
expandBufAddObjectId(pReply, child_thread_group_id);
@@ -2166,7 +2167,7 @@ JDWP::JdwpError Dbg::GetThreadGroupChildren(JDWP::ObjectId thread_group_id,
JDWP::ObjectId Dbg::GetSystemThreadGroupId() {
ScopedObjectAccessUnchecked soa(Thread::Current());
- ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup);
+ ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup);
ObjPtr<mirror::Object> group = f->GetObject(f->GetDeclaringClass());
return gRegistry->Add(group);
}
@@ -2256,14 +2257,13 @@ JDWP::JdwpError Dbg::Interrupt(JDWP::ObjectId thread_id) {
return JDWP::ERR_NONE;
}
-static bool IsInDesiredThreadGroup(ScopedObjectAccessUnchecked& soa,
- mirror::Object* desired_thread_group, mirror::Object* peer)
+static bool IsInDesiredThreadGroup(mirror::Object* desired_thread_group, mirror::Object* peer)
REQUIRES_SHARED(Locks::mutator_lock_) {
// Do we want threads from all thread groups?
if (desired_thread_group == nullptr) {
return true;
}
- ArtField* thread_group_field = soa.DecodeField(WellKnownClasses::java_lang_Thread_group);
+ ArtField* thread_group_field = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group);
DCHECK(thread_group_field != nullptr);
ObjPtr<mirror::Object> group = thread_group_field->GetObject(peer);
return (group == desired_thread_group);
@@ -2296,7 +2296,7 @@ void Dbg::GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>*
// Doing so might help us report ZOMBIE threads too.
continue;
}
- if (IsInDesiredThreadGroup(soa, thread_group, peer)) {
+ if (IsInDesiredThreadGroup(thread_group, peer)) {
thread_ids->push_back(gRegistry->Add(peer));
}
}
@@ -4093,7 +4093,7 @@ void Dbg::ExecuteMethodWithoutPendingException(ScopedObjectAccess& soa, DebugInv
// Invoke the method.
ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(pReq->receiver.Read()));
- JValue result = InvokeWithJValues(soa, ref.get(), soa.EncodeMethod(m),
+ JValue result = InvokeWithJValues(soa, ref.get(), jni::EncodeArtMethod(m),
reinterpret_cast<jvalue*>(pReq->arg_values.get()));
// Prepare JDWP ids for the reply.
@@ -4371,7 +4371,7 @@ void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) {
CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type;
ScopedObjectAccessUnchecked soa(Thread::Current());
StackHandleScope<1> hs(soa.Self());
- Handle<mirror::String> name(hs.NewHandle(t->GetThreadName(soa)));
+ Handle<mirror::String> name(hs.NewHandle(t->GetThreadName()));
size_t char_count = (name.Get() != nullptr) ? name->GetLength() : 0;
const jchar* chars = (name.Get() != nullptr) ? name->GetValue() : nullptr;
bool is_compressed = (name.Get() != nullptr) ? name->IsCompressed() : false;
@@ -5117,13 +5117,11 @@ jbyteArray Dbg::GetRecentAllocations() {
}
ArtMethod* DeoptimizationRequest::Method() const {
- ScopedObjectAccessUnchecked soa(Thread::Current());
- return soa.DecodeMethod(method_);
+ return jni::DecodeArtMethod(method_);
}
void DeoptimizationRequest::SetMethod(ArtMethod* m) {
- ScopedObjectAccessUnchecked soa(Thread::Current());
- method_ = soa.EncodeMethod(m);
+ method_ = jni::EncodeArtMethod(m);
}
void Dbg::VisitRoots(RootVisitor* visitor) {
diff --git a/runtime/dex_file_annotations.cc b/runtime/dex_file_annotations.cc
index 0765465db4..835f456c9b 100644
--- a/runtime/dex_file_annotations.cc
+++ b/runtime/dex_file_annotations.cc
@@ -22,6 +22,7 @@
#include "art_method-inl.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
+#include "jni_internal.h"
#include "jvalue-inl.h"
#include "mirror/field.h"
#include "mirror/method.h"
@@ -281,7 +282,7 @@ mirror::Object* ProcessEncodedAnnotation(Handle<mirror::Class> klass, const uint
JValue result;
ArtMethod* create_annotation_method =
- soa.DecodeMethod(WellKnownClasses::libcore_reflect_AnnotationFactory_createAnnotation);
+ jni::DecodeArtMethod(WellKnownClasses::libcore_reflect_AnnotationFactory_createAnnotation);
uint32_t args[2] = { static_cast<uint32_t>(reinterpret_cast<uintptr_t>(annotation_class.Get())),
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(h_element_array.Get())) };
create_annotation_method->Invoke(self, args, sizeof(args), &result, "LLL");
@@ -633,7 +634,7 @@ mirror::Object* CreateAnnotationMember(Handle<mirror::Class> klass,
JValue result;
ArtMethod* annotation_member_init =
- soa.DecodeMethod(WellKnownClasses::libcore_reflect_AnnotationMember_init);
+ jni::DecodeArtMethod(WellKnownClasses::libcore_reflect_AnnotationMember_init);
uint32_t args[5] = { static_cast<uint32_t>(reinterpret_cast<uintptr_t>(new_member.Get())),
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(string_name.Get())),
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(value_object.Get())),
diff --git a/runtime/dex_file_verifier_test.cc b/runtime/dex_file_verifier_test.cc
index e39287018a..3801c228c0 100644
--- a/runtime/dex_file_verifier_test.cc
+++ b/runtime/dex_file_verifier_test.cc
@@ -58,7 +58,7 @@ class DexFileVerifierTest : public CommonRuntimeTest {
void VerifyModification(const char* dex_file_base64_content,
const char* location,
- std::function<void(DexFile*)> f,
+ const std::function<void(DexFile*)>& f,
const char* expected_error) {
size_t length;
std::unique_ptr<uint8_t[]> dex_bytes(DecodeBase64(dex_file_base64_content, &length));
diff --git a/runtime/entrypoints/quick/quick_cast_entrypoints.cc b/runtime/entrypoints/quick/quick_cast_entrypoints.cc
index 2732d687b5..083d5786ce 100644
--- a/runtime/entrypoints/quick/quick_cast_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_cast_entrypoints.cc
@@ -27,4 +27,12 @@ extern "C" size_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* r
return klass->IsAssignableFrom(ref_class) ? 1 : 0;
}
+// Instance-of test for code, won't throw. Null and equality tests already performed.
+extern "C" size_t artInstanceOfFromCode(mirror::Object* obj, mirror::Class* ref_class)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(obj != nullptr);
+ DCHECK(ref_class != nullptr);
+ return obj->InstanceOf(ref_class) ? 1 : 0;
+}
+
} // namespace art
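
The slow path now starts from the object rather than from two classes: artInstanceOfFromCode asks obj->InstanceOf(ref_class), and on failure the throw entrypoint added later in this patch can report the object's actual class. A toy model of the two tests, using invented stand-ins for mirror::Class and mirror::Object and assuming plain single-inheritance subtyping:

    // Illustrative stand-ins only; interfaces and arrays are ignored here.
    struct Klass {
      const Klass* super = nullptr;
      // True if `src` is this class or one of its subclasses.
      bool IsAssignableFrom(const Klass* src) const {
        for (const Klass* k = src; k != nullptr; k = k->super) {
          if (k == this) {
            return true;
          }
        }
        return false;
      }
    };

    struct Object {
      const Klass* klass;
      // instanceof: does the object's dynamic type conform to `target`?
      bool InstanceOf(const Klass* target) const { return target->IsAssignableFrom(klass); }
    };
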
diff --git a/runtime/entrypoints/quick/quick_default_externs.h b/runtime/entrypoints/quick/quick_default_externs.h
index cfa5325e45..64030f36bc 100644
--- a/runtime/entrypoints/quick/quick_default_externs.h
+++ b/runtime/entrypoints/quick/quick_default_externs.h
@@ -31,7 +31,7 @@ class ArtMethod;
// These are extern declarations of assembly stubs with common names.
// Cast entrypoints.
-extern "C" void art_quick_check_cast(const art::mirror::Class*, const art::mirror::Class*);
+extern "C" void art_quick_check_instance_of(art::mirror::Object*, art::mirror::Class*);
// DexCache entrypoints.
extern "C" void* art_quick_initialize_static_storage(uint32_t);
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index 3cfee45462..dd8fe55420 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -33,8 +33,8 @@
V(AllocStringFromChars, void*, int32_t, int32_t, void*) \
V(AllocStringFromString, void*, void*) \
\
- V(InstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*) \
- V(CheckCast, void, const mirror::Class*, const mirror::Class*) \
+ V(InstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*) \
+ V(CheckInstanceOf, void, mirror::Object*, mirror::Class*) \
\
V(InitializeStaticStorage, void*, uint32_t) \
V(InitializeTypeAndVerifyAccess, void*, uint32_t) \
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index a205b17f1b..c8ee99a5d9 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -111,6 +111,14 @@ extern "C" NO_RETURN void artThrowClassCastException(mirror::Class* dest_type,
self->QuickDeliverException();
}
+extern "C" NO_RETURN void artThrowClassCastExceptionForObject(mirror::Object* obj,
+ mirror::Class* dest_type,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(obj != nullptr);
+ artThrowClassCastException(dest_type, obj->GetClass(), self);
+}
+
extern "C" NO_RETURN void artThrowArrayStoreException(mirror::Object* array, mirror::Object* value,
Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index cdb1051e08..b0463d7f11 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -174,8 +174,9 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocStringFromString, pInstanceofNonTrivial,
sizeof(void*));
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInstanceofNonTrivial, pCheckCast, sizeof(void*));
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCheckCast, pInitializeStaticStorage, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInstanceofNonTrivial, pCheckInstanceOf, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCheckInstanceOf, pInitializeStaticStorage,
+ sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInitializeStaticStorage, pInitializeTypeAndVerifyAccess,
sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInitializeTypeAndVerifyAccess, pInitializeType,
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 0217a67559..01a2ad8f23 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -157,14 +157,14 @@ static jmethodID FindMethodID(ScopedObjectAccess& soa, jclass jni_class,
ThrowNoSuchMethodError(soa, c, name, sig, is_static ? "static" : "non-static");
return nullptr;
}
- return soa.EncodeMethod(method);
+ return jni::EncodeArtMethod(method);
}
static ObjPtr<mirror::ClassLoader> GetClassLoader(const ScopedObjectAccess& soa)
REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = soa.Self()->GetCurrentMethod(nullptr);
// If we are running Runtime.nativeLoad, use the overriding ClassLoader it set.
- if (method == soa.DecodeMethod(WellKnownClasses::java_lang_Runtime_nativeLoad)) {
+ if (method == jni::DecodeArtMethod(WellKnownClasses::java_lang_Runtime_nativeLoad)) {
return soa.Decode<mirror::ClassLoader>(soa.Self()->GetClassLoaderOverride());
}
// If we have a method, use its ClassLoader for context.
@@ -235,7 +235,7 @@ static jfieldID FindFieldID(const ScopedObjectAccess& soa, jclass jni_class, con
sig, name, c->GetDescriptor(&temp));
return nullptr;
}
- return soa.EncodeField(field);
+ return jni::EncodeArtField(field);
}
static void ThrowAIOOBE(ScopedObjectAccess& soa, mirror::Array* array, jsize start,
@@ -368,7 +368,7 @@ class JNI {
static jmethodID FromReflectedMethod(JNIEnv* env, jobject jlr_method) {
CHECK_NON_NULL_ARGUMENT(jlr_method);
ScopedObjectAccess soa(env);
- return soa.EncodeMethod(ArtMethod::FromReflectedMethod(soa, jlr_method));
+ return jni::EncodeArtMethod(ArtMethod::FromReflectedMethod(soa, jlr_method));
}
static jfieldID FromReflectedField(JNIEnv* env, jobject jlr_field) {
@@ -380,13 +380,13 @@ class JNI {
return nullptr;
}
ObjPtr<mirror::Field> field = ObjPtr<mirror::Field>::DownCast(obj_field);
- return soa.EncodeField(field->GetArtField());
+ return jni::EncodeArtField(field->GetArtField());
}
static jobject ToReflectedMethod(JNIEnv* env, jclass, jmethodID mid, jboolean) {
CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
- ArtMethod* m = soa.DecodeMethod(mid);
+ ArtMethod* m = jni::DecodeArtMethod(mid);
mirror::Executable* method;
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
DCHECK(!Runtime::Current()->IsActiveTransaction());
@@ -401,7 +401,7 @@ class JNI {
static jobject ToReflectedField(JNIEnv* env, jclass, jfieldID fid, jboolean) {
CHECK_NON_NULL_ARGUMENT(fid);
ScopedObjectAccess soa(env);
- ArtField* f = soa.DecodeField(fid);
+ ArtField* f = jni::DecodeArtField(fid);
return soa.AddLocalReference<jobject>(
mirror::Field::CreateFromArtField<kRuntimePointerSize>(soa.Self(), f, true));
}
@@ -631,8 +631,8 @@ class JNI {
}
if (c->IsStringClass()) {
// Replace calls to String.<init> with equivalent StringFactory call.
- jmethodID sf_mid = soa.EncodeMethod(
- WellKnownClasses::StringInitToStringFactory(soa.DecodeMethod(mid)));
+ jmethodID sf_mid = jni::EncodeArtMethod(
+ WellKnownClasses::StringInitToStringFactory(jni::DecodeArtMethod(mid)));
return CallStaticObjectMethodV(env, WellKnownClasses::java_lang_StringFactory, sf_mid, args);
}
ObjPtr<mirror::Object> result = c->AllocObject(soa.Self());
@@ -658,8 +658,8 @@ class JNI {
}
if (c->IsStringClass()) {
// Replace calls to String.<init> with equivalent StringFactory call.
- jmethodID sf_mid = soa.EncodeMethod(
- WellKnownClasses::StringInitToStringFactory(soa.DecodeMethod(mid)));
+ jmethodID sf_mid = jni::EncodeArtMethod(
+ WellKnownClasses::StringInitToStringFactory(jni::DecodeArtMethod(mid)));
return CallStaticObjectMethodA(env, WellKnownClasses::java_lang_StringFactory, sf_mid, args);
}
ObjPtr<mirror::Object> result = c->AllocObject(soa.Self());
@@ -1237,14 +1237,14 @@ class JNI {
CHECK_NON_NULL_ARGUMENT(fid);
ScopedObjectAccess soa(env);
ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(obj);
- ArtField* f = soa.DecodeField(fid);
+ ArtField* f = jni::DecodeArtField(fid);
return soa.AddLocalReference<jobject>(f->GetObject(o));
}
static jobject GetStaticObjectField(JNIEnv* env, jclass, jfieldID fid) {
CHECK_NON_NULL_ARGUMENT(fid);
ScopedObjectAccess soa(env);
- ArtField* f = soa.DecodeField(fid);
+ ArtField* f = jni::DecodeArtField(fid);
return soa.AddLocalReference<jobject>(f->GetObject(f->GetDeclaringClass()));
}
@@ -1254,7 +1254,7 @@ class JNI {
ScopedObjectAccess soa(env);
ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(java_object);
ObjPtr<mirror::Object> v = soa.Decode<mirror::Object>(java_value);
- ArtField* f = soa.DecodeField(fid);
+ ArtField* f = jni::DecodeArtField(fid);
f->SetObject<false>(o, v);
}
@@ -1262,7 +1262,7 @@ class JNI {
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(fid);
ScopedObjectAccess soa(env);
ObjPtr<mirror::Object> v = soa.Decode<mirror::Object>(java_value);
- ArtField* f = soa.DecodeField(fid);
+ ArtField* f = jni::DecodeArtField(fid);
f->SetObject<false>(f->GetDeclaringClass(), v);
}
@@ -1271,13 +1271,13 @@ class JNI {
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(fid); \
ScopedObjectAccess soa(env); \
ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(instance); \
- ArtField* f = soa.DecodeField(fid); \
+ ArtField* f = jni::DecodeArtField(fid); \
return f->Get ##fn (o)
#define GET_STATIC_PRIMITIVE_FIELD(fn) \
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(fid); \
ScopedObjectAccess soa(env); \
- ArtField* f = soa.DecodeField(fid); \
+ ArtField* f = jni::DecodeArtField(fid); \
return f->Get ##fn (f->GetDeclaringClass())
#define SET_PRIMITIVE_FIELD(fn, instance, value) \
@@ -1285,13 +1285,13 @@ class JNI {
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(fid); \
ScopedObjectAccess soa(env); \
ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(instance); \
- ArtField* f = soa.DecodeField(fid); \
+ ArtField* f = jni::DecodeArtField(fid); \
f->Set ##fn <false>(o, value)
#define SET_STATIC_PRIMITIVE_FIELD(fn, value) \
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(fid); \
ScopedObjectAccess soa(env); \
- ArtField* f = soa.DecodeField(fid); \
+ ArtField* f = jni::DecodeArtField(fid); \
f->Set ##fn <false>(f->GetDeclaringClass(), value)
static jboolean GetBooleanField(JNIEnv* env, jobject obj, jfieldID fid) {
diff --git a/runtime/jni_internal.h b/runtime/jni_internal.h
index b829934dd7..b3837c409d 100644
--- a/runtime/jni_internal.h
+++ b/runtime/jni_internal.h
@@ -20,6 +20,8 @@
#include <jni.h>
#include <iosfwd>
+#include "base/macros.h"
+
#ifndef NATIVE_METHOD
#define NATIVE_METHOD(className, functionName, signature) \
{ #functionName, signature, reinterpret_cast<void*>(className ## _ ## functionName) }
@@ -36,6 +38,9 @@
namespace art {
+class ArtField;
+class ArtMethod;
+
const JNINativeInterface* GetJniNativeInterface();
const JNINativeInterface* GetRuntimeShutdownNativeInterface();
@@ -46,6 +51,29 @@ void RegisterNativeMethods(JNIEnv* env, const char* jni_class_name, const JNINat
int ThrowNewException(JNIEnv* env, jclass exception_class, const char* msg, jobject cause);
+namespace jni {
+
+ALWAYS_INLINE
+static inline ArtField* DecodeArtField(jfieldID fid) {
+ return reinterpret_cast<ArtField*>(fid);
+}
+
+ALWAYS_INLINE
+static inline jfieldID EncodeArtField(ArtField* field) {
+ return reinterpret_cast<jfieldID>(field);
+}
+
+ALWAYS_INLINE
+static inline jmethodID EncodeArtMethod(ArtMethod* art_method) {
+ return reinterpret_cast<jmethodID>(art_method);
+}
+
+ALWAYS_INLINE
+static inline ArtMethod* DecodeArtMethod(jmethodID method_id) {
+ return reinterpret_cast<ArtMethod*>(method_id);
+}
+
+} // namespace jni
} // namespace art
std::ostream& operator<<(std::ostream& os, const jobjectRefType& rhs);
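These art::jni helpers replace the member functions removed from ScopedObjectAccessAlreadyRunnable further down in this change; they are plain reinterpret_casts, so call sites no longer need a ScopedObjectAccess in scope and no longer get the mutator-lock and runnable-state assertions the old accessors carried. A sketch of the before/after call sites, as used throughout the rest of the patch:

    #include "jni_internal.h"

    // Before: required a ScopedObjectAccess (soa) in scope.
    //   ArtMethod* m = soa.DecodeMethod(mid);
    //   ArtField*  f = soa.DecodeField(fid);

    // After: free functions in art::jni, usable without the soa object.
    ArtMethod* m    = art::jni::DecodeArtMethod(mid);
    jmethodID  mid2 = art::jni::EncodeArtMethod(m);
    ArtField*  f    = art::jni::DecodeArtField(fid);
    jfieldID   fid2 = art::jni::EncodeArtField(f);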
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 8d85425c10..adf35b6f01 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -420,8 +420,10 @@ static jobject VMDebug_getRuntimeStatInternal(JNIEnv* env, jclass, jint statId)
}
}
-static bool SetRuntimeStatValue(JNIEnv* env, jobjectArray result, VMDebugRuntimeStatId id,
- std::string value) {
+static bool SetRuntimeStatValue(JNIEnv* env,
+ jobjectArray result,
+ VMDebugRuntimeStatId id,
+ const std::string& value) {
ScopedLocalRef<jstring> jvalue(env, env->NewStringUTF(value.c_str()));
if (jvalue.get() == nullptr) {
return false;
diff --git a/runtime/native/java_lang_reflect_Executable.cc b/runtime/native/java_lang_reflect_Executable.cc
index 1b128fb187..73b81a71f8 100644
--- a/runtime/native/java_lang_reflect_Executable.cc
+++ b/runtime/native/java_lang_reflect_Executable.cc
@@ -136,7 +136,7 @@ static jobjectArray Executable_getParameters0(JNIEnv* env, jobject javaMethod) {
Handle<mirror::Class> parameter_class =
hs.NewHandle(soa.Decode<mirror::Class>(WellKnownClasses::java_lang_reflect_Parameter));
ArtMethod* parameter_init =
- soa.DecodeMethod(WellKnownClasses::java_lang_reflect_Parameter_init);
+ jni::DecodeArtMethod(WellKnownClasses::java_lang_reflect_Parameter_init);
// Mutable handles used in the loop below to ensure cleanup without scaling the number of
// handles by the number of parameters.
diff --git a/runtime/native_bridge_art_interface.cc b/runtime/native_bridge_art_interface.cc
index 5ab6097aa4..c58854b13e 100644
--- a/runtime/native_bridge_art_interface.cc
+++ b/runtime/native_bridge_art_interface.cc
@@ -25,6 +25,7 @@
#include "base/logging.h"
#include "base/macros.h"
#include "dex_file-inl.h"
+#include "jni_internal.h"
#include "mirror/class-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "sigchain.h"
@@ -33,7 +34,7 @@ namespace art {
static const char* GetMethodShorty(JNIEnv* env, jmethodID mid) {
ScopedObjectAccess soa(env);
- ArtMethod* m = soa.DecodeMethod(mid);
+ ArtMethod* m = jni::DecodeArtMethod(mid);
return m->GetShorty();
}
@@ -90,14 +91,14 @@ static android::NativeBridgeRuntimeCallbacks native_bridge_art_callbacks_ {
GetMethodShorty, GetNativeMethodCount, GetNativeMethods
};
-bool LoadNativeBridge(std::string& native_bridge_library_filename) {
+bool LoadNativeBridge(const std::string& native_bridge_library_filename) {
VLOG(startup) << "Runtime::Setup native bridge library: "
<< (native_bridge_library_filename.empty() ? "(empty)" : native_bridge_library_filename);
return android::LoadNativeBridge(native_bridge_library_filename.c_str(),
&native_bridge_art_callbacks_);
}
-void PreInitializeNativeBridge(std::string dir) {
+void PreInitializeNativeBridge(const std::string& dir) {
VLOG(startup) << "Runtime::Pre-initialize native bridge";
#ifndef __APPLE__ // Mac OS does not support CLONE_NEWNS.
if (unshare(CLONE_NEWNS) == -1) {
diff --git a/runtime/native_bridge_art_interface.h b/runtime/native_bridge_art_interface.h
index 090cddb9b6..c86e5da0df 100644
--- a/runtime/native_bridge_art_interface.h
+++ b/runtime/native_bridge_art_interface.h
@@ -26,10 +26,10 @@ namespace art {
// Mirror libnativebridge interface. Done to have the ART callbacks out of line, and not require
// the system/core header file in other files.
-bool LoadNativeBridge(std::string& native_bridge_library_filename);
+bool LoadNativeBridge(const std::string& native_bridge_library_filename);
// This is mostly for testing purposes, as in a full system this is called by Zygote code.
-void PreInitializeNativeBridge(std::string dir);
+void PreInitializeNativeBridge(const std::string& dir);
void InitializeNativeBridge(JNIEnv* env, const char* instruction_set);
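The signature changes in this header (and the matching ones in native_stack_dump.cc, oat_file_assistant_test.cc, dalvik_system_VMDebug.cc, transform.h and plugin.h) are the same cleanup: std::string parameters that are only read are taken by const reference, which avoids a copy per call while still accepting temporaries. A minimal self-contained illustration:

    #include <string>

    static void ByValue(std::string dir) { (void)dir; }             // copies on every call
    static void ByConstRef(const std::string& dir) { (void)dir; }   // binds to the caller's string

    static void Caller(const std::string& path) {
      ByValue(path);        // a copy of path is constructed here
      ByConstRef(path);     // no copy
      ByConstRef("/tmp");   // a temporary still binds to the const reference
    }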
diff --git a/runtime/native_stack_dump.cc b/runtime/native_stack_dump.cc
index 00ab5771da..23768899bd 100644
--- a/runtime/native_stack_dump.cc
+++ b/runtime/native_stack_dump.cc
@@ -256,7 +256,7 @@ static void Addr2line(const std::string& map_src,
Drain(2U, prefix, pipe, os);
}
-static bool RunCommand(std::string cmd) {
+static bool RunCommand(const std::string& cmd) {
FILE* stream = popen(cmd.c_str(), "r");
if (stream) {
pclose(stream);
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index d18e9464e6..d4337b971b 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -999,7 +999,7 @@ TEST_F(OatFileAssistantTest, GenNoDex) {
// Turn an absolute path into a path relative to the current working
// directory.
-static std::string MakePathRelative(std::string target) {
+static std::string MakePathRelative(const std::string& target) {
char buf[MAXPATHLEN];
std::string cwd = getcwd(buf, MAXPATHLEN);
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index cf9efe0782..651a6ee681 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -28,6 +28,7 @@
#include "gc/scoped_gc_critical_section.h"
#include "gc/space/image_space.h"
#include "handle_scope-inl.h"
+#include "jni_internal.h"
#include "mirror/class_loader.h"
#include "oat_file_assistant.h"
#include "obj_ptr-inl.h"
@@ -224,9 +225,10 @@ static void AddNext(/*inout*/DexFileAndClassPair* original,
}
}
+template <typename T>
static void IterateOverJavaDexFile(ObjPtr<mirror::Object> dex_file,
ArtField* const cookie_field,
- std::function<bool(const DexFile*)> fn)
+ const T& fn)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (dex_file != nullptr) {
mirror::LongArray* long_array = cookie_field->GetObject(dex_file)->AsLongArray();
@@ -247,26 +249,27 @@ static void IterateOverJavaDexFile(ObjPtr<mirror::Object> dex_file,
}
}
+template <typename T>
static void IterateOverPathClassLoader(
- ScopedObjectAccessAlreadyRunnable& soa,
Handle<mirror::ClassLoader> class_loader,
MutableHandle<mirror::ObjectArray<mirror::Object>> dex_elements,
- std::function<bool(const DexFile*)> fn) REQUIRES_SHARED(Locks::mutator_lock_) {
+ const T& fn) REQUIRES_SHARED(Locks::mutator_lock_) {
// Handle this step.
// Handle as if this is the child PathClassLoader.
// The class loader is a PathClassLoader which inherits from BaseDexClassLoader.
// We need to get the DexPathList and loop through it.
- ArtField* const cookie_field = soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie);
+ ArtField* const cookie_field =
+ jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_cookie);
ArtField* const dex_file_field =
- soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
+ jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
ObjPtr<mirror::Object> dex_path_list =
- soa.DecodeField(WellKnownClasses::dalvik_system_BaseDexClassLoader_pathList)->
- GetObject(class_loader.Get());
+ jni::DecodeArtField(WellKnownClasses::dalvik_system_BaseDexClassLoader_pathList)->
+ GetObject(class_loader.Get());
if (dex_path_list != nullptr && dex_file_field != nullptr && cookie_field != nullptr) {
// DexPathList has an array dexElements of Elements[] which each contain a dex file.
ObjPtr<mirror::Object> dex_elements_obj =
- soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements)->
- GetObject(dex_path_list);
+ jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList_dexElements)->
+ GetObject(dex_path_list);
// Loop through each dalvik.system.DexPathList$Element's dalvik.system.DexFile and look
// at the mCookie which is a DexFile vector.
if (dex_elements_obj != nullptr) {
@@ -323,7 +326,7 @@ static bool GetDexFilesFromClassLoader(
hs.NewHandle<mirror::ObjectArray<mirror::Object>>(nullptr));
Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(class_loader));
- IterateOverPathClassLoader(soa, h_class_loader, dex_elements, GetDexFilesFn);
+ IterateOverPathClassLoader(h_class_loader, dex_elements, GetDexFilesFn);
return true;
}
@@ -337,9 +340,10 @@ static void GetDexFilesFromDexElementsArray(
return;
}
- ArtField* const cookie_field = soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie);
+ ArtField* const cookie_field =
+ jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_cookie);
ArtField* const dex_file_field =
- soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
+ jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
ObjPtr<mirror::Class> const element_class = soa.Decode<mirror::Class>(
WellKnownClasses::dalvik_system_DexPathList__Element);
ObjPtr<mirror::Class> const dexfile_class = soa.Decode<mirror::Class>(
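Taking the callback as a template parameter instead of std::function<bool(const DexFile*)> lets each lambda keep its concrete type all the way into IterateOverJavaDexFile and IterateOverPathClassLoader, avoiding the type erasure (and possible allocation) of building a std::function at every call. A self-contained sketch of the same technique, with the same "return false to stop" convention:

    #include <cstddef>

    // Generic early-exit iteration: fn returns false to stop early.
    template <typename T>
    void ForEachUntilFalse(const int* data, std::size_t n, const T& fn) {
      for (std::size_t i = 0; i < n; ++i) {
        if (!fn(data[i])) {
          break;
        }
      }
    }

    // Usage: the lambda's exact type is deduced; no std::function is constructed.
    //   int hits = 0;
    //   ForEachUntilFalse(values, count, [&](int v) { ++hits; return v != 0; });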
diff --git a/runtime/openjdkjvmti/ti_method.cc b/runtime/openjdkjvmti/ti_method.cc
index 6210936772..e391a9d680 100644
--- a/runtime/openjdkjvmti/ti_method.cc
+++ b/runtime/openjdkjvmti/ti_method.cc
@@ -34,6 +34,7 @@
#include "art_jvmti.h"
#include "art_method-inl.h"
#include "base/enums.h"
+#include "jni_internal.h"
#include "modifiers.h"
#include "scoped_thread_state_change-inl.h"
@@ -45,7 +46,7 @@ jvmtiError MethodUtil::GetMethodName(jvmtiEnv* env,
char** signature_ptr,
char** generic_ptr) {
art::ScopedObjectAccess soa(art::Thread::Current());
- art::ArtMethod* art_method = soa.DecodeMethod(method);
+ art::ArtMethod* art_method = art::jni::DecodeArtMethod(method);
art_method = art_method->GetInterfaceMethodIfProxy(art::kRuntimePointerSize);
JvmtiUniquePtr name_copy;
@@ -93,10 +94,10 @@ jvmtiError MethodUtil::GetMethodDeclaringClass(jvmtiEnv* env ATTRIBUTE_UNUSED,
return ERR(NULL_POINTER);
}
- art::ScopedObjectAccess soa(art::Thread::Current());
- art::ArtMethod* art_method = soa.DecodeMethod(method);
+ art::ArtMethod* art_method = art::jni::DecodeArtMethod(method);
// Note: No GetInterfaceMethodIfProxy, we want the actual class.
+ art::ScopedObjectAccess soa(art::Thread::Current());
art::mirror::Class* klass = art_method->GetDeclaringClass();
*declaring_class_ptr = soa.AddLocalReference<jclass>(klass);
@@ -110,9 +111,7 @@ jvmtiError MethodUtil::GetMethodModifiers(jvmtiEnv* env ATTRIBUTE_UNUSED,
return ERR(NULL_POINTER);
}
- art::ScopedObjectAccess soa(art::Thread::Current());
- art::ArtMethod* art_method = soa.DecodeMethod(method);
-
+ art::ArtMethod* art_method = art::jni::DecodeArtMethod(method);
uint32_t modifiers = art_method->GetAccessFlags();
// Note: Keep this code in sync with Executable.fixMethodFlags.
diff --git a/runtime/openjdkjvmti/ti_stack.cc b/runtime/openjdkjvmti/ti_stack.cc
index 33e677f4b1..6f8976f03d 100644
--- a/runtime/openjdkjvmti/ti_stack.cc
+++ b/runtime/openjdkjvmti/ti_stack.cc
@@ -37,6 +37,7 @@
#include "dex_file.h"
#include "dex_file_annotations.h"
#include "jni_env_ext.h"
+#include "jni_internal.h"
#include "mirror/class.h"
#include "mirror/dex_cache.h"
#include "scoped_thread_state_change-inl.h"
@@ -64,7 +65,7 @@ struct GetStackTraceVisitor : public art::StackVisitor {
if (start == 0) {
m = m->GetInterfaceMethodIfProxy(art::kRuntimePointerSize);
- jmethodID id = soa.EncodeMethod(m);
+ jmethodID id = art::jni::EncodeArtMethod(m);
art::mirror::DexCache* dex_cache = m->GetDexCache();
int32_t line_number = -1;
diff --git a/runtime/openjdkjvmti/transform.cc b/runtime/openjdkjvmti/transform.cc
index 3443aea744..fa2983c19c 100644
--- a/runtime/openjdkjvmti/transform.cc
+++ b/runtime/openjdkjvmti/transform.cc
@@ -283,7 +283,7 @@ jvmtiError GetTransformationData(ArtJvmTiEnv* env,
// Install the new dex file.
// TODO do error checks for bad state (method in a stack, changes to number of methods/fields/etc).
jvmtiError MoveTransformedFileIntoRuntime(jclass jklass,
- std::string original_location,
+ const std::string& original_location,
jint data_len,
unsigned char* dex_data) {
const char* dex_file_name = "Ldalvik/system/DexFile;";
diff --git a/runtime/openjdkjvmti/transform.h b/runtime/openjdkjvmti/transform.h
index 85bcb00eca..a76ed939b5 100644
--- a/runtime/openjdkjvmti/transform.h
+++ b/runtime/openjdkjvmti/transform.h
@@ -54,7 +54,7 @@ jvmtiError GetTransformationData(ArtJvmTiEnv* env,
// Install the new dex file.
jvmtiError MoveTransformedFileIntoRuntime(jclass jklass,
- std::string original_location,
+ const std::string& original_location,
jint data_len,
unsigned char* dex_data);
diff --git a/runtime/plugin.h b/runtime/plugin.h
index 18f3977bd5..f077aaf3fb 100644
--- a/runtime/plugin.h
+++ b/runtime/plugin.h
@@ -34,7 +34,7 @@ using PluginDeinitializationFunction = bool (*)();
// single-threaded fashion so not much need
class Plugin {
public:
- static Plugin Create(std::string lib) {
+ static Plugin Create(const std::string& lib) {
return Plugin(lib);
}
@@ -66,7 +66,7 @@ class Plugin {
}
private:
- explicit Plugin(std::string library) : library_(library), dlopen_handle_(nullptr) { }
+ explicit Plugin(const std::string& library) : library_(library), dlopen_handle_(nullptr) { }
std::string library_;
void* dlopen_handle_;
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index f88309baa1..3128380f76 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -453,7 +453,7 @@ JValue InvokeWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa, jobject o
return JValue();
}
- ArtMethod* method = soa.DecodeMethod(mid);
+ ArtMethod* method = jni::DecodeArtMethod(mid);
bool is_string_init = method->GetDeclaringClass()->IsStringClass() && method->IsConstructor();
if (is_string_init) {
// Replace calls to String.<init> with equivalent StringFactory call.
@@ -484,7 +484,7 @@ JValue InvokeWithJValues(const ScopedObjectAccessAlreadyRunnable& soa, jobject o
return JValue();
}
- ArtMethod* method = soa.DecodeMethod(mid);
+ ArtMethod* method = jni::DecodeArtMethod(mid);
bool is_string_init = method->GetDeclaringClass()->IsStringClass() && method->IsConstructor();
if (is_string_init) {
// Replace calls to String.<init> with equivalent StringFactory call.
@@ -516,7 +516,7 @@ JValue InvokeVirtualOrInterfaceWithJValues(const ScopedObjectAccessAlreadyRunnab
}
ObjPtr<mirror::Object> receiver = soa.Decode<mirror::Object>(obj);
- ArtMethod* method = FindVirtualMethod(receiver, soa.DecodeMethod(mid));
+ ArtMethod* method = FindVirtualMethod(receiver, jni::DecodeArtMethod(mid));
bool is_string_init = method->GetDeclaringClass()->IsStringClass() && method->IsConstructor();
if (is_string_init) {
// Replace calls to String.<init> with equivalent StringFactory call.
@@ -548,7 +548,7 @@ JValue InvokeVirtualOrInterfaceWithVarArgs(const ScopedObjectAccessAlreadyRunnab
}
ObjPtr<mirror::Object> receiver = soa.Decode<mirror::Object>(obj);
- ArtMethod* method = FindVirtualMethod(receiver, soa.DecodeMethod(mid));
+ ArtMethod* method = FindVirtualMethod(receiver, jni::DecodeArtMethod(mid));
bool is_string_init = method->GetDeclaringClass()->IsStringClass() && method->IsConstructor();
if (is_string_init) {
// Replace calls to String.<init> with equivalent StringFactory call.
@@ -739,8 +739,11 @@ ObjPtr<mirror::Object> BoxPrimitive(Primitive::Type src_class, const JValue& val
arg_array.Append(value.GetI());
}
- soa.DecodeMethod(m)->Invoke(soa.Self(), arg_array.GetArray(), arg_array.GetNumBytes(),
- &result, shorty);
+ jni::DecodeArtMethod(m)->Invoke(soa.Self(),
+ arg_array.GetArray(),
+ arg_array.GetNumBytes(),
+ &result,
+ shorty);
return result.GetL();
}
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index 22076bbc05..e254dfe627 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -23,6 +23,7 @@
#include "art_method-inl.h"
#include "base/enums.h"
#include "common_compiler_test.h"
+#include "jni_internal.h"
#include "scoped_thread_state_change-inl.h"
namespace art {
@@ -136,7 +137,7 @@ class ReflectionTest : public CommonCompilerTest {
ObjPtr<mirror::Object> receiver;
ReflectionTestMakeExecutable(&method, &receiver, is_static, "nop", "()V");
ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
- InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), nullptr);
+ InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), nullptr);
}
void InvokeIdentityByteMethod(bool is_static) {
@@ -148,20 +149,20 @@ class ReflectionTest : public CommonCompilerTest {
jvalue args[1];
args[0].b = 0;
- JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_EQ(0, result.GetB());
args[0].b = -1;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_EQ(-1, result.GetB());
args[0].b = SCHAR_MAX;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_EQ(SCHAR_MAX, result.GetB());
static_assert(SCHAR_MIN == -128, "SCHAR_MIN unexpected");
args[0].b = SCHAR_MIN;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_EQ(SCHAR_MIN, result.GetB());
}
@@ -174,19 +175,19 @@ class ReflectionTest : public CommonCompilerTest {
jvalue args[1];
args[0].i = 0;
- JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_EQ(0, result.GetI());
args[0].i = -1;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_EQ(-1, result.GetI());
args[0].i = INT_MAX;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_EQ(INT_MAX, result.GetI());
args[0].i = INT_MIN;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_EQ(INT_MIN, result.GetI());
}
@@ -199,19 +200,19 @@ class ReflectionTest : public CommonCompilerTest {
jvalue args[1];
args[0].d = 0.0;
- JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_DOUBLE_EQ(0.0, result.GetD());
args[0].d = -1.0;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_DOUBLE_EQ(-1.0, result.GetD());
args[0].d = DBL_MAX;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_DOUBLE_EQ(DBL_MAX, result.GetD());
args[0].d = DBL_MIN;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_DOUBLE_EQ(DBL_MIN, result.GetD());
}
@@ -225,22 +226,22 @@ class ReflectionTest : public CommonCompilerTest {
args[0].i = 1;
args[1].i = 2;
- JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_EQ(3, result.GetI());
args[0].i = -2;
args[1].i = 5;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_EQ(3, result.GetI());
args[0].i = INT_MAX;
args[1].i = INT_MIN;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_EQ(-1, result.GetI());
args[0].i = INT_MAX;
args[1].i = INT_MAX;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_EQ(-2, result.GetI());
}
@@ -255,31 +256,31 @@ class ReflectionTest : public CommonCompilerTest {
args[0].i = 0;
args[1].i = 0;
args[2].i = 0;
- JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_EQ(0, result.GetI());
args[0].i = 1;
args[1].i = 2;
args[2].i = 3;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_EQ(6, result.GetI());
args[0].i = -1;
args[1].i = 2;
args[2].i = -3;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_EQ(-2, result.GetI());
args[0].i = INT_MAX;
args[1].i = INT_MIN;
args[2].i = INT_MAX;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_EQ(2147483646, result.GetI());
args[0].i = INT_MAX;
args[1].i = INT_MAX;
args[2].i = INT_MAX;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_EQ(2147483645, result.GetI());
}
@@ -295,35 +296,35 @@ class ReflectionTest : public CommonCompilerTest {
args[1].i = 0;
args[2].i = 0;
args[3].i = 0;
- JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_EQ(0, result.GetI());
args[0].i = 1;
args[1].i = 2;
args[2].i = 3;
args[3].i = 4;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_EQ(10, result.GetI());
args[0].i = -1;
args[1].i = 2;
args[2].i = -3;
args[3].i = 4;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_EQ(2, result.GetI());
args[0].i = INT_MAX;
args[1].i = INT_MIN;
args[2].i = INT_MAX;
args[3].i = INT_MIN;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_EQ(-2, result.GetI());
args[0].i = INT_MAX;
args[1].i = INT_MAX;
args[2].i = INT_MAX;
args[3].i = INT_MAX;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_EQ(-4, result.GetI());
}
@@ -340,7 +341,7 @@ class ReflectionTest : public CommonCompilerTest {
args[2].i = 0;
args[3].i = 0;
args[4].i = 0;
- JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_EQ(0, result.GetI());
args[0].i = 1;
@@ -348,7 +349,7 @@ class ReflectionTest : public CommonCompilerTest {
args[2].i = 3;
args[3].i = 4;
args[4].i = 5;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_EQ(15, result.GetI());
args[0].i = -1;
@@ -356,7 +357,7 @@ class ReflectionTest : public CommonCompilerTest {
args[2].i = -3;
args[3].i = 4;
args[4].i = -5;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_EQ(-3, result.GetI());
args[0].i = INT_MAX;
@@ -364,7 +365,7 @@ class ReflectionTest : public CommonCompilerTest {
args[2].i = INT_MAX;
args[3].i = INT_MIN;
args[4].i = INT_MAX;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_EQ(2147483645, result.GetI());
args[0].i = INT_MAX;
@@ -372,7 +373,7 @@ class ReflectionTest : public CommonCompilerTest {
args[2].i = INT_MAX;
args[3].i = INT_MAX;
args[4].i = INT_MAX;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_EQ(2147483643, result.GetI());
}
@@ -386,27 +387,27 @@ class ReflectionTest : public CommonCompilerTest {
args[0].d = 0.0;
args[1].d = 0.0;
- JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_DOUBLE_EQ(0.0, result.GetD());
args[0].d = 1.0;
args[1].d = 2.0;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_DOUBLE_EQ(3.0, result.GetD());
args[0].d = 1.0;
args[1].d = -2.0;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_DOUBLE_EQ(-1.0, result.GetD());
args[0].d = DBL_MAX;
args[1].d = DBL_MIN;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_DOUBLE_EQ(1.7976931348623157e308, result.GetD());
args[0].d = DBL_MAX;
args[1].d = DBL_MAX;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_DOUBLE_EQ(INFINITY, result.GetD());
}
@@ -421,19 +422,19 @@ class ReflectionTest : public CommonCompilerTest {
args[0].d = 0.0;
args[1].d = 0.0;
args[2].d = 0.0;
- JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_DOUBLE_EQ(0.0, result.GetD());
args[0].d = 1.0;
args[1].d = 2.0;
args[2].d = 3.0;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_DOUBLE_EQ(6.0, result.GetD());
args[0].d = 1.0;
args[1].d = -2.0;
args[2].d = 3.0;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_DOUBLE_EQ(2.0, result.GetD());
}
@@ -449,21 +450,21 @@ class ReflectionTest : public CommonCompilerTest {
args[1].d = 0.0;
args[2].d = 0.0;
args[3].d = 0.0;
- JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_DOUBLE_EQ(0.0, result.GetD());
args[0].d = 1.0;
args[1].d = 2.0;
args[2].d = 3.0;
args[3].d = 4.0;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_DOUBLE_EQ(10.0, result.GetD());
args[0].d = 1.0;
args[1].d = -2.0;
args[2].d = 3.0;
args[3].d = -4.0;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_DOUBLE_EQ(-2.0, result.GetD());
}
@@ -480,7 +481,7 @@ class ReflectionTest : public CommonCompilerTest {
args[2].d = 0.0;
args[3].d = 0.0;
args[4].d = 0.0;
- JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_DOUBLE_EQ(0.0, result.GetD());
args[0].d = 1.0;
@@ -488,7 +489,7 @@ class ReflectionTest : public CommonCompilerTest {
args[2].d = 3.0;
args[3].d = 4.0;
args[4].d = 5.0;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_DOUBLE_EQ(15.0, result.GetD());
args[0].d = 1.0;
@@ -496,7 +497,7 @@ class ReflectionTest : public CommonCompilerTest {
args[2].d = 3.0;
args[3].d = -4.0;
args[4].d = 5.0;
- result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
+ result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args);
EXPECT_DOUBLE_EQ(3.0, result.GetD());
}
@@ -531,7 +532,7 @@ TEST_F(ReflectionTest, StaticMainMethod) {
jvalue args[1];
args[0].l = nullptr;
- InvokeWithJValues(soa, nullptr, soa.EncodeMethod(method), args);
+ InvokeWithJValues(soa, nullptr, jni::EncodeArtMethod(method), args);
}
TEST_F(ReflectionTest, StaticNopMethod) {
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index b868563634..ca65c2bea3 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -559,7 +559,10 @@ static jobject CreateSystemClassLoader(Runtime* runtime) {
"getSystemClassLoader", "()Ljava/lang/ClassLoader;", pointer_size);
CHECK(getSystemClassLoader != nullptr);
- JValue result = InvokeWithJValues(soa, nullptr, soa.EncodeMethod(getSystemClassLoader), nullptr);
+ JValue result = InvokeWithJValues(soa,
+ nullptr,
+ jni::EncodeArtMethod(getSystemClassLoader),
+ nullptr);
JNIEnv* env = soa.Self()->GetJniEnv();
ScopedLocalRef<jobject> system_class_loader(env, soa.AddLocalReference<jobject>(result.GetL()));
CHECK(system_class_loader.get() != nullptr);
diff --git a/runtime/scoped_thread_state_change-inl.h b/runtime/scoped_thread_state_change-inl.h
index bde23c8028..d4469f4357 100644
--- a/runtime/scoped_thread_state_change-inl.h
+++ b/runtime/scoped_thread_state_change-inl.h
@@ -86,30 +86,6 @@ inline ObjPtr<T, kPoison> ScopedObjectAccessAlreadyRunnable::Decode(jobject obj)
return ObjPtr<T, kPoison>::DownCast(Self()->DecodeJObject(obj));
}
-inline ArtField* ScopedObjectAccessAlreadyRunnable::DecodeField(jfieldID fid) const {
- Locks::mutator_lock_->AssertSharedHeld(Self());
- DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
- return reinterpret_cast<ArtField*>(fid);
-}
-
-inline jfieldID ScopedObjectAccessAlreadyRunnable::EncodeField(ArtField* field) const {
- Locks::mutator_lock_->AssertSharedHeld(Self());
- DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
- return reinterpret_cast<jfieldID>(field);
-}
-
-inline ArtMethod* ScopedObjectAccessAlreadyRunnable::DecodeMethod(jmethodID mid) const {
- Locks::mutator_lock_->AssertSharedHeld(Self());
- DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
- return reinterpret_cast<ArtMethod*>(mid);
-}
-
-inline jmethodID ScopedObjectAccessAlreadyRunnable::EncodeMethod(ArtMethod* method) const {
- Locks::mutator_lock_->AssertSharedHeld(Self());
- DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
- return reinterpret_cast<jmethodID>(method);
-}
-
inline bool ScopedObjectAccessAlreadyRunnable::IsRunnable() const {
return self_->GetState() == kRunnable;
}
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index 04fd9141ea..b4992586ce 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -94,14 +94,6 @@ class ScopedObjectAccessAlreadyRunnable : public ValueObject {
template<typename T, bool kPoison = kIsDebugBuild>
ObjPtr<T, kPoison> Decode(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_);
- ArtField* DecodeField(jfieldID fid) const REQUIRES_SHARED(Locks::mutator_lock_);
-
- jfieldID EncodeField(ArtField* field) const REQUIRES_SHARED(Locks::mutator_lock_);
-
- ArtMethod* DecodeMethod(jmethodID mid) const REQUIRES_SHARED(Locks::mutator_lock_);
-
- jmethodID EncodeMethod(ArtMethod* method) const REQUIRES_SHARED(Locks::mutator_lock_);
-
ALWAYS_INLINE bool IsRunnable() const;
protected:
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 3f7d0868b0..8ce9661dfe 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -410,9 +410,9 @@ void* Thread::CreateCallback(void* arg) {
self->tlsPtr_.opeer = soa.Decode<mirror::Object>(self->tlsPtr_.jpeer).Ptr();
self->GetJniEnv()->DeleteGlobalRef(self->tlsPtr_.jpeer);
self->tlsPtr_.jpeer = nullptr;
- self->SetThreadName(self->GetThreadName(soa)->ToModifiedUtf8().c_str());
+ self->SetThreadName(self->GetThreadName()->ToModifiedUtf8().c_str());
- ArtField* priorityField = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority);
+ ArtField* priorityField = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_priority);
self->SetNativePriority(priorityField->GetInt(self->tlsPtr_.opeer));
Dbg::PostThreadStart(self);
@@ -430,7 +430,7 @@ void* Thread::CreateCallback(void* arg) {
Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
mirror::Object* thread_peer) {
- ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer);
+ ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_nativePeer);
Thread* result = reinterpret_cast<Thread*>(static_cast<uintptr_t>(f->GetLong(thread_peer)));
// Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_
// to stop it from going away.
@@ -562,7 +562,7 @@ void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_siz
if (VLOG_IS_ON(threads)) {
ScopedObjectAccess soa(env);
- ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
+ ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name);
ObjPtr<mirror::String> java_name =
f->GetObject(soa.Decode<mirror::Object>(java_peer))->AsString();
std::string thread_name;
@@ -823,7 +823,7 @@ void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group)
ScopedObjectAccess soa(self);
StackHandleScope<1> hs(self);
- MutableHandle<mirror::String> peer_thread_name(hs.NewHandle(GetThreadName(soa)));
+ MutableHandle<mirror::String> peer_thread_name(hs.NewHandle(GetThreadName()));
if (peer_thread_name.Get() == nullptr) {
// The Thread constructor should have set the Thread.name to a
// non-null value. However, because we can run without code
@@ -834,7 +834,7 @@ void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group)
} else {
InitPeer<false>(soa, thread_is_daemon, thread_group, thread_name.get(), thread_priority);
}
- peer_thread_name.Assign(GetThreadName(soa));
+ peer_thread_name.Assign(GetThreadName());
}
// 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null.
if (peer_thread_name.Get() != nullptr) {
@@ -845,13 +845,13 @@ void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group)
template<bool kTransactionActive>
void Thread::InitPeer(ScopedObjectAccess& soa, jboolean thread_is_daemon, jobject thread_group,
jobject thread_name, jint thread_priority) {
- soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->
+ jni::DecodeArtField(WellKnownClasses::java_lang_Thread_daemon)->
SetBoolean<kTransactionActive>(tlsPtr_.opeer, thread_is_daemon);
- soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->
+ jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group)->
SetObject<kTransactionActive>(tlsPtr_.opeer, soa.Decode<mirror::Object>(thread_group));
- soa.DecodeField(WellKnownClasses::java_lang_Thread_name)->
+ jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name)->
SetObject<kTransactionActive>(tlsPtr_.opeer, soa.Decode<mirror::Object>(thread_name));
- soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)->
+ jni::DecodeArtField(WellKnownClasses::java_lang_Thread_priority)->
SetInt<kTransactionActive>(tlsPtr_.opeer, thread_priority);
}
@@ -947,8 +947,8 @@ void Thread::Dump(std::ostream& os, bool dump_native_stack, BacktraceMap* backtr
DumpStack(os, dump_native_stack, backtrace_map);
}
-mirror::String* Thread::GetThreadName(const ScopedObjectAccessAlreadyRunnable& soa) const {
- ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
+mirror::String* Thread::GetThreadName() const {
+ ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name);
if (tlsPtr_.opeer == nullptr) {
return nullptr;
}
@@ -1306,17 +1306,18 @@ void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
// cause ScopedObjectAccessUnchecked to deadlock.
if (gAborting == 0 && self != nullptr && thread != nullptr && thread->tlsPtr_.opeer != nullptr) {
ScopedObjectAccessUnchecked soa(self);
- priority = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)
+ priority = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_priority)
->GetInt(thread->tlsPtr_.opeer);
- is_daemon = soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)
+ is_daemon = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_daemon)
->GetBoolean(thread->tlsPtr_.opeer);
ObjPtr<mirror::Object> thread_group =
- soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(thread->tlsPtr_.opeer);
+ jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group)
+ ->GetObject(thread->tlsPtr_.opeer);
if (thread_group != nullptr) {
ArtField* group_name_field =
- soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
+ jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_name);
ObjPtr<mirror::String> group_name_string =
group_name_field->GetObject(thread_group)->AsString();
group_name = (group_name_string != nullptr) ? group_name_string->ToModifiedUtf8() : "<null>";
@@ -1792,10 +1793,10 @@ void Thread::Destroy() {
// this.nativePeer = 0;
if (Runtime::Current()->IsActiveTransaction()) {
- soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer)
+ jni::DecodeArtField(WellKnownClasses::java_lang_Thread_nativePeer)
->SetLong<true>(tlsPtr_.opeer, 0);
} else {
- soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer)
+ jni::DecodeArtField(WellKnownClasses::java_lang_Thread_nativePeer)
->SetLong<false>(tlsPtr_.opeer, 0);
}
Dbg::PostThreadDeath(self);
@@ -1803,7 +1804,7 @@ void Thread::Destroy() {
// Thread.join() is implemented as an Object.wait() on the Thread.lock object. Signal anyone
// who is waiting.
ObjPtr<mirror::Object> lock =
- soa.DecodeField(WellKnownClasses::java_lang_Thread_lock)->GetObject(tlsPtr_.opeer);
+ jni::DecodeArtField(WellKnownClasses::java_lang_Thread_lock)->GetObject(tlsPtr_.opeer);
// (This conditional is only needed for tests, where Thread.lock won't have been set.)
if (lock != nullptr) {
StackHandleScope<1> hs(self);
@@ -1894,7 +1895,7 @@ void Thread::HandleUncaughtExceptions(ScopedObjectAccess& soa) {
void Thread::RemoveFromThreadGroup(ScopedObjectAccess& soa) {
// this.group.removeThread(this);
// group can be null if we're in the compiler or a test.
- ObjPtr<mirror::Object> ogroup = soa.DecodeField(WellKnownClasses::java_lang_Thread_group)
+ ObjPtr<mirror::Object> ogroup = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group)
->GetObject(tlsPtr_.opeer);
if (ogroup != nullptr) {
ScopedLocalRef<jobject> group(soa.Env(), soa.AddLocalReference<jobject>(ogroup));
@@ -2414,7 +2415,7 @@ void Thread::ThrowNewWrappedException(const char* exception_class_descriptor,
++i;
}
ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(exception.Get()));
- InvokeWithJValues(soa, ref.get(), soa.EncodeMethod(exception_init_method), jv_args);
+ InvokeWithJValues(soa, ref.get(), jni::EncodeArtMethod(exception_init_method), jv_args);
if (LIKELY(!IsExceptionPending())) {
SetException(exception.Get());
}
@@ -2503,7 +2504,7 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
QUICK_ENTRY_POINT_INFO(pAllocStringFromChars)
QUICK_ENTRY_POINT_INFO(pAllocStringFromString)
QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivial)
- QUICK_ENTRY_POINT_INFO(pCheckCast)
+ QUICK_ENTRY_POINT_INFO(pCheckInstanceOf)
QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage)
QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccess)
QUICK_ENTRY_POINT_INFO(pInitializeType)
diff --git a/runtime/thread.h b/runtime/thread.h
index 4f26803726..f3001be202 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -333,8 +333,7 @@ class Thread {
}
// Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer.
- mirror::String* GetThreadName(const ScopedObjectAccessAlreadyRunnable& ts) const
- REQUIRES_SHARED(Locks::mutator_lock_);
+ mirror::String* GetThreadName() const REQUIRES_SHARED(Locks::mutator_lock_);
// Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
// allocation, or locking.
diff --git a/runtime/utf_test.cc b/runtime/utf_test.cc
index 328492523f..d1e97515d3 100644
--- a/runtime/utf_test.cc
+++ b/runtime/utf_test.cc
@@ -113,8 +113,8 @@ TEST_F(UtfTest, CountModifiedUtf8Chars) {
EXPECT_EQ(2u, CountModifiedUtf8Chars(reinterpret_cast<const char *>(kSurrogateEncoding)));
}
-static void AssertConversion(const std::vector<uint16_t> input,
- const std::vector<uint8_t> expected) {
+static void AssertConversion(const std::vector<uint16_t>& input,
+ const std::vector<uint8_t>& expected) {
ASSERT_EQ(expected.size(), CountUtf8Bytes(&input[0], input.size()));
std::vector<uint8_t> output(expected.size());
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index 53d717a8ff..35495867c7 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -22,6 +22,7 @@
#include "base/logging.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
+#include "jni_internal.h"
#include "mirror/class.h"
#include "mirror/throwable.h"
#include "obj_ptr-inl.h"
@@ -219,9 +220,9 @@ void WellKnownClasses::InitStringInit(JNIEnv* env) {
ScopedObjectAccess soa(Thread::Current());
#define LOAD_STRING_INIT(init_runtime_name, init_signature, new_runtime_name, \
new_java_name, new_signature, ...) \
- init_runtime_name = soa.DecodeMethod( \
+ init_runtime_name = jni::DecodeArtMethod( \
CacheMethod(env, java_lang_String, false, "<init>", init_signature)); \
- new_runtime_name = soa.DecodeMethod( \
+ new_runtime_name = jni::DecodeArtMethod( \
CacheMethod(env, java_lang_StringFactory, true, new_java_name, new_signature));
STRING_INIT_LIST(LOAD_STRING_INIT)
#undef LOAD_STRING_INIT
@@ -239,8 +240,8 @@ void Thread::InitStringEntryPoints() {
ArtMethod* WellKnownClasses::StringInitToStringFactory(ArtMethod* string_init) {
#define TO_STRING_FACTORY(init_runtime_name, init_signature, new_runtime_name, \
new_java_name, new_signature, entry_point_name) \
- if (string_init == init_runtime_name) { \
- return new_runtime_name; \
+ if (string_init == (init_runtime_name)) { \
+ return (new_runtime_name); \
}
STRING_INIT_LIST(TO_STRING_FACTORY)
#undef TO_STRING_FACTORY
@@ -251,7 +252,7 @@ ArtMethod* WellKnownClasses::StringInitToStringFactory(ArtMethod* string_init) {
uint32_t WellKnownClasses::StringInitToEntryPoint(ArtMethod* string_init) {
#define TO_ENTRY_POINT(init_runtime_name, init_signature, new_runtime_name, \
new_java_name, new_signature, entry_point_name) \
- if (string_init == init_runtime_name) { \
+ if (string_init == (init_runtime_name)) { \
return kQuick ## entry_point_name; \
}
STRING_INIT_LIST(TO_ENTRY_POINT)
diff --git a/test/530-checker-lse/expected.txt b/test/530-checker-lse/expected.txt
index e69de29bb2..ddae16aff4 100644
--- a/test/530-checker-lse/expected.txt
+++ b/test/530-checker-lse/expected.txt
@@ -0,0 +1 @@
+java.lang.ArrayIndexOutOfBoundsException: length=3; index=3
diff --git a/test/530-checker-lse/src/Main.java b/test/530-checker-lse/src/Main.java
index a61b9a0c06..9f4be6c227 100644
--- a/test/530-checker-lse/src/Main.java
+++ b/test/530-checker-lse/src/Main.java
@@ -18,6 +18,9 @@ class Circle {
Circle(double radius) {
this.radius = radius;
}
+ public double getRadius() {
+ return radius;
+ }
public double getArea() {
return radius * radius * Math.PI;
}
@@ -758,6 +761,30 @@ public class Main {
return area;
}
+ /// CHECK-START: double Main.testDeoptimize(int[], double[], double) load_store_elimination (before)
+ /// CHECK: Deoptimize
+ /// CHECK: NewInstance
+ /// CHECK: Deoptimize
+ /// CHECK: NewInstance
+
+ /// CHECK-START: double Main.testDeoptimize(int[], double[], double) load_store_elimination (after)
+ /// CHECK: Deoptimize
+ /// CHECK: NewInstance
+ /// CHECK: Deoptimize
+ /// CHECK-NOT: NewInstance
+
+ private static double testDeoptimize(int[] iarr, double[] darr, double radius) {
+ iarr[0] = 1; // One HDeoptimize here. Not triggered.
+ iarr[1] = 1;
+ Circle circle1 = new Circle(radius);
+ iarr[2] = 1;
+ darr[0] = circle1.getRadius(); // One HDeoptimize here, which holds circle1 live. Triggered.
+ darr[1] = circle1.getRadius();
+ darr[2] = circle1.getRadius();
+ darr[3] = circle1.getRadius();
+ return new Circle(Math.PI).getArea();
+ }
+
static void assertIntEquals(int result, int expected) {
if (expected != result) {
throw new Error("Expected: " + expected + ", found: " + result);
@@ -824,6 +851,20 @@ public class Main {
assertFloatEquals(mF, 0f);
assertDoubleEquals(Math.PI * Math.PI * Math.PI, getCircleArea(Math.PI, true));
assertDoubleEquals(0d, getCircleArea(Math.PI, false));
+
+ int[] iarray = {0, 0, 0};
+ double[] darray = {0d, 0d, 0d};
+ try {
+ assertDoubleEquals(Math.PI * Math.PI * Math.PI, testDeoptimize(iarray, darray, Math.PI));
+ } catch (Exception e) {
+ System.out.println(e);
+ }
+ assertIntEquals(iarray[0], 1);
+ assertIntEquals(iarray[1], 1);
+ assertIntEquals(iarray[2], 1);
+ assertDoubleEquals(darray[0], Math.PI);
+ assertDoubleEquals(darray[1], Math.PI);
+ assertDoubleEquals(darray[2], Math.PI);
}
static boolean sFlag;
diff --git a/test/530-checker-lse2/expected.txt b/test/530-checker-lse2/expected.txt
new file mode 100644
index 0000000000..e18fc7e1f8
--- /dev/null
+++ b/test/530-checker-lse2/expected.txt
@@ -0,0 +1,8 @@
+Start....
+r = 9.649776E8
+mZ = false
+mI = 0
+mJ = -576460752303423488
+mF = NaN
+mD = NaN
+Done....
diff --git a/test/530-checker-lse2/info.txt b/test/530-checker-lse2/info.txt
new file mode 100644
index 0000000000..8dd3f502bf
--- /dev/null
+++ b/test/530-checker-lse2/info.txt
@@ -0,0 +1,2 @@
+Checker test for store/allocation elimination in the presence of
+HDeoptimize.
diff --git a/test/530-checker-lse2/src/Main.java b/test/530-checker-lse2/src/Main.java
new file mode 100644
index 0000000000..0fe3d873ea
--- /dev/null
+++ b/test/530-checker-lse2/src/Main.java
@@ -0,0 +1,208 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Arrays;
+
+// Modified from a fuzz test.
+public class Main {
+
+ private interface X {
+ int x();
+ }
+
+ private class A {
+ public int a() {
+ return (+ (Math.multiplyExact(mI, mI)));
+ }
+ }
+
+ private class B extends A implements X {
+ public int a() {
+ return super.a() + ((int) (Math.max(364746077.0f, ((float) mD))));
+ }
+ public int x() {
+ return (mI >> (mI++));
+ }
+ }
+
+ private static class C implements X {
+ public static int s() {
+ return 671468641;
+ }
+ public int c() {
+ return -383762838;
+ }
+ public int x() {
+ return -138813312;
+ }
+ }
+
+ private A mA = new B();
+ private B mB = new B();
+ private X mBX = new B();
+ private C mC = new C();
+ private X mCX = new C();
+
+ private boolean mZ = false;
+ private int mI = 0;
+ private long mJ = 0;
+ private float mF = 0;
+ private double mD = 0;
+
+ private boolean[] mArray = new boolean[576];
+
+ private Main() {
+ boolean a = false;
+ for (int i0 = 0; i0 < 576; i0++) {
+ mArray[i0] = a;
+ a = !a;
+ }
+ }
+
+ /// CHECK-START: float Main.testMethod() load_store_elimination (before)
+ /// CHECK-DAG: Deoptimize
+ /// CHECK-DAG: Deoptimize
+ /// CHECK-DAG: NewInstance
+ /// CHECK-DAG: NewInstance
+ /// CHECK-DAG: NewInstance
+ /// CHECK-DAG: NewInstance
+ /// CHECK-DAG: NewInstance
+ /// CHECK-DAG: NewInstance
+ /// CHECK-DAG: NewInstance
+ /// CHECK-DAG: NewInstance
+ /// CHECK-DAG: NewInstance
+ /// CHECK-DAG: NewInstance
+ /// CHECK-DAG: NewInstance
+ /// CHECK-DAG: NewInstance
+ /// CHECK-DAG: NewInstance
+ /// CHECK-DAG: NewInstance
+ /// CHECK-NOT: NewInstance
+
+ /// CHECK-START: float Main.testMethod() load_store_elimination (after)
+ /// CHECK-DAG: Deoptimize
+ /// CHECK-DAG: Deoptimize
+ /// CHECK-NOT: NewInstance
+
+ private float testMethod() {
+ {
+ int lI0 = (-1456058746 << mI);
+ mD = ((double)(int)(double) mD);
+ for (int i0 = 56 - 1; i0 >= 0; i0--) {
+ mArray[i0] &= (Boolean.logicalOr(((true ? ((boolean) new Boolean((mZ))) : mZ) || mArray[i0]), (mZ)));
+ mF *= (mF * mF);
+ if ((mZ ^ true)) {
+ mF *= ((float)(int)(float) 267827331.0f);
+ mZ ^= ((false & ((boolean) new Boolean(false))) | mZ);
+ for (int i1 = 576 - 1; i1 >= 0; i1--) {
+ mZ &= ((mArray[279]) | ((boolean) new Boolean(true)));
+ mD -= (--mD);
+ for (int i2 = 56 - 1; i2 >= 0; i2--) {
+ mF /= (mF - mF);
+ mI = (Math.min(((int) new Integer(mI)), (766538816 * (++mI))));
+ mF += (mZ ? (mB.a()) : ((! mZ) ? -752042357.0f : (++mF)));
+ mJ |= ((long) new Long((-2084191070L + (mJ | mJ))));
+ lI0 |= ((int) new Integer(((int) new Integer(mI))));
+ if (((boolean) new Boolean(false))) {
+ mZ &= (mZ);
+ mF *= (mF--);
+ mD = (Double.POSITIVE_INFINITY);
+ mF += ((float)(int)(float) (-2026938813.0f * 638401585.0f));
+ mJ = (--mJ);
+ for (int i3 = 56 - 1; i3 >= 0; i3--) {
+ mI &= (- mI);
+ mD = (--mD);
+ mArray[426] = (mZ || false);
+ mF -= (((this instanceof Main) ? mF : mF) + 976981405.0f);
+ mZ &= ((mZ) & (this instanceof Main));
+ }
+ mZ ^= (Float.isFinite(-1975953895.0f));
+ } else {
+ mJ /= ((long) (Math.nextDown(-1519600008.0f)));
+ mJ <<= (Math.round(1237681786.0));
+ }
+ }
+ mArray[i0] &= (false || ((1256071300.0f != -353296391.0f) ? false : (mZ ^ mArray[i0])));
+ mF *= (+ ((float) mD));
+ for (int i2 = 0; i2 < 576; i2++) {
+ mD *= ((double) lI0);
+ lI0 = (lI0 & (Integer.MIN_VALUE));
+ mF -= (--mF);
+ }
+ if ((this instanceof Main)) {
+ mZ ^= ((boolean) new Boolean(true));
+ } else {
+ {
+ int lI1 = (mZ ? (--lI0) : 1099574344);
+ mJ >>= (Math.incrementExact(mJ));
+ mJ = (~ -2103354070L);
+ }
+ }
+ }
+ } else {
+ mJ *= (- ((long) new Long(479832084L)));
+ mJ %= (Long.MAX_VALUE);
+ mD /= (--mD);
+ if ((mI > ((mBX.x()) << mI))) {
+ {
+ long lJ0 = (mJ--);
+ mI >>>= (mBX.x());
+ }
+ mF = (+ 505094603.0f);
+ mD *= (((boolean) new Boolean((! false))) ? mD : 1808773781.0);
+ mI *= (Integer.MIN_VALUE);
+ for (int i1 = 576 - 1; i1 >= 0; i1--) {
+ if (((boolean) new Boolean(false))) {
+ mD += ((double)(float)(double) -1051436901.0);
+ } else {
+ mF -= ((float)(int)(float) (Float.min(mF, (mF--))));
+ }
+ for (int i2 = 0; i2 < 576; i2++) {
+ mJ -= ((long) new Long(-1968644857L));
+ mJ ^= (+ (mC.s()));
+ }
+ }
+ } else {
+ mF -= ((- mF) + -2145489966.0f);
+ }
+ mD -= (mD++);
+ mD = (949112777.0 * 1209996119.0);
+ }
+ mZ &= (Boolean.logicalAnd(true, ((mZ) & (((boolean) new Boolean(true)) && true))));
+ }
+ }
+ return ((float) 964977619L);
+ }
+
+ public static void main(String[] args) {
+ System.out.println("Start....");
+ Main t = new Main();
+ float r = 1883600237.0f;
+ try {
+ r = t.testMethod();
+ } catch (Exception e) {
+ // Arithmetic, null pointer, index out of bounds, etc.
+ System.out.println("An exception was caught.");
+ }
+ System.out.println("r = " + r);
+ System.out.println("mZ = " + t.mZ);
+ System.out.println("mI = " + t.mI);
+ System.out.println("mJ = " + t.mJ);
+ System.out.println("mF = " + t.mF);
+ System.out.println("mD = " + t.mD);
+ System.out.println("Done....");
+ }
+}
+
diff --git a/test/622-simplifyifs-exception-edges/expected.txt b/test/622-simplifyifs-exception-edges/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/622-simplifyifs-exception-edges/expected.txt
diff --git a/test/622-simplifyifs-exception-edges/info.txt b/test/622-simplifyifs-exception-edges/info.txt
new file mode 100644
index 0000000000..58c4bfbbcf
--- /dev/null
+++ b/test/622-simplifyifs-exception-edges/info.txt
@@ -0,0 +1,2 @@
+Regression test for the SimplifyIfs() graph simplification erroneously trying
+to redirect exception handler edges.
\ No newline at end of file
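
For context, the SimplifyIfs() simplification targets a branch that only selects a constant "flag" which a later if re-tests, and rewrites the graph so the defining blocks jump straight to the final targets. The smali below builds that shape inside catch blocks, where redirecting the incoming (exception handler) edges is not valid. A rough source-level illustration of the pattern the pass rewrites (illustrative only, not taken from the patch):

    class SimplifyIfsSketch {
      static int pick(boolean cond) {
        int flag;
        if (cond) {
          flag = 0;
        } else {
          flag = -1;
        }
        // SimplifyIfs() can send each arm above directly to the matching
        // result below, removing the re-test of 'flag'.
        return (flag == 0) ? -2 : -1;
      }
    }
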
diff --git a/test/622-simplifyifs-exception-edges/smali/Test.smali b/test/622-simplifyifs-exception-edges/smali/Test.smali
new file mode 100644
index 0000000000..5e91258edc
--- /dev/null
+++ b/test/622-simplifyifs-exception-edges/smali/Test.smali
@@ -0,0 +1,76 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTest;
+
+.super Ljava/lang/Object;
+
+.method public static test([I)I
+ .locals 2
+ const/4 v0, 0
+ :try1_begin
+ array-length v1, p0
+ :try1_end
+ add-int/lit8 v0, v1, -1
+ :try2_begin
+ aget v0, p0, v0
+ :try2_end
+ :end
+ return v0
+
+ :catch_all
+ # Regression test for bug 32545860:
+ # SimplifyIfs() would have redirected exception handler edges leading here.
+ # Note: There is no move-exception here to prevent matching the SimplifyIfs() pattern.
+ if-eqz v0, :is_zero
+ const/4 v0, -1
+ goto :end
+ :is_zero
+ const/4 v0, -2
+ goto :end
+
+ .catchall {:try1_begin .. :try1_end } :catch_all
+ .catchall {:try2_begin .. :try2_end } :catch_all
+.end method
+
+.method public static test2([II)I
+ .locals 3
+ move v0, p1
+ :try_begin
+ array-length v1, p0
+ add-int/lit8 v1, v1, -1
+ add-int/lit8 v0, v0, 1
+ aget v1, p0, v1
+ const/4 v0, 2
+ aget v2, p0, p1
+ const/4 v0, 3
+ :try_end
+ :end
+ return v0
+
+ :catch_all
+ # Regression test for bug 32546110:
+ # SimplifyIfs() would have looked at predecessors of this block based on the indexes
+ # of the catch Phi's inputs. For catch blocks these two arrays are unrelated, so
+ # this caused out-of-range access triggering a DCHECK() in dchecked_vector<>.
+ # Note: There is no move-exception here to prevent matching the SimplifyIfs() pattern.
+ if-eqz v0, :is_zero
+ const/4 v0, -1
+ goto :end
+ :is_zero
+ const/4 v0, -2
+ goto :end
+
+ .catchall {:try_begin .. :try_end } :catch_all
+.end method
diff --git a/test/622-simplifyifs-exception-edges/src/Main.java b/test/622-simplifyifs-exception-edges/src/Main.java
new file mode 100644
index 0000000000..636f047195
--- /dev/null
+++ b/test/622-simplifyifs-exception-edges/src/Main.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+import java.lang.reflect.InvocationTargetException;
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ Class<?> c = Class.forName("Test");
+ Method test = c.getDeclaredMethod("test", int[].class);
+ assertIntEquals(-2, (int)test.invoke(null, new Object[] { null }));
+ assertIntEquals(-1, (int)test.invoke(null, new Object[] { new int[0] }));
+ assertIntEquals(42, (int)test.invoke(null, new Object[] { new int[] { 42 } }));
+
+ Method test2 = c.getDeclaredMethod("test2", int[].class, int.class);
+ assertIntEquals(-2, (int)test2.invoke(null, new Object[] { null, 0 }));
+ assertIntEquals(-1, (int)test2.invoke(null, new Object[] { new int[0], 0 }));
+ assertIntEquals(-1, (int)test2.invoke(null, new Object[] { new int[0], 1 }));
+ assertIntEquals(3, (int)test2.invoke(null, new Object[] { new int[] { 42 }, 0 }));
+ }
+
+ public static void assertIntEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ // Workaround for non-zero field ids offset in dex file with no fields. Bug: 18051191
+ static final boolean dummy = false;
+}
diff --git a/tools/cpp-define-generator/main.cc b/tools/cpp-define-generator/main.cc
index a1b463a92d..fc99f8abc7 100644
--- a/tools/cpp-define-generator/main.cc
+++ b/tools/cpp-define-generator/main.cc
@@ -59,12 +59,12 @@ pretty_format(T value) {
}

template <typename T>
-void cpp_define(std::string name, T value) {
+void cpp_define(const std::string& name, T value) {
std::cout << "#define " << name << " " << pretty_format(value) << std::endl;
}

template <typename T>
-void emit_check_eq(T value, std::string expr) {
+void emit_check_eq(T value, const std::string& expr) {
std::cout << "DEFINE_CHECK_EQ(" << value << ", (" << expr << "))" << std::endl;
}