-rw-r--r--   compiler/Android.bp                                   |   1
-rw-r--r--   compiler/optimizing/code_generator_arm_vixl.cc        |  21
-rw-r--r--   compiler/optimizing/critical_native_abi_fixup_arm.cc  | 110
-rw-r--r--   compiler/optimizing/critical_native_abi_fixup_arm.h   |  40
-rw-r--r--   compiler/optimizing/instruction_builder.cc            |   4
-rw-r--r--   compiler/optimizing/nodes.cc                          |   3
-rw-r--r--   compiler/optimizing/nodes.h                           |   8
-rw-r--r--   compiler/optimizing/optimization.cc                   |   8
-rw-r--r--   compiler/optimizing/optimization.h                    |   1
-rw-r--r--   compiler/optimizing/optimizing_compiler.cc            |  14
-rw-r--r--   runtime/well_known_classes.cc                         |   9
-rw-r--r--   runtime/well_known_classes.h                          |   2
12 files changed, 208 insertions(+), 13 deletions(-)
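
Background note: @CriticalNative is the JNI annotation whose calls this change teaches the arm compiler to dispatch directly. Such methods receive no implicit JNIEnv* or jclass argument and may only take and return primitives, so their arguments travel in the raw native calling convention; on soft-float arm that means float/double values arrive in core registers. A minimal sketch of the native side, with hypothetical function names (a real @CriticalNative method would be bound to its native counterpart, typically via RegisterNatives):

#include <jni.h>

// Ordinary static native method: the VM passes JNIEnv* and the declaring
// class before the declared arguments.
extern "C" jlong ScaleJni(JNIEnv* env, jclass klass, jfloat value, jint factor) {
  return static_cast<jlong>(value * factor);
}

// @CriticalNative counterpart: no JNIEnv*, no jclass, primitives only. On
// soft-float arm the jfloat argument arrives in a core register, which is
// exactly the situation the fixup pass in this change models for the type
// checker.
extern "C" jlong ScaleCritical(jfloat value, jint factor) {
  return static_cast<jlong>(value * factor);
}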
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 513394e6f5..8896c84bf7 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -103,6 +103,7 @@ art_cc_defaults {
"jni/quick/arm/calling_convention_arm.cc",
"optimizing/code_generator_arm_vixl.cc",
"optimizing/code_generator_vector_arm_vixl.cc",
+ "optimizing/critical_native_abi_fixup_arm.cc",
"optimizing/instruction_simplifier_arm.cc",
"optimizing/instruction_simplifier_shared.cc",
"optimizing/intrinsics_arm_vixl.cc",
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 0f47a8bd21..d9a965cd36 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -9029,29 +9029,24 @@ void CodeGeneratorARMVIXL::GenerateReadBarrierForRootSlow(HInstruction* instruct
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARMVIXL::GetSupportedInvokeStaticOrDirectDispatch(
    const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
    ArtMethod* method) {
-  if (desired_dispatch_info.code_ptr_location ==
+  if (method->IsIntrinsic() &&
+      desired_dispatch_info.code_ptr_location ==
          HInvokeStaticOrDirect::CodePtrLocation::kCallCriticalNative) {
-    // TODO: Work around CheckTypeConsistency() in code_generator.cc that does not allow
-    // putting FP values in core registers as we need to do for the soft-float native ABI.
+    // As a workaround for the soft-float native ABI interfering with type checks, we insert
+    // fake calls to Float.floatToRawIntBits() or Double.doubleToRawLongBits() when a float
+    // or double argument is passed in core registers. We cannot do that for actual intrinsic
+    // implementations that expect their inputs in FP registers, so we do not use
+    // `kCallCriticalNative` for intrinsics with FP arguments; if they are properly
+    // intrinsified, the dispatch type does not matter anyway.
    ScopedObjectAccess soa(Thread::Current());
    uint32_t shorty_len;
    const char* shorty = method->GetShorty(&shorty_len);
-    size_t reg = 0u;
    for (uint32_t i = 1; i != shorty_len; ++i) {
-      size_t next_reg = reg + 1u;
-      if (shorty[i] == 'D' || shorty[i] == 'J') {
-        reg = RoundUp(reg, 2u);
-        next_reg = reg + 2u;
-      }
-      if (reg == 4u) {
-        break;
-      }
      if (shorty[i] == 'D' || shorty[i] == 'F') {
        HInvokeStaticOrDirect::DispatchInfo dispatch_info = desired_dispatch_info;
        dispatch_info.code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
        return dispatch_info;
      }
-      reg = next_reg;
    }
  }
  return desired_dispatch_info;
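
For reference, the register-walking loop deleted above encoded the AAPCS core-register assignment that the soft-float @CriticalNative ABI uses; the same logic reappears in FixUpArguments() in the new pass below. A standalone sketch of that assignment (illustrative only, not ART code):

#include <cstddef>
#include <cstdio>

// Walk a method shorty (shorty[0] is the return type) and report where each
// argument lands under the arm soft-float native ABI: 64-bit values ('J', 'D')
// take an even-aligned register pair, and once r0-r3 are exhausted all
// remaining arguments are passed on the stack.
static void PrintCoreRegisterAssignment(const char* shorty) {
  size_t reg = 0u;  // Next free core register (r0-r3).
  size_t i = 1u;
  for (; shorty[i] != '\0'; ++i) {
    size_t next_reg = reg + 1u;
    if (shorty[i] == 'J' || shorty[i] == 'D') {
      reg += reg & 1u;  // Round up to an even register for the pair.
      next_reg = reg + 2u;
    }
    if (reg == 4u) {
      break;  // No core registers left.
    }
    std::printf("arg %zu ('%c'): r%zu%s\n",
                i, shorty[i], reg, (next_reg - reg == 2u) ? " (register pair)" : "");
    reg = next_reg;
  }
  for (; shorty[i] != '\0'; ++i) {
    std::printf("arg %zu ('%c'): stack\n", i, shorty[i]);
  }
}

int main() {
  PrintCoreRegisterAssignment("VIFJD");  // e.g. void f(int, float, long, double)
  return 0;
}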
diff --git a/compiler/optimizing/critical_native_abi_fixup_arm.cc b/compiler/optimizing/critical_native_abi_fixup_arm.cc
new file mode 100644
index 0000000000..8441423a12
--- /dev/null
+++ b/compiler/optimizing/critical_native_abi_fixup_arm.cc
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "critical_native_abi_fixup_arm.h"
+
+#include "art_method-inl.h"
+#include "intrinsics.h"
+#include "jni/jni_internal.h"
+#include "nodes.h"
+#include "scoped_thread_state_change-inl.h"
+#include "well_known_classes.h"
+
+namespace art {
+namespace arm {
+
+// Fix up FP arguments passed in core registers for a @CriticalNative call by inserting fake calls
+// to Float.floatToRawIntBits() or Double.doubleToRawLongBits() to satisfy type consistency checks.
+static void FixUpArguments(HInvokeStaticOrDirect* invoke) {
+  DCHECK_EQ(invoke->GetCodePtrLocation(),
+            HInvokeStaticOrDirect::CodePtrLocation::kCallCriticalNative);
+  size_t reg = 0u;
+  for (size_t i = 0, num_args = invoke->GetNumberOfArguments(); i != num_args; ++i) {
+    HInstruction* input = invoke->InputAt(i);
+    DataType::Type input_type = input->GetType();
+    size_t next_reg = reg + 1u;
+    if (DataType::Is64BitType(input_type)) {
+      reg = RoundUp(reg, 2u);
+      next_reg = reg + 2u;
+    }
+    if (reg == 4u) {
+      break;  // Remaining arguments are passed on the stack.
+    }
+    if (DataType::IsFloatingPointType(input_type)) {
+      bool is_double = (input_type == DataType::Type::kFloat64);
+      DataType::Type converted_type = is_double ? DataType::Type::kInt64 : DataType::Type::kInt32;
+      jmethodID known_method = is_double ? WellKnownClasses::java_lang_Double_doubleToRawLongBits
+                                         : WellKnownClasses::java_lang_Float_floatToRawIntBits;
+      ArtMethod* resolved_method = jni::DecodeArtMethod(known_method);
+      DCHECK(resolved_method != nullptr);
+      MethodReference target_method(nullptr, 0);
+      {
+        ScopedObjectAccess soa(Thread::Current());
+        target_method =
+            MethodReference(resolved_method->GetDexFile(), resolved_method->GetDexMethodIndex());
+      }
+      // Use arbitrary dispatch info that does not require the method argument.
+      HInvokeStaticOrDirect::DispatchInfo dispatch_info = {
+          HInvokeStaticOrDirect::MethodLoadKind::kBssEntry,
+          HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+          /*method_load_data=*/ 0u
+      };
+      HBasicBlock* block = invoke->GetBlock();
+      ArenaAllocator* allocator = block->GetGraph()->GetAllocator();
+      HInvokeStaticOrDirect* new_input = new (allocator) HInvokeStaticOrDirect(
+          allocator,
+          /*number_of_arguments=*/ 1u,
+          converted_type,
+          invoke->GetDexPc(),
+          /*method_index=*/ dex::kDexNoIndex,
+          resolved_method,
+          dispatch_info,
+          kStatic,
+          target_method,
+          HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
+      // The intrinsic has no side effects and does not need an environment or dex cache on ARM.
+      new_input->SetSideEffects(SideEffects::None());
+      IntrinsicOptimizations opt(new_input);
+      opt.SetDoesNotNeedDexCache();
+      opt.SetDoesNotNeedEnvironment();
+      new_input->SetRawInputAt(0u, input);
+      block->InsertInstructionBefore(new_input, invoke);
+      invoke->ReplaceInput(new_input, i);
+    }
+    reg = next_reg;
+  }
+}
+
+bool CriticalNativeAbiFixupArm::Run() {
+  if (!graph_->HasDirectCriticalNativeCall()) {
+    return false;
+  }
+
+  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+      HInstruction* instruction = it.Current();
+      if (instruction->IsInvokeStaticOrDirect() &&
+          instruction->AsInvokeStaticOrDirect()->GetCodePtrLocation() ==
+              HInvokeStaticOrDirect::CodePtrLocation::kCallCriticalNative) {
+        FixUpArguments(instruction->AsInvokeStaticOrDirect());
+      }
+    }
+  }
+  return true;
+}
+
+} // namespace arm
+} // namespace art
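
The inserted call is semantically free: Float.floatToRawIntBits() and Double.doubleToRawLongBits() reinterpret a value's bits without changing them, and under the soft-float ABI those raw bits are exactly what already sits in the core registers, so a properly intrinsified call compiles to nothing. A minimal sketch of the float case (hypothetical standalone helper, not the ART intrinsic itself):

#include <cstdint>
#include <cstring>

// Bit-preserving reinterpretation with the same semantics as Java's
// Float.floatToRawIntBits(): the returned int32_t carries the float's exact
// IEEE-754 bit pattern, NaN payloads included.
static inline int32_t FloatToRawIntBits(float value) {
  static_assert(sizeof(int32_t) == sizeof(float), "expects a 32-bit float");
  int32_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return bits;
}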
diff --git a/compiler/optimizing/critical_native_abi_fixup_arm.h b/compiler/optimizing/critical_native_abi_fixup_arm.h
new file mode 100644
index 0000000000..faa3c7a5fe
--- /dev/null
+++ b/compiler/optimizing/critical_native_abi_fixup_arm.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_CRITICAL_NATIVE_ABI_FIXUP_ARM_H_
+#define ART_COMPILER_OPTIMIZING_CRITICAL_NATIVE_ABI_FIXUP_ARM_H_
+
+#include "nodes.h"
+#include "optimization.h"
+
+namespace art {
+namespace arm {
+
+class CriticalNativeAbiFixupArm : public HOptimization {
+ public:
+  CriticalNativeAbiFixupArm(HGraph* graph, OptimizingCompilerStats* stats)
+      : HOptimization(graph, kCriticalNativeAbiFixupArmPassName, stats) {}
+
+  static constexpr const char* kCriticalNativeAbiFixupArmPassName =
+      "critical_native_abi_fixup_arm";
+
+  bool Run() override;
+};
+
+} // namespace arm
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_CRITICAL_NATIVE_ABI_FIXUP_ARM_H_
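
Since the fixup is a plain HOptimization, it can in principle be constructed and run like any other pass, in addition to the pipeline registration below. A hypothetical usage sketch (assumes an existing HGraph* graph and OptimizingCompilerStats* stats from the surrounding compiler):

// Hypothetical driver snippet; `graph` and `stats` are not constructed here.
arm::CriticalNativeAbiFixupArm fixup(graph, stats);
bool changed = fixup.Run();  // false when the graph has no direct @CriticalNative call.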
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index ac714abd59..c96ea5d945 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -1038,6 +1038,10 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
    HInvokeStaticOrDirect::DispatchInfo dispatch_info =
        HSharpening::SharpenInvokeStaticOrDirect(resolved_method, code_generator_);
+    if (dispatch_info.code_ptr_location ==
+        HInvokeStaticOrDirect::CodePtrLocation::kCallCriticalNative) {
+      graph_->SetHasDirectCriticalNativeCall(true);
+    }
    invoke = new (allocator_) HInvokeStaticOrDirect(allocator_,
                                                    number_of_arguments,
                                                    return_type,
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 22e165767d..50cedd2502 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2450,6 +2450,9 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
  if (HasIrreducibleLoops()) {
    outer_graph->SetHasIrreducibleLoops(true);
  }
+  if (HasDirectCriticalNativeCall()) {
+    outer_graph->SetHasDirectCriticalNativeCall(true);
+  }
  if (HasTryCatch()) {
    outer_graph->SetHasTryCatch(true);
  }
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 63ba3c0d8f..55e579baf4 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -399,6 +399,7 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
        has_simd_(false),
        has_loops_(false),
        has_irreducible_loops_(false),
+        has_direct_critical_native_call_(false),
        dead_reference_safe_(dead_reference_safe),
        debuggable_(debuggable),
        current_instruction_id_(start_instruction_id),
@@ -677,6 +678,9 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
  bool HasIrreducibleLoops() const { return has_irreducible_loops_; }
  void SetHasIrreducibleLoops(bool value) { has_irreducible_loops_ = value; }

+  bool HasDirectCriticalNativeCall() const { return has_direct_critical_native_call_; }
+  void SetHasDirectCriticalNativeCall(bool value) { has_direct_critical_native_call_ = value; }
+
  ArtMethod* GetArtMethod() const { return art_method_; }
  void SetArtMethod(ArtMethod* method) { art_method_ = method; }
@@ -788,6 +792,10 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
  // so there might be false positives.
  bool has_irreducible_loops_;

+  // Flag whether there are any direct calls to native code registered
+  // for @CriticalNative methods.
+  bool has_direct_critical_native_call_;
+
  // Is the code known to be robust against eliminating dead references
  // and the effects of early finalization? If false, dead reference variables
  // are kept if they might be visible to the garbage collector.
diff --git a/compiler/optimizing/optimization.cc b/compiler/optimizing/optimization.cc
index 3a2d067f4d..b28bda6e65 100644
--- a/compiler/optimizing/optimization.cc
+++ b/compiler/optimizing/optimization.cc
@@ -17,6 +17,7 @@
#include "optimization.h"

#ifdef ART_ENABLE_CODEGEN_arm
+#include "critical_native_abi_fixup_arm.h"
#include "instruction_simplifier_arm.h"
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
@@ -97,6 +98,8 @@ const char* OptimizationPassName(OptimizationPass pass) {
#ifdef ART_ENABLE_CODEGEN_arm
    case OptimizationPass::kInstructionSimplifierArm:
      return arm::InstructionSimplifierArm::kInstructionSimplifierArmPassName;
+    case OptimizationPass::kCriticalNativeAbiFixupArm:
+      return arm::CriticalNativeAbiFixupArm::kCriticalNativeAbiFixupArmPassName;
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
    case OptimizationPass::kInstructionSimplifierArm64:
@@ -143,6 +146,7 @@ OptimizationPass OptimizationPassByName(const std::string& pass_name) {
    X(OptimizationPass::kSideEffectsAnalysis);
#ifdef ART_ENABLE_CODEGEN_arm
    X(OptimizationPass::kInstructionSimplifierArm);
+    X(OptimizationPass::kCriticalNativeAbiFixupArm);
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
    X(OptimizationPass::kInstructionSimplifierArm64);
@@ -277,6 +281,10 @@ ArenaVector<HOptimization*> ConstructOptimizations(
        DCHECK(alt_name == nullptr) << "arch-specific pass does not support alternative name";
        opt = new (allocator) arm::InstructionSimplifierArm(graph, stats);
        break;
+      case OptimizationPass::kCriticalNativeAbiFixupArm:
+        DCHECK(alt_name == nullptr) << "arch-specific pass does not support alternative name";
+        opt = new (allocator) arm::CriticalNativeAbiFixupArm(graph, stats);
+        break;
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
      case OptimizationPass::kInstructionSimplifierArm64:
diff --git a/compiler/optimizing/optimization.h b/compiler/optimizing/optimization.h
index f8aea9680d..2113df0c81 100644
--- a/compiler/optimizing/optimization.h
+++ b/compiler/optimizing/optimization.h
@@ -85,6 +85,7 @@ enum class OptimizationPass {
  kSideEffectsAnalysis,
#ifdef ART_ENABLE_CODEGEN_arm
  kInstructionSimplifierArm,
+  kCriticalNativeAbiFixupArm,
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
  kInstructionSimplifierArm64,
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index f201f806d6..169f8b6446 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -468,6 +468,19 @@ bool OptimizingCompiler::RunBaselineOptimizations(HGraph* graph,
                                                  const DexCompilationUnit& dex_compilation_unit,
                                                  PassObserver* pass_observer) const {
  switch (codegen->GetCompilerOptions().GetInstructionSet()) {
+#if defined(ART_ENABLE_CODEGEN_arm)
+    case InstructionSet::kThumb2:
+    case InstructionSet::kArm: {
+      OptimizationDef arm_optimizations[] = {
+          OptDef(OptimizationPass::kCriticalNativeAbiFixupArm),
+      };
+      return RunOptimizations(graph,
+                              codegen,
+                              dex_compilation_unit,
+                              pass_observer,
+                              arm_optimizations);
+    }
+#endif
#ifdef ART_ENABLE_CODEGEN_x86
    case InstructionSet::kX86: {
      OptimizationDef x86_optimizations[] = {
@@ -501,6 +514,7 @@ bool OptimizingCompiler::RunArchOptimizations(HGraph* graph,
          OptDef(OptimizationPass::kInstructionSimplifierArm),
          OptDef(OptimizationPass::kSideEffectsAnalysis),
          OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
+          OptDef(OptimizationPass::kCriticalNativeAbiFixupArm),
          OptDef(OptimizationPass::kScheduling)
      };
      return RunOptimizations(graph,
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index 838eb8a6a9..8f0581e4db 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -97,7 +97,9 @@ jmethodID WellKnownClasses::java_lang_ClassNotFoundException_init;
jmethodID WellKnownClasses::java_lang_Daemons_start;
jmethodID WellKnownClasses::java_lang_Daemons_stop;
jmethodID WellKnownClasses::java_lang_Daemons_waitForDaemonStart;
+jmethodID WellKnownClasses::java_lang_Double_doubleToRawLongBits;
jmethodID WellKnownClasses::java_lang_Double_valueOf;
+jmethodID WellKnownClasses::java_lang_Float_floatToRawIntBits;
jmethodID WellKnownClasses::java_lang_Float_valueOf;
jmethodID WellKnownClasses::java_lang_Integer_valueOf;
jmethodID WellKnownClasses::java_lang_invoke_MethodHandles_lookup;
@@ -472,6 +474,11 @@ void WellKnownClasses::InitFieldsAndMethodsOnly(JNIEnv* env) {
  java_lang_Integer_valueOf = CachePrimitiveBoxingMethod(env, 'I', "java/lang/Integer");
  java_lang_Long_valueOf = CachePrimitiveBoxingMethod(env, 'J', "java/lang/Long");
  java_lang_Short_valueOf = CachePrimitiveBoxingMethod(env, 'S', "java/lang/Short");
+
+  java_lang_Double_doubleToRawLongBits =
+      CacheMethod(env, "java/lang/Double", /*is_static=*/ true, "doubleToRawLongBits", "(D)J");
+  java_lang_Float_floatToRawIntBits =
+      CacheMethod(env, "java/lang/Float", /*is_static=*/ true, "floatToRawIntBits", "(F)I");
}

void WellKnownClasses::LateInit(JNIEnv* env) {
@@ -557,7 +564,9 @@ void WellKnownClasses::Clear() {
  java_lang_ClassNotFoundException_init = nullptr;
  java_lang_Daemons_start = nullptr;
  java_lang_Daemons_stop = nullptr;
+  java_lang_Double_doubleToRawLongBits = nullptr;
  java_lang_Double_valueOf = nullptr;
+  java_lang_Float_floatToRawIntBits = nullptr;
  java_lang_Float_valueOf = nullptr;
  java_lang_Integer_valueOf = nullptr;
  java_lang_invoke_MethodHandles_lookup = nullptr;
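
CacheMethod() is ART's internal lookup helper; outside the runtime the equivalent lookup is plain JNI. A hedged sketch (assumes a valid JNIEnv* env; error handling elided):

// Plain-JNI equivalent of the two lookups cached above (hypothetical snippet).
jclass double_class = env->FindClass("java/lang/Double");
jmethodID double_to_raw_long_bits =
    env->GetStaticMethodID(double_class, "doubleToRawLongBits", "(D)J");
jclass float_class = env->FindClass("java/lang/Float");
jmethodID float_to_raw_int_bits =
    env->GetStaticMethodID(float_class, "floatToRawIntBits", "(F)I");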
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index 5c933bd463..35c007d9f2 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -110,7 +110,9 @@ struct WellKnownClasses {
  static jmethodID java_lang_Daemons_start;
  static jmethodID java_lang_Daemons_stop;
  static jmethodID java_lang_Daemons_waitForDaemonStart;
+  static jmethodID java_lang_Double_doubleToRawLongBits;
  static jmethodID java_lang_Double_valueOf;
+  static jmethodID java_lang_Float_floatToRawIntBits;
  static jmethodID java_lang_Float_valueOf;
  static jmethodID java_lang_Integer_valueOf;
  static jmethodID java_lang_invoke_MethodHandles_lookup;