Faster @CriticalNative for boot image.
The @CriticalNative call does not need the target method, so
we can avoid one instruction on x86, x86-64 and arm64. The
current approach for arm does not allow such an optimization.
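
For illustration, a sketch of the arm64 call sequence for a @CriticalNative
method with a boot image target (register names and operands are schematic
placeholders, not verbatim compiler output):

  // Before: materialize the ArtMethod* with ADRP+ADD, then load and
  // call its entrypoint.
  adrp xN, MethodPage                // patched
  add  xN, xN, #method_lo            // patched
  ldr  lr, [xN, #entrypoint_offset]
  blr  lr

  // After: patch the low bits into the LDR immediate instead, fusing
  // the ADD into the load and saving one instruction.
  adrp xN, EntrypointPage            // patched
  ldr  lr, [xN, #entrypoint_lo]      // patched
  blr  lr

On arm, the MOVW+MOVT+ADD(pc)+LDR+BL sequence offers no such fusion
opportunity; see the code comment in code_generator_arm_vixl.cc below.
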
Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Test: aosp_blueline-userdebug boots.
Test: run-gtests.sh
Test: testrunner.py --target --64 --optimizing
Bug: 112189621
Change-Id: I11b7e415be2697757cbb11c9cccf4058d1d72d7d
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 85337ed..76b8be1 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -9140,6 +9140,12 @@
GenerateInvokeStaticOrDirectRuntimeCall(invoke, temp, slow_path);
return; // No code pointer retrieval; the runtime performs the call directly.
}
+ case MethodLoadKind::kBootImageLinkTimePcRelative:
+ // Note: Unlike arm64, x86 and x86-64, we do not avoid the materialization of the
+ // method pointer for kCallCriticalNative because it would not save us an instruction
+ // from the current sequence MOVW+MOVT+ADD(pc)+LDR+BL. The ADD(pc) separates the
+ // patched offset instructions MOVW+MOVT from the entrypoint load, so they cannot be fused.
+ FALLTHROUGH_INTENDED;
default: {
LoadMethod(invoke->GetMethodLoadKind(), temp, invoke);
break;