Merge "Pass the debug_info_offset explicitly."
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 249aaf5..3699d66 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -320,7 +320,6 @@
"exception_test.cc",
"jni/jni_compiler_test.cc",
"linker/linker_patch_test.cc",
- "linker/method_bss_mapping_encoder_test.cc",
"linker/output_stream_test.cc",
"optimizing/bounds_check_elimination_test.cc",
"optimizing/cloner_test.cc",
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 726401d..e4dd544 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -2025,28 +2025,19 @@
ClassReference ref(manager_->GetDexFile(), class_def_index);
manager_->GetCompiler()->RecordClassStatus(ref, klass->GetStatus());
- // It is *very* problematic if there are verification errors in the boot classpath.
- // For example, we rely on things working OK without verification when the decryption dialog
- // is brought up. So abort in a debug build if we find this violated.
+ // It is *very* problematic if there are resolution errors in the boot classpath.
+ //
+ // It is also bad if classes fail verification. For example, we rely on things working
+ // OK without verification when the decryption dialog is brought up. It is thus highly
+ // recommended to compile the boot classpath with
+ // --abort-on-hard-verifier-error --abort-on-soft-verifier-error
+ // which is the default build system configuration.
if (kIsDebugBuild) {
if (manager_->GetCompiler()->GetCompilerOptions().IsBootImage()) {
- if (!klass->IsVerified()) {
- // Re-run verification to get all failure messages if it soft-failed.
- if (!klass->IsErroneous()) {
- gLogVerbosity.verifier = true;
- // Note: We can't call ClassLinker::VerifyClass, as it will elide the second
- // verification.
- Runtime* runtime = Runtime::Current();
- std::string v_error;
- verifier::MethodVerifier::VerifyClass(soa.Self(),
- klass.Get(),
- runtime->GetCompilerCallbacks(),
- runtime->IsAotCompiler(),
- verifier::HardFailLogMode::kLogInternalFatal,
- &v_error);
- }
+ if (!klass->IsResolved() || klass->IsErroneous()) {
LOG(FATAL) << "Boot classpath class " << klass->PrettyClass()
- << " failed to fully verify: state= " << klass->GetStatus();
+ << " failed to resolve/is erroneous: state= " << klass->GetStatus();
+ UNREACHABLE();
}
}
if (klass->IsVerified()) {
diff --git a/compiler/linker/method_bss_mapping_encoder.h b/compiler/linker/method_bss_mapping_encoder.h
deleted file mode 100644
index b2922ec..0000000
--- a/compiler/linker/method_bss_mapping_encoder.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_LINKER_METHOD_BSS_MAPPING_ENCODER_H_
-#define ART_COMPILER_LINKER_METHOD_BSS_MAPPING_ENCODER_H_
-
-#include "base/enums.h"
-#include "base/logging.h"
-#include "dex_file.h"
-#include "method_bss_mapping.h"
-
-namespace art {
-namespace linker {
-
-// Helper class for encoding compressed MethodBssMapping.
-class MethodBssMappingEncoder {
- public:
- explicit MethodBssMappingEncoder(PointerSize pointer_size)
- : pointer_size_(static_cast<size_t>(pointer_size)) {
- entry_.method_index = DexFile::kDexNoIndex16;
- entry_.index_mask = 0u;
- entry_.bss_offset = static_cast<uint32_t>(-1);
- }
-
- // Try to merge the next method_index -> bss_offset mapping into the current entry.
- // Return true on success, false on failure.
- bool TryMerge(uint32_t method_index, uint32_t bss_offset) {
- DCHECK_NE(method_index, entry_.method_index);
- if (entry_.bss_offset + pointer_size_ != bss_offset) {
- return false;
- }
- uint32_t diff = method_index - entry_.method_index;
- if (diff > 16u) {
- return false;
- }
- if ((entry_.index_mask & ~(static_cast<uint32_t>(-1) << diff)) != 0u) {
- return false;
- }
- entry_.method_index = method_index;
- // Insert the bit indicating the method index we've just overwritten
- // and shift bits indicating method indexes before that.
- entry_.index_mask = dchecked_integral_cast<uint16_t>(
- (static_cast<uint32_t>(entry_.index_mask) | 0x10000u) >> diff);
- entry_.bss_offset = bss_offset;
- return true;
- }
-
- void Reset(uint32_t method_index, uint32_t bss_offset) {
- entry_.method_index = method_index;
- entry_.index_mask = 0u;
- entry_.bss_offset = bss_offset;
- }
-
- MethodBssMappingEntry GetEntry() {
- return entry_;
- }
-
- private:
- size_t pointer_size_;
- MethodBssMappingEntry entry_;
-};
-
-} // namespace linker
-} // namespace art
-
-#endif // ART_COMPILER_LINKER_METHOD_BSS_MAPPING_ENCODER_H_
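
For context on the deletion: each MethodBssMappingEntry covered its own method_index plus up to
16 lower method indexes, recorded as set bits in the 16-bit index_mask, with the corresponding
.bss slots laid out one pointer apart. A minimal standalone sketch of the TryMerge() arithmetic
being removed here (hypothetical names, no ART headers; it mirrors the code above):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    struct Entry {
      uint32_t method_index;  // last merged method index
      uint16_t index_mask;    // bit b set => index (method_index - (16 - b)) is also covered
      uint32_t bss_offset;    // .bss offset of the slot for method_index
    };

    bool TryMerge(Entry* e, uint32_t method_index, uint32_t bss_offset, size_t pointer_size) {
      if (e->bss_offset + pointer_size != bss_offset) return false;  // slots must be adjacent
      uint32_t diff = method_index - e->method_index;                // assumed non-zero
      if (diff > 16u) return false;                                  // outside the 16-bit window
      if ((e->index_mask & ~(~0u << diff)) != 0u) return false;      // would shift out an index
      // Insert the bit for the previous method_index and shift older bits down by `diff`.
      e->index_mask = static_cast<uint16_t>(
          (static_cast<uint32_t>(e->index_mask) | 0x10000u) >> diff);
      e->method_index = method_index;
      e->bss_offset = bss_offset;
      return true;
    }

    int main() {
      Entry e = {1u, 0u, 0u};        // Reset(1, 0)
      TryMerge(&e, 5u, 8u, 8u);      // diff 4: mask becomes 0x1000
      TryMerge(&e, 17u, 16u, 8u);    // diff 12: mask becomes (0x11000 >> 12) == 0x0011
      std::printf("index=%u mask=0x%04x\n", e.method_index, e.index_mask);
      return 0;
    }

With a 64-bit pointer size this reproduces the 0x0011 mask asserted by the deleted test below.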
diff --git a/compiler/linker/method_bss_mapping_encoder_test.cc b/compiler/linker/method_bss_mapping_encoder_test.cc
deleted file mode 100644
index 1240389..0000000
--- a/compiler/linker/method_bss_mapping_encoder_test.cc
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "method_bss_mapping_encoder.h"
-
-#include "gtest/gtest.h"
-
-namespace art {
-namespace linker {
-
-TEST(MethodBssMappingEncoder, TryMerge) {
- for (PointerSize pointer_size : {PointerSize::k32, PointerSize::k64}) {
- size_t raw_pointer_size = static_cast<size_t>(pointer_size);
- MethodBssMappingEncoder encoder(pointer_size);
- encoder.Reset(1u, 0u);
- ASSERT_FALSE(encoder.TryMerge(5u, raw_pointer_size + 1)); // Wrong bss_offset difference.
- ASSERT_FALSE(encoder.TryMerge(18u, raw_pointer_size)); // Method index out of range.
- ASSERT_TRUE(encoder.TryMerge(5u, raw_pointer_size));
- ASSERT_TRUE(encoder.GetEntry().CoversIndex(1u));
- ASSERT_TRUE(encoder.GetEntry().CoversIndex(5u));
- ASSERT_FALSE(encoder.GetEntry().CoversIndex(17u));
- ASSERT_FALSE(encoder.TryMerge(17u, 2 * raw_pointer_size + 1)); // Wrong bss_offset difference.
- ASSERT_FALSE(encoder.TryMerge(18u, 2 * raw_pointer_size)); // Method index out of range.
- ASSERT_TRUE(encoder.TryMerge(17u, 2 * raw_pointer_size));
- ASSERT_TRUE(encoder.GetEntry().CoversIndex(1u));
- ASSERT_TRUE(encoder.GetEntry().CoversIndex(5u));
- ASSERT_TRUE(encoder.GetEntry().CoversIndex(17u));
- ASSERT_EQ(0u, encoder.GetEntry().GetBssOffset(1u, raw_pointer_size));
- ASSERT_EQ(raw_pointer_size, encoder.GetEntry().GetBssOffset(5u, raw_pointer_size));
- ASSERT_EQ(2 * raw_pointer_size, encoder.GetEntry().GetBssOffset(17u, raw_pointer_size));
- ASSERT_EQ(0x0011u, encoder.GetEntry().index_mask);
- ASSERT_FALSE(encoder.TryMerge(18u, 2 * raw_pointer_size)); // Method index out of range.
- }
-}
-
-} // namespace linker
-} // namespace art
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index a0cb43e..5054a29 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -311,40 +311,23 @@
LoadClassSlowPathARM64(HLoadClass* cls,
HInstruction* at,
uint32_t dex_pc,
- bool do_clinit,
- vixl::aarch64::Register bss_entry_temp = vixl::aarch64::Register(),
- vixl::aarch64::Label* bss_entry_adrp_label = nullptr)
+ bool do_clinit)
: SlowPathCodeARM64(at),
cls_(cls),
dex_pc_(dex_pc),
- do_clinit_(do_clinit),
- bss_entry_temp_(bss_entry_temp),
- bss_entry_adrp_label_(bss_entry_adrp_label) {
+ do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
Location out = locations->Out();
- constexpr bool call_saves_everything_except_r0_ip0 = (!kUseReadBarrier || kUseBakerReadBarrier);
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
- InvokeRuntimeCallingConvention calling_convention;
- // For HLoadClass/kBssEntry/kSaveEverything, the page address of the entry is in a temp
- // register, make sure it's not clobbered by the call or by saving/restoring registers.
- DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
- bool is_load_class_bss_entry =
- (cls_ == instruction_) && (cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry);
- if (is_load_class_bss_entry) {
- DCHECK(bss_entry_temp_.IsValid());
- DCHECK(!bss_entry_temp_.Is(calling_convention.GetRegisterAt(0)));
- DCHECK(
- !UseScratchRegisterScope(arm64_codegen->GetVIXLAssembler()).IsAvailable(bss_entry_temp_));
- }
-
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
+ InvokeRuntimeCallingConvention calling_convention;
dex::TypeIndex type_index = cls_->GetTypeIndex();
__ Mov(calling_convention.GetRegisterAt(0).W(), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
@@ -363,26 +346,6 @@
arm64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
}
RestoreLiveRegisters(codegen, locations);
- // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
- if (is_load_class_bss_entry) {
- DCHECK(out.IsValid());
- const DexFile& dex_file = cls_->GetDexFile();
- if (call_saves_everything_except_r0_ip0) {
- // The class entry page address was preserved in bss_entry_temp_ thanks to kSaveEverything.
- } else {
- // For non-Baker read barrier, we need to re-calculate the address of the class entry page.
- bss_entry_adrp_label_ = arm64_codegen->NewBssEntryTypePatch(dex_file, type_index);
- arm64_codegen->EmitAdrpPlaceholder(bss_entry_adrp_label_, bss_entry_temp_);
- }
- vixl::aarch64::Label* strp_label =
- arm64_codegen->NewBssEntryTypePatch(dex_file, type_index, bss_entry_adrp_label_);
- {
- SingleEmissionCheckScope guard(arm64_codegen->GetVIXLAssembler());
- __ Bind(strp_label);
- __ str(RegisterFrom(locations->Out(), DataType::Type::kReference),
- MemOperand(bss_entry_temp_, /* offset placeholder */ 0));
- }
- }
__ B(GetExitLabel());
}
@@ -398,34 +361,23 @@
// Whether to initialize the class.
const bool do_clinit_;
- // For HLoadClass/kBssEntry, the temp register and the label of the ADRP where it was loaded.
- vixl::aarch64::Register bss_entry_temp_;
- vixl::aarch64::Label* bss_entry_adrp_label_;
-
DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM64);
};
class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
public:
- LoadStringSlowPathARM64(HLoadString* instruction, Register temp, vixl::aarch64::Label* adrp_label)
- : SlowPathCodeARM64(instruction),
- temp_(temp),
- adrp_label_(adrp_label) {}
+ explicit LoadStringSlowPathARM64(HLoadString* instruction)
+ : SlowPathCodeARM64(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
- InvokeRuntimeCallingConvention calling_convention;
- // Make sure `temp_` is not clobbered by the call or by saving/restoring registers.
- DCHECK(temp_.IsValid());
- DCHECK(!temp_.Is(calling_convention.GetRegisterAt(0)));
- DCHECK(!UseScratchRegisterScope(arm64_codegen->GetVIXLAssembler()).IsAvailable(temp_));
-
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
+ InvokeRuntimeCallingConvention calling_convention;
const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex();
__ Mov(calling_convention.GetRegisterAt(0).W(), string_index.index_);
arm64_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
@@ -435,33 +387,12 @@
RestoreLiveRegisters(codegen, locations);
- // Store the resolved String to the BSS entry.
- const DexFile& dex_file = instruction_->AsLoadString()->GetDexFile();
- if (!kUseReadBarrier || kUseBakerReadBarrier) {
- // The string entry page address was preserved in temp_ thanks to kSaveEverything.
- } else {
- // For non-Baker read barrier, we need to re-calculate the address of the string entry page.
- adrp_label_ = arm64_codegen->NewStringBssEntryPatch(dex_file, string_index);
- arm64_codegen->EmitAdrpPlaceholder(adrp_label_, temp_);
- }
- vixl::aarch64::Label* strp_label =
- arm64_codegen->NewStringBssEntryPatch(dex_file, string_index, adrp_label_);
- {
- SingleEmissionCheckScope guard(arm64_codegen->GetVIXLAssembler());
- __ Bind(strp_label);
- __ str(RegisterFrom(locations->Out(), DataType::Type::kReference),
- MemOperand(temp_, /* offset placeholder */ 0));
- }
-
__ B(GetExitLabel());
}
const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARM64"; }
private:
- const Register temp_;
- vixl::aarch64::Label* adrp_label_;
-
DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM64);
};
@@ -4883,7 +4814,6 @@
if (cls->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the type resolution or initialization and marking to save everything we need.
- locations->AddTemp(FixedTempLocation());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConvention calling_convention;
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode()));
@@ -4910,8 +4840,6 @@
Location out_loc = cls->GetLocations()->Out();
Register out = OutputRegister(cls);
- Register bss_entry_temp;
- vixl::aarch64::Label* bss_entry_adrp_label = nullptr;
const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
? kWithoutReadBarrier
@@ -4975,16 +4903,16 @@
// Add ADRP with its PC-relative Class .bss entry patch.
const DexFile& dex_file = cls->GetDexFile();
dex::TypeIndex type_index = cls->GetTypeIndex();
- bss_entry_temp = XRegisterFrom(cls->GetLocations()->GetTemp(0));
- bss_entry_adrp_label = codegen_->NewBssEntryTypePatch(dex_file, type_index);
- codegen_->EmitAdrpPlaceholder(bss_entry_adrp_label, bss_entry_temp);
+ vixl::aarch64::Register temp = XRegisterFrom(out_loc);
+ vixl::aarch64::Label* adrp_label = codegen_->NewBssEntryTypePatch(dex_file, type_index);
+ codegen_->EmitAdrpPlaceholder(adrp_label, temp);
// Add LDR with its PC-relative Class patch.
vixl::aarch64::Label* ldr_label =
- codegen_->NewBssEntryTypePatch(dex_file, type_index, bss_entry_adrp_label);
+ codegen_->NewBssEntryTypePatch(dex_file, type_index, adrp_label);
// /* GcRoot<mirror::Class> */ out = *(base_address + offset) /* PC-relative */
GenerateGcRootFieldLoad(cls,
out_loc,
- bss_entry_temp,
+ temp,
/* offset placeholder */ 0u,
ldr_label,
read_barrier_option);
@@ -5013,7 +4941,7 @@
if (generate_null_check || do_clinit) {
DCHECK(cls->CanCallRuntime());
SlowPathCodeARM64* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathARM64(
- cls, cls, cls->GetDexPc(), do_clinit, bss_entry_temp, bss_entry_adrp_label);
+ cls, cls, cls->GetDexPc(), do_clinit);
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
__ Cbz(out, slow_path->GetEntryLabel());
@@ -5078,7 +5006,6 @@
if (load->GetLoadKind() == HLoadString::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the pResolveString and marking to save everything we need.
- locations->AddTemp(FixedTempLocation());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConvention calling_convention;
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode()));
@@ -5138,7 +5065,7 @@
const DexFile& dex_file = load->GetDexFile();
const dex::StringIndex string_index = load->GetStringIndex();
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
- Register temp = XRegisterFrom(load->GetLocations()->GetTemp(0));
+ Register temp = XRegisterFrom(out_loc);
vixl::aarch64::Label* adrp_label = codegen_->NewStringBssEntryPatch(dex_file, string_index);
codegen_->EmitAdrpPlaceholder(adrp_label, temp);
// Add LDR with its .bss entry String patch.
@@ -5152,7 +5079,7 @@
ldr_label,
kCompilerReadBarrierOption);
SlowPathCodeARM64* slow_path =
- new (codegen_->GetScopedAllocator()) LoadStringSlowPathARM64(load, temp, adrp_label);
+ new (codegen_->GetScopedAllocator()) LoadStringSlowPathARM64(load);
codegen_->AddSlowPath(slow_path);
__ Cbz(out.X(), slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 9e7455d..3f8f0c4 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -532,29 +532,12 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
Location out = locations->Out();
- constexpr bool call_saves_everything_except_r0 = (!kUseReadBarrier || kUseBakerReadBarrier);
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
- // For HLoadClass/kBssEntry/kSaveEverything, make sure we preserve the address of the entry.
- DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
- bool is_load_class_bss_entry =
- (cls_ == instruction_) && (cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry);
- vixl32::Register entry_address;
- if (is_load_class_bss_entry && call_saves_everything_except_r0) {
- vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
- // In the unlucky case that the `temp` is R0, we preserve the address in `out` across
- // the kSaveEverything call.
- bool temp_is_r0 = temp.Is(calling_convention.GetRegisterAt(0));
- entry_address = temp_is_r0 ? RegisterFrom(out) : temp;
- DCHECK(!entry_address.Is(calling_convention.GetRegisterAt(0)));
- if (temp_is_r0) {
- __ Mov(entry_address, temp);
- }
- }
dex::TypeIndex type_index = cls_->GetTypeIndex();
__ Mov(calling_convention.GetRegisterAt(0), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
@@ -566,22 +549,6 @@
CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
}
- // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
- if (is_load_class_bss_entry) {
- if (call_saves_everything_except_r0) {
- // The class entry address was preserved in `entry_address` thanks to kSaveEverything.
- __ Str(r0, MemOperand(entry_address));
- } else {
- // For non-Baker read barrier, we need to re-calculate the address of the class entry.
- UseScratchRegisterScope temps(
- down_cast<CodeGeneratorARMVIXL*>(codegen)->GetVIXLAssembler());
- vixl32::Register temp = temps.Acquire();
- CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
- arm_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
- arm_codegen->EmitMovwMovtPlaceholder(labels, temp);
- __ Str(r0, MemOperand(temp));
- }
- }
// Move the class to the desired location.
if (out.IsValid()) {
DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
@@ -616,48 +583,17 @@
DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry);
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
- HLoadString* load = instruction_->AsLoadString();
- const dex::StringIndex string_index = load->GetStringIndex();
- vixl32::Register out = OutputRegister(load);
- constexpr bool call_saves_everything_except_r0 = (!kUseReadBarrier || kUseBakerReadBarrier);
+ const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex();
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
- // In the unlucky case that the `temp` is R0, we preserve the address in `out` across
- // the kSaveEverything call.
- vixl32::Register entry_address;
- if (call_saves_everything_except_r0) {
- vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
- bool temp_is_r0 = (temp.Is(calling_convention.GetRegisterAt(0)));
- entry_address = temp_is_r0 ? out : temp;
- DCHECK(!entry_address.Is(calling_convention.GetRegisterAt(0)));
- if (temp_is_r0) {
- __ Mov(entry_address, temp);
- }
- }
-
__ Mov(calling_convention.GetRegisterAt(0), string_index.index_);
arm_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
- // Store the resolved String to the .bss entry.
- if (call_saves_everything_except_r0) {
- // The string entry address was preserved in `entry_address` thanks to kSaveEverything.
- __ Str(r0, MemOperand(entry_address));
- } else {
- // For non-Baker read barrier, we need to re-calculate the address of the string entry.
- UseScratchRegisterScope temps(
- down_cast<CodeGeneratorARMVIXL*>(codegen)->GetVIXLAssembler());
- vixl32::Register temp = temps.Acquire();
- CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
- arm_codegen->NewStringBssEntryPatch(load->GetDexFile(), string_index);
- arm_codegen->EmitMovwMovtPlaceholder(labels, temp);
- __ Str(r0, MemOperand(temp));
- }
-
arm_codegen->Move32(locations->Out(), LocationFrom(r0));
RestoreLiveRegisters(codegen, locations);
@@ -7104,9 +7040,6 @@
if (load_kind == HLoadClass::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the type resolution or initialization and marking to save everything we need.
- // Note that IP may be clobbered by saving/restoring the live register (only one thanks
- // to the custom calling convention) or by marking, so we request a different temp.
- locations->AddTemp(Location::RequiresRegister());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConventionARMVIXL calling_convention;
caller_saves.Add(LocationFrom(calling_convention.GetRegisterAt(0)));
@@ -7189,13 +7122,10 @@
break;
}
case HLoadClass::LoadKind::kBssEntry: {
- vixl32::Register temp = (!kUseReadBarrier || kUseBakerReadBarrier)
- ? RegisterFrom(locations->GetTemp(0))
- : out;
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
- codegen_->EmitMovwMovtPlaceholder(labels, temp);
- GenerateGcRootFieldLoad(cls, out_loc, temp, /* offset */ 0, read_barrier_option);
+ codegen_->EmitMovwMovtPlaceholder(labels, out);
+ GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, read_barrier_option);
generate_null_check = true;
break;
}
@@ -7296,9 +7226,6 @@
if (load_kind == HLoadString::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the pResolveString and marking to save everything we need, including temps.
- // Note that IP may be clobbered by saving/restoring the live register (only one thanks
- // to the custom calling convention) or by marking, so we request a different temp.
- locations->AddTemp(Location::RequiresRegister());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConventionARMVIXL calling_convention;
caller_saves.Add(LocationFrom(calling_convention.GetRegisterAt(0)));
@@ -7348,13 +7275,10 @@
}
case HLoadString::LoadKind::kBssEntry: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
- vixl32::Register temp = (!kUseReadBarrier || kUseBakerReadBarrier)
- ? RegisterFrom(locations->GetTemp(0))
- : out;
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex());
- codegen_->EmitMovwMovtPlaceholder(labels, temp);
- GenerateGcRootFieldLoad(load, out_loc, temp, /* offset */ 0, kCompilerReadBarrierOption);
+ codegen_->EmitMovwMovtPlaceholder(labels, out);
+ GenerateGcRootFieldLoad(load, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
LoadStringSlowPathARMVIXL* slow_path =
new (codegen_->GetScopedAllocator()) LoadStringSlowPathARMVIXL(load);
codegen_->AddSlowPath(slow_path);
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index ddec0cc..d6922d2 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -220,13 +220,11 @@
LoadClassSlowPathMIPS(HLoadClass* cls,
HInstruction* at,
uint32_t dex_pc,
- bool do_clinit,
- const CodeGeneratorMIPS::PcRelativePatchInfo* bss_info_high = nullptr)
+ bool do_clinit)
: SlowPathCodeMIPS(at),
cls_(cls),
dex_pc_(dex_pc),
- do_clinit_(do_clinit),
- bss_info_high_(bss_info_high) {
+ do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
@@ -234,28 +232,11 @@
LocationSummary* locations = instruction_->GetLocations();
Location out = locations->Out();
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
- const bool baker_or_no_read_barriers = (!kUseReadBarrier || kUseBakerReadBarrier);
InvokeRuntimeCallingConvention calling_convention;
DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
- const bool is_load_class_bss_entry =
- (cls_ == instruction_) && (cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
- // For HLoadClass/kBssEntry/kSaveEverything, make sure we preserve the address of the entry.
- Register entry_address = kNoRegister;
- if (is_load_class_bss_entry && baker_or_no_read_barriers) {
- Register temp = locations->GetTemp(0).AsRegister<Register>();
- bool temp_is_a0 = (temp == calling_convention.GetRegisterAt(0));
- // In the unlucky case that `temp` is A0, we preserve the address in `out` across the
- // kSaveEverything call.
- entry_address = temp_is_a0 ? out.AsRegister<Register>() : temp;
- DCHECK_NE(entry_address, calling_convention.GetRegisterAt(0));
- if (temp_is_a0) {
- __ Move(entry_address, temp);
- }
- }
-
dex::TypeIndex type_index = cls_->GetTypeIndex();
__ LoadConst32(calling_convention.GetRegisterAt(0), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
@@ -267,18 +248,6 @@
CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
}
- // For HLoadClass/kBssEntry, store the resolved class to the BSS entry.
- if (is_load_class_bss_entry && baker_or_no_read_barriers) {
- // The class entry address was preserved in `entry_address` thanks to kSaveEverything.
- DCHECK(bss_info_high_);
- CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
- mips_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index, bss_info_high_);
- __ Sw(calling_convention.GetRegisterAt(0),
- entry_address,
- /* placeholder */ 0x5678,
- &info_low->label);
- }
-
// Move the class to the desired location.
if (out.IsValid()) {
DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
@@ -289,21 +258,6 @@
}
RestoreLiveRegisters(codegen, locations);
- // For HLoadClass/kBssEntry, store the resolved class to the BSS entry.
- if (is_load_class_bss_entry && !baker_or_no_read_barriers) {
- // For non-Baker read barriers we need to re-calculate the address of
- // the class entry.
- const bool isR6 = mips_codegen->GetInstructionSetFeatures().IsR6();
- const bool has_irreducible_loops = codegen->GetGraph()->HasIrreducibleLoops();
- Register base =
- (isR6 || has_irreducible_loops) ? ZERO : locations->InAt(0).AsRegister<Register>();
- CodeGeneratorMIPS::PcRelativePatchInfo* info_high =
- mips_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
- CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
- mips_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index, info_high);
- mips_codegen->EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base);
- __ Sw(out.AsRegister<Register>(), TMP, /* placeholder */ 0x5678, &info_low->label);
- }
__ B(GetExitLabel());
}
@@ -319,92 +273,41 @@
// Whether to initialize the class.
const bool do_clinit_;
- // Pointer to the high half PC-relative patch info for HLoadClass/kBssEntry.
- const CodeGeneratorMIPS::PcRelativePatchInfo* bss_info_high_;
-
DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS);
};
class LoadStringSlowPathMIPS : public SlowPathCodeMIPS {
public:
- explicit LoadStringSlowPathMIPS(HLoadString* instruction,
- const CodeGeneratorMIPS::PcRelativePatchInfo* bss_info_high)
- : SlowPathCodeMIPS(instruction), bss_info_high_(bss_info_high) {}
+ explicit LoadStringSlowPathMIPS(HLoadString* instruction)
+ : SlowPathCodeMIPS(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
DCHECK(instruction_->IsLoadString());
DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry);
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
- HLoadString* load = instruction_->AsLoadString();
- const dex::StringIndex string_index = load->GetStringIndex();
- Register out = locations->Out().AsRegister<Register>();
+ const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex();
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
- const bool baker_or_no_read_barriers = (!kUseReadBarrier || kUseBakerReadBarrier);
InvokeRuntimeCallingConvention calling_convention;
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
- // For HLoadString/kBssEntry/kSaveEverything, make sure we preserve the address of the entry.
- Register entry_address = kNoRegister;
- if (baker_or_no_read_barriers) {
- Register temp = locations->GetTemp(0).AsRegister<Register>();
- bool temp_is_a0 = (temp == calling_convention.GetRegisterAt(0));
- // In the unlucky case that `temp` is A0, we preserve the address in `out` across the
- // kSaveEverything call.
- entry_address = temp_is_a0 ? out : temp;
- DCHECK_NE(entry_address, calling_convention.GetRegisterAt(0));
- if (temp_is_a0) {
- __ Move(entry_address, temp);
- }
- }
-
__ LoadConst32(calling_convention.GetRegisterAt(0), string_index.index_);
mips_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
- // Store the resolved string to the BSS entry.
- if (baker_or_no_read_barriers) {
- // The string entry address was preserved in `entry_address` thanks to kSaveEverything.
- DCHECK(bss_info_high_);
- CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
- mips_codegen->NewStringBssEntryPatch(load->GetDexFile(), string_index, bss_info_high_);
- __ Sw(calling_convention.GetRegisterAt(0),
- entry_address,
- /* placeholder */ 0x5678,
- &info_low->label);
- }
-
DataType::Type type = instruction_->GetType();
mips_codegen->MoveLocation(locations->Out(),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
type);
RestoreLiveRegisters(codegen, locations);
- // Store the resolved string to the BSS entry.
- if (!baker_or_no_read_barriers) {
- // For non-Baker read barriers we need to re-calculate the address of
- // the string entry.
- const bool isR6 = mips_codegen->GetInstructionSetFeatures().IsR6();
- const bool has_irreducible_loops = codegen->GetGraph()->HasIrreducibleLoops();
- Register base =
- (isR6 || has_irreducible_loops) ? ZERO : locations->InAt(0).AsRegister<Register>();
- CodeGeneratorMIPS::PcRelativePatchInfo* info_high =
- mips_codegen->NewStringBssEntryPatch(load->GetDexFile(), string_index);
- CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
- mips_codegen->NewStringBssEntryPatch(load->GetDexFile(), string_index, info_high);
- mips_codegen->EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base);
- __ Sw(out, TMP, /* placeholder */ 0x5678, &info_low->label);
- }
__ B(GetExitLabel());
}
const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS"; }
private:
- // Pointer to the high half PC-relative patch info.
- const CodeGeneratorMIPS::PcRelativePatchInfo* bss_info_high_;
-
DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS);
};
@@ -7736,8 +7639,6 @@
if (load_kind == HLoadClass::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the type resolution or initialization and marking to save everything we need.
- // Request a temp to hold the BSS entry location for the slow path.
- locations->AddTemp(Location::RequiresRegister());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConvention calling_convention;
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -7786,7 +7687,6 @@
? kWithoutReadBarrier
: kCompilerReadBarrierOption;
bool generate_null_check = false;
- CodeGeneratorMIPS::PcRelativePatchInfo* bss_info_high = nullptr;
switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
DCHECK(!cls->CanCallRuntime());
@@ -7845,17 +7745,16 @@
break;
}
case HLoadClass::LoadKind::kBssEntry: {
- bss_info_high = codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
+ CodeGeneratorMIPS::PcRelativePatchInfo* bss_info_high =
+ codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex(), bss_info_high);
- constexpr bool non_baker_read_barrier = kUseReadBarrier && !kUseBakerReadBarrier;
- Register temp = non_baker_read_barrier ? out : locations->GetTemp(0).AsRegister<Register>();
codegen_->EmitPcRelativeAddressPlaceholderHigh(bss_info_high,
- temp,
+ out,
base_or_current_method_reg);
GenerateGcRootFieldLoad(cls,
out_loc,
- temp,
+ out,
/* placeholder */ 0x5678,
read_barrier_option,
&info_low->label);
@@ -7887,7 +7786,7 @@
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS(
- cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck(), bss_info_high);
+ cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
__ Beqz(out, slow_path->GetEntryLabel());
@@ -7960,8 +7859,6 @@
if (load_kind == HLoadString::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the pResolveString and marking to save everything we need.
- // Request a temp to hold the BSS entry location for the slow path.
- locations->AddTemp(Location::RequiresRegister());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConvention calling_convention;
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -8041,19 +7938,17 @@
codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex());
CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex(), info_high);
- constexpr bool non_baker_read_barrier = kUseReadBarrier && !kUseBakerReadBarrier;
- Register temp = non_baker_read_barrier ? out : locations->GetTemp(0).AsRegister<Register>();
codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
- temp,
+ out,
base_or_current_method_reg);
GenerateGcRootFieldLoad(load,
out_loc,
- temp,
+ out,
/* placeholder */ 0x5678,
kCompilerReadBarrierOption,
&info_low->label);
SlowPathCodeMIPS* slow_path =
- new (codegen_->GetScopedAllocator()) LoadStringSlowPathMIPS(load, info_high);
+ new (codegen_->GetScopedAllocator()) LoadStringSlowPathMIPS(load);
codegen_->AddSlowPath(slow_path);
__ Beqz(out, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 0a6d915..ee33b3f 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -175,13 +175,11 @@
LoadClassSlowPathMIPS64(HLoadClass* cls,
HInstruction* at,
uint32_t dex_pc,
- bool do_clinit,
- const CodeGeneratorMIPS64::PcRelativePatchInfo* bss_info_high = nullptr)
+ bool do_clinit)
: SlowPathCodeMIPS64(at),
cls_(cls),
dex_pc_(dex_pc),
- do_clinit_(do_clinit),
- bss_info_high_(bss_info_high) {
+ do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
@@ -189,28 +187,11 @@
LocationSummary* locations = instruction_->GetLocations();
Location out = locations->Out();
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
- const bool baker_or_no_read_barriers = (!kUseReadBarrier || kUseBakerReadBarrier);
InvokeRuntimeCallingConvention calling_convention;
DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
- const bool is_load_class_bss_entry =
- (cls_ == instruction_) && (cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
- // For HLoadClass/kBssEntry/kSaveEverything, make sure we preserve the address of the entry.
- GpuRegister entry_address = kNoGpuRegister;
- if (is_load_class_bss_entry && baker_or_no_read_barriers) {
- GpuRegister temp = locations->GetTemp(0).AsRegister<GpuRegister>();
- bool temp_is_a0 = (temp == calling_convention.GetRegisterAt(0));
- // In the unlucky case that `temp` is A0, we preserve the address in `out` across the
- // kSaveEverything call.
- entry_address = temp_is_a0 ? out.AsRegister<GpuRegister>() : temp;
- DCHECK_NE(entry_address, calling_convention.GetRegisterAt(0));
- if (temp_is_a0) {
- __ Move(entry_address, temp);
- }
- }
-
dex::TypeIndex type_index = cls_->GetTypeIndex();
__ LoadConst32(calling_convention.GetRegisterAt(0), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
@@ -222,19 +203,6 @@
CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
}
- // For HLoadClass/kBssEntry, store the resolved class to the BSS entry.
- if (is_load_class_bss_entry && baker_or_no_read_barriers) {
- // The class entry address was preserved in `entry_address` thanks to kSaveEverything.
- DCHECK(bss_info_high_);
- CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
- mips64_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index, bss_info_high_);
- __ Bind(&info_low->label);
- __ StoreToOffset(kStoreWord,
- calling_convention.GetRegisterAt(0),
- entry_address,
- /* placeholder */ 0x5678);
- }
-
// Move the class to the desired location.
if (out.IsValid()) {
DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
@@ -245,17 +213,6 @@
}
RestoreLiveRegisters(codegen, locations);
- // For HLoadClass/kBssEntry, store the resolved class to the BSS entry.
- if (is_load_class_bss_entry && !baker_or_no_read_barriers) {
- // For non-Baker read barriers we need to re-calculate the address of
- // the class entry.
- CodeGeneratorMIPS64::PcRelativePatchInfo* info_high =
- mips64_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
- CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
- mips64_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index, info_high);
- mips64_codegen->EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, info_low);
- __ StoreToOffset(kStoreWord, out.AsRegister<GpuRegister>(), TMP, /* placeholder */ 0x5678);
- }
__ Bc(GetExitLabel());
}
@@ -271,46 +228,25 @@
// Whether to initialize the class.
const bool do_clinit_;
- // Pointer to the high half PC-relative patch info for HLoadClass/kBssEntry.
- const CodeGeneratorMIPS64::PcRelativePatchInfo* bss_info_high_;
-
DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS64);
};
class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 {
public:
- explicit LoadStringSlowPathMIPS64(HLoadString* instruction,
- const CodeGeneratorMIPS64::PcRelativePatchInfo* bss_info_high)
- : SlowPathCodeMIPS64(instruction), bss_info_high_(bss_info_high) {}
+ explicit LoadStringSlowPathMIPS64(HLoadString* instruction)
+ : SlowPathCodeMIPS64(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
DCHECK(instruction_->IsLoadString());
DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry);
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
- HLoadString* load = instruction_->AsLoadString();
- const dex::StringIndex string_index = load->GetStringIndex();
- GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+ const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex();
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
- const bool baker_or_no_read_barriers = (!kUseReadBarrier || kUseBakerReadBarrier);
InvokeRuntimeCallingConvention calling_convention;
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
- // For HLoadString/kBssEntry/kSaveEverything, make sure we preserve the address of the entry.
- GpuRegister entry_address = kNoGpuRegister;
- if (baker_or_no_read_barriers) {
- GpuRegister temp = locations->GetTemp(0).AsRegister<GpuRegister>();
- bool temp_is_a0 = (temp == calling_convention.GetRegisterAt(0));
- // In the unlucky case that `temp` is A0, we preserve the address in `out` across the
- // kSaveEverything call.
- entry_address = temp_is_a0 ? out : temp;
- DCHECK_NE(entry_address, calling_convention.GetRegisterAt(0));
- if (temp_is_a0) {
- __ Move(entry_address, temp);
- }
- }
-
__ LoadConst32(calling_convention.GetRegisterAt(0), string_index.index_);
mips64_codegen->InvokeRuntime(kQuickResolveString,
instruction_,
@@ -318,47 +254,18 @@
this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
- // Store the resolved string to the BSS entry.
- if (baker_or_no_read_barriers) {
- // The string entry address was preserved in `entry_address` thanks to kSaveEverything.
- DCHECK(bss_info_high_);
- CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
- mips64_codegen->NewStringBssEntryPatch(load->GetDexFile(),
- string_index,
- bss_info_high_);
- __ Bind(&info_low->label);
- __ StoreToOffset(kStoreWord,
- calling_convention.GetRegisterAt(0),
- entry_address,
- /* placeholder */ 0x5678);
- }
-
DataType::Type type = instruction_->GetType();
mips64_codegen->MoveLocation(locations->Out(),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
type);
RestoreLiveRegisters(codegen, locations);
- // Store the resolved string to the BSS entry.
- if (!baker_or_no_read_barriers) {
- // For non-Baker read barriers we need to re-calculate the address of
- // the string entry.
- CodeGeneratorMIPS64::PcRelativePatchInfo* info_high =
- mips64_codegen->NewStringBssEntryPatch(load->GetDexFile(), string_index);
- CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
- mips64_codegen->NewStringBssEntryPatch(load->GetDexFile(), string_index, info_high);
- mips64_codegen->EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, info_low);
- __ StoreToOffset(kStoreWord, out, TMP, /* placeholder */ 0x5678);
- }
__ Bc(GetExitLabel());
}
const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS64"; }
private:
- // Pointer to the high half PC-relative patch info.
- const CodeGeneratorMIPS64::PcRelativePatchInfo* bss_info_high_;
-
DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS64);
};
@@ -5979,8 +5886,6 @@
if (load_kind == HLoadClass::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the type resolution or initialization and marking to save everything we need.
- // Request a temp to hold the BSS entry location for the slow path.
- locations->AddTemp(Location::RequiresRegister());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConvention calling_convention;
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -6014,7 +5919,6 @@
? kWithoutReadBarrier
: kCompilerReadBarrierOption;
bool generate_null_check = false;
- CodeGeneratorMIPS64::PcRelativePatchInfo* bss_info_high = nullptr;
switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass:
DCHECK(!cls->CanCallRuntime());
@@ -6064,17 +5968,14 @@
break;
}
case HLoadClass::LoadKind::kBssEntry: {
- bss_info_high = codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
+ CodeGeneratorMIPS64::PcRelativePatchInfo* bss_info_high =
+ codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex(), bss_info_high);
- constexpr bool non_baker_read_barrier = kUseReadBarrier && !kUseBakerReadBarrier;
- GpuRegister temp = non_baker_read_barrier
- ? out
- : locations->GetTemp(0).AsRegister<GpuRegister>();
- codegen_->EmitPcRelativeAddressPlaceholderHigh(bss_info_high, temp);
+ codegen_->EmitPcRelativeAddressPlaceholderHigh(bss_info_high, out);
GenerateGcRootFieldLoad(cls,
out_loc,
- temp,
+ out,
/* placeholder */ 0x5678,
read_barrier_option,
&info_low->label);
@@ -6098,7 +5999,7 @@
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
SlowPathCodeMIPS64* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS64(
- cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck(), bss_info_high);
+ cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
__ Beqzc(out, slow_path->GetEntryLabel());
@@ -6146,8 +6047,6 @@
if (load_kind == HLoadString::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the pResolveString and marking to save everything we need.
- // Request a temp to hold the BSS entry location for the slow path.
- locations->AddTemp(Location::RequiresRegister());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConvention calling_convention;
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -6203,19 +6102,15 @@
codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex());
CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex(), info_high);
- constexpr bool non_baker_read_barrier = kUseReadBarrier && !kUseBakerReadBarrier;
- GpuRegister temp = non_baker_read_barrier
- ? out
- : locations->GetTemp(0).AsRegister<GpuRegister>();
- codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, temp);
+ codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, out);
GenerateGcRootFieldLoad(load,
out_loc,
- temp,
+ out,
/* placeholder */ 0x5678,
kCompilerReadBarrierOption,
&info_low->label);
SlowPathCodeMIPS64* slow_path =
- new (codegen_->GetScopedAllocator()) LoadStringSlowPathMIPS64(load, info_high);
+ new (codegen_->GetScopedAllocator()) LoadStringSlowPathMIPS64(load);
codegen_->AddSlowPath(slow_path);
__ Beqzc(out, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index ad0e71a..2e8170e 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -240,13 +240,6 @@
x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
RestoreLiveRegisters(codegen, locations);
- // Store the resolved String to the BSS entry.
- Register method_address = locations->InAt(0).AsRegister<Register>();
- __ movl(Address(method_address, CodeGeneratorX86::kDummy32BitOffset),
- locations->Out().AsRegister<Register>());
- Label* fixup_label = x86_codegen->NewStringBssEntryPatch(instruction_->AsLoadString());
- __ Bind(fixup_label);
-
__ jmp(GetExitLabel());
}
@@ -293,16 +286,6 @@
x86_codegen->Move32(out, Location::RegisterLocation(EAX));
}
RestoreLiveRegisters(codegen, locations);
- // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
- DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
- if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
- DCHECK(out.IsValid());
- Register method_address = locations->InAt(0).AsRegister<Register>();
- __ movl(Address(method_address, CodeGeneratorX86::kDummy32BitOffset),
- locations->Out().AsRegister<Register>());
- Label* fixup_label = x86_codegen->NewTypeBssEntryPatch(cls_);
- __ Bind(fixup_label);
- }
__ jmp(GetExitLabel());
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index d64a497..e25688c 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -273,15 +273,6 @@
}
RestoreLiveRegisters(codegen, locations);
- // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
- DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
- if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
- DCHECK(out.IsValid());
- __ movl(Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false),
- locations->Out().AsRegister<CpuRegister>());
- Label* fixup_label = x86_64_codegen->NewTypeBssEntryPatch(cls_);
- __ Bind(fixup_label);
- }
__ jmp(GetExitLabel());
}
@@ -323,12 +314,6 @@
x86_64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
RestoreLiveRegisters(codegen, locations);
- // Store the resolved String to the BSS entry.
- __ movl(Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false),
- locations->Out().AsRegister<CpuRegister>());
- Label* fixup_label = x86_64_codegen->NewStringBssEntryPatch(instruction_->AsLoadString());
- __ Bind(fixup_label);
-
__ jmp(GetExitLabel());
}
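
Common to all six code generators above: the slow paths no longer store the resolved Class or
String back to its .bss entry, so they no longer need a temp register (or a re-emitted address
patch) to keep the entry address alive across the runtime call, and the fast paths can compute
the entry address directly in the output register. The deleted stores suggest the .bss slot is
now updated on the runtime side of the entrypoint, which is not shown in this diff. A conceptual
model of the resulting split, as an illustrative sketch only (not ART code; all names here are
hypothetical):

    #include <cstdint>
    #include <cstdio>

    struct String {};                    // stand-in for mirror::String
    static String resolved;
    static String* bss_entry = nullptr;  // the PC-relative patched .bss slot

    // Stand-in for the pResolveString entrypoint: resolves the string and,
    // under the model sketched here, also writes the .bss entry itself.
    static String* ResolveString(uint32_t /*string_index*/) {
      bss_entry = &resolved;
      return &resolved;
    }

    static String* LoadStringBssEntry(uint32_t string_index) {
      String* out = bss_entry;               // adrp+ldr / movw+movt+ldr into `out`
      if (out == nullptr) {                  // cbz/beqz to the slow path
        out = ResolveString(string_index);   // call; nothing to store back here
      }
      return out;
    }

    int main() {
      std::printf("slow: %p\n", static_cast<void*>(LoadStringBssEntry(42u)));  // resolves
      std::printf("fast: %p\n", static_cast<void*>(LoadStringBssEntry(42u)));  // hits .bss
      return 0;
    }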
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index ad29ba5..d270c6a 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -93,6 +93,136 @@
}
}
+/**
+ * Returns true if loop is guarded by "a cmp b" on entry.
+ */
+static bool IsGuardedBy(HLoopInformation* loop,
+ IfCondition cmp,
+ HInstruction* a,
+ HInstruction* b) {
+ // Chase back through straightline code to the first potential
+ // block that has a control dependence.
+ // guard: if (x) bypass
+ // |
+ // entry: straightline code
+ // |
+ // preheader
+ // |
+ // header
+ HBasicBlock* guard = loop->GetPreHeader();
+ HBasicBlock* entry = loop->GetHeader();
+ while (guard->GetPredecessors().size() == 1 &&
+ guard->GetSuccessors().size() == 1) {
+ entry = guard;
+ guard = guard->GetSinglePredecessor();
+ }
+ // Find guard.
+ HInstruction* control = guard->GetLastInstruction();
+ if (!control->IsIf()) {
+ return false;
+ }
+ HIf* ifs = control->AsIf();
+ HInstruction* if_expr = ifs->InputAt(0);
+ if (if_expr->IsCondition()) {
+ IfCondition other_cmp = ifs->IfTrueSuccessor() == entry
+ ? if_expr->AsCondition()->GetCondition()
+ : if_expr->AsCondition()->GetOppositeCondition();
+ if (if_expr->InputAt(0) == a && if_expr->InputAt(1) == b) {
+ return cmp == other_cmp;
+ } else if (if_expr->InputAt(1) == a && if_expr->InputAt(0) == b) {
+ switch (cmp) {
+ case kCondLT: return other_cmp == kCondGT;
+ case kCondLE: return other_cmp == kCondGE;
+ case kCondGT: return other_cmp == kCondLT;
+ case kCondGE: return other_cmp == kCondLE;
+ default: LOG(FATAL) << "unexpected cmp: " << cmp;
+ }
+ }
+ }
+ return false;
+}
+
+/* Finds first loop header phi use. */
+HInstruction* FindFirstLoopHeaderPhiUse(HLoopInformation* loop, HInstruction* instruction) {
+ for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
+ if (use.GetUser()->GetBlock() == loop->GetHeader() &&
+ use.GetUser()->IsPhi() &&
+ use.GetUser()->InputAt(1) == instruction) {
+ return use.GetUser();
+ }
+ }
+ return nullptr;
+}
+
+/**
+ * Relinks the Phi structure after break-loop rewriting.
+ */
+bool FixOutsideUse(HLoopInformation* loop,
+ HInstruction* instruction,
+ HInstruction* replacement,
+ bool rewrite) {
+ // Deal with regular uses.
+ const HUseList<HInstruction*>& uses = instruction->GetUses();
+ for (auto it = uses.begin(), end = uses.end(); it != end; ) {
+ HInstruction* user = it->GetUser();
+ size_t index = it->GetIndex();
+ ++it; // increment prior to potential removal
+ if (user->GetBlock()->GetLoopInformation() != loop) {
+ if (replacement == nullptr) {
+ return false;
+ } else if (rewrite) {
+ user->ReplaceInput(replacement, index);
+ }
+ }
+ }
+ // Deal with environment uses.
+ const HUseList<HEnvironment*>& env_uses = instruction->GetEnvUses();
+ for (auto it = env_uses.begin(), end = env_uses.end(); it != end;) {
+ HEnvironment* user = it->GetUser();
+ size_t index = it->GetIndex();
+ ++it; // increment prior to potential removal
+ if (user->GetHolder()->GetBlock()->GetLoopInformation() != loop) {
+ if (replacement == nullptr) {
+ return false;
+ } else if (rewrite) {
+ user->RemoveAsUserOfInput(index);
+ user->SetRawEnvAt(index, replacement);
+ replacement->AddEnvUseAt(user, index);
+ }
+ }
+ }
+ return true;
+}
+
+/**
+ * Test and rewrite the loop body of a break-loop. Returns true on success.
+ */
+bool RewriteBreakLoopBody(HLoopInformation* loop,
+ HBasicBlock* body,
+ HInstruction* cond,
+ HInstruction* index,
+ HInstruction* upper,
+ bool rewrite) {
+ // Deal with Phis. Outside use prohibited, except for index (which gets exit value).
+ for (HInstructionIterator it(loop->GetHeader()->GetPhis()); !it.Done(); it.Advance()) {
+ HInstruction* exit_value = it.Current() == index ? upper : nullptr;
+ if (!FixOutsideUse(loop, it.Current(), exit_value, rewrite)) {
+ return false;
+ }
+ }
+ // Deal with other statements in header.
+ for (HInstruction* m = cond->GetPrevious(), *p = nullptr; m && !m->IsSuspendCheck(); m = p) {
+ p = m->GetPrevious();
+ if (rewrite) {
+ m->MoveBefore(body->GetFirstInstruction(), false);
+ }
+ if (!FixOutsideUse(loop, m, FindFirstLoopHeaderPhiUse(loop, m), rewrite)) {
+ return false;
+ }
+ }
+ return true;
+}
+
//
// Class methods.
//
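
In source form, the guard/entry/header shape that IsGuardedBy() chases back through corresponds
roughly to the following (an illustrative sketch with hypothetical names). The per-iteration
work sits in the loop header, before the i != U test, so it runs for i = i0 .. n inclusive;
that is why RewriteBreakLoop() can match the guard against the non-strict condition kCondLE:

    #include <cstdio>

    int SumGuarded(const int* a, int i0, int n) {
      int sum = 0;
      if (i0 <= n) {            // guard: if (x) bypass
        int i = i0;             // straightline entry code
        while (true) {          // loop header
          sum += a[i];          // header statement, runs for i = i0 .. n inclusive
          if (i == n) break;    // terminating i != U condition
          i++;                  // body: update to the index only
        }
      }
      return sum;
    }

    int main() {
      const int a[] = {1, 2, 3, 4};
      std::printf("%d\n", SumGuarded(a, 0, 3));  // prints 10
      return 0;
    }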
@@ -754,6 +884,10 @@
return nullptr;
}
+//
+// Loop trip count analysis methods.
+//
+
void HInductionVarAnalysis::VisitControl(HLoopInformation* loop) {
HInstruction* control = loop->GetHeader()->GetLastInstruction();
if (control->IsIf()) {
@@ -774,15 +908,16 @@
if (a == nullptr || b == nullptr) {
return; // Loop control is not a sequence.
} else if (if_true->GetLoopInformation() != loop && if_false->GetLoopInformation() == loop) {
- VisitCondition(loop, a, b, type, condition->GetOppositeCondition());
+ VisitCondition(loop, if_false, a, b, type, condition->GetOppositeCondition());
} else if (if_true->GetLoopInformation() == loop && if_false->GetLoopInformation() != loop) {
- VisitCondition(loop, a, b, type, condition->GetCondition());
+ VisitCondition(loop, if_true, a, b, type, condition->GetCondition());
}
}
}
}
void HInductionVarAnalysis::VisitCondition(HLoopInformation* loop,
+ HBasicBlock* body,
InductionInfo* a,
InductionInfo* b,
DataType::Type type,
@@ -790,11 +925,11 @@
if (a->induction_class == kInvariant && b->induction_class == kLinear) {
// Swap condition if induction is at right-hand-side (e.g. U > i is same as i < U).
switch (cmp) {
- case kCondLT: VisitCondition(loop, b, a, type, kCondGT); break;
- case kCondLE: VisitCondition(loop, b, a, type, kCondGE); break;
- case kCondGT: VisitCondition(loop, b, a, type, kCondLT); break;
- case kCondGE: VisitCondition(loop, b, a, type, kCondLE); break;
- case kCondNE: VisitCondition(loop, b, a, type, kCondNE); break;
+ case kCondLT: VisitCondition(loop, body, b, a, type, kCondGT); break;
+ case kCondLE: VisitCondition(loop, body, b, a, type, kCondGE); break;
+ case kCondGT: VisitCondition(loop, body, b, a, type, kCondLT); break;
+ case kCondGE: VisitCondition(loop, body, b, a, type, kCondLE); break;
+ case kCondNE: VisitCondition(loop, body, b, a, type, kCondNE); break;
default: break;
}
} else if (a->induction_class == kLinear && b->induction_class == kInvariant) {
@@ -802,24 +937,30 @@
InductionInfo* lower_expr = a->op_b;
InductionInfo* upper_expr = b;
InductionInfo* stride_expr = a->op_a;
- // Constant stride?
+ // Test for constant stride and integral condition.
int64_t stride_value = 0;
if (!IsExact(stride_expr, &stride_value)) {
- return;
+ return; // unknown stride
+ } else if (type != DataType::Type::kInt32 && type != DataType::Type::kInt64) {
+ return; // not integral
}
- // Rewrite condition i != U into strict end condition i < U or i > U if this end condition
- // is reached exactly (tested by verifying if the loop has a unit stride and the non-strict
- // condition would be always taken).
+ // Since loops with an i != U condition will not be normalized by the code below, first
+ // try to rewrite a break-loop with terminating condition i != U into an equivalent loop
+ // with non-strict end condition i <= U or i >= U, if such a rewriting is possible and safe.
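+ // For example (illustration only), a break-loop of the form
+ //   int i = 0; while (true) { s(i); if (i == 100) break; i++; }
+ // whose statements s(i) end up in the loop header is rewritten into the equivalent
+ //   for (int i = 0; i <= 100; i++) { s(i); }
+ // with outside uses of i replaced by the exit value 100.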
+ if (cmp == kCondNE && RewriteBreakLoop(loop, body, stride_value, type)) {
+ cmp = stride_value > 0 ? kCondLE : kCondGE;
+ }
+ // If that rewriting fails, try to rewrite the condition i != U into the strict end condition
+ // i < U or i > U if this end condition is reached exactly (tested by verifying that the loop
+ // has a unit stride and that the non-strict condition would always be taken).
if (cmp == kCondNE && ((stride_value == +1 && IsTaken(lower_expr, upper_expr, kCondLE)) ||
(stride_value == -1 && IsTaken(lower_expr, upper_expr, kCondGE)))) {
cmp = stride_value > 0 ? kCondLT : kCondGT;
}
- // Only accept integral condition. A mismatch between the type of condition and the induction
- // is only allowed if the, necessarily narrower, induction range fits the narrower control.
- if (type != DataType::Type::kInt32 && type != DataType::Type::kInt64) {
- return; // not integral
- } else if (type != a->type &&
- !FitsNarrowerControl(lower_expr, upper_expr, stride_value, a->type, cmp)) {
+ // A mismatch between the type of the condition and the induction is only allowed if the
+ // (necessarily narrower) induction range fits the narrower control.
+ if (type != a->type &&
+ !FitsNarrowerControl(lower_expr, upper_expr, stride_value, a->type, cmp)) {
return; // mismatched type
}
// Normalize a linear loop control with a nonzero stride:
@@ -984,6 +1125,69 @@
IsAtMost(upper_expr, &value) && value <= max;
}
+bool HInductionVarAnalysis::RewriteBreakLoop(HLoopInformation* loop,
+ HBasicBlock* body,
+ int64_t stride_value,
+ DataType::Type type) {
+ // Only accept unit stride.
+ if (std::abs(stride_value) != 1) {
+ return false;
+ }
+ // Simple terminating i != U condition, used nowhere else.
+ HIf* ifs = loop->GetHeader()->GetLastInstruction()->AsIf();
+ HInstruction* cond = ifs->InputAt(0);
+ if (ifs->GetPrevious() != cond || !cond->HasOnlyOneNonEnvironmentUse()) {
+ return false;
+ }
+ int c = LookupInfo(loop, cond->InputAt(0))->induction_class == kLinear ? 0 : 1;
+ HInstruction* index = cond->InputAt(c);
+ HInstruction* upper = cond->InputAt(1 - c);
+ // Safe to rewrite into non-strict i <= U (or i >= U for a negative stride)?
+ IfCondition cmp = stride_value > 0 ? kCondLE : kCondGE;
+ if (!index->IsPhi() || !IsFinite(LookupInfo(loop, upper), stride_value, type, cmp)) {
+ return false;
+ }
+ // Body consists of update to index i only, used nowhere else.
+ if (body->GetSuccessors().size() != 1 ||
+ body->GetSingleSuccessor() != loop->GetHeader() ||
+ !body->GetPhis().IsEmpty() ||
+ body->GetInstructions().IsEmpty() ||
+ body->GetFirstInstruction() != index->InputAt(1) ||
+ !body->GetFirstInstruction()->HasOnlyOneNonEnvironmentUse() ||
+ !body->GetFirstInstruction()->GetNext()->IsGoto()) {
+ return false;
+ }
+ // Always taken or guarded by enclosing condition.
+ if (!IsTaken(LookupInfo(loop, index)->op_b, LookupInfo(loop, upper), cmp) &&
+ !IsGuardedBy(loop, cmp, index->InputAt(0), upper)) {
+ return false;
+ }
+ // Test if the break-loop body can be rewritten, and do so on success.
+ if (RewriteBreakLoopBody(loop, body, cond, index, upper, /*rewrite*/ false)) {
+ RewriteBreakLoopBody(loop, body, cond, index, upper, /*rewrite*/ true);
+ } else {
+ return false;
+ }
+ // Rewrite condition in HIR.
+ if (ifs->IfTrueSuccessor() != body) {
+ cmp = (cmp == kCondLE) ? kCondGT : kCondLT;
+ }
+ HInstruction* rep = nullptr;
+ switch (cmp) {
+ case kCondLT: rep = new (graph_->GetAllocator()) HLessThan(index, upper); break;
+ case kCondGT: rep = new (graph_->GetAllocator()) HGreaterThan(index, upper); break;
+ case kCondLE: rep = new (graph_->GetAllocator()) HLessThanOrEqual(index, upper); break;
+ case kCondGE: rep = new (graph_->GetAllocator()) HGreaterThanOrEqual(index, upper); break;
+ default: LOG(FATAL) << cmp; UNREACHABLE();
+ }
+ loop->GetHeader()->ReplaceAndRemoveInstructionWith(cond, rep);
+ return true;
+}
+
+//
+// Helper methods.
+//
+
void HInductionVarAnalysis::AssignInfo(HLoopInformation* loop,
HInstruction* instruction,
InductionInfo* info) {
diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h
index 8737b89..acad77d 100644
--- a/compiler/optimizing/induction_var_analysis.h
+++ b/compiler/optimizing/induction_var_analysis.h
@@ -195,9 +195,14 @@
HInstruction* entry_phi,
HTypeConversion* conversion);
+ //
+ // Loop trip count analysis methods.
+ //
+
// Trip count information.
void VisitControl(HLoopInformation* loop);
void VisitCondition(HLoopInformation* loop,
+ HBasicBlock* body,
InductionInfo* a,
InductionInfo* b,
DataType::Type type,
@@ -219,6 +224,14 @@
int64_t stride_value,
DataType::Type type,
IfCondition cmp);
+ bool RewriteBreakLoop(HLoopInformation* loop,
+ HBasicBlock* body,
+ int64_t stride_value,
+ DataType::Type type);
+
+ //
+ // Helper methods.
+ //
// Assign and lookup.
void AssignInfo(HLoopInformation* loop, HInstruction* instruction, InductionInfo* info);
diff --git a/dex2oat/Android.bp b/dex2oat/Android.bp
index 13d2655..6bebf7d 100644
--- a/dex2oat/Android.bp
+++ b/dex2oat/Android.bp
@@ -220,6 +220,7 @@
"linker/elf_writer_test.cc",
"linker/image_test.cc",
"linker/image_write_read_test.cc",
+ "linker/index_bss_mapping_encoder_test.cc",
"linker/multi_oat_relative_patcher_test.cc",
"linker/oat_writer_test.cc",
],
diff --git a/dex2oat/linker/index_bss_mapping_encoder.h b/dex2oat/linker/index_bss_mapping_encoder.h
new file mode 100644
index 0000000..9bc1432
--- /dev/null
+++ b/dex2oat/linker/index_bss_mapping_encoder.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_DEX2OAT_LINKER_INDEX_BSS_MAPPING_ENCODER_H_
+#define ART_DEX2OAT_LINKER_INDEX_BSS_MAPPING_ENCODER_H_
+
+#include "base/bit_utils.h"
+#include "base/bit_vector-inl.h"
+#include "base/logging.h"
+#include "index_bss_mapping.h"
+
+namespace art {
+namespace linker {
+
+// Helper class for encoding compressed IndexBssMapping.
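+//
+// Typical use (a sketch of the pattern in the oat_writer.cc callers): Reset() with the
+// first index/offset pair, then TryMerge() each subsequent pair in increasing index
+// order, emitting GetEntry() and calling Reset() whenever TryMerge() fails.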
+class IndexBssMappingEncoder {
+ public:
+ IndexBssMappingEncoder(size_t number_of_indexes, size_t slot_size)
+ : index_bits_(IndexBssMappingEntry::IndexBits(number_of_indexes)),
+ slot_size_(slot_size) {
+ entry_.index_and_mask = static_cast<uint32_t>(-1);
+ entry_.bss_offset = static_cast<uint32_t>(-1);
+ DCHECK_NE(number_of_indexes, 0u);
+ }
+
+ // Try to merge the next index -> bss_offset mapping into the current entry.
+ // Return true on success, false on failure.
+ bool TryMerge(uint32_t index, uint32_t bss_offset) {
+ DCHECK_LE(MinimumBitsToStore(index), index_bits_);
+ DCHECK_NE(index, entry_.GetIndex(index_bits_));
+ if (entry_.bss_offset + slot_size_ != bss_offset) {
+ return false;
+ }
+ uint32_t diff = index - entry_.GetIndex(index_bits_);
+ if (diff > 32u - index_bits_) {
+ return false;
+ }
+ uint32_t mask = entry_.GetMask(index_bits_);
+ if ((mask & ~(static_cast<uint32_t>(-1) << diff)) != 0u) {
+ return false;
+ }
+ // Insert the bit indicating the index we've just overwritten
+ // and shift bits indicating indexes before that.
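+ // E.g. (illustration only): merging index 17 into an entry for index 5 with an empty
+ // mask has diff == 12, so bit 32 - 12 == 20 is set for the old index 5, yielding
+ // index_and_mask == 0x00100000 | 17.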
+ mask = ((mask << index_bits_) >> diff) | (static_cast<uint32_t>(1u) << (32 - diff));
+ entry_.index_and_mask = mask | index;
+ entry_.bss_offset = bss_offset;
+ return true;
+ }
+
+ void Reset(uint32_t method_index, uint32_t bss_offset) {
+ DCHECK_LE(MinimumBitsToStore(method_index), index_bits_);
+ entry_.index_and_mask = method_index; // Mask bits set to 0.
+ entry_.bss_offset = bss_offset;
+ }
+
+ IndexBssMappingEntry GetEntry() {
+ return entry_;
+ }
+
+ size_t GetIndexBits() const {
+ return index_bits_;
+ }
+
+ private:
+ const size_t index_bits_;
+ const size_t slot_size_;
+ IndexBssMappingEntry entry_;
+};
+
+} // namespace linker
+} // namespace art
+
+#endif // ART_DEX2OAT_LINKER_INDEX_BSS_MAPPING_ENCODER_H_
diff --git a/dex2oat/linker/index_bss_mapping_encoder_test.cc b/dex2oat/linker/index_bss_mapping_encoder_test.cc
new file mode 100644
index 0000000..d7ca2a5
--- /dev/null
+++ b/dex2oat/linker/index_bss_mapping_encoder_test.cc
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "index_bss_mapping_encoder.h"
+
+#include "base/enums.h"
+#include "gtest/gtest.h"
+
+namespace art {
+namespace linker {
+
+TEST(IndexBssMappingEncoder, TryMerge16BitIndex) {
+ for (PointerSize pointer_size : {PointerSize::k32, PointerSize::k64}) {
+ size_t raw_pointer_size = static_cast<size_t>(pointer_size);
+ IndexBssMappingEncoder encoder(/* number_of_indexes */ 0x10000, raw_pointer_size);
+ encoder.Reset(1u, 0u);
+ ASSERT_FALSE(encoder.TryMerge(5u, raw_pointer_size + 1)); // Wrong bss_offset difference.
+ ASSERT_FALSE(encoder.TryMerge(18u, raw_pointer_size)); // Index out of range.
+ ASSERT_TRUE(encoder.TryMerge(5u, raw_pointer_size));
+ ASSERT_EQ(0u, encoder.GetEntry().GetBssOffset(encoder.GetIndexBits(), 1u, raw_pointer_size));
+ ASSERT_EQ(raw_pointer_size,
+ encoder.GetEntry().GetBssOffset(encoder.GetIndexBits(), 5u, raw_pointer_size));
+ ASSERT_EQ(IndexBssMappingLookup::npos,
+ encoder.GetEntry().GetBssOffset(encoder.GetIndexBits(), 17u, raw_pointer_size));
+ ASSERT_FALSE(encoder.TryMerge(17u, 2 * raw_pointer_size + 1)); // Wrong bss_offset difference.
+ ASSERT_FALSE(encoder.TryMerge(18u, 2 * raw_pointer_size)); // Index out of range.
+ ASSERT_TRUE(encoder.TryMerge(17u, 2 * raw_pointer_size));
+ ASSERT_EQ(0u, encoder.GetEntry().GetBssOffset(encoder.GetIndexBits(), 1u, raw_pointer_size));
+ ASSERT_EQ(raw_pointer_size,
+ encoder.GetEntry().GetBssOffset(encoder.GetIndexBits(), 5u, raw_pointer_size));
+ ASSERT_EQ(2 * raw_pointer_size,
+ encoder.GetEntry().GetBssOffset(encoder.GetIndexBits(), 17u, raw_pointer_size));
+ ASSERT_EQ(0x00110000u | 17u, encoder.GetEntry().index_and_mask);
+ ASSERT_FALSE(encoder.TryMerge(18u, 3 * raw_pointer_size)); // Index out of range.
+ }
+}
+
+TEST(IndexBssMappingEncoder, TryMerge8BitIndex) {
+ for (PointerSize pointer_size : {PointerSize::k32, PointerSize::k64}) {
+ size_t raw_pointer_size = static_cast<size_t>(pointer_size);
+ IndexBssMappingEncoder encoder(/* number_of_indexes */ 0x100, raw_pointer_size);
+ encoder.Reset(1u, 0u);
+ ASSERT_FALSE(encoder.TryMerge(5u, raw_pointer_size + 1)); // Wrong bss_offset difference.
+ ASSERT_FALSE(encoder.TryMerge(26u, raw_pointer_size)); // Index out of range.
+ ASSERT_TRUE(encoder.TryMerge(5u, raw_pointer_size));
+ ASSERT_EQ(0u, encoder.GetEntry().GetBssOffset(encoder.GetIndexBits(), 1u, raw_pointer_size));
+ ASSERT_EQ(raw_pointer_size,
+ encoder.GetEntry().GetBssOffset(encoder.GetIndexBits(), 5u, raw_pointer_size));
+ ASSERT_EQ(IndexBssMappingLookup::npos,
+ encoder.GetEntry().GetBssOffset(encoder.GetIndexBits(), 17u, raw_pointer_size));
+ ASSERT_FALSE(encoder.TryMerge(25u, 2 * raw_pointer_size + 1)); // Wrong bss_offset difference.
+ ASSERT_FALSE(encoder.TryMerge(26u, 2 * raw_pointer_size)); // Index out of range.
+ ASSERT_TRUE(encoder.TryMerge(25u, 2 * raw_pointer_size));
+ ASSERT_EQ(0u, encoder.GetEntry().GetBssOffset(encoder.GetIndexBits(), 1u, raw_pointer_size));
+ ASSERT_EQ(raw_pointer_size,
+ encoder.GetEntry().GetBssOffset(encoder.GetIndexBits(), 5u, raw_pointer_size));
+ ASSERT_EQ(2 * raw_pointer_size,
+ encoder.GetEntry().GetBssOffset(encoder.GetIndexBits(), 25u, raw_pointer_size));
+ ASSERT_EQ(0x00001100u | 25u, encoder.GetEntry().index_and_mask);
+ ASSERT_FALSE(encoder.TryMerge(26u, 3 * raw_pointer_size)); // Index out of range.
+ }
+}
+
+TEST(IndexBssMappingEncoder, TryMerge20BitIndex) {
+ for (PointerSize pointer_size : {PointerSize::k32, PointerSize::k64}) {
+ size_t raw_pointer_size = static_cast<size_t>(pointer_size);
+ IndexBssMappingEncoder encoder(/* number_of_indexes */ 0x100000, raw_pointer_size);
+ encoder.Reset(1u, 0u);
+ ASSERT_FALSE(encoder.TryMerge(5u, raw_pointer_size + 1)); // Wrong bss_offset difference.
+ ASSERT_FALSE(encoder.TryMerge(14u, raw_pointer_size)); // Index out of range.
+ ASSERT_TRUE(encoder.TryMerge(5u, raw_pointer_size));
+ ASSERT_EQ(0u, encoder.GetEntry().GetBssOffset(encoder.GetIndexBits(), 1u, raw_pointer_size));
+ ASSERT_EQ(raw_pointer_size,
+ encoder.GetEntry().GetBssOffset(encoder.GetIndexBits(), 5u, raw_pointer_size));
+ ASSERT_EQ(IndexBssMappingLookup::npos,
+ encoder.GetEntry().GetBssOffset(encoder.GetIndexBits(), 17u, raw_pointer_size));
+ ASSERT_FALSE(encoder.TryMerge(13u, 2 * raw_pointer_size + 1)); // Wrong bss_offset difference.
+ ASSERT_FALSE(encoder.TryMerge(14u, 2 * raw_pointer_size)); // Index out of range.
+ ASSERT_TRUE(encoder.TryMerge(13u, 2 * raw_pointer_size));
+ ASSERT_EQ(0u, encoder.GetEntry().GetBssOffset(encoder.GetIndexBits(), 1u, raw_pointer_size));
+ ASSERT_EQ(raw_pointer_size,
+ encoder.GetEntry().GetBssOffset(encoder.GetIndexBits(), 5u, raw_pointer_size));
+ ASSERT_EQ(2 * raw_pointer_size,
+ encoder.GetEntry().GetBssOffset(encoder.GetIndexBits(), 13u, raw_pointer_size));
+ ASSERT_EQ(0x01100000u | 13u, encoder.GetEntry().index_and_mask);
+ ASSERT_FALSE(encoder.TryMerge(14u, 3 * raw_pointer_size)); // Index out of range.
+ }
+}
+
+} // namespace linker
+} // namespace art
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index 663a889..99c6258 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -45,8 +45,8 @@
#include "image_writer.h"
#include "linker/buffered_output_stream.h"
#include "linker/file_output_stream.h"
+#include "linker/index_bss_mapping_encoder.h"
#include "linker/linker_patch.h"
-#include "linker/method_bss_mapping_encoder.h"
#include "linker/multi_oat_relative_patcher.h"
#include "linker/output_stream.h"
#include "mirror/array.h"
@@ -310,6 +310,8 @@
uint32_t class_offsets_offset_;
uint32_t lookup_table_offset_;
uint32_t method_bss_mapping_offset_;
+ uint32_t type_bss_mapping_offset_;
+ uint32_t string_bss_mapping_offset_;
uint32_t dex_sections_layout_offset_;
// Data to write to a separate section.
@@ -396,6 +398,8 @@
size_oat_dex_file_dex_layout_sections_(0),
size_oat_dex_file_dex_layout_sections_alignment_(0),
size_oat_dex_file_method_bss_mapping_offset_(0),
+ size_oat_dex_file_type_bss_mapping_offset_(0),
+ size_oat_dex_file_string_bss_mapping_offset_(0),
size_oat_lookup_table_alignment_(0),
size_oat_lookup_table_(0),
size_oat_class_offsets_alignment_(0),
@@ -405,6 +409,8 @@
size_oat_class_method_bitmaps_(0),
size_oat_class_method_offsets_(0),
size_method_bss_mappings_(0u),
+ size_type_bss_mappings_(0u),
+ size_string_bss_mappings_(0u),
relative_patcher_(nullptr),
absolute_patch_locations_(),
profile_compilation_info_(info),
@@ -632,8 +638,8 @@
offset = InitOatClasses(offset);
}
{
- TimingLogger::ScopedTiming split("InitMethodBssMappings", timings_);
- offset = InitMethodBssMappings(offset);
+ TimingLogger::ScopedTiming split("InitIndexBssMappings", timings_);
+ offset = InitIndexBssMappings(offset);
}
{
TimingLogger::ScopedTiming split("InitOatMaps", timings_);
@@ -761,23 +767,22 @@
for (const LinkerPatch& patch : compiled_method->GetPatches()) {
if (patch.GetType() == LinkerPatch::Type::kMethodBssEntry) {
MethodReference target_method = patch.TargetMethod();
- auto refs_it = writer_->bss_method_entry_references_.find(target_method.dex_file);
- if (refs_it == writer_->bss_method_entry_references_.end()) {
- refs_it = writer_->bss_method_entry_references_.Put(
- target_method.dex_file,
- BitVector(target_method.dex_file->NumMethodIds(),
- /* expandable */ false,
- Allocator::GetMallocAllocator()));
- refs_it->second.ClearAllBits();
- }
- refs_it->second.SetBit(target_method.index);
+ AddBssReference(target_method,
+ target_method.dex_file->NumMethodIds(),
+ &writer_->bss_method_entry_references_);
writer_->bss_method_entries_.Overwrite(target_method, /* placeholder */ 0u);
} else if (patch.GetType() == LinkerPatch::Type::kTypeBssEntry) {
- TypeReference ref(patch.TargetTypeDexFile(), patch.TargetTypeIndex());
- writer_->bss_type_entries_.Overwrite(ref, /* placeholder */ 0u);
+ TypeReference target_type(patch.TargetTypeDexFile(), patch.TargetTypeIndex());
+ AddBssReference(target_type,
+ target_type.dex_file->NumTypeIds(),
+ &writer_->bss_type_entry_references_);
+ writer_->bss_type_entries_.Overwrite(target_type, /* placeholder */ 0u);
} else if (patch.GetType() == LinkerPatch::Type::kStringBssEntry) {
- StringReference ref(patch.TargetStringDexFile(), patch.TargetStringIndex());
- writer_->bss_string_entries_.Overwrite(ref, /* placeholder */ 0u);
+ StringReference target_string(patch.TargetStringDexFile(), patch.TargetStringIndex());
+ AddBssReference(target_string,
+ target_string.dex_file->NumStringIds(),
+ &writer_->bss_string_entry_references_);
+ writer_->bss_string_entries_.Overwrite(target_string, /* placeholder */ 0u);
} else if (patch.GetType() == LinkerPatch::Type::kStringInternTable ||
patch.GetType() == LinkerPatch::Type::kTypeClassTable) {
writer_->map_boot_image_tables_to_bss_ = true;
@@ -788,6 +793,24 @@
}
return true;
}
+
+ private:
+ void AddBssReference(const DexFileReference& ref,
+ size_t number_of_indexes,
+ /*inout*/ SafeMap<const DexFile*, BitVector>* references) {
+ // We currently support inlining of throwing instructions only when they originate in the
+ // same dex file as the outer method. All .bss references are used by throwing instructions.
+ DCHECK_EQ(dex_file_, ref.dex_file);
+
+ auto refs_it = references->find(ref.dex_file);
+ if (refs_it == references->end()) {
+ refs_it = references->Put(
+ ref.dex_file,
+ BitVector(number_of_indexes, /* expandable */ false, Allocator::GetMallocAllocator()));
+ refs_it->second.ClearAllBits();
+ }
+ refs_it->second.SetBit(ref.index);
+ }
};
class OatWriter::InitOatClassesMethodVisitor : public DexMethodVisitor {
@@ -2192,38 +2215,101 @@
return offset;
}
-size_t OatWriter::InitMethodBssMappings(size_t offset) {
- size_t number_of_dex_files = 0u;
- for (size_t i = 0, size = dex_files_->size(); i != size; ++i) {
- const DexFile* dex_file = (*dex_files_)[i];
- auto it = bss_method_entry_references_.find(dex_file);
- if (it != bss_method_entry_references_.end()) {
- const BitVector& method_indexes = it->second;
- ++number_of_dex_files;
- // If there are any classes, the class offsets allocation aligns the offset
- // and we cannot have method bss mappings without class offsets.
- static_assert(alignof(MethodBssMapping) == 4u, "MethodBssMapping alignment check.");
- DCHECK_ALIGNED(offset, 4u);
- oat_dex_files_[i].method_bss_mapping_offset_ = offset;
-
- MethodBssMappingEncoder encoder(
- GetInstructionSetPointerSize(oat_header_->GetInstructionSet()));
- size_t number_of_entries = 0u;
- bool first_index = true;
- for (uint32_t method_index : method_indexes.Indexes()) {
- uint32_t bss_offset = bss_method_entries_.Get(MethodReference(dex_file, method_index));
- if (first_index || !encoder.TryMerge(method_index, bss_offset)) {
- encoder.Reset(method_index, bss_offset);
- ++number_of_entries;
- first_index = false;
- }
- }
- DCHECK_NE(number_of_entries, 0u);
- offset += MethodBssMapping::ComputeSize(number_of_entries);
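+// Count the entries of the IndexBssMapping that would encode the given set of indexes,
+// i.e. one entry per maximal run that TryMerge() can compress. GetBssOffset is a
+// callable mapping an index to its assigned .bss offset.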
+template <typename GetBssOffset>
+static size_t CalculateNumberOfIndexBssMappingEntries(size_t number_of_indexes,
+ size_t slot_size,
+ const BitVector& indexes,
+ GetBssOffset get_bss_offset) {
+ IndexBssMappingEncoder encoder(number_of_indexes, slot_size);
+ size_t number_of_entries = 0u;
+ bool first_index = true;
+ for (uint32_t index : indexes.Indexes()) {
+ uint32_t bss_offset = get_bss_offset(index);
+ if (first_index || !encoder.TryMerge(index, bss_offset)) {
+ encoder.Reset(index, bss_offset);
+ ++number_of_entries;
+ first_index = false;
}
}
- // Check that all dex files targeted by method bss entries are in `*dex_files_`.
- CHECK_EQ(number_of_dex_files, bss_method_entry_references_.size());
+ DCHECK_NE(number_of_entries, 0u);
+ return number_of_entries;
+}
+
+template <typename GetBssOffset>
+static size_t CalculateIndexBssMappingSize(size_t number_of_indexes,
+ size_t slot_size,
+ const BitVector& indexes,
+ GetBssOffset get_bss_offset) {
+ size_t number_of_entries = CalculateNumberOfIndexBssMappingEntries(number_of_indexes,
+ slot_size,
+ indexes,
+ get_bss_offset);
+ return IndexBssMapping::ComputeSize(number_of_entries);
+}
+
+size_t OatWriter::InitIndexBssMappings(size_t offset) {
+ if (bss_method_entry_references_.empty() &&
+ bss_type_entry_references_.empty() &&
+ bss_string_entry_references_.empty()) {
+ return offset;
+ }
+ // If there are any classes, the class offsets allocation aligns the offset
+ // and we cannot have any index bss mappings without class offsets.
+ static_assert(alignof(IndexBssMapping) == 4u, "IndexBssMapping alignment check.");
+ DCHECK_ALIGNED(offset, 4u);
+
+ size_t number_of_method_dex_files = 0u;
+ size_t number_of_type_dex_files = 0u;
+ size_t number_of_string_dex_files = 0u;
+ PointerSize pointer_size = GetInstructionSetPointerSize(oat_header_->GetInstructionSet());
+ for (size_t i = 0, size = dex_files_->size(); i != size; ++i) {
+ const DexFile* dex_file = (*dex_files_)[i];
+ auto method_it = bss_method_entry_references_.find(dex_file);
+ if (method_it != bss_method_entry_references_.end()) {
+ const BitVector& method_indexes = method_it->second;
+ ++number_of_method_dex_files;
+ oat_dex_files_[i].method_bss_mapping_offset_ = offset;
+ offset += CalculateIndexBssMappingSize(
+ dex_file->NumMethodIds(),
+ static_cast<size_t>(pointer_size),
+ method_indexes,
+ [=](uint32_t index) {
+ return bss_method_entries_.Get({dex_file, index});
+ });
+ }
+
+ auto type_it = bss_type_entry_references_.find(dex_file);
+ if (type_it != bss_type_entry_references_.end()) {
+ const BitVector& type_indexes = type_it->second;
+ ++number_of_type_dex_files;
+ oat_dex_files_[i].type_bss_mapping_offset_ = offset;
+ offset += CalculateIndexBssMappingSize(
+ dex_file->NumTypeIds(),
+ sizeof(GcRoot<mirror::Class>),
+ type_indexes,
+ [=](uint32_t index) {
+ return bss_type_entries_.Get({dex_file, dex::TypeIndex(index)});
+ });
+ }
+
+ auto string_it = bss_string_entry_references_.find(dex_file);
+ if (string_it != bss_string_entry_references_.end()) {
+ const BitVector& string_indexes = string_it->second;
+ ++number_of_string_dex_files;
+ oat_dex_files_[i].string_bss_mapping_offset_ = offset;
+ offset += CalculateIndexBssMappingSize(
+ dex_file->NumStringIds(),
+ sizeof(GcRoot<mirror::String>),
+ string_indexes,
+ [=](uint32_t index) {
+ return bss_string_entries_.Get({dex_file, dex::StringIndex(index)});
+ });
+ }
+ }
+ // Check that all dex files targeted by bss entries are in `*dex_files_`.
+ CHECK_EQ(number_of_method_dex_files, bss_method_entry_references_.size());
+ CHECK_EQ(number_of_type_dex_files, bss_type_entry_references_.size());
+ CHECK_EQ(number_of_string_dex_files, bss_string_entry_references_.size());
return offset;
}
@@ -2423,7 +2509,7 @@
return false;
}
- relative_offset = WriteMethodBssMappings(out, file_offset, relative_offset);
+ relative_offset = WriteIndexBssMappings(out, file_offset, relative_offset);
if (relative_offset == 0) {
PLOG(ERROR) << "Failed to write method bss mappings to " << out->GetLocation();
return false;
@@ -2744,6 +2830,8 @@
DO_STAT(size_oat_dex_file_dex_layout_sections_);
DO_STAT(size_oat_dex_file_dex_layout_sections_alignment_);
DO_STAT(size_oat_dex_file_method_bss_mapping_offset_);
+ DO_STAT(size_oat_dex_file_type_bss_mapping_offset_);
+ DO_STAT(size_oat_dex_file_string_bss_mapping_offset_);
DO_STAT(size_oat_lookup_table_alignment_);
DO_STAT(size_oat_lookup_table_);
DO_STAT(size_oat_class_offsets_alignment_);
@@ -2753,6 +2841,8 @@
DO_STAT(size_oat_class_method_bitmaps_);
DO_STAT(size_oat_class_method_offsets_);
DO_STAT(size_method_bss_mappings_);
+ DO_STAT(size_type_bss_mappings_);
+ DO_STAT(size_string_bss_mappings_);
#undef DO_STAT
VLOG(compiler) << "size_total=" << PrettySize(size_total) << " (" << size_total << "B)";
@@ -2892,64 +2982,131 @@
return relative_offset;
}
-size_t OatWriter::WriteMethodBssMappings(OutputStream* out,
- size_t file_offset,
- size_t relative_offset) {
- TimingLogger::ScopedTiming split("WriteMethodBssMappings", timings_);
+template <typename GetBssOffset>
+size_t WriteIndexBssMapping(OutputStream* out,
+ size_t number_of_indexes,
+ size_t slot_size,
+ const BitVector& indexes,
+ GetBssOffset get_bss_offset) {
+ // Allocate the IndexBssMapping.
+ size_t number_of_entries = CalculateNumberOfIndexBssMappingEntries(
+ number_of_indexes, slot_size, indexes, get_bss_offset);
+ size_t mappings_size = IndexBssMapping::ComputeSize(number_of_entries);
+ DCHECK_ALIGNED(mappings_size, sizeof(uint32_t));
+ std::unique_ptr<uint32_t[]> storage(new uint32_t[mappings_size / sizeof(uint32_t)]);
+ IndexBssMapping* mappings = new(storage.get()) IndexBssMapping(number_of_entries);
+ mappings->ClearPadding();
+ // Encode the IndexBssMapping.
+ IndexBssMappingEncoder encoder(number_of_indexes, slot_size);
+ auto init_it = mappings->begin();
+ bool first_index = true;
+ for (uint32_t index : indexes.Indexes()) {
+ size_t bss_offset = get_bss_offset(index);
+ if (first_index) {
+ first_index = false;
+ encoder.Reset(index, bss_offset);
+ } else if (!encoder.TryMerge(index, bss_offset)) {
+ *init_it = encoder.GetEntry();
+ ++init_it;
+ encoder.Reset(index, bss_offset);
+ }
+ }
+ // Store the last entry.
+ *init_it = encoder.GetEntry();
+ ++init_it;
+ DCHECK(init_it == mappings->end());
+
+ if (!out->WriteFully(storage.get(), mappings_size)) {
+ return 0u;
+ }
+ return mappings_size;
+}
+
+size_t OatWriter::WriteIndexBssMappings(OutputStream* out,
+ size_t file_offset,
+ size_t relative_offset) {
+ TimingLogger::ScopedTiming split("WriteIndexBssMappings", timings_);
+ if (bss_method_entry_references_.empty() &&
+ bss_type_entry_references_.empty() &&
+ bss_string_entry_references_.empty()) {
+ return relative_offset;
+ }
+ // If there are any classes, the class offsets allocation aligns the offset
+ // and we cannot have index bss mappings without class offsets.
+ static_assert(alignof(IndexBssMapping) == sizeof(uint32_t),
+ "IndexBssMapping alignment check.");
+ DCHECK_ALIGNED(relative_offset, sizeof(uint32_t));
+
+ PointerSize pointer_size = GetInstructionSetPointerSize(oat_header_->GetInstructionSet());
for (size_t i = 0, size = dex_files_->size(); i != size; ++i) {
const DexFile* dex_file = (*dex_files_)[i];
OatDexFile* oat_dex_file = &oat_dex_files_[i];
- auto it = bss_method_entry_references_.find(dex_file);
- if (it != bss_method_entry_references_.end()) {
- const BitVector& method_indexes = it->second;
- // If there are any classes, the class offsets allocation aligns the offset
- // and we cannot have method bss mappings without class offsets.
- static_assert(alignof(MethodBssMapping) == sizeof(uint32_t),
- "MethodBssMapping alignment check.");
- DCHECK_ALIGNED(relative_offset, sizeof(uint32_t));
-
- MethodBssMappingEncoder encoder(
- GetInstructionSetPointerSize(oat_header_->GetInstructionSet()));
- // Allocate a sufficiently large MethodBssMapping.
- size_t number_of_method_indexes = method_indexes.NumSetBits();
- DCHECK_NE(number_of_method_indexes, 0u);
- size_t max_mappings_size = MethodBssMapping::ComputeSize(number_of_method_indexes);
- DCHECK_ALIGNED(max_mappings_size, sizeof(uint32_t));
- std::unique_ptr<uint32_t[]> storage(new uint32_t[max_mappings_size / sizeof(uint32_t)]);
- MethodBssMapping* mappings = new(storage.get()) MethodBssMapping(number_of_method_indexes);
- mappings->ClearPadding();
- // Encode the MethodBssMapping.
- auto init_it = mappings->begin();
- bool first_index = true;
- for (uint32_t method_index : method_indexes.Indexes()) {
- size_t bss_offset = bss_method_entries_.Get(MethodReference(dex_file, method_index));
- if (first_index) {
- first_index = false;
- encoder.Reset(method_index, bss_offset);
- } else if (!encoder.TryMerge(method_index, bss_offset)) {
- *init_it = encoder.GetEntry();
- ++init_it;
- encoder.Reset(method_index, bss_offset);
- }
- }
- // Store the last entry and shrink the mapping to the actual size.
- *init_it = encoder.GetEntry();
- ++init_it;
- DCHECK(init_it <= mappings->end());
- mappings->SetSize(std::distance(mappings->begin(), init_it));
- size_t mappings_size = MethodBssMapping::ComputeSize(mappings->size());
-
+ auto method_it = bss_method_entry_references_.find(dex_file);
+ if (method_it != bss_method_entry_references_.end()) {
+ const BitVector& method_indexes = method_it->second;
DCHECK_EQ(relative_offset, oat_dex_file->method_bss_mapping_offset_);
DCHECK_OFFSET();
- if (!out->WriteFully(storage.get(), mappings_size)) {
+ size_t method_mappings_size = WriteIndexBssMapping(
+ out,
+ dex_file->NumMethodIds(),
+ static_cast<size_t>(pointer_size),
+ method_indexes,
+ [=](uint32_t index) {
+ return bss_method_entries_.Get({dex_file, index});
+ });
+ if (method_mappings_size == 0u) {
return 0u;
}
- size_method_bss_mappings_ += mappings_size;
- relative_offset += mappings_size;
+ size_method_bss_mappings_ += method_mappings_size;
+ relative_offset += method_mappings_size;
} else {
DCHECK_EQ(0u, oat_dex_file->method_bss_mapping_offset_);
}
+
+ auto type_it = bss_type_entry_references_.find(dex_file);
+ if (type_it != bss_type_entry_references_.end()) {
+ const BitVector& type_indexes = type_it->second;
+ DCHECK_EQ(relative_offset, oat_dex_file->type_bss_mapping_offset_);
+ DCHECK_OFFSET();
+ size_t type_mappings_size = WriteIndexBssMapping(
+ out,
+ dex_file->NumTypeIds(),
+ sizeof(GcRoot<mirror::Class>),
+ type_indexes,
+ [=](uint32_t index) {
+ return bss_type_entries_.Get({dex_file, dex::TypeIndex(index)});
+ });
+ if (type_mappings_size == 0u) {
+ return 0u;
+ }
+ size_type_bss_mappings_ += type_mappings_size;
+ relative_offset += type_mappings_size;
+ } else {
+ DCHECK_EQ(0u, oat_dex_file->type_bss_mapping_offset_);
+ }
+
+ auto string_it = bss_string_entry_references_.find(dex_file);
+ if (string_it != bss_string_entry_references_.end()) {
+ const BitVector& string_indexes = string_it->second;
+ DCHECK_EQ(relative_offset, oat_dex_file->string_bss_mapping_offset_);
+ DCHECK_OFFSET();
+ size_t string_mappings_size = WriteIndexBssMapping(
+ out,
+ dex_file->NumStringIds(),
+ sizeof(GcRoot<mirror::String>),
+ string_indexes,
+ [=](uint32_t index) {
+ return bss_string_entries_.Get({dex_file, dex::StringIndex(index)});
+ });
+ if (string_mappings_size == 0u) {
+ return 0u;
+ }
+ size_string_bss_mappings_ += string_mappings_size;
+ relative_offset += string_mappings_size;
+ } else {
+ DCHECK_EQ(0u, oat_dex_file->string_bss_mapping_offset_);
+ }
}
return relative_offset;
}
@@ -3748,6 +3905,8 @@
class_offsets_offset_(0u),
lookup_table_offset_(0u),
method_bss_mapping_offset_(0u),
+ type_bss_mapping_offset_(0u),
+ string_bss_mapping_offset_(0u),
dex_sections_layout_offset_(0u),
class_offsets_() {
}
@@ -3760,6 +3919,8 @@
+ sizeof(class_offsets_offset_)
+ sizeof(lookup_table_offset_)
+ sizeof(method_bss_mapping_offset_)
+ + sizeof(type_bss_mapping_offset_)
+ + sizeof(string_bss_mapping_offset_)
+ sizeof(dex_sections_layout_offset_);
}
@@ -3815,6 +3976,18 @@
}
oat_writer->size_oat_dex_file_method_bss_mapping_offset_ += sizeof(method_bss_mapping_offset_);
+ if (!out->WriteFully(&type_bss_mapping_offset_, sizeof(type_bss_mapping_offset_))) {
+ PLOG(ERROR) << "Failed to write type bss mapping offset to " << out->GetLocation();
+ return false;
+ }
+ oat_writer->size_oat_dex_file_type_bss_mapping_offset_ += sizeof(type_bss_mapping_offset_);
+
+ if (!out->WriteFully(&string_bss_mapping_offset_, sizeof(string_bss_mapping_offset_))) {
+ PLOG(ERROR) << "Failed to write string bss mapping offset to " << out->GetLocation();
+ return false;
+ }
+ oat_writer->size_oat_dex_file_string_bss_mapping_offset_ += sizeof(string_bss_mapping_offset_);
+
return true;
}
diff --git a/dex2oat/linker/oat_writer.h b/dex2oat/linker/oat_writer.h
index 6a82fd1..e0cb7ec 100644
--- a/dex2oat/linker/oat_writer.h
+++ b/dex2oat/linker/oat_writer.h
@@ -310,7 +310,7 @@
size_t InitClassOffsets(size_t offset);
size_t InitOatClasses(size_t offset);
size_t InitOatMaps(size_t offset);
- size_t InitMethodBssMappings(size_t offset);
+ size_t InitIndexBssMappings(size_t offset);
size_t InitOatDexFiles(size_t offset);
size_t InitOatCode(size_t offset);
size_t InitOatCodeDexFiles(size_t offset);
@@ -319,7 +319,7 @@
size_t WriteClassOffsets(OutputStream* out, size_t file_offset, size_t relative_offset);
size_t WriteClasses(OutputStream* out, size_t file_offset, size_t relative_offset);
size_t WriteMaps(OutputStream* out, size_t file_offset, size_t relative_offset);
- size_t WriteMethodBssMappings(OutputStream* out, size_t file_offset, size_t relative_offset);
+ size_t WriteIndexBssMappings(OutputStream* out, size_t file_offset, size_t relative_offset);
size_t WriteOatDexFiles(OutputStream* out, size_t file_offset, size_t relative_offset);
size_t WriteCode(OutputStream* out, size_t file_offset, size_t relative_offset);
size_t WriteCodeDexFiles(OutputStream* out, size_t file_offset, size_t relative_offset);
@@ -403,6 +403,12 @@
// Map for recording references to ArtMethod entries in .bss.
SafeMap<const DexFile*, BitVector> bss_method_entry_references_;
+ // Map for recording references to GcRoot<mirror::Class> entries in .bss.
+ SafeMap<const DexFile*, BitVector> bss_type_entry_references_;
+
+ // Map for recording references to GcRoot<mirror::String> entries in .bss.
+ SafeMap<const DexFile*, BitVector> bss_string_entry_references_;
+
// Map for allocating ArtMethod entries in .bss. Indexed by MethodReference for the target
// method in the dex file with the "method reference value comparator" for deduplication.
// The value is the target offset for patching, starting at `bss_start_ + bss_methods_offset_`.
@@ -476,6 +482,8 @@
uint32_t size_oat_dex_file_dex_layout_sections_;
uint32_t size_oat_dex_file_dex_layout_sections_alignment_;
uint32_t size_oat_dex_file_method_bss_mapping_offset_;
+ uint32_t size_oat_dex_file_type_bss_mapping_offset_;
+ uint32_t size_oat_dex_file_string_bss_mapping_offset_;
uint32_t size_oat_lookup_table_alignment_;
uint32_t size_oat_lookup_table_;
uint32_t size_oat_class_offsets_alignment_;
@@ -485,6 +493,8 @@
uint32_t size_oat_class_method_bitmaps_;
uint32_t size_oat_class_method_offsets_;
uint32_t size_method_bss_mappings_;
+ uint32_t size_type_bss_mappings_;
+ uint32_t size_string_bss_mappings_;
// The helper for processing relative patches is external so that we can patch across oat files.
MultiOatRelativePatcher* relative_patcher_;
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 5a3d34c..2c150876 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -32,6 +32,7 @@
#include "arch/instruction_set_features.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/bit_utils_iterator.h"
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "class_linker-inl.h"
@@ -51,6 +52,7 @@
#include "imtable-inl.h"
#include "indenter.h"
#include "subtype_check.h"
+#include "index_bss_mapping.h"
#include "interpreter/unstarted_runtime.h"
#include "linker/buffered_output_stream.h"
#include "linker/elf_builder.h"
@@ -535,6 +537,29 @@
}
cumulative.Add(data);
+
+ // Dump .bss entries.
+ DumpBssEntries(
+ os,
+ "ArtMethod",
+ oat_dex_file->GetMethodBssMapping(),
+ dex_file->NumMethodIds(),
+ static_cast<size_t>(GetInstructionSetPointerSize(instruction_set_)),
+ [=](uint32_t index) { return dex_file->PrettyMethod(index); });
+ DumpBssEntries(
+ os,
+ "Class",
+ oat_dex_file->GetTypeBssMapping(),
+ dex_file->NumTypeIds(),
+ sizeof(GcRoot<mirror::Class>),
+ [=](uint32_t index) { return dex_file->PrettyType(dex::TypeIndex(index)); });
+ DumpBssEntries(
+ os,
+ "String",
+ oat_dex_file->GetStringBssMapping(),
+ dex_file->NumStringIds(),
+ sizeof(GcRoot<mirror::String>),
+ [=](uint32_t index) { return dex_file->StringDataByIdx(dex::StringIndex(index)); });
}
os << "Cumulative dex file data\n";
cumulative.Dump(os);
@@ -1872,6 +1897,40 @@
}
}
+ template <typename NameGetter>
+ void DumpBssEntries(std::ostream& os,
+ const char* slot_type,
+ const IndexBssMapping* mapping,
+ uint32_t number_of_indexes,
+ size_t slot_size,
+ NameGetter name) {
+ os << ".bss mapping for " << slot_type << ": ";
+ if (mapping == nullptr) {
+ os << "empty.\n";
+ return;
+ }
+ size_t index_bits = IndexBssMappingEntry::IndexBits(number_of_indexes);
+ size_t num_valid_indexes = 0u;
+ for (const IndexBssMappingEntry& entry : *mapping) {
+ num_valid_indexes += 1u + POPCOUNT(entry.GetMask(index_bits));
+ }
+ os << mapping->size() << " entries for " << num_valid_indexes << " valid indexes.\n";
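+ // E.g. (illustration only) a single entry with index_and_mask == 0x00110011 would be
+ // reported as "1 entries for 3 valid indexes." followed by three slot lines.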
+ os << std::hex;
+ for (const IndexBssMappingEntry& entry : *mapping) {
+ uint32_t index = entry.GetIndex(index_bits);
+ uint32_t mask = entry.GetMask(index_bits);
+ size_t bss_offset = entry.bss_offset - POPCOUNT(mask) * slot_size;
+ for (uint32_t n : LowToHighBits(mask)) {
+ size_t current_index = index - (32u - index_bits) + n;
+ os << " 0x" << bss_offset << ": " << slot_type << ": " << name(current_index) << "\n";
+ bss_offset += slot_size;
+ }
+ DCHECK_EQ(bss_offset, entry.bss_offset);
+ os << " 0x" << bss_offset << ": " << slot_type << ": " << name(index) << "\n";
+ }
+ os << std::dec;
+ }
+
const OatFile& oat_file_;
const std::vector<const OatFile::OatDexFile*> oat_dex_files_;
const OatDumperOptions& options_;
diff --git a/profman/profman.cc b/profman/profman.cc
index 31d28e4..a5a5546 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -786,7 +786,7 @@
method_str = line.substr(method_sep_index + kMethodSep.size());
}
- TypeReference class_ref;
+ TypeReference class_ref(/* dex_file */ nullptr, dex::TypeIndex());
if (!FindClass(dex_files, klass, &class_ref)) {
LOG(WARNING) << "Could not find class: " << klass;
return false;
@@ -860,7 +860,8 @@
if (!HasSingleInvoke(class_ref, method_index, &dex_pc)) {
return false;
}
- std::vector<TypeReference> classes(inline_cache_elems.size());
+ std::vector<TypeReference> classes(inline_cache_elems.size(),
+ TypeReference(/* dex_file */ nullptr, dex::TypeIndex()));
size_t class_it = 0;
for (const std::string& ic_class : inline_cache_elems) {
if (!FindClass(dex_files, ic_class, &(classes[class_it++]))) {
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 69e4434..a136ccb 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -103,6 +103,7 @@
"gc/verification.cc",
"hprof/hprof.cc",
"image.cc",
+ "index_bss_mapping.cc",
"indirect_reference_table.cc",
"instrumentation.cc",
"intern_table.cc",
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index 5355267..4ac9967 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -32,29 +32,74 @@
namespace art {
-static inline void BssWriteBarrier(ArtMethod* outer_method) REQUIRES_SHARED(Locks::mutator_lock_) {
- // For AOT code, we need a write barrier for the class loader that holds the
- // GC roots in the .bss.
- const DexFile* dex_file = outer_method->GetDexFile();
- if (dex_file != nullptr &&
- dex_file->GetOatDexFile() != nullptr &&
- !dex_file->GetOatDexFile()->GetOatFile()->GetBssGcRoots().empty()) {
+static void StoreObjectInBss(ArtMethod* outer_method,
+ const OatFile* oat_file,
+ size_t bss_offset,
+ ObjPtr<mirror::Object> object) REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Used for storing Class or String in .bss GC roots.
+ static_assert(sizeof(GcRoot<mirror::Class>) == sizeof(GcRoot<mirror::Object>), "Size check.");
+ static_assert(sizeof(GcRoot<mirror::String>) == sizeof(GcRoot<mirror::Object>), "Size check.");
+ DCHECK_NE(bss_offset, IndexBssMappingLookup::npos);
+ DCHECK_ALIGNED(bss_offset, sizeof(GcRoot<mirror::Object>));
+ GcRoot<mirror::Object>* slot = reinterpret_cast<GcRoot<mirror::Object>*>(
+ const_cast<uint8_t*>(oat_file->BssBegin() + bss_offset));
+ DCHECK_GE(slot, oat_file->GetBssGcRoots().data());
+ DCHECK_LT(slot, oat_file->GetBssGcRoots().data() + oat_file->GetBssGcRoots().size());
+ if (slot->IsNull()) {
+ // This may race with another thread trying to store the very same value but that's OK.
+ *slot = GcRoot<mirror::Object>(object);
+ // We need a write barrier for the class loader that holds the GC roots in the .bss.
ObjPtr<mirror::ClassLoader> class_loader = outer_method->GetClassLoader();
+ Runtime* runtime = Runtime::Current();
if (kIsDebugBuild) {
- ClassTable* class_table =
- Runtime::Current()->GetClassLinker()->ClassTableForClassLoader(class_loader);
- CHECK(class_table != nullptr &&
- !class_table->InsertOatFile(dex_file->GetOatDexFile()->GetOatFile()))
+ ClassTable* class_table = runtime->GetClassLinker()->ClassTableForClassLoader(class_loader);
+ CHECK(class_table != nullptr && !class_table->InsertOatFile(oat_file))
<< "Oat file with .bss GC roots was not registered in class table: "
- << dex_file->GetOatDexFile()->GetOatFile()->GetLocation();
+ << oat_file->GetLocation();
}
if (class_loader != nullptr) {
- // Note that we emit the barrier before the compiled code stores the String or Class
- // as a GC root. This is OK as there is no suspend point point in between.
- Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
+ runtime->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
} else {
- Runtime::Current()->GetClassLinker()->WriteBarrierForBootOatFileBssRoots(
- dex_file->GetOatDexFile()->GetOatFile());
+ runtime->GetClassLinker()->WriteBarrierForBootOatFileBssRoots(oat_file);
+ }
+ } else {
+ // Each slot serves to store exactly one Class or String.
+ DCHECK_EQ(object, slot->Read());
+ }
+}
+
+static inline void StoreTypeInBss(ArtMethod* outer_method,
+ dex::TypeIndex type_idx,
+ ObjPtr<mirror::Class> resolved_type)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ const DexFile* dex_file = outer_method->GetDexFile();
+ DCHECK(dex_file != nullptr);
+ const OatDexFile* oat_dex_file = dex_file->GetOatDexFile();
+ if (oat_dex_file != nullptr) {
+ size_t bss_offset = IndexBssMappingLookup::GetBssOffset(oat_dex_file->GetTypeBssMapping(),
+ type_idx.index_,
+ dex_file->NumTypeIds(),
+ sizeof(GcRoot<mirror::Class>));
+ if (bss_offset != IndexBssMappingLookup::npos) {
+ StoreObjectInBss(outer_method, oat_dex_file->GetOatFile(), bss_offset, resolved_type);
+ }
+ }
+}
+
+static inline void StoreStringInBss(ArtMethod* outer_method,
+ dex::StringIndex string_idx,
+ ObjPtr<mirror::String> resolved_string)
+ REQUIRES_SHARED(Locks::mutator_lock_) __attribute__((optnone)) {
+ const DexFile* dex_file = outer_method->GetDexFile();
+ DCHECK(dex_file != nullptr);
+ const OatDexFile* oat_dex_file = dex_file->GetOatDexFile();
+ if (oat_dex_file != nullptr) {
+ size_t bss_offset = IndexBssMappingLookup::GetBssOffset(oat_dex_file->GetStringBssMapping(),
+ string_idx.index_,
+ dex_file->NumStringIds(),
+ sizeof(GcRoot<mirror::String>));
+ if (bss_offset != IndexBssMappingLookup::npos) {
+ StoreObjectInBss(outer_method, oat_dex_file->GetOatFile(), bss_offset, resolved_string);
}
}
}
@@ -71,14 +116,14 @@
mirror::Class* result =
ResolveVerifyAndClinit(dex::TypeIndex(type_idx), caller, self, true, false);
if (LIKELY(result != nullptr)) {
- BssWriteBarrier(caller_and_outer.outer_method);
+ StoreTypeInBss(caller_and_outer.outer_method, dex::TypeIndex(type_idx), result);
}
return result;
}
extern "C" mirror::Class* artInitializeTypeFromCode(uint32_t type_idx, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
- // Called when method->dex_cache_resolved_types_[] misses.
+ // Called when the .bss slot was empty or for a main-path runtime call.
ScopedQuickEntrypointChecks sqec(self);
auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(
self, CalleeSaveType::kSaveEverythingForClinit);
@@ -86,24 +131,21 @@
mirror::Class* result =
ResolveVerifyAndClinit(dex::TypeIndex(type_idx), caller, self, false, false);
if (LIKELY(result != nullptr)) {
- BssWriteBarrier(caller_and_outer.outer_method);
+ StoreTypeInBss(caller_and_outer.outer_method, dex::TypeIndex(type_idx), result);
}
return result;
}
extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
- // Called when caller isn't guaranteed to have access to a type and the dex cache may be
- // unpopulated.
+ // Called when caller isn't guaranteed to have access to a type.
ScopedQuickEntrypointChecks sqec(self);
auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(self,
CalleeSaveType::kSaveEverything);
ArtMethod* caller = caller_and_outer.caller;
mirror::Class* result =
ResolveVerifyAndClinit(dex::TypeIndex(type_idx), caller, self, false, true);
- if (LIKELY(result != nullptr)) {
- BssWriteBarrier(caller_and_outer.outer_method);
- }
+ // Do not StoreTypeInBss(); access check entrypoint is never used together with .bss.
return result;
}
@@ -115,7 +157,7 @@
ArtMethod* caller = caller_and_outer.caller;
mirror::String* result = ResolveStringFromCode(caller, dex::StringIndex(string_idx));
if (LIKELY(result != nullptr)) {
- BssWriteBarrier(caller_and_outer.outer_method);
+ StoreStringInBss(caller_and_outer.outer_method, dex::StringIndex(string_idx), result);
}
return result;
}
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 22c9a1d..2496aa0 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -28,10 +28,10 @@
#include "gc/accounting/card_table-inl.h"
#include "imt_conflict_table.h"
#include "imtable-inl.h"
+#include "index_bss_mapping.h"
#include "instrumentation.h"
#include "interpreter/interpreter.h"
#include "linear_alloc.h"
-#include "method_bss_mapping.h"
#include "method_handles.h"
#include "method_reference.h"
#include "mirror/class-inl.h"
@@ -1214,27 +1214,20 @@
// Update .bss entry in oat file if any.
if (called != nullptr && called_method.dex_file->GetOatDexFile() != nullptr) {
- const MethodBssMapping* mapping =
- called_method.dex_file->GetOatDexFile()->GetMethodBssMapping();
- if (mapping != nullptr) {
- auto pp = std::partition_point(
- mapping->begin(),
- mapping->end(),
- [called_method](const MethodBssMappingEntry& entry) {
- return entry.method_index < called_method.index;
- });
- if (pp != mapping->end() && pp->CoversIndex(called_method.index)) {
- size_t bss_offset = pp->GetBssOffset(called_method.index,
- static_cast<size_t>(kRuntimePointerSize));
- DCHECK_ALIGNED(bss_offset, static_cast<size_t>(kRuntimePointerSize));
- const OatFile* oat_file = called_method.dex_file->GetOatDexFile()->GetOatFile();
- ArtMethod** method_entry = reinterpret_cast<ArtMethod**>(const_cast<uint8_t*>(
- oat_file->BssBegin() + bss_offset));
- DCHECK_GE(method_entry, oat_file->GetBssMethods().data());
- DCHECK_LT(method_entry,
- oat_file->GetBssMethods().data() + oat_file->GetBssMethods().size());
- *method_entry = called;
- }
+ size_t bss_offset = IndexBssMappingLookup::GetBssOffset(
+ called_method.dex_file->GetOatDexFile()->GetMethodBssMapping(),
+ called_method.index,
+ called_method.dex_file->NumMethodIds(),
+ static_cast<size_t>(kRuntimePointerSize));
+ if (bss_offset != IndexBssMappingLookup::npos) {
+ DCHECK_ALIGNED(bss_offset, static_cast<size_t>(kRuntimePointerSize));
+ const OatFile* oat_file = called_method.dex_file->GetOatDexFile()->GetOatFile();
+ ArtMethod** method_entry = reinterpret_cast<ArtMethod**>(const_cast<uint8_t*>(
+ oat_file->BssBegin() + bss_offset));
+ DCHECK_GE(method_entry, oat_file->GetBssMethods().data());
+ DCHECK_LT(method_entry,
+ oat_file->GetBssMethods().data() + oat_file->GetBssMethods().size());
+ *method_entry = called;
}
}
}
diff --git a/runtime/index_bss_mapping.cc b/runtime/index_bss_mapping.cc
new file mode 100644
index 0000000..8d9d8cf
--- /dev/null
+++ b/runtime/index_bss_mapping.cc
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <algorithm>
+
+#include "index_bss_mapping.h"
+
+#include "base/bit_utils.h"
+#include "base/length_prefixed_array.h"
+
+namespace art {
+
+size_t IndexBssMappingEntry::GetBssOffset(size_t index_bits,
+ uint32_t index,
+ size_t slot_size) const {
+ uint32_t diff = GetIndex(index_bits) - index;
+ if (diff == 0u) {
+ return bss_offset;
+ }
+ size_t mask_bits = 32u - index_bits;
+ if (diff > mask_bits) {
+ return IndexBssMappingLookup::npos;
+ }
+ // Shift out the index bits and bits for lower indexes.
+ // Note that `index_bits + (mask_bits - diff) == 32 - diff`.
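+ // For example (illustration only), with index_bits == 16 and
+ // index_and_mask == 0x00110011 (indexes 1 and 5 masked below the highest index 17),
+ // looking up index 5 gives diff == 12 and mask_from_index == 0x00110011 >> 20 == 0x1;
+ // the low bit is set, so the result is bss_offset - POPCOUNT(0x1) * slot_size.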
+ uint32_t mask_from_index = index_and_mask >> (32u - diff);
+ if ((mask_from_index & 1u) != 0u) {
+ return bss_offset - POPCOUNT(mask_from_index) * slot_size;
+ } else {
+ return IndexBssMappingLookup::npos;
+ }
+}
+
+constexpr size_t IndexBssMappingLookup::npos;
+
+size_t IndexBssMappingLookup::GetBssOffset(const IndexBssMapping* mapping,
+ uint32_t index,
+ uint32_t number_of_indexes,
+ size_t slot_size) {
+ DCHECK_LT(index, number_of_indexes);
+ if (mapping == nullptr) {
+ return npos;
+ }
+ size_t index_bits = IndexBssMappingEntry::IndexBits(number_of_indexes);
+ uint32_t index_mask = IndexBssMappingEntry::IndexMask(index_bits);
+ auto it = std::partition_point(
+ mapping->begin(),
+ mapping->end(),
+ [=](const struct IndexBssMappingEntry& entry) {
+ return (entry.index_and_mask & index_mask) < index;
+ });
+ if (it == mapping->end()) {
+ return npos;
+ }
+ const IndexBssMappingEntry& entry = *it;
+ return entry.GetBssOffset(index_bits, index, slot_size);
+}
+
+} // namespace art
diff --git a/runtime/index_bss_mapping.h b/runtime/index_bss_mapping.h
new file mode 100644
index 0000000..d9f4e66
--- /dev/null
+++ b/runtime/index_bss_mapping.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_INDEX_BSS_MAPPING_H_
+#define ART_RUNTIME_INDEX_BSS_MAPPING_H_
+
+#include "base/bit_utils.h"
+#include "base/logging.h"
+
+namespace art {
+
+template<typename T> class LengthPrefixedArray;
+
+// IndexBssMappingEntry describes a mapping of one or more indexes to their offsets in the .bss.
+// A sorted array of IndexBssMappingEntry is used to describe the mapping of method indexes,
+// type indexes or string indexes to offsets of their assigned slots in the .bss.
+//
+// The highest index and a mask are stored in a single `uint32_t index_and_mask` and the split
+// between the index and the mask is provided externally. The "mask" bits specify whether some
+// of the previous indexes are mapped to immediately preceding slots. This is permissible only
+// if the slots are consecutive and in the same order as indexes.
+//
+// The .bss offset of the slot associated with the highest index is stored in plain form as
+// `bss_offset`. If the mask specifies any smaller indexes being mapped to immediately
+// preceding slots, their offsets are calculated using an externally supplied size of the slot.
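+//
+// For example (illustration only), with 16 index bits and pointer-sized slots assigned
+// consecutively to indexes 1, 5 and 17, a single entry covers all three:
+//   index_and_mask == 0x00110000 | 17 (mask bits for indexes 1 and 5, highest index 17),
+//   bss_offset == offset of the slot for index 17; the slots for 5 and 1 precede it.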
+struct IndexBssMappingEntry {
+ static size_t IndexBits(uint32_t number_of_indexes) {
+ DCHECK_NE(number_of_indexes, 0u);
+ return MinimumBitsToStore(number_of_indexes - 1u);
+ }
+
+ static uint32_t IndexMask(size_t index_bits) {
+ DCHECK_LE(index_bits, 32u);
+ constexpr uint32_t kAllOnes = static_cast<uint32_t>(-1);
+ // Handle `index_bits == 32u` explicitly; shifting uint32_t left by 32 is undefined behavior.
+ return (index_bits == 32u) ? kAllOnes : ~(kAllOnes << index_bits);
+ }
+
+ uint32_t GetIndex(size_t index_bits) const {
+ return index_and_mask & IndexMask(index_bits);
+ }
+
+ uint32_t GetMask(size_t index_bits) const {
+ DCHECK_LT(index_bits, 32u); // GetMask() is valid only if there is at least 1 mask bit.
+ return index_and_mask >> index_bits;
+ }
+
+ size_t GetBssOffset(size_t index_bits, uint32_t index, size_t slot_size) const;
+
+ uint32_t index_and_mask;
+ uint32_t bss_offset;
+};
+
+using IndexBssMapping = LengthPrefixedArray<IndexBssMappingEntry>;
+
+class IndexBssMappingLookup {
+ public:
+ static constexpr size_t npos = static_cast<size_t>(-1);
+
+ static size_t GetBssOffset(const IndexBssMapping* mapping,
+ uint32_t index,
+ uint32_t number_of_indexes,
+ size_t slot_size);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_INDEX_BSS_MAPPING_H_
diff --git a/runtime/method_bss_mapping.h b/runtime/method_bss_mapping.h
deleted file mode 100644
index 1476f93..0000000
--- a/runtime/method_bss_mapping.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_METHOD_BSS_MAPPING_H_
-#define ART_RUNTIME_METHOD_BSS_MAPPING_H_
-
-#include "base/bit_utils.h"
-#include "base/length_prefixed_array.h"
-
-namespace art {
-
-// MethodBssMappingEntry describes a mapping of up to 17 method indexes to their offsets
-// in the .bss. The highest index and its associated .bss offset are stored in plain form
-// as `method_index` and `bss_offset`, respectively, while the additional indexes can be
-// stored in compressed form if their associated .bss entries are consecutive and in the
-// method index order. Each of the 16 bits of the `index_mask` corresponds to one of the
-// previous 16 method indexes and indicates whether there is a .bss entry for that index.
-//
-struct MethodBssMappingEntry {
- bool CoversIndex(uint32_t method_idx) const {
- uint32_t diff = method_index - method_idx;
- return (diff == 0) || (diff <= 16 && ((index_mask >> (16u - diff)) & 1u) != 0);
- }
-
- uint32_t GetBssOffset(uint32_t method_idx, size_t entry_size) const {
- DCHECK(CoversIndex(method_idx));
- uint32_t diff = method_index - method_idx;
- if (diff == 0) {
- return bss_offset;
- } else {
- return bss_offset - POPCOUNT(index_mask >> (16u - diff)) * entry_size;
- }
- }
-
- uint16_t method_index;
- uint16_t index_mask;
- uint32_t bss_offset;
-};
-
-using MethodBssMapping = LengthPrefixedArray<MethodBssMappingEntry>;
-
-} // namespace art
-
-#endif // ART_RUNTIME_METHOD_BSS_MAPPING_H_
diff --git a/runtime/oat.h b/runtime/oat.h
index a3e8eef..9d21180 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,8 +32,8 @@
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- // Last oat version changed reason: Map boot image InternTable and ClassTable into app .bss.
- static constexpr uint8_t kOatVersion[] = { '1', '3', '4', '\0' };
+ // Last oat version changed reason: .bss index mapping change.
+ static constexpr uint8_t kOatVersion[] = { '1', '3', '5', '\0' };
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 726fbd0..5f54d5d 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -404,6 +404,79 @@
return true;
}
+static bool ReadIndexBssMapping(OatFile* oat_file,
+ /*inout*/const uint8_t** oat,
+ size_t dex_file_index,
+ const std::string& dex_file_location,
+ const char* tag,
+ /*out*/const IndexBssMapping** mapping,
+ std::string* error_msg) {
+ uint32_t index_bss_mapping_offset;
+ if (UNLIKELY(!ReadOatDexFileData(*oat_file, oat, &index_bss_mapping_offset))) {
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' truncated "
+ "after %s bss mapping offset",
+ oat_file->GetLocation().c_str(),
+ dex_file_index,
+ dex_file_location.c_str(),
+ tag);
+ return false;
+ }
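+ // Validate the offset before dereferencing: zero means "no mapping";
+ // otherwise it must lie within the oat file, be suitably aligned, and
+ // leave room for at least the length prefix of the mapping.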
+ const bool readable_index_bss_mapping_size =
+ index_bss_mapping_offset != 0u &&
+ index_bss_mapping_offset <= oat_file->Size() &&
+ IsAligned<alignof(IndexBssMapping)>(index_bss_mapping_offset) &&
+ oat_file->Size() - index_bss_mapping_offset >= IndexBssMapping::ComputeSize(0);
+ const IndexBssMapping* index_bss_mapping = readable_index_bss_mapping_size
+ ? reinterpret_cast<const IndexBssMapping*>(oat_file->Begin() + index_bss_mapping_offset)
+ : nullptr;
+ if (index_bss_mapping_offset != 0u &&
+ (UNLIKELY(index_bss_mapping == nullptr) ||
+ UNLIKELY(index_bss_mapping->size() == 0u) ||
+ UNLIKELY(oat_file->Size() - index_bss_mapping_offset <
+ IndexBssMapping::ComputeSize(index_bss_mapping->size())))) {
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu for '%s' with unaligned or "
+ " truncated %s bss mapping, offset %u of %zu, length %zu",
+ oat_file->GetLocation().c_str(),
+ dex_file_index,
+ dex_file_location.c_str(),
+ tag,
+ index_bss_mapping_offset,
+ oat_file->Size(),
+ index_bss_mapping != nullptr ? index_bss_mapping->size() : 0u);
+ return false;
+ }
+
+ *mapping = index_bss_mapping;
+ return true;
+}
+
+static void DCheckIndexToBssMapping(OatFile* oat_file,
+ uint32_t number_of_indexes,
+ size_t slot_size,
+ const IndexBssMapping* index_bss_mapping) {
+ if (kIsDebugBuild && index_bss_mapping != nullptr) {
+ size_t index_bits = IndexBssMappingEntry::IndexBits(number_of_indexes);
+ const IndexBssMappingEntry* prev_entry = nullptr;
+ for (const IndexBssMappingEntry& entry : *index_bss_mapping) {
+ CHECK_ALIGNED_PARAM(entry.bss_offset, slot_size);
+ // When loading a non-executable ElfOatFile, .bss symbols are not even
+ // looked up, so we cannot verify the offset against BssSize().
+ if (oat_file->IsExecutable()) {
+ CHECK_LT(entry.bss_offset, oat_file->BssSize());
+ }
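+ // Each set mask bit accounts for one .bss slot placed directly before
+ // bss_offset, the mask may only cover non-negative indexes, and
+ // consecutive entries must cover strictly increasing, non-overlapping
+ // index ranges.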
+ uint32_t mask = entry.GetMask(index_bits);
+ CHECK_LE(POPCOUNT(mask) * slot_size, entry.bss_offset);
+ size_t index_mask_span = (mask != 0u) ? 32u - index_bits - CTZ(mask) : 0u;
+ CHECK_LE(index_mask_span, entry.GetIndex(index_bits));
+ if (prev_entry != nullptr) {
+ CHECK_LT(prev_entry->GetIndex(index_bits), entry.GetIndex(index_bits) - index_mask_span);
+ }
+ prev_entry = &entry;
+ }
+ CHECK_LT(prev_entry->GetIndex(index_bits), number_of_indexes);
+ }
+}
+
bool OatFileBase::Setup(const char* abs_dex_location, std::string* error_msg) {
if (!GetOatHeader().IsValid()) {
std::string cause = GetOatHeader().GetValidationErrorMessage();
@@ -658,54 +731,23 @@
? reinterpret_cast<const DexLayoutSections*>(Begin() + dex_layout_sections_offset)
: nullptr;
- uint32_t method_bss_mapping_offset;
- if (UNLIKELY(!ReadOatDexFileData(*this, &oat, &method_bss_mapping_offset))) {
- *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' truncated "
- "after method bss mapping offset",
- GetLocation().c_str(),
- i,
- dex_file_location.c_str());
+ const IndexBssMapping* method_bss_mapping;
+ const IndexBssMapping* type_bss_mapping;
+ const IndexBssMapping* string_bss_mapping;
+ if (!ReadIndexBssMapping(
+ this, &oat, i, dex_file_location, "method", &method_bss_mapping, error_msg) ||
+ !ReadIndexBssMapping(
+ this, &oat, i, dex_file_location, "type", &type_bss_mapping, error_msg) ||
+ !ReadIndexBssMapping(
+ this, &oat, i, dex_file_location, "string", &string_bss_mapping, error_msg)) {
return false;
}
- const bool readable_method_bss_mapping_size =
- method_bss_mapping_offset != 0u &&
- method_bss_mapping_offset <= Size() &&
- IsAligned<alignof(MethodBssMapping)>(method_bss_mapping_offset) &&
- Size() - method_bss_mapping_offset >= MethodBssMapping::ComputeSize(0);
- const MethodBssMapping* method_bss_mapping = readable_method_bss_mapping_size
- ? reinterpret_cast<const MethodBssMapping*>(Begin() + method_bss_mapping_offset)
- : nullptr;
- if (method_bss_mapping_offset != 0u &&
- (UNLIKELY(method_bss_mapping == nullptr) ||
- UNLIKELY(method_bss_mapping->size() == 0u) ||
- UNLIKELY(Size() - method_bss_mapping_offset <
- MethodBssMapping::ComputeSize(method_bss_mapping->size())))) {
- *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu for '%s' with unaligned or "
- " truncated method bss mapping, offset %u of %zu, length %zu",
- GetLocation().c_str(),
- i,
- dex_file_location.c_str(),
- method_bss_mapping_offset,
- Size(),
- method_bss_mapping != nullptr ? method_bss_mapping->size() : 0u);
- return false;
- }
- if (kIsDebugBuild && method_bss_mapping != nullptr) {
- const MethodBssMappingEntry* prev_entry = nullptr;
- for (const MethodBssMappingEntry& entry : *method_bss_mapping) {
- CHECK_ALIGNED_PARAM(entry.bss_offset, static_cast<size_t>(pointer_size));
- CHECK_LT(entry.bss_offset, BssSize());
- CHECK_LE(POPCOUNT(entry.index_mask) * static_cast<size_t>(pointer_size), entry.bss_offset);
- size_t index_mask_span = (entry.index_mask != 0u) ? 16u - CTZ(entry.index_mask) : 0u;
- CHECK_LE(index_mask_span, entry.method_index);
- if (prev_entry != nullptr) {
- CHECK_LT(prev_entry->method_index, entry.method_index - index_mask_span);
- }
- prev_entry = &entry;
- }
- CHECK_LT(prev_entry->method_index,
- reinterpret_cast<const DexFile::Header*>(dex_file_pointer)->method_ids_size_);
- }
+ DCheckIndexToBssMapping(
+ this, header->method_ids_size_, static_cast<size_t>(pointer_size), method_bss_mapping);
+ DCheckIndexToBssMapping(
+ this, header->type_ids_size_, sizeof(GcRoot<mirror::Class>), type_bss_mapping);
+ DCheckIndexToBssMapping(
+ this, header->string_ids_size_, sizeof(GcRoot<mirror::String>), string_bss_mapping);
std::string canonical_location =
DexFileLoader::GetDexCanonicalLocation(dex_file_location.c_str());
@@ -718,6 +760,8 @@
dex_file_pointer,
lookup_table_data,
method_bss_mapping,
+ type_bss_mapping,
+ string_bss_mapping,
class_offsets_pointer,
dex_layout_sections);
oat_dex_files_storage_.push_back(oat_dex_file);
@@ -1538,7 +1582,9 @@
uint32_t dex_file_location_checksum,
const uint8_t* dex_file_pointer,
const uint8_t* lookup_table_data,
- const MethodBssMapping* method_bss_mapping_data,
+ const IndexBssMapping* method_bss_mapping_data,
+ const IndexBssMapping* type_bss_mapping_data,
+ const IndexBssMapping* string_bss_mapping_data,
const uint32_t* oat_class_offsets_pointer,
const DexLayoutSections* dex_layout_sections)
: oat_file_(oat_file),
@@ -1548,6 +1594,8 @@
dex_file_pointer_(dex_file_pointer),
lookup_table_data_(lookup_table_data),
method_bss_mapping_(method_bss_mapping_data),
+ type_bss_mapping_(type_bss_mapping_data),
+ string_bss_mapping_(string_bss_mapping_data),
oat_class_offsets_pointer_(oat_class_offsets_pointer),
dex_layout_sections_(dex_layout_sections) {
// Initialize TypeLookupTable.
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 73d64e0..36a4d7b 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -27,7 +27,7 @@
#include "compiler_filter.h"
#include "dex_file.h"
#include "dex_file_layout.h"
-#include "method_bss_mapping.h"
+#include "index_bss_mapping.h"
#include "mirror/class.h"
#include "oat.h"
#include "os.h"
@@ -444,10 +444,18 @@
return lookup_table_data_;
}
- const MethodBssMapping* GetMethodBssMapping() const {
+ const IndexBssMapping* GetMethodBssMapping() const {
return method_bss_mapping_;
}
+ const IndexBssMapping* GetTypeBssMapping() const {
+ return type_bss_mapping_;
+ }
+
+ const IndexBssMapping* GetStringBssMapping() const {
+ return string_bss_mapping_;
+ }
+
const uint8_t* GetDexFilePointer() const {
return dex_file_pointer_;
}
@@ -482,7 +490,9 @@
uint32_t dex_file_checksum,
const uint8_t* dex_file_pointer,
const uint8_t* lookup_table_data,
- const MethodBssMapping* method_bss_mapping,
+ const IndexBssMapping* method_bss_mapping,
+ const IndexBssMapping* type_bss_mapping,
+ const IndexBssMapping* string_bss_mapping,
const uint32_t* oat_class_offsets_pointer,
const DexLayoutSections* dex_layout_sections);
@@ -494,7 +504,9 @@
const uint32_t dex_file_location_checksum_ = 0u;
const uint8_t* const dex_file_pointer_ = nullptr;
const uint8_t* const lookup_table_data_ = nullptr;
- const MethodBssMapping* const method_bss_mapping_ = nullptr;
+ const IndexBssMapping* const method_bss_mapping_ = nullptr;
+ const IndexBssMapping* const type_bss_mapping_ = nullptr;
+ const IndexBssMapping* const string_bss_mapping_ = nullptr;
const uint32_t* const oat_class_offsets_pointer_ = 0u;
mutable std::unique_ptr<TypeLookupTable> lookup_table_;
const DexLayoutSections* const dex_layout_sections_ = nullptr;
diff --git a/runtime/type_reference.h b/runtime/type_reference.h
index 70bdc32..f7daa2b 100644
--- a/runtime/type_reference.h
+++ b/runtime/type_reference.h
@@ -30,7 +30,7 @@
// A type is located by its DexFile and the type_ids_ table index into that DexFile.
class TypeReference : public DexFileReference {
public:
- explicit TypeReference(const DexFile* file = nullptr, dex::TypeIndex index = dex::TypeIndex())
+ TypeReference(const DexFile* file, dex::TypeIndex index)
: DexFileReference(file, index.index_) {}
dex::TypeIndex TypeIndex() const {
diff --git a/test/669-checker-break/expected.txt b/test/669-checker-break/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/669-checker-break/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/669-checker-break/info.txt b/test/669-checker-break/info.txt
new file mode 100644
index 0000000..3408b3b
--- /dev/null
+++ b/test/669-checker-break/info.txt
@@ -0,0 +1 @@
+Test optimizations of "break" loops.
diff --git a/test/669-checker-break/src/Main.java b/test/669-checker-break/src/Main.java
new file mode 100644
index 0000000..e59061b
--- /dev/null
+++ b/test/669-checker-break/src/Main.java
@@ -0,0 +1,328 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for optimizations of break-loops, i.e. loops that break
+ * out of a while-true loop when the end condition is satisfied.
+ * In particular, the tests focus on break-loops that can be
+ * rewritten into regular countable loops (this may improve certain
+ * loops generated by the Kotlin compiler for inclusive ranges).
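+ *
+ * Roughly, a break-loop of the shape
+ *
+ *   int i = l;
+ *   if (l <= u) {
+ *     while (true) { body(i); if (i == u) break; i++; }
+ *   }
+ *
+ * runs body(i) for every i in [l, u], so the compiler can replace the
+ * NotEqual exit at the bottom of the loop with a LessThanOrEqual test at
+ * the top, provided u + 1 cannot overflow (see breakLoopUnsafeConst).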
+ */
+public class Main {
+
+ /// CHECK-START: int Main.breakLoop(int[]) induction_var_analysis (before)
+ /// CHECK-DAG: <<Par:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Nil:l\d+>> NullCheck [<<Par>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<Zero>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Bnd:i\d+>> BoundsCheck [<<Phi>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Nil>>,<<Bnd>>,<<One>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<NE:z\d+>> NotEqual [{{i\d+}},<<Phi>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: If [<<NE>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<One>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: int Main.breakLoop(int[]) induction_var_analysis (after)
+ /// CHECK-DAG: <<Par:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Nil:l\d+>> NullCheck [<<Par>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<Zero>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<LE:z\d+>> LessThanOrEqual [<<Phi>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: If [<<LE>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Bnd:i\d+>> BoundsCheck [<<Phi>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Nil>>,<<Bnd>>,<<One>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<One>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: int Main.breakLoop(int[]) loop_optimization (after)
+ /// CHECK-DAG: <<Par:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Four:i\d+>> IntConstant 4 loop:none
+ /// CHECK-DAG: <<Nil:l\d+>> NullCheck [<<Par>>] loop:none
+ /// CHECK-DAG: <<Rep:d\d+>> VecReplicateScalar [<<One>>] loop:none
+ /// CHECK-DAG: VecStore [<<Nil>>,<<Phi:i\d+>>,<<Rep>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi>>,<<Four>>] loop:<<Loop>> outer_loop:none
+ static int breakLoop(int[] a) {
+ int l = 0;
+ int u = a.length - 1;
+ int i = l;
+ if (l <= u) {
+ while (true) {
+ a[i] = 1;
+ if (i == u) break;
+ i++;
+ }
+ }
+ return i;
+ }
+
+ /// CHECK-START: int Main.breakLoopDown(int[]) induction_var_analysis (before)
+ /// CHECK-DAG: <<Par:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<MOne:i\d+>> IntConstant -1 loop:none
+ /// CHECK-DAG: <<Two:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Nil:l\d+>> NullCheck [<<Par>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [{{i\d+}},<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Bnd:i\d+>> BoundsCheck [<<Phi>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Nil>>,<<Bnd>>,<<Two>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<NE:z\d+>> NotEqual [<<Phi>>,<<Zero>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: If [<<NE>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<MOne>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: int Main.breakLoopDown(int[]) induction_var_analysis (after)
+ /// CHECK-DAG: <<Par:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<MOne:i\d+>> IntConstant -1 loop:none
+ /// CHECK-DAG: <<Two:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Nil:l\d+>> NullCheck [<<Par>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [{{i\d+}},<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<GE:z\d+>> GreaterThanOrEqual [<<Phi>>,<<Zero>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: If [<<GE>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Bnd:i\d+>> BoundsCheck [<<Phi>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Nil>>,<<Bnd>>,<<Two>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<MOne>>] loop:<<Loop>> outer_loop:none
+ static int breakLoopDown(int[] a) {
+ int l = 0;
+ int u = a.length - 1;
+ int i = u;
+ if (u >= l) {
+ while (true) {
+ a[i] = 2;
+ if (i == l) break;
+ i--;
+ }
+ }
+ return i;
+ }
+
+ /// CHECK-START: int Main.breakLoopSafeConst(int[]) induction_var_analysis (before)
+ /// CHECK-DAG: <<Par:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Three:i\d+>> IntConstant 3 loop:none
+ /// CHECK-DAG: <<L1:i\d+>> IntConstant 2147483631 loop:none
+ /// CHECK-DAG: <<L2:i\d+>> IntConstant 2147483646 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<L1>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Sub:i\d+>> Sub [<<Phi>>,<<L1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Nil:l\d+>> NullCheck [<<Par>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Bnd:i\d+>> BoundsCheck [<<Sub>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Nil>>,<<Bnd>>,<<Three>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<NE:z\d+>> NotEqual [<<Phi>>,<<L2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: If [<<NE>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<One>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: int Main.breakLoopSafeConst(int[]) induction_var_analysis (after)
+ /// CHECK-DAG: <<Par:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Three:i\d+>> IntConstant 3 loop:none
+ /// CHECK-DAG: <<L1:i\d+>> IntConstant 2147483631 loop:none
+ /// CHECK-DAG: <<L2:i\d+>> IntConstant 2147483646 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<L1>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<LE:z\d+>> LessThanOrEqual [<<Phi>>,<<L2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Sub:i\d+>> Sub [<<Phi>>,<<L1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Nil:l\d+>> NullCheck [<<Par>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Bnd:i\d+>> BoundsCheck [<<Sub>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Nil>>,<<Bnd>>,<<Three>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<One>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START-ARM64: int Main.breakLoopSafeConst(int[]) loop_optimization (after)
+ /// CHECK-DAG: <<Par:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Three:i\d+>> IntConstant 3 loop:none
+ /// CHECK-DAG: <<Four:i\d+>> IntConstant 4 loop:none
+ /// CHECK-DAG: <<Rep:d\d+>> VecReplicateScalar [<<Three>>] loop:none
+ /// CHECK-DAG: VecStore [<<Par>>,<<Phi:i\d+>>,<<Rep>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi>>,<<Four>>] loop:<<Loop>> outer_loop:none
+ static int breakLoopSafeConst(int[] a) {
+ int l = Integer.MAX_VALUE - 16;
+ int u = Integer.MAX_VALUE - 1;
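+ // Since u + 1 still fits in an int, the exit can be rewritten into an
+ // "i <= u" test without overflowing the induction variable (note the
+ // LessThanOrEqual in the checker expectations above).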
+ int i = l;
+ if (l <= u) { // will be removed by simplifier
+ while (true) {
+ a[i - l] = 3;
+ if (i == u) break;
+ i++;
+ }
+ }
+ return i;
+ }
+
+ /// CHECK-START: int Main.breakLoopUnsafeConst(int[]) induction_var_analysis (before)
+ /// CHECK-DAG: <<Par:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Four:i\d+>> IntConstant 4 loop:none
+ /// CHECK-DAG: <<L1:i\d+>> IntConstant 2147483632 loop:none
+ /// CHECK-DAG: <<L2:i\d+>> IntConstant 2147483647 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<L1>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Sub:i\d+>> Sub [<<Phi>>,<<L1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Nil:l\d+>> NullCheck [<<Par>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Bnd:i\d+>> BoundsCheck [<<Sub>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Nil>>,<<Bnd>>,<<Four>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<NE:z\d+>> NotEqual [<<Phi>>,<<L2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: If [<<NE>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<One>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: int Main.breakLoopUnsafeConst(int[]) induction_var_analysis (after)
+ /// CHECK-DAG: NotEqual
+ /// CHECK-NOT: LessThanOrEqual
+ static int breakLoopUnsafeConst(int[] a) {
+ int l = Integer.MAX_VALUE - 15;
+ int u = Integer.MAX_VALUE;
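+ // With u == Integer.MAX_VALUE, an "i <= u" test would always be true and
+ // incrementing past u would overflow, so the rewrite must be rejected
+ // (the checker expects NotEqual to survive).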
+ int i = l;
+ if (l <= u) { // will be removed by simplifier
+ while (true) {
+ a[i - l] = 4;
+ if (i == u) break; // rewriting exit not safe!
+ i++;
+ }
+ }
+ return i;
+ }
+
+ /// CHECK-START: int Main.breakLoopNastyPhi(int[]) induction_var_analysis (before)
+ /// CHECK-DAG: <<Par:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Five:i\d+>> IntConstant 5 loop:none
+ /// CHECK-DAG: <<M123:i\d+>> IntConstant -123 loop:none
+ /// CHECK-DAG: <<Nil:l\d+>> NullCheck [<<Par>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<Zero>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Wrap:i\d+>> Phi [<<M123>>,<<Phi>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Bnd:i\d+>> BoundsCheck [<<Phi>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet [<<Nil>>,<<Bnd>>,<<Five>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<NE:z\d+>> NotEqual [{{i\d+}},<<Phi>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: If [<<NE>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<One>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Comb:i\d+>> Phi [<<M123>>,<<Wrap>>] loop:none
+ /// CHECK-DAG: Return [<<Comb>>] loop:none
+ //
+ /// CHECK-START: int Main.breakLoopNastyPhi(int[]) induction_var_analysis (after)
+ /// CHECK-DAG: NotEqual
+ /// CHECK-NOT: LessThanOrEqual
+ static int breakLoopNastyPhi(int[] a) {
+ int l = 0;
+ int u = a.length - 1;
+ int x = -123;
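+ // x trails i by one iteration, so its value after the loop depends on the
+ // exact exit iteration; this wrap-around phi blocks the exit rewrite
+ // (the checker expects NotEqual to survive).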
+ if (l <= u) {
+ int i = l;
+ while (true) {
+ a[i] = 5;
+ if (i == u) break;
+ x = i;
+ i++;
+ }
+ }
+ return x; // keep another phi live
+ }
+
+ /// CHECK-START: int Main.breakLoopReduction(int[]) induction_var_analysis (before)
+ /// CHECK-DAG: <<Par:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Nil:l\d+>> NullCheck [<<Par>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<Zero>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Red:i\d+>> Phi [<<Zero>>,<<RedI:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Bnd:i\d+>> BoundsCheck [<<Phi>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet [<<Nil>>,<<Bnd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<RedI>> Add [<<Red>>,<<Get>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<NE:z\d+>> NotEqual [{{i\d+}},<<Phi>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: If [<<NE>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<One>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Comb:i\d+>> Phi [<<Zero>>,<<RedI>>] loop:none
+ /// CHECK-DAG: Return [<<Comb>>] loop:none
+ //
+ /// CHECK-START: int Main.breakLoopReduction(int[]) induction_var_analysis (after)
+ /// CHECK-DAG: <<Par:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<One:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Nil:l\d+>> NullCheck [<<Par>>] loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<Zero>>,<<AddI:i\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Red:i\d+>> Phi [<<Zero>>,<<RedI:i\d+>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<LE:z\d+>> LessThanOrEqual [<<Phi>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: If [<<LE>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Bnd:i\d+>> BoundsCheck [<<Phi>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get:i\d+>> ArrayGet [<<Nil>>,<<Bnd>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<RedI>> Add [<<Red>>,<<Get>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddI>> Add [<<Phi>>,<<One>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Comb:i\d+>> Phi [<<Zero>>,<<Red>>] loop:none
+ /// CHECK-DAG: Return [<<Comb>>] loop:none
+ //
+ /// CHECK-START-ARM64: int Main.breakLoopReduction(int[]) loop_optimization (after)
+ /// CHECK-DAG: <<Par:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Exp:d\d+>> VecSetScalars [<<Zero>>] loop:none
+ /// CHECK-DAG: <<VPhi:d\d+>> Phi [<<Exp>>,<<VAdd:d\d+>>] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<VLoad:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<VAdd>> VecAdd [<<VPhi>>,<<VLoad>>] loop:<<Loop>> outer_loop:none
+ static int breakLoopReduction(int[] a) {
+ int l = 0;
+ int u = a.length - 1;
+ int x = 0;
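+ // A plain reduction does not block the rewrite: the exit becomes a
+ // LessThanOrEqual test and on ARM64 the summation is vectorized
+ // (see the VecAdd in the checker expectations above).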
+ if (l <= u) {
+ int i = l;
+ while (true) {
+ x += a[i];
+ if (i == u) break;
+ i++;
+ }
+ }
+ return x;
+ }
+
+ //
+ // Test driver.
+ //
+
+ public static void main(String[] args) {
+ int[] a = new int[100];
+
+ expectEquals(99, breakLoop(a));
+ for (int i = 0; i < a.length; i++) {
+ expectEquals(1, a[i]);
+ }
+
+ expectEquals(0, breakLoopDown(a));
+ for (int i = 0; i < a.length; i++) {
+ expectEquals(2, a[i]);
+ }
+
+ expectEquals(Integer.MAX_VALUE - 1, breakLoopSafeConst(a));
+ for (int i = 0; i < a.length; i++) {
+ int e = i < 16 ? 3 : 2;
+ expectEquals(e, a[i]);
+ }
+
+ expectEquals(Integer.MAX_VALUE, breakLoopUnsafeConst(a));
+ for (int i = 0; i < a.length; i++) {
+ int e = i < 16 ? 4 : 2;
+ expectEquals(e, a[i]);
+ }
+
+ expectEquals(98, breakLoopNastyPhi(a));
+ for (int i = 0; i < a.length; i++) {
+ expectEquals(5, a[i]);
+ }
+
+ expectEquals(500, breakLoopReduction(a));
+
+ System.out.println("passed");
+ }
+
+ private static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+}