Diffstat (limited to 'compiler')
-rw-r--r--  compiler/Android.mk                                |  63
-rw-r--r--  compiler/dex/quick/mips/call_mips.cc               |  26
-rw-r--r--  compiler/dex/quick/mips/codegen_mips.h             |   1
-rw-r--r--  compiler/dex/quick/mips/int_mips.cc                |   6
-rw-r--r--  compiler/dex/quick/mips/target_mips.cc             |   2
-rw-r--r--  compiler/dex/quick/mips/utility_mips.cc            |  18
-rw-r--r--  compiler/dex/quick/mir_to_lir.cc                   |   3
-rwxr-xr-x  compiler/dex/quick/x86/int_x86.cc                  |  15
-rw-r--r--  compiler/image_writer.cc                           |  15
-rw-r--r--  compiler/image_writer.h                            |   2
-rw-r--r--  compiler/jit/jit_compiler.cc                       |  90
-rw-r--r--  compiler/optimizing/builder.cc                     | 300
-rw-r--r--  compiler/optimizing/builder.h                      |  18
-rw-r--r--  compiler/optimizing/code_generator.h               |   6
-rw-r--r--  compiler/optimizing/code_generator_arm.cc          |  12
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc        |  34
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc       |  12
-rw-r--r--  compiler/optimizing/code_generator_x86.cc          |  12
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc       |  12
-rw-r--r--  compiler/optimizing/common_arm64.h                 |   9
-rw-r--r--  compiler/optimizing/graph_visualizer.cc            |   4
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc      |  43
-rw-r--r--  compiler/optimizing/locations.h                    |   4
-rw-r--r--  compiler/optimizing/nodes.h                        |  62
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc         |   2
-rw-r--r--  compiler/optimizing/reference_type_propagation.cc  |   7
-rw-r--r--  compiler/optimizing/register_allocator.cc          |   7
-rw-r--r--  compiler/utils/arm/assembler_thumb2.cc             |   1
28 files changed, 579 insertions(+), 207 deletions(-)
diff --git a/compiler/Android.mk b/compiler/Android.mk
index dd214067a6..39470785ab 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -181,6 +181,7 @@ LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES := \
# $(1): target or host
# $(2): ndebug or debug
+# $(3): static or shared (empty means shared, applies only for host)
define build-libart-compiler
ifneq ($(1),target)
ifneq ($(1),host)
@@ -195,6 +196,7 @@ define build-libart-compiler
art_target_or_host := $(1)
art_ndebug_or_debug := $(2)
+ art_static_or_shared := $(3)
include $(CLEAR_VARS)
ifeq ($$(art_target_or_host),host)
@@ -203,17 +205,29 @@ define build-libart-compiler
LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
ifeq ($$(art_ndebug_or_debug),ndebug)
LOCAL_MODULE := libart-compiler
- LOCAL_SHARED_LIBRARIES += libart
+ ifeq ($$(art_static_or_shared), static)
+ LOCAL_STATIC_LIBRARIES += libart
+ else
+ LOCAL_SHARED_LIBRARIES += libart
+ endif
ifeq ($$(art_target_or_host),target)
LOCAL_FDO_SUPPORT := true
endif
else # debug
LOCAL_MODULE := libartd-compiler
- LOCAL_SHARED_LIBRARIES += libartd
+ ifeq ($$(art_static_or_shared), static)
+ LOCAL_STATIC_LIBRARIES += libartd
+ else
+ LOCAL_SHARED_LIBRARIES += libartd
+ endif
endif
LOCAL_MODULE_TAGS := optional
- LOCAL_MODULE_CLASS := SHARED_LIBRARIES
+ ifeq ($$(art_static_or_shared), static)
+ LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+ else
+ LOCAL_MODULE_CLASS := SHARED_LIBRARIES
+ endif
LOCAL_SRC_FILES := $$(LIBART_COMPILER_SRC_FILES)
@@ -237,6 +251,9 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT
LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
LOCAL_ASFLAGS += $(ART_HOST_ASFLAGS)
LOCAL_LDLIBS := $(ART_HOST_LDLIBS)
+ ifeq ($$(art_static_or_shared),static)
+ LOCAL_LDFLAGS += -static
+ endif
ifeq ($$(art_ndebug_or_debug),debug)
LOCAL_CFLAGS += $(ART_HOST_DEBUG_CFLAGS)
else
@@ -254,9 +271,17 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
# Vixl assembly support for ARM64 targets.
ifeq ($$(art_ndebug_or_debug),debug)
- LOCAL_SHARED_LIBRARIES += libvixld
+ ifeq ($$(art_static_or_shared), static)
+ LOCAL_WHOLE_STATIC_LIBRARIES += libvixld
+ else
+ LOCAL_SHARED_LIBRARIES += libvixld
+ endif
else
- LOCAL_SHARED_LIBRARIES += libvixl
+ ifeq ($$(art_static_or_shared), static)
+ LOCAL_WHOLE_STATIC_LIBRARIES += libvixl
+ else
+ LOCAL_SHARED_LIBRARIES += libvixl
+ endif
endif
LOCAL_NATIVE_COVERAGE := $(ART_COVERAGE)
@@ -267,7 +292,11 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT
include $(BUILD_SHARED_LIBRARY)
else # host
LOCAL_MULTILIB := both
- include $(BUILD_HOST_SHARED_LIBRARY)
+ ifeq ($$(art_static_or_shared), static)
+ include $(BUILD_HOST_STATIC_LIBRARY)
+ else
+ include $(BUILD_HOST_SHARED_LIBRARY)
+ endif
endif
ifeq ($$(art_target_or_host),target)
@@ -278,20 +307,38 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT
endif
else # host
ifeq ($$(art_ndebug_or_debug),debug)
- $(HOST_OUT_EXECUTABLES)/dex2oatd: $$(LOCAL_INSTALLED_MODULE)
+ ifeq ($$(art_static_or_shared),static)
+ $(HOST_OUT_EXECUTABLES)/dex2oatds: $$(LOCAL_INSTALLED_MODULE)
+ else
+ $(HOST_OUT_EXECUTABLES)/dex2oatd: $$(LOCAL_INSTALLED_MODULE)
+ endif
else
- $(HOST_OUT_EXECUTABLES)/dex2oat: $$(LOCAL_INSTALLED_MODULE)
+ ifeq ($$(art_static_or_shared),static)
+ $(HOST_OUT_EXECUTABLES)/dex2oats: $$(LOCAL_INSTALLED_MODULE)
+ else
+ $(HOST_OUT_EXECUTABLES)/dex2oat: $$(LOCAL_INSTALLED_MODULE)
+ endif
endif
endif
+ # Clear locally defined variables.
+ art_target_or_host :=
+ art_ndebug_or_debug :=
+ art_static_or_shared :=
endef
# We always build dex2oat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target.
ifeq ($(ART_BUILD_HOST_NDEBUG),true)
$(eval $(call build-libart-compiler,host,ndebug))
+ ifeq ($(ART_BUILD_HOST_STATIC),true)
+ $(eval $(call build-libart-compiler,host,ndebug,static))
+ endif
endif
ifeq ($(ART_BUILD_HOST_DEBUG),true)
$(eval $(call build-libart-compiler,host,debug))
+ ifeq ($(ART_BUILD_HOST_STATIC),true)
+ $(eval $(call build-libart-compiler,host,debug,static))
+ endif
endif
ifeq ($(ART_BUILD_TARGET_NDEBUG),true)
$(eval $(call build-libart-compiler,target,ndebug))
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index da12d8e3bf..853980d10a 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -24,6 +24,7 @@
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "driver/compiler_driver.h"
+#include "driver/compiler_options.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "gc/accounting/card_table.h"
#include "mips_lir.h"
@@ -285,12 +286,25 @@ void MipsMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method)
RegStorage check_reg = AllocPtrSizeTemp();
RegStorage new_sp = AllocPtrSizeTemp();
const RegStorage rs_sp = TargetPtrReg(kSp);
+ const size_t kStackOverflowReservedUsableBytes = GetStackOverflowReservedBytes(target);
+ const bool large_frame = static_cast<size_t>(frame_size_) > kStackOverflowReservedUsableBytes;
+ bool generate_explicit_stack_overflow_check = large_frame ||
+ !cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks();
+
if (!skip_overflow_check) {
- // Load stack limit.
- if (cu_->target64) {
- LoadWordDisp(TargetPtrReg(kSelf), Thread::StackEndOffset<8>().Int32Value(), check_reg);
+ if (generate_explicit_stack_overflow_check) {
+ // Load stack limit.
+ if (cu_->target64) {
+ LoadWordDisp(TargetPtrReg(kSelf), Thread::StackEndOffset<8>().Int32Value(), check_reg);
+ } else {
+ Load32Disp(TargetPtrReg(kSelf), Thread::StackEndOffset<4>().Int32Value(), check_reg);
+ }
} else {
- Load32Disp(TargetPtrReg(kSelf), Thread::StackEndOffset<4>().Int32Value(), check_reg);
+ // Implicit stack overflow check.
+ // Generate a load from [sp, #-overflowsize]. If this is in the stack
+ // redzone we will get a segmentation fault.
+ Load32Disp(rs_sp, -kStackOverflowReservedUsableBytes, rs_rZERO);
+ MarkPossibleStackOverflowException();
}
}
// Spill core callee saves.
@@ -298,7 +312,7 @@ void MipsMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method)
// NOTE: promotion of FP regs currently unsupported, thus no FP spill.
DCHECK_EQ(num_fp_spills_, 0);
const int frame_sub = frame_size_ - spill_count * ptr_size;
- if (!skip_overflow_check) {
+ if (!skip_overflow_check && generate_explicit_stack_overflow_check) {
class StackOverflowSlowPath : public LIRSlowPath {
public:
StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace)
@@ -329,6 +343,8 @@ void MipsMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method)
OpRegCopy(rs_sp, new_sp); // Establish stack.
cfi_.AdjustCFAOffset(frame_sub);
} else {
+ // Here if skip_overflow_check or doing implicit stack overflow check.
+ // Just make room on the stack for the frame now.
OpRegImm(kOpSub, rs_sp, frame_sub);
cfi_.AdjustCFAOffset(frame_sub);
}
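The entry-sequence change above picks between an explicit compare against Thread::StackEnd and a single probe load below SP; the probe faults in the protected red zone and the runtime reports the overflow from the fault PC. The following is a standalone sketch of that selection logic only, not ART code; all names (OverflowCheck, ChooseCheck) are invented for illustration.

#include <cstddef>
#include <cstdio>

enum class OverflowCheck { kNone, kImplicitProbe, kExplicitCompare };

OverflowCheck ChooseCheck(size_t frame_size, size_t reserved_bytes,
                          bool skip_check, bool implicit_checks_enabled) {
  if (skip_check) {
    return OverflowCheck::kNone;             // Leaf/small method, nothing to emit.
  }
  bool large_frame = frame_size > reserved_bytes;
  if (large_frame || !implicit_checks_enabled) {
    return OverflowCheck::kExplicitCompare;  // Compare SP against the stack-end limit.
  }
  return OverflowCheck::kImplicitProbe;      // Load from [sp, -reserved_bytes].
}

int main() {
  std::printf("%d\n", static_cast<int>(ChooseCheck(128, 8192, false, true)));    // implicit probe
  std::printf("%d\n", static_cast<int>(ChooseCheck(16384, 8192, false, true)));  // explicit compare
}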
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 713264e0d9..43fbcbdd2b 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -79,6 +79,7 @@ class MipsMir2Lir FINAL : public Mir2Lir {
OVERRIDE;
LIR* CheckSuspendUsingLoad() OVERRIDE;
RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
+ void ForceImplicitNullCheck(RegStorage reg, int opt_flags, bool is_wide);
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
VolatileKind is_volatile) OVERRIDE;
LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index f5ad7c7c33..1099303f7d 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -691,6 +691,9 @@ void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
reg_len = AllocTemp();
// Get len.
Load32Disp(rl_array.reg, len_offset, reg_len);
+ MarkPossibleNullPointerException(opt_flags);
+ } else {
+ ForceImplicitNullCheck(rl_array.reg, opt_flags, false);
}
// reg_ptr -> array data.
OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
@@ -781,6 +784,9 @@ void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
// NOTE: max live temps(4) here.
// Get len.
Load32Disp(rl_array.reg, len_offset, reg_len);
+ MarkPossibleNullPointerException(opt_flags);
+ } else {
+ ForceImplicitNullCheck(rl_array.reg, opt_flags, false);
}
// reg_ptr -> array data.
OpRegImm(kOpAdd, reg_ptr, data_offset);
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index 4c0bd8378b..b098bc2b5d 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -791,6 +791,7 @@ LIR* MipsMir2Lir::GenAtomic64Load(RegStorage r_base, int displacement, RegStorag
RegStorage reg_ptr = TargetReg(kArg0);
OpRegRegImm(kOpAdd, reg_ptr, r_base, displacement);
RegStorage r_tgt = LoadHelper(kQuickA64Load);
+ ForceImplicitNullCheck(reg_ptr, 0, true); // is_wide = true
LIR *ret = OpReg(kOpBlx, r_tgt);
RegStorage reg_ret;
if (cu_->target64) {
@@ -813,6 +814,7 @@ LIR* MipsMir2Lir::GenAtomic64Store(RegStorage r_base, int displacement, RegStora
LockCallTemps(); // Using fixed registers.
RegStorage temp_ptr = AllocTemp();
OpRegRegImm(kOpAdd, temp_ptr, r_base, displacement);
+ ForceImplicitNullCheck(temp_ptr, 0, true); // is_wide = true
RegStorage temp_value = AllocTempWide();
OpRegCopyWide(temp_value, r_src);
if (cu_->target64) {
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 95c61cd4ed..37e5804f18 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -21,7 +21,9 @@
#include "base/logging.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/reg_storage_eq.h"
+#include "dex/mir_graph.h"
#include "driver/compiler_driver.h"
+#include "driver/compiler_options.h"
#include "mips_lir.h"
namespace art {
@@ -830,6 +832,22 @@ LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStora
return res;
}
+void MipsMir2Lir::ForceImplicitNullCheck(RegStorage reg, int opt_flags, bool is_wide) {
+ if (cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
+ if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
+ return;
+ }
+ // Force an implicit null check by performing a memory operation (load) from the given
+ // register with offset 0. This will cause a signal if the register contains 0 (null).
+ LIR* load = Load32Disp(reg, LOWORD_OFFSET, rs_rZERO);
+ MarkSafepointPC(load);
+ if (is_wide) {
+ load = Load32Disp(reg, HIWORD_OFFSET, rs_rZERO);
+ MarkSafepointPC(load);
+ }
+ }
+}
+
LIR* MipsMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
VolatileKind is_volatile) {
if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble))
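ForceImplicitNullCheck above relies on the same trick: loading through the (possibly null) base register faults, and the safepoint PC recorded by MarkSafepointPC lets the runtime rethrow the fault as a NullPointerException. A minimal sketch of the probe policy, with invented names (PlanNullProbe) and 32-bit MIPS word offsets assumed:

#include <cstdint>
#include <cstdio>
#include <vector>

std::vector<int32_t> PlanNullProbe(bool implicit_checks, bool known_non_null, bool is_wide) {
  std::vector<int32_t> probe_offsets;
  if (!implicit_checks || known_non_null) {
    return probe_offsets;          // No probe needed.
  }
  probe_offsets.push_back(0);      // LOWORD: load [reg + 0].
  if (is_wide) {
    probe_offsets.push_back(4);    // HIWORD: also load [reg + 4] for 64-bit accesses.
  }
  return probe_offsets;
}

int main() {
  for (int32_t off : PlanNullProbe(true, false, true)) {
    std::printf("probe at +%d\n", static_cast<int>(off));
  }
}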
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 7ca03cf0ee..c50246d182 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -193,7 +193,8 @@ void Mir2Lir::LoadArgDirect(size_t in_position, RegLocation rl_dest) {
}
if (!reg_arg.Valid()) {
- LoadBaseDisp(TargetPtrReg(kSp), offset, rl_dest.reg, rl_dest.wide ? k64 : k32, kNotVolatile);
+ OpSize op_size = rl_dest.wide ? k64 : (rl_dest.ref ? kReference : k32);
+ LoadBaseDisp(TargetPtrReg(kSp), offset, rl_dest.reg, op_size, kNotVolatile);
} else {
if (rl_dest.wide) {
OpRegCopyWide(rl_dest.reg, reg_arg);
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index d993d934a5..d1fe167bb4 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -1336,9 +1336,24 @@ bool X86Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
}
OpRegReg(kOpRev, rl_result.reg.GetLow(), rl_i.reg.GetHigh());
OpRegReg(kOpRev, rl_result.reg.GetHigh(), r_i_low);
+ // Free up at least one input register if it was a temp. Otherwise we may be in the bad
+ // situation of not having a temp available for SwapBits. Make sure it's not overlapping
+ // with the output, though.
if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
+ // There's definitely a free temp after this.
FreeTemp(r_i_low);
+ } else {
+ // We opportunistically release both here. That saves duplication of the register state
+ // lookup (to see if it's actually a temp).
+ if (rl_i.reg.GetLowReg() != rl_result.reg.GetHighReg()) {
+ FreeTemp(rl_i.reg.GetLow());
+ }
+ if (rl_i.reg.GetHighReg() != rl_result.reg.GetLowReg() &&
+ rl_i.reg.GetHighReg() != rl_result.reg.GetHighReg()) {
+ FreeTemp(rl_i.reg.GetHigh());
+ }
}
+
SwapBits(rl_result.reg.GetLow(), 1, 0x55555555);
SwapBits(rl_result.reg.GetLow(), 2, 0x33333333);
SwapBits(rl_result.reg.GetLow(), 4, 0x0f0f0f0f);
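For context, the SwapBits sequence above performs the classic mask-and-shift bit reversal within each byte, and the earlier kOpRev handles byte order. A generic standalone illustration of that arithmetic (not the x86 codegen itself):

#include <cstdint>
#include <cstdio>

uint32_t SwapBits(uint32_t value, int bits, uint32_t mask) {
  return ((value >> bits) & mask) | ((value & mask) << bits);
}

uint32_t ReverseBytes32(uint32_t v) {  // What kOpRev (bswap) does.
  return (v >> 24) | ((v >> 8) & 0x0000ff00u) | ((v << 8) & 0x00ff0000u) | (v << 24);
}

uint32_t ReverseBits32(uint32_t value) {
  value = ReverseBytes32(value);           // Reverse byte order.
  value = SwapBits(value, 1, 0x55555555);  // Swap adjacent bits.
  value = SwapBits(value, 2, 0x33333333);  // Swap adjacent 2-bit pairs.
  value = SwapBits(value, 4, 0x0f0f0f0f);  // Swap adjacent nibbles.
  return value;
}

int main() {
  std::printf("%08x\n", ReverseBits32(1u));  // Prints 80000000.
}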
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 73e121f1cd..fdfeb485fd 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -989,6 +989,8 @@ void ImageWriter::CreateHeader(size_t oat_loaded_size, size_t oat_data_offset) {
CHECK_EQ(image_objects_offset_begin_ + bin_slot_previous_sizes_[kBinArtMethodClean],
methods_section->Offset());
cur_pos = methods_section->End();
+ // Round up to the alignment the string table expects. See HashSet::WriteToMemory.
+ cur_pos = RoundUp(cur_pos, sizeof(uint64_t));
// Calculate the size of the interned strings.
auto* interned_strings_section = &sections[ImageHeader::kSectionInternedStrings];
*interned_strings_section = ImageSection(cur_pos, intern_table_bytes_);
@@ -1417,9 +1419,6 @@ void ImageWriter::CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy) {
if (UNLIKELY(orig->IsAbstract())) {
copy->SetEntryPointFromQuickCompiledCodePtrSize(
GetOatAddress(quick_to_interpreter_bridge_offset_), target_ptr_size_);
- copy->SetEntryPointFromInterpreterPtrSize(
- reinterpret_cast<EntryPointFromInterpreter*>(const_cast<uint8_t*>(
- GetOatAddress(interpreter_to_interpreter_bridge_offset_))), target_ptr_size_);
} else {
bool quick_is_interpreted;
const uint8_t* quick_code = GetQuickCode(orig, &quick_is_interpreted);
@@ -1432,16 +1431,6 @@ void ImageWriter::CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy) {
copy->SetEntryPointFromJniPtrSize(
GetOatAddress(jni_dlsym_lookup_offset_), target_ptr_size_);
}
-
- // Interpreter entrypoint:
- // Set the interpreter entrypoint depending on whether there is compiled code or not.
- uint32_t interpreter_code = (quick_is_interpreted)
- ? interpreter_to_interpreter_bridge_offset_
- : interpreter_to_compiled_code_bridge_offset_;
- EntryPointFromInterpreter* interpreter_entrypoint =
- reinterpret_cast<EntryPointFromInterpreter*>(
- const_cast<uint8_t*>(GetOatAddress(interpreter_code)));
- copy->SetEntryPointFromInterpreterPtrSize(interpreter_entrypoint, target_ptr_size_);
}
}
}
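The alignment added in CreateHeader rounds the running section offset up to 8 bytes because HashSet::WriteToMemory expects an aligned start for the interned-string table. A standalone version of the usual power-of-two RoundUp helper, shown only to make the arithmetic concrete:

#include <cstdint>
#include <cstdio>

constexpr uint64_t RoundUp(uint64_t value, uint64_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);  // alignment must be a power of two.
}

int main() {
  std::printf("%llu\n", static_cast<unsigned long long>(RoundUp(1001, sizeof(uint64_t))));  // 1008
}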
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 9d45ce2bd4..754fe844f8 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -18,7 +18,7 @@
#define ART_COMPILER_IMAGE_WRITER_H_
#include <stdint.h>
-#include <valgrind.h>
+#include "base/memory_tool.h"
#include <cstddef>
#include <memory>
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index a1d8226f36..a122cebf50 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -71,18 +71,18 @@ JitCompiler::JitCompiler() : total_time_(0) {
CompilerOptions::kDefaultSmallMethodThreshold,
CompilerOptions::kDefaultTinyMethodThreshold,
CompilerOptions::kDefaultNumDexMethodsThreshold,
- false,
+ /* include_patch_information */ false,
CompilerOptions::kDefaultTopKProfileThreshold,
- false, // TODO: Think about debuggability of JIT-compiled code.
+ Runtime::Current()->IsDebuggable(),
CompilerOptions::kDefaultGenerateDebugInfo,
- false,
- false,
- false,
- false, // pic
- nullptr,
+ /* implicit_null_checks */ true,
+ /* implicit_so_checks */ true,
+ /* implicit_suspend_checks */ false,
+ /* pic */ true, // TODO: Support non-PIC in optimizing.
+ /* verbose_methods */ nullptr,
pass_manager_options,
- nullptr,
- false));
+ /* init_failure_output */ nullptr,
+ /* abort_on_hard_verifier_failure */ false));
const InstructionSet instruction_set = kRuntimeISA;
instruction_set_features_.reset(InstructionSetFeatures::FromCppDefines());
cumulative_logger_.reset(new CumulativeLogger("jit times"));
@@ -92,10 +92,23 @@ JitCompiler::JitCompiler() : total_time_(0) {
method_inliner_map_.get(),
CompilerCallbacks::CallbackMode::kCompileApp));
compiler_driver_.reset(new CompilerDriver(
- compiler_options_.get(), verification_results_.get(), method_inliner_map_.get(),
- Compiler::kQuick, instruction_set, instruction_set_features_.get(), false,
- nullptr, nullptr, nullptr, 1, false, true,
- std::string(), cumulative_logger_.get(), -1, std::string()));
+ compiler_options_.get(),
+ verification_results_.get(),
+ method_inliner_map_.get(),
+ Compiler::kOptimizing,
+ instruction_set,
+ instruction_set_features_.get(),
+ /* image */ false,
+ /* image_classes */ nullptr,
+ /* compiled_classes */ nullptr,
+ /* compiled_methods */ nullptr,
+ /* thread_count */ 1,
+ /* dump_stats */ false,
+ /* dump_passes */ false,
+ /* dump_cfg_file_name */ "",
+ cumulative_logger_.get(),
+ /* swap_fd */ -1,
+ /* profile_file */ ""));
// Disable dedupe so we can remove compiled methods.
compiler_driver_->SetDedupeEnabled(false);
compiler_driver_->SetSupportBootImageFixup(false);
@@ -195,9 +208,14 @@ uint8_t* JitCompiler::WriteMethodHeaderAndCode(const CompiledMethod* compiled_me
std::copy(quick_code->data(), quick_code->data() + code_size, code_ptr);
// After we are done writing we need to update the method header.
// Write out the method header last.
- method_header = new(method_header)OatQuickMethodHeader(
- code_ptr - mapping_table, code_ptr - vmap_table, code_ptr - gc_map, frame_size_in_bytes,
- core_spill_mask, fp_spill_mask, code_size);
+ method_header = new(method_header) OatQuickMethodHeader(
+ (mapping_table == nullptr) ? 0 : code_ptr - mapping_table,
+ (vmap_table == nullptr) ? 0 : code_ptr - vmap_table,
+ (gc_map == nullptr) ? 0 : code_ptr - gc_map,
+ frame_size_in_bytes,
+ core_spill_mask,
+ fp_spill_mask,
+ code_size);
// Return the code ptr.
return code_ptr;
}
@@ -216,23 +234,35 @@ bool JitCompiler::AddToCodeCache(ArtMethod* method, const CompiledMethod* compil
auto* const mapping_table = compiled_method->GetMappingTable();
auto* const vmap_table = compiled_method->GetVmapTable();
auto* const gc_map = compiled_method->GetGcMap();
- CHECK(gc_map != nullptr) << PrettyMethod(method);
- // Write out pre-header stuff.
- uint8_t* const mapping_table_ptr = code_cache->AddDataArray(
- self, mapping_table->data(), mapping_table->data() + mapping_table->size());
- if (mapping_table_ptr == nullptr) {
- return false; // Out of data cache.
+ uint8_t* mapping_table_ptr = nullptr;
+ uint8_t* vmap_table_ptr = nullptr;
+ uint8_t* gc_map_ptr = nullptr;
+
+ if (mapping_table != nullptr) {
+ // Write out pre-header stuff.
+ mapping_table_ptr = code_cache->AddDataArray(
+ self, mapping_table->data(), mapping_table->data() + mapping_table->size());
+ if (mapping_table_ptr == nullptr) {
+ return false; // Out of data cache.
+ }
}
- uint8_t* const vmap_table_ptr = code_cache->AddDataArray(
- self, vmap_table->data(), vmap_table->data() + vmap_table->size());
- if (vmap_table_ptr == nullptr) {
- return false; // Out of data cache.
+
+ if (vmap_table != nullptr) {
+ vmap_table_ptr = code_cache->AddDataArray(
+ self, vmap_table->data(), vmap_table->data() + vmap_table->size());
+ if (vmap_table_ptr == nullptr) {
+ return false; // Out of data cache.
+ }
}
- uint8_t* const gc_map_ptr = code_cache->AddDataArray(
- self, gc_map->data(), gc_map->data() + gc_map->size());
- if (gc_map_ptr == nullptr) {
- return false; // Out of data cache.
+
+ if (gc_map != nullptr) {
+ gc_map_ptr = code_cache->AddDataArray(
+ self, gc_map->data(), gc_map->data() + gc_map->size());
+ if (gc_map_ptr == nullptr) {
+ return false; // Out of data cache.
+ }
}
+
// Don't touch this until you protect / unprotect the code.
const size_t reserve_size = sizeof(OatQuickMethodHeader) + quick_code->size() + 32;
uint8_t* const code_reserve = code_cache->ReserveCode(self, reserve_size);
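The null guards above reflect a convention in WriteMethodHeaderAndCode: each optional table is stored in the OatQuickMethodHeader as a backwards offset from the code start, and a zero offset now means "no such table". A small sketch of that convention (helper name invented):

#include <cstdint>
#include <cstdio>

uint32_t TableOffsetFromCode(const uint8_t* code_ptr, const uint8_t* table_ptr) {
  // Tables are written into the data cache before the code, so the distance is positive.
  return (table_ptr == nullptr) ? 0u : static_cast<uint32_t>(code_ptr - table_ptr);
}

int main() {
  uint8_t buffer[64];
  std::printf("%u\n", TableOffsetFromCode(buffer + 32, buffer));    // 32
  std::printf("%u\n", TableOffsetFromCode(buffer + 32, nullptr));   // 0, i.e. "no table"
}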
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index a7eb36c2f8..1319f2c62a 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -259,14 +259,20 @@ bool HGraphBuilder::SkipCompilation(const DexFile::CodeItem& code_item,
return false;
}
-bool HGraphBuilder::IsBlockInPcRange(HBasicBlock* block,
- uint32_t dex_pc_start,
- uint32_t dex_pc_end) {
- uint32_t dex_pc = block->GetDexPc();
- return block != entry_block_
- && block != exit_block_
- && dex_pc >= dex_pc_start
- && dex_pc < dex_pc_end;
+static const DexFile::TryItem* GetTryItem(HBasicBlock* block,
+ const DexFile::CodeItem& code_item,
+ const ArenaBitVector& can_block_throw) {
+ DCHECK(!block->IsSingleTryBoundary());
+
+ // Block does not contain throwing instructions. Even if it is covered by
+ // a TryItem, we will consider it not in a try block.
+ if (!can_block_throw.IsBitSet(block->GetBlockId())) {
+ return nullptr;
+ }
+
+ // Instructions in the block may throw. Find a TryItem covering this block.
+ int32_t try_item_idx = DexFile::FindTryItem(code_item, block->GetDexPc());
+ return (try_item_idx == -1) ? nullptr : DexFile::GetTryItems(code_item, try_item_idx);
}
void HGraphBuilder::CreateBlocksForTryCatch(const DexFile::CodeItem& code_item) {
@@ -327,108 +333,129 @@ void HGraphBuilder::InsertTryBoundaryBlocks(const DexFile::CodeItem& code_item)
return;
}
- for (size_t idx = 0; idx < code_item.tries_size_; ++idx) {
- const DexFile::TryItem* try_item = DexFile::GetTryItems(code_item, idx);
- uint32_t try_start = try_item->start_addr_;
- uint32_t try_end = try_start + try_item->insn_count_;
-
- // Iterate over all blocks in the dex pc range of the TryItem and:
- // (a) split edges which enter/exit the try range,
- // (b) create TryBoundary instructions in the new blocks,
- // (c) link the new blocks to corresponding exception handlers.
- for (uint32_t inner_pc = try_start; inner_pc < try_end; ++inner_pc) {
- HBasicBlock* try_block = FindBlockStartingAt(inner_pc);
- if (try_block == nullptr) {
- continue;
+ // Bit vector stores information on which blocks contain throwing instructions.
+ // Must be expandable because catch blocks may be split into two.
+ ArenaBitVector can_block_throw(arena_, graph_->GetBlocks().Size(), /* expandable */ true);
+
+ // Scan blocks and mark those which contain throwing instructions.
+ for (size_t block_id = 0, e = graph_->GetBlocks().Size(); block_id < e; ++block_id) {
+ HBasicBlock* block = graph_->GetBlocks().Get(block_id);
+ bool can_throw = false;
+ for (HInstructionIterator insn(block->GetInstructions()); !insn.Done(); insn.Advance()) {
+ if (insn.Current()->CanThrow()) {
+ can_throw = true;
+ break;
}
+ }
- if (try_block->IsCatchBlock()) {
+ if (can_throw) {
+ if (block->IsCatchBlock()) {
// Catch blocks are always considered an entry point into the TryItem in
- // order to avoid splitting exceptional edges (they might not have been
- // created yet). We separate the move-exception (if present) from the
- // rest of the block and insert a TryBoundary after it, creating a
- // landing pad for the exceptional edges.
- HInstruction* first_insn = try_block->GetFirstInstruction();
- HInstruction* split_position = nullptr;
+ // order to avoid splitting exceptional edges. We split the block after
+ // the move-exception (if present) and mark the first part non-throwing.
+ // Later on, a TryBoundary will be inserted between the two blocks.
+ HInstruction* first_insn = block->GetFirstInstruction();
if (first_insn->IsLoadException()) {
// Catch block starts with a LoadException. Split the block after the
// StoreLocal that must come after the load.
DCHECK(first_insn->GetNext()->IsStoreLocal());
- split_position = first_insn->GetNext()->GetNext();
+ block = block->SplitBefore(first_insn->GetNext()->GetNext());
} else {
- // Catch block does not obtain the exception. Split at the beginning
- // to create an empty catch block.
- split_position = first_insn;
+ // Catch block does not load the exception. Split at the beginning to
+ // create an empty catch block.
+ block = block->SplitBefore(first_insn);
}
- DCHECK(split_position != nullptr);
- HBasicBlock* catch_block = try_block;
- try_block = catch_block->SplitBefore(split_position);
- SplitTryBoundaryEdge(catch_block, try_block, HTryBoundary::kEntry, code_item, *try_item);
- } else {
- // For non-catch blocks, find predecessors which are not covered by the
- // same TryItem range. Such edges enter the try block and will have
- // a TryBoundary inserted.
- for (size_t i = 0; i < try_block->GetPredecessors().Size(); ++i) {
- HBasicBlock* predecessor = try_block->GetPredecessors().Get(i);
- if (predecessor->IsSingleTryBoundary()) {
- // The edge was already split because of an exit from a neighbouring
- // TryItem. We split it again and insert an entry point.
- if (kIsDebugBuild) {
- HTryBoundary* last_insn = predecessor->GetLastInstruction()->AsTryBoundary();
- DCHECK(!last_insn->IsEntry());
- DCHECK_EQ(last_insn->GetNormalFlowSuccessor(), try_block);
- DCHECK(try_block->IsFirstIndexOfPredecessor(predecessor, i));
- DCHECK(!IsBlockInPcRange(predecessor->GetSinglePredecessor(), try_start, try_end));
- }
- } else if (!IsBlockInPcRange(predecessor, try_start, try_end)) {
- // This is an entry point into the TryItem and the edge has not been
- // split yet. That means that `predecessor` is not in a TryItem, or
- // it is in a different TryItem and we happened to iterate over this
- // block first. We split the edge and insert an entry point.
- } else {
- // Not an edge on the boundary of the try block.
- continue;
- }
- SplitTryBoundaryEdge(predecessor, try_block, HTryBoundary::kEntry, code_item, *try_item);
+ }
+ can_block_throw.SetBit(block->GetBlockId());
+ }
+ }
+
+ // Iterate over all blocks, find those covered by some TryItem and:
+ // (a) split edges which enter/exit the try range,
+ // (b) create TryBoundary instructions in the new blocks,
+ // (c) link the new blocks to corresponding exception handlers.
+ // We cannot iterate only over blocks in `branch_targets_` because switch-case
+ // blocks share the same dex_pc.
+ for (size_t block_id = 0, e = graph_->GetBlocks().Size(); block_id < e; ++block_id) {
+ HBasicBlock* try_block = graph_->GetBlocks().Get(block_id);
+
+ // TryBoundary blocks are added at the end of the list and not iterated over.
+ DCHECK(!try_block->IsSingleTryBoundary());
+
+ // Find the TryItem for this block.
+ const DexFile::TryItem* try_item = GetTryItem(try_block, code_item, can_block_throw);
+ if (try_item == nullptr) {
+ continue;
+ }
+
+ // Catch blocks were split earlier and cannot throw.
+ DCHECK(!try_block->IsCatchBlock());
+
+ // Find predecessors which are not covered by the same TryItem range. Such
+ // edges enter the try block and will have a TryBoundary inserted.
+ for (size_t i = 0; i < try_block->GetPredecessors().Size(); ++i) {
+ HBasicBlock* predecessor = try_block->GetPredecessors().Get(i);
+ if (predecessor->IsSingleTryBoundary()) {
+ // The edge was already split because of an exit from a neighbouring
+ // TryItem. We split it again and insert an entry point.
+ if (kIsDebugBuild) {
+ HTryBoundary* last_insn = predecessor->GetLastInstruction()->AsTryBoundary();
+ const DexFile::TryItem* predecessor_try_item =
+ GetTryItem(predecessor->GetSinglePredecessor(), code_item, can_block_throw);
+ DCHECK(!last_insn->IsEntry());
+ DCHECK_EQ(last_insn->GetNormalFlowSuccessor(), try_block);
+ DCHECK(try_block->IsFirstIndexOfPredecessor(predecessor, i));
+ DCHECK_NE(try_item, predecessor_try_item);
}
+ } else if (GetTryItem(predecessor, code_item, can_block_throw) != try_item) {
+ // This is an entry point into the TryItem and the edge has not been
+ // split yet. That means that `predecessor` is not in a TryItem, or
+ // it is in a different TryItem and we happened to iterate over this
+ // block first. We split the edge and insert an entry point.
+ } else {
+ // Not an edge on the boundary of the try block.
+ continue;
}
-
- // Find successors which are not covered by the same TryItem range. Such
- // edges exit the try block and will have a TryBoundary inserted.
- for (size_t i = 0; i < try_block->GetSuccessors().Size(); ++i) {
- HBasicBlock* successor = try_block->GetSuccessors().Get(i);
- if (successor->IsCatchBlock()) {
- // A catch block is always considered an entry point into its TryItem.
- // We therefore assume this is an exit point, regardless of whether
- // the catch block is in a different TryItem or not.
- } else if (successor->IsSingleTryBoundary()) {
- // The edge was already split because of an entry into a neighbouring
- // TryItem. We split it again and insert an exit.
- if (kIsDebugBuild) {
- HTryBoundary* last_insn = successor->GetLastInstruction()->AsTryBoundary();
- DCHECK_EQ(try_block, successor->GetSinglePredecessor());
- DCHECK(last_insn->IsEntry());
- DCHECK(!IsBlockInPcRange(last_insn->GetNormalFlowSuccessor(), try_start, try_end));
- }
- } else if (!IsBlockInPcRange(successor, try_start, try_end)) {
- // This is an exit out of the TryItem and the edge has not been split
- // yet. That means that either `successor` is not in a TryItem, or it
- // is in a different TryItem and we happened to iterate over this
- // block first. We split the edge and insert an exit.
- HInstruction* last_instruction = try_block->GetLastInstruction();
- if (last_instruction->IsReturn() || last_instruction->IsReturnVoid()) {
- DCHECK_EQ(successor, exit_block_);
- // Control flow exits the try block with a Return(Void). Because
- // splitting the edge would invalidate the invariant that Return
- // always jumps to Exit, we move the Return outside the try block.
- successor = try_block->SplitBefore(last_instruction);
- }
- } else {
- // Not an edge on the boundary of the try block.
- continue;
+ SplitTryBoundaryEdge(predecessor, try_block, HTryBoundary::kEntry, code_item, *try_item);
+ }
+
+ // Find successors which are not covered by the same TryItem range. Such
+ // edges exit the try block and will have a TryBoundary inserted.
+ for (size_t i = 0; i < try_block->GetSuccessors().Size(); ++i) {
+ HBasicBlock* successor = try_block->GetSuccessors().Get(i);
+ if (successor->IsCatchBlock()) {
+ // A catch block is always considered an entry point into its TryItem.
+ // We therefore assume this is an exit point, regardless of whether
+ // the catch block is in a different TryItem or not.
+ } else if (successor->IsSingleTryBoundary()) {
+ // The edge was already split because of an entry into a neighbouring
+ // TryItem. We split it again and insert an exit.
+ if (kIsDebugBuild) {
+ HTryBoundary* last_insn = successor->GetLastInstruction()->AsTryBoundary();
+ const DexFile::TryItem* successor_try_item =
+ GetTryItem(last_insn->GetNormalFlowSuccessor(), code_item, can_block_throw);
+ DCHECK_EQ(try_block, successor->GetSinglePredecessor());
+ DCHECK(last_insn->IsEntry());
+ DCHECK_NE(try_item, successor_try_item);
+ }
+ } else if (GetTryItem(successor, code_item, can_block_throw) != try_item) {
+ // This is an exit out of the TryItem and the edge has not been split
+ // yet. That means that either `successor` is not in a TryItem, or it
+ // is in a different TryItem and we happened to iterate over this
+ // block first. We split the edge and insert an exit.
+ HInstruction* last_instruction = try_block->GetLastInstruction();
+ if (last_instruction->IsReturn() || last_instruction->IsReturnVoid()) {
+ DCHECK_EQ(successor, exit_block_);
+ // Control flow exits the try block with a Return(Void). Because
+ // splitting the edge would invalidate the invariant that Return
+ // always jumps to Exit, we move the Return outside the try block.
+ successor = try_block->SplitBefore(last_instruction);
}
- SplitTryBoundaryEdge(try_block, successor, HTryBoundary::kExit, code_item, *try_item);
+ } else {
+ // Not an edge on the boundary of the try block.
+ continue;
}
+ SplitTryBoundaryEdge(try_block, successor, HTryBoundary::kExit, code_item, *try_item);
}
}
}
@@ -487,14 +514,14 @@ bool HGraphBuilder::BuildGraph(const DexFile::CodeItem& code_item) {
// Add the suspend check to the entry block.
entry_block_->AddInstruction(new (arena_) HSuspendCheck(0));
entry_block_->AddInstruction(new (arena_) HGoto());
+ // Add the exit block at the end.
+ graph_->AddBlock(exit_block_);
// Iterate over blocks covered by TryItems and insert TryBoundaries at entry
// and exit points. This requires all control-flow instructions and
// non-exceptional edges to have been created.
InsertTryBoundaryBlocks(code_item);
- // Add the exit block at the end to give it the highest id.
- graph_->AddBlock(exit_block_);
return true;
}
@@ -563,11 +590,10 @@ bool HGraphBuilder::ComputeBranchTargets(const uint16_t* code_ptr,
uint32_t target = dex_pc + table.GetEntryAt(i + offset);
FindOrCreateBlockStartingAt(target);
- // The next case gets its own block.
- if (i < num_entries) {
- block = new (arena_) HBasicBlock(graph_, target);
- branch_targets_.Put(table.GetDexPcForIndex(i), block);
- }
+ // Create a block for the switch-case logic. The block gets the dex_pc
+ // of the SWITCH instruction because it is part of its semantics.
+ block = new (arena_) HBasicBlock(graph_, dex_pc);
+ branch_targets_.Put(table.GetDexPcForIndex(i), block);
}
// Fall-through. Add a block if there is more code afterwards.
@@ -730,6 +756,35 @@ void HGraphBuilder::BuildReturn(const Instruction& instruction, Primitive::Type
current_block_ = nullptr;
}
+void HGraphBuilder::PotentiallySimplifyFakeString(uint16_t original_dex_register,
+ uint32_t dex_pc,
+ HInvoke* actual_string) {
+ if (!graph_->IsDebuggable()) {
+ // Notify that we cannot compile with baseline. The dex registers aliasing
+ // with `original_dex_register` will be handled when we optimize
+ // (see HInstructionSimplifier::VisitFakeString).
+ can_use_baseline_for_string_init_ = false;
+ return;
+ }
+ const VerifiedMethod* verified_method =
+ compiler_driver_->GetVerifiedMethod(dex_file_, dex_compilation_unit_->GetDexMethodIndex());
+ if (verified_method != nullptr) {
+ UpdateLocal(original_dex_register, actual_string);
+ const SafeMap<uint32_t, std::set<uint32_t>>& string_init_map =
+ verified_method->GetStringInitPcRegMap();
+ auto map_it = string_init_map.find(dex_pc);
+ if (map_it != string_init_map.end()) {
+ std::set<uint32_t> reg_set = map_it->second;
+ for (auto set_it = reg_set.begin(); set_it != reg_set.end(); ++set_it) {
+ HInstruction* load_local = LoadLocal(original_dex_register, Primitive::kPrimNot);
+ UpdateLocal(*set_it, load_local);
+ }
+ }
+ } else {
+ can_use_baseline_for_string_init_ = false;
+ }
+}
+
bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
uint32_t dex_pc,
uint32_t method_idx,
@@ -971,34 +1026,23 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
if (clinit_check_requirement == HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit) {
// Add the class initialization check as last input of `invoke`.
DCHECK(clinit_check != nullptr);
+ DCHECK(!is_string_init);
invoke->SetArgumentAt(argument_index, clinit_check);
+ argument_index++;
}
- current_block_->AddInstruction(invoke);
- latest_result_ = invoke;
-
// Add move-result for StringFactory method.
if (is_string_init) {
uint32_t orig_this_reg = is_range ? register_index : args[0];
- UpdateLocal(orig_this_reg, invoke);
- const VerifiedMethod* verified_method =
- compiler_driver_->GetVerifiedMethod(dex_file_, dex_compilation_unit_->GetDexMethodIndex());
- if (verified_method == nullptr) {
- LOG(WARNING) << "No verified method for method calling String.<init>: "
- << PrettyMethod(dex_compilation_unit_->GetDexMethodIndex(), *dex_file_);
- return false;
- }
- const SafeMap<uint32_t, std::set<uint32_t>>& string_init_map =
- verified_method->GetStringInitPcRegMap();
- auto map_it = string_init_map.find(dex_pc);
- if (map_it != string_init_map.end()) {
- std::set<uint32_t> reg_set = map_it->second;
- for (auto set_it = reg_set.begin(); set_it != reg_set.end(); ++set_it) {
- HInstruction* load_local = LoadLocal(orig_this_reg, Primitive::kPrimNot);
- UpdateLocal(*set_it, load_local);
- }
- }
+ HInstruction* fake_string = LoadLocal(orig_this_reg, Primitive::kPrimNot);
+ invoke->SetArgumentAt(argument_index, fake_string);
+ current_block_->AddInstruction(invoke);
+ PotentiallySimplifyFakeString(orig_this_reg, dex_pc, invoke);
+ } else {
+ current_block_->AddInstruction(invoke);
}
+ latest_result_ = invoke;
+
return true;
}
@@ -2213,10 +2257,10 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
case Instruction::NEW_INSTANCE: {
uint16_t type_index = instruction.VRegB_21c();
if (compiler_driver_->IsStringTypeIndex(type_index, dex_file_)) {
- // Turn new-instance of string into a const 0.
int32_t register_index = instruction.VRegA();
- HNullConstant* constant = graph_->GetNullConstant();
- UpdateLocal(register_index, constant);
+ HFakeString* fake_string = new (arena_) HFakeString();
+ current_block_->AddInstruction(fake_string);
+ UpdateLocal(register_index, fake_string);
} else {
QuickEntrypointEnum entrypoint = NeedsAccessCheck(type_index)
? kQuickAllocObjectWithAccessCheck
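Taken together, the builder changes make new-instance of String produce an HFakeString placeholder, turn the String.<init> call into a StringFactory invoke that carries the placeholder as its last input, and let the simplifier later redirect every remaining use of the placeholder to the factory's result. A toy sketch of that replace-the-placeholder step with invented types (Value, ReplaceWith), not the ART IR:

#include <cstdio>
#include <string>
#include <vector>

struct Value {
  std::string name;
  std::vector<Value**> use_slots;  // Places that currently reference this value.
};

void ReplaceWith(Value* placeholder, Value* actual) {
  for (Value** slot : placeholder->use_slots) {
    *slot = actual;                // Redirect each use from the placeholder to the real string.
  }
  placeholder->use_slots.clear();
}

int main() {
  Value fake{"HFakeString", {}};
  Value factory{"StringFactory result", {}};
  Value* local = &fake;            // The dex register initially holds the placeholder.
  fake.use_slots.push_back(&local);
  ReplaceWith(&fake, &factory);    // What the simplifier does once the factory call is known.
  std::printf("%s\n", local->name.c_str());
}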
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index e487255ac4..76610f5be2 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -54,6 +54,7 @@ class HGraphBuilder : public ValueObject {
return_type_(Primitive::GetType(dex_compilation_unit_->GetShorty()[0])),
code_start_(nullptr),
latest_result_(nullptr),
+ can_use_baseline_for_string_init_(true),
compilation_stats_(compiler_stats) {}
// Only for unit testing.
@@ -72,10 +73,15 @@ class HGraphBuilder : public ValueObject {
return_type_(return_type),
code_start_(nullptr),
latest_result_(nullptr),
+ can_use_baseline_for_string_init_(true),
compilation_stats_(nullptr) {}
bool BuildGraph(const DexFile::CodeItem& code);
+ bool CanUseBaselineForStringInit() const {
+ return can_use_baseline_for_string_init_;
+ }
+
static constexpr const char* kBuilderPassName = "builder";
private:
@@ -98,9 +104,6 @@ class HGraphBuilder : public ValueObject {
HBasicBlock* FindBlockStartingAt(int32_t dex_pc) const;
HBasicBlock* FindOrCreateBlockStartingAt(int32_t dex_pc);
- // Returns whether the dex_pc of `block` lies within the given range.
- bool IsBlockInPcRange(HBasicBlock* block, uint32_t dex_pc_start, uint32_t dex_pc_end);
-
// Adds new blocks to `branch_targets_` starting at the limits of TryItems and
// their exception handlers.
void CreateBlocksForTryCatch(const DexFile::CodeItem& code_item);
@@ -254,6 +257,10 @@ class HGraphBuilder : public ValueObject {
// Returns whether `type_index` points to the outer-most compiling method's class.
bool IsOutermostCompilingClass(uint16_t type_index) const;
+ void PotentiallySimplifyFakeString(uint16_t original_dex_register,
+ uint32_t dex_pc,
+ HInvoke* invoke);
+
ArenaAllocator* const arena_;
// A list of the size of the dex code holding block information for
@@ -293,6 +300,11 @@ class HGraphBuilder : public ValueObject {
// used by move-result instructions.
HInstruction* latest_result_;
+ // We need to know whether we have built a graph that has calls to StringFactory
+ // and hasn't gone through the verifier. If the following flag is `false`, then
+ // we cannot compile with baseline.
+ bool can_use_baseline_for_string_init_;
+
OptimizingCompilerStats* compilation_stats_;
DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 4cecd61365..eb63b49884 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -294,6 +294,12 @@ class CodeGenerator {
allocated_registers_.Add(location);
}
+ bool HasAllocatedRegister(bool is_core, int reg) const {
+ return is_core
+ ? allocated_registers_.ContainsCoreRegister(reg)
+ : allocated_registers_.ContainsFloatingPointRegister(reg);
+ }
+
void AllocateLocations(HInstruction* instruction);
// Tells whether the stack frame of the compiled method is
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index ff1232902f..75b8f068ab 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -4552,6 +4552,18 @@ void InstructionCodeGeneratorARM::VisitBoundType(HBoundType* instruction) {
LOG(FATAL) << "Unreachable";
}
+void LocationsBuilderARM::VisitFakeString(HFakeString* instruction) {
+ DCHECK(codegen_->IsBaseline());
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant()));
+}
+
+void InstructionCodeGeneratorARM::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) {
+ DCHECK(codegen_->IsBaseline());
+ // Will be generated at use site.
+}
+
#undef __
#undef QUICK_ENTRY_POINT
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index c06b39ba17..11de4ee673 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -656,6 +656,13 @@ void CodeGeneratorARM64::Move(HInstruction* instruction,
Primitive::Type type = instruction->GetType();
DCHECK_NE(type, Primitive::kPrimVoid);
+ if (instruction->IsFakeString()) {
+ // The fake string is an alias for null.
+ DCHECK(IsBaseline());
+ instruction = locations->Out().GetConstant();
+ DCHECK(instruction->IsNullConstant()) << instruction->DebugName();
+ }
+
if (instruction->IsCurrentMethod()) {
MoveLocation(location, Location::DoubleStackSlot(kCurrentMethodStackOffset));
} else if (locations != nullptr && locations->Out().Equals(location)) {
@@ -904,7 +911,7 @@ void CodeGeneratorARM64::MoveLocation(Location destination, Location source, Pri
(source.IsFpuRegister() == Primitive::IsFloatingPointType(type)));
__ Str(CPURegisterFrom(source, type), StackOperandFrom(destination));
} else if (source.IsConstant()) {
- DCHECK(unspecified_type || CoherentConstantAndType(source, type));
+ DCHECK(unspecified_type || CoherentConstantAndType(source, type)) << source << " " << type;
UseScratchRegisterScope temps(GetVIXLAssembler());
HConstant* src_cst = source.GetConstant();
CPURegister temp;
@@ -1476,9 +1483,8 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
source = HeapOperand(obj, offset);
} else {
Register temp = temps.AcquireSameSizeAs(obj);
- Register index_reg = RegisterFrom(index, Primitive::kPrimInt);
- __ Add(temp, obj, Operand(index_reg, LSL, Primitive::ComponentSizeShift(type)));
- source = HeapOperand(temp, offset);
+ __ Add(temp, obj, offset);
+ source = HeapOperand(temp, XRegisterFrom(index), LSL, Primitive::ComponentSizeShift(type));
}
codegen_->Load(type, OutputCPURegister(instruction), source);
@@ -1561,9 +1567,11 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
destination = HeapOperand(obj, offset);
} else {
Register temp = temps.AcquireSameSizeAs(obj);
- Register index_reg = InputRegisterAt(instruction, 1);
- __ Add(temp, obj, Operand(index_reg, LSL, Primitive::ComponentSizeShift(value_type)));
- destination = HeapOperand(temp, offset);
+ __ Add(temp, obj, offset);
+ destination = HeapOperand(temp,
+ XRegisterFrom(index),
+ LSL,
+ Primitive::ComponentSizeShift(value_type));
}
codegen_->Store(value_type, source, destination);
@@ -3092,6 +3100,18 @@ void InstructionCodeGeneratorARM64::VisitBoundType(HBoundType* instruction) {
LOG(FATAL) << "Unreachable";
}
+void LocationsBuilderARM64::VisitFakeString(HFakeString* instruction) {
+ DCHECK(codegen_->IsBaseline());
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant()));
+}
+
+void InstructionCodeGeneratorARM64::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) {
+ DCHECK(codegen_->IsBaseline());
+ // Will be generated at use site.
+}
+
#undef __
#undef QUICK_ENTRY_POINT
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index aa4fd26590..e7d2ec6341 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -3292,5 +3292,17 @@ void InstructionCodeGeneratorMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual
VisitCondition(comp);
}
+void LocationsBuilderMIPS64::VisitFakeString(HFakeString* instruction) {
+ DCHECK(codegen_->IsBaseline());
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant()));
+}
+
+void InstructionCodeGeneratorMIPS64::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) {
+ DCHECK(codegen_->IsBaseline());
+ // Will be generated at use site.
+}
+
} // namespace mips64
} // namespace art
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index fd4bd1803a..e15eff9056 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -4964,6 +4964,18 @@ void InstructionCodeGeneratorX86::VisitBoundType(HBoundType* instruction) {
LOG(FATAL) << "Unreachable";
}
+void LocationsBuilderX86::VisitFakeString(HFakeString* instruction) {
+ DCHECK(codegen_->IsBaseline());
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant()));
+}
+
+void InstructionCodeGeneratorX86::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) {
+ DCHECK(codegen_->IsBaseline());
+ // Will be generated at use site.
+}
+
#undef __
} // namespace x86
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index ae7bcc8f0e..a95ce68657 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -4774,6 +4774,18 @@ void InstructionCodeGeneratorX86_64::VisitBoundType(HBoundType* instruction) {
LOG(FATAL) << "Unreachable";
}
+void LocationsBuilderX86_64::VisitFakeString(HFakeString* instruction) {
+ DCHECK(codegen_->IsBaseline());
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant()));
+}
+
+void InstructionCodeGeneratorX86_64::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) {
+ DCHECK(codegen_->IsBaseline());
+ // Will be generated at use site.
+}
+
void CodeGeneratorX86_64::Load64BitValue(CpuRegister dest, int64_t value) {
if (value == 0) {
__ xorl(dest, dest);
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index 246fff99ac..f54547534f 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -151,6 +151,15 @@ static inline vixl::MemOperand HeapOperand(const vixl::Register& base, size_t of
return vixl::MemOperand(base.X(), offset);
}
+static inline vixl::MemOperand HeapOperand(const vixl::Register& base,
+ const vixl::Register& regoffset,
+ vixl::Shift shift = vixl::LSL,
+ unsigned shift_amount = 0) {
+ // A heap reference must be 32bit, so it fits in a W register.
+ DCHECK(base.IsW());
+ return vixl::MemOperand(base.X(), regoffset, shift, shift_amount);
+}
+
static inline vixl::MemOperand HeapOperand(const vixl::Register& base, Offset offset) {
return HeapOperand(base, offset.SizeValue());
}
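The new register-offset HeapOperand lets ArrayGet/ArraySet fold the data offset into the base with a single Add and let the load/store addressing mode apply the scaled index, i.e. [base + offset, index, LSL #shift]. A plain-C++ sketch of the address it encodes (ElementAddress is an invented name, not VIXL):

#include <cstdint>
#include <cstdio>

uint64_t ElementAddress(uint64_t array_base, uint32_t data_offset,
                        uint64_t index, unsigned component_size_shift) {
  uint64_t biased_base = array_base + data_offset;       // The single __ Add(temp, obj, offset).
  return biased_base + (index << component_size_shift);  // Applied by the addressing mode itself.
}

int main() {
  // e.g. an int[] with a 16-byte data offset: element 3 lives at base + 16 + 3 * 4.
  std::printf("%llu\n", static_cast<unsigned long long>(ElementAddress(0x1000, 16, 3, 2)));
}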
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 504c141799..37c060c1b1 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -357,6 +357,10 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
StartAttributeStream("kind") << barrier->GetBarrierKind();
}
+ void VisitMonitorOperation(HMonitorOperation* monitor) OVERRIDE {
+ StartAttributeStream("kind") << (monitor->IsEnter() ? "enter" : "exit");
+ }
+
void VisitLoadClass(HLoadClass* load_class) OVERRIDE {
StartAttributeStream("gen_clinit_check") << std::boolalpha
<< load_class->MustGenerateClinitCheck() << std::noboolalpha;
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 04ba4e18c8..b30b6c7bae 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -70,6 +70,7 @@ class InstructionSimplifierVisitor : public HGraphVisitor {
void VisitUShr(HUShr* instruction) OVERRIDE;
void VisitXor(HXor* instruction) OVERRIDE;
void VisitInstanceOf(HInstanceOf* instruction) OVERRIDE;
+ void VisitFakeString(HFakeString* fake_string) OVERRIDE;
bool IsDominatedByInputNullCheck(HInstruction* instr);
OptimizingCompilerStats* stats_;
@@ -903,4 +904,46 @@ void InstructionSimplifierVisitor::VisitXor(HXor* instruction) {
}
}
+void InstructionSimplifierVisitor::VisitFakeString(HFakeString* instruction) {
+ HInstruction* actual_string = nullptr;
+
+ // Find the string we need to replace this instruction with. The actual string is
+ // the return value of a StringFactory call.
+ for (HUseIterator<HInstruction*> it(instruction->GetUses()); !it.Done(); it.Advance()) {
+ HInstruction* use = it.Current()->GetUser();
+ if (use->IsInvokeStaticOrDirect()
+ && use->AsInvokeStaticOrDirect()->IsStringFactoryFor(instruction)) {
+ use->AsInvokeStaticOrDirect()->RemoveFakeStringArgumentAsLastInput();
+ actual_string = use;
+ break;
+ }
+ }
+
+ // Check that there is no other instruction that thinks it is the factory for that string.
+ if (kIsDebugBuild) {
+ CHECK(actual_string != nullptr);
+ for (HUseIterator<HInstruction*> it(instruction->GetUses()); !it.Done(); it.Advance()) {
+ HInstruction* use = it.Current()->GetUser();
+ if (use->IsInvokeStaticOrDirect()) {
+ CHECK(!use->AsInvokeStaticOrDirect()->IsStringFactoryFor(instruction));
+ }
+ }
+ }
+
+ // Environment uses of the fake string that are not dominated by `actual_string` must be
+ // reset to null.
+ for (HUseIterator<HEnvironment*> it(instruction->GetEnvUses()); !it.Done(); it.Advance()) {
+ HEnvironment* environment = it.Current()->GetUser();
+ if (!actual_string->StrictlyDominates(environment->GetHolder())) {
+ environment->RemoveAsUserOfInput(it.Current()->GetIndex());
+ environment->SetRawEnvAt(it.Current()->GetIndex(), nullptr);
+ }
+ }
+
+ // Only uses dominated by `actual_string` must remain. We can safely replace and remove
+ // `instruction`.
+ instruction->ReplaceWith(actual_string);
+ instruction->GetBlock()->RemoveInstruction(instruction);
+}
+
} // namespace art
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index f41a782fe6..4b250465aa 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -427,11 +427,11 @@ class RegisterSet : public ValueObject {
}
}
- bool ContainsCoreRegister(uint32_t id) {
+ bool ContainsCoreRegister(uint32_t id) const {
return Contains(core_registers_, id);
}
- bool ContainsFloatingPointRegister(uint32_t id) {
+ bool ContainsFloatingPointRegister(uint32_t id) const {
return Contains(floating_point_registers_, id);
}
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index d19170195a..8546a1066f 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -38,6 +38,7 @@ class HBasicBlock;
class HCurrentMethod;
class HDoubleConstant;
class HEnvironment;
+class HFakeString;
class HFloatConstant;
class HGraphBuilder;
class HGraphVisitor;
@@ -914,6 +915,7 @@ class HLoopInformationOutwardIterator : public ValueObject {
M(DoubleConstant, Constant) \
M(Equal, Condition) \
M(Exit, Instruction) \
+ M(FakeString, Instruction) \
M(FloatConstant, Constant) \
M(Goto, Instruction) \
M(GreaterThan, Condition) \
@@ -1339,8 +1341,7 @@ class HEnvironment : public ArenaObject<kArenaAllocMisc> {
const uint32_t dex_pc_;
const InvokeType invoke_type_;
- // The instruction that holds this environment. Only used in debug mode
- // to ensure the graph is consistent.
+ // The instruction that holds this environment.
HInstruction* const holder_;
friend class HInstruction;
@@ -2742,9 +2743,11 @@ class HInvokeStaticOrDirect : public HInvoke {
ClinitCheckRequirement clinit_check_requirement)
: HInvoke(arena,
number_of_arguments,
- // There is one extra argument for the HCurrentMethod node, and
- // potentially one other if the clinit check is explicit.
- clinit_check_requirement == ClinitCheckRequirement::kExplicit ? 2u : 1u,
+ // There is one extra argument for the HCurrentMethod node, and
+ // potentially one other if the clinit check is explicit, and one other
+ // if the method is a string factory.
+ 1u + (clinit_check_requirement == ClinitCheckRequirement::kExplicit ? 1u : 0u)
+ + (string_init_offset ? 1u : 0u),
return_type,
dex_pc,
dex_method_index,
@@ -2789,6 +2792,23 @@ class HInvokeStaticOrDirect : public HInvoke {
DCHECK(IsStaticWithImplicitClinitCheck());
}
+ bool IsStringFactoryFor(HFakeString* str) const {
+ if (!IsStringInit()) return false;
+ // +1 for the current method.
+ if (InputCount() == (number_of_arguments_ + 1)) return false;
+ return InputAt(InputCount() - 1)->AsFakeString() == str;
+ }
+
+ void RemoveFakeStringArgumentAsLastInput() {
+ DCHECK(IsStringInit());
+ size_t last_input_index = InputCount() - 1;
+ HInstruction* last_input = InputAt(last_input_index);
+ DCHECK(last_input != nullptr);
+ DCHECK(last_input->IsFakeString()) << last_input->DebugName();
+ RemoveAsUserOfInput(last_input_index);
+ inputs_.DeleteAt(last_input_index);
+ }
+
// Is this a call to a static method whose declaring class has an
// explicit initialization check in the graph?
bool IsStaticWithExplicitClinitCheck() const {
@@ -3773,6 +3793,7 @@ class HLoadClass : public HExpression<1> {
uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
uint16_t GetTypeIndex() const { return type_index_; }
bool IsReferrersClass() const { return is_referrers_class_; }
+ bool CanBeNull() const OVERRIDE { return false; }
bool NeedsEnvironment() const OVERRIDE {
// Will call runtime and load the class if the class is not loaded yet.
@@ -4142,13 +4163,19 @@ class HMonitorOperation : public HTemplateInstruction<1> {
};
HMonitorOperation(HInstruction* object, OperationKind kind, uint32_t dex_pc)
- : HTemplateInstruction(SideEffects::None()), kind_(kind), dex_pc_(dex_pc) {
+ : HTemplateInstruction(SideEffects::ChangesSomething()), kind_(kind), dex_pc_(dex_pc) {
SetRawInputAt(0, object);
}
// Instruction may throw a Java exception, so we need an environment.
- bool NeedsEnvironment() const OVERRIDE { return true; }
- bool CanThrow() const OVERRIDE { return true; }
+ bool NeedsEnvironment() const OVERRIDE { return CanThrow(); }
+
+ bool CanThrow() const OVERRIDE {
+ // Verifier guarantees that monitor-exit cannot throw.
+ // This is important because it allows the HGraphBuilder to remove
+ // a dead throw-catch loop generated for `synchronized` blocks/methods.
+ return IsEnter();
+ }
uint32_t GetDexPc() const OVERRIDE { return dex_pc_; }
@@ -4164,6 +4191,25 @@ class HMonitorOperation : public HTemplateInstruction<1> {
DISALLOW_COPY_AND_ASSIGN(HMonitorOperation);
};
+/**
+ * A HInstruction used as a marker for the replacement of new + <init>
+ * of a String to a call to a StringFactory. Only baseline will see
+ * the node at code generation, where it will be treated as null.
+ * When compiling non-baseline, `HFakeString` instructions are being removed
+ * in the instruction simplifier.
+ */
+class HFakeString : public HTemplateInstruction<0> {
+ public:
+ HFakeString() : HTemplateInstruction(SideEffects::None()) {}
+
+ Primitive::Type GetType() const OVERRIDE { return Primitive::kPrimNot; }
+
+ DECLARE_INSTRUCTION(FakeString);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HFakeString);
+};
+
class MoveOperands : public ArenaObject<kArenaAllocMisc> {
public:
MoveOperands(Location source,
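The revised input count in the HInvokeStaticOrDirect constructor adds up three contributions: every invoke carries the HCurrentMethod, an explicit clinit check adds one input, and a String-factory call adds the HFakeString as one more. A tiny sketch of that accounting (function name invented):

#include <cstddef>
#include <cstdio>

size_t NumberOfExtraInputs(bool explicit_clinit_check, bool is_string_init) {
  return 1u                                 // HCurrentMethod.
       + (explicit_clinit_check ? 1u : 0u)  // HClinitCheck, when required explicitly.
       + (is_string_init ? 1u : 0u);        // HFakeString placeholder for String.<init>.
}

int main() {
  std::printf("%zu\n", NumberOfExtraInputs(false, true));  // 2 extra inputs.
}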
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 1e515307b4..4568a463db 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -627,7 +627,7 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
// `run_optimizations_` is set explicitly (either through a compiler filter
// or the debuggable flag). If it is set, we can run baseline. Otherwise, we fall back
// to Quick.
- bool can_use_baseline = !run_optimizations_;
+ bool can_use_baseline = !run_optimizations_ && builder.CanUseBaselineForStringInit();
if (run_optimizations_ && can_optimize && can_allocate_registers) {
VLOG(compiler) << "Optimizing " << method_name;
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 3d6606b8dc..68316c2618 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -99,6 +99,12 @@ void ReferenceTypePropagation::BoundTypeForIfNotNull(HBasicBlock* block) {
return;
}
+ if (!obj->CanBeNull() || obj->IsNullConstant()) {
+ // Null check is dead code and will be removed by DCE.
+ return;
+ }
+ DCHECK(!obj->IsLoadClass()) << "We should not replace HLoadClass instructions";
+
// We only need to bound the type if we have uses in the relevant block.
// So start with null and create the HBoundType lazily, only if it's needed.
HBoundType* bound_type = nullptr;
@@ -160,6 +166,7 @@ void ReferenceTypePropagation::BoundTypeForIfInstanceOf(HBasicBlock* block) {
// input.
return;
}
+ DCHECK(!obj->IsLoadClass()) << "We should not replace HLoadClass instructions";
for (HUseIterator<HInstruction*> it(obj->GetUses()); !it.Done(); it.Advance()) {
HInstruction* user = it.Current()->GetUser();
if (instanceOfTrueBlock->Dominates(user->GetBlock())) {
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 7b23d020c2..72ddabe559 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -209,6 +209,8 @@ void RegisterAllocator::ProcessInstruction(HInstruction* instruction) {
Location temp = locations->GetTemp(i);
if (temp.IsRegister() || temp.IsFpuRegister()) {
BlockRegister(temp, position, position + 1);
+ // Ensure that an explicit temporary register is marked as being allocated.
+ codegen_->AddAllocatedRegister(temp);
} else {
DCHECK(temp.IsUnallocated());
switch (temp.GetPolicy()) {
@@ -507,6 +509,11 @@ bool RegisterAllocator::ValidateIntervals(const GrowableArray<LiveInterval*>& in
}
if (current->HasRegister()) {
+ if (kIsDebugBuild && log_fatal_on_failure && !current->IsFixed()) {
+ // Only check when an error is fatal. Only test code asks for non-fatal failures,
+ // and test code may not properly fill in the right information for the code generator.
+ CHECK(codegen.HasAllocatedRegister(processing_core_registers, current->GetRegister()));
+ }
BitVector* liveness_of_register = liveness_of_values.Get(current->GetRegister());
for (size_t j = it.CurrentRange()->GetStart(); j < it.CurrentRange()->GetEnd(); ++j) {
if (liveness_of_register->IsBitSet(j)) {
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 2dde0149a6..88b2f2cc4d 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -975,6 +975,7 @@ void Thumb2Assembler::vcmpdz(DRegister dd, Condition cond) {
}
void Thumb2Assembler::b(Label* label, Condition cond) {
+ DCHECK_EQ(next_condition_, AL);
EmitBranch(cond, label, false, false);
}