Merge "Follow up 137982"
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index 18d8c7a..130eed2 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -418,8 +418,8 @@
* Test successes
*/
{
- EXPECT_SINGLE_PARSE_VALUE(true, "-Xjit", M::UseJIT);
- EXPECT_SINGLE_PARSE_VALUE(false, "-Xnojit", M::UseJIT);
+ EXPECT_SINGLE_PARSE_VALUE(true, "-Xusejit:true", M::UseJIT);
+ EXPECT_SINGLE_PARSE_VALUE(false, "-Xusejit:false", M::UseJIT);
}
{
EXPECT_SINGLE_PARSE_VALUE(MemoryKiB(16 * KB), "-Xjitcodecachesize:16K", M::JITCodeCacheCapacity);
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 0be9fd4..0bac511 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -1067,7 +1067,10 @@
return lhs.LiteralOffset() < rhs.LiteralOffset();
});
- std::unique_ptr<std::vector<uint8_t>> cfi_info(ReturnFrameDescriptionEntry());
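+ // Only produce the frame description entry (FDE) when GDB debug information was requested.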
+ std::unique_ptr<std::vector<uint8_t>> cfi_info(
+ cu_->compiler_driver->GetCompilerOptions().GetGenerateGDBInformation() ?
+ ReturnFrameDescriptionEntry() :
+ nullptr);
ArrayRef<const uint8_t> cfi_ref;
if (cfi_info.get() != nullptr) {
cfi_ref = ArrayRef<const uint8_t>(*cfi_info);
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 8f97d1e..dbe4848 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -2202,6 +2202,7 @@
StoreFinalValue(rl_dest, rl_result);
} else {
// Do the addition directly to memory.
+ ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
OpMemReg(kOpAdd, rl_result, temp.GetReg());
}
}
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 029fd46..df2b520 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -358,6 +358,7 @@
image_(image),
image_classes_(image_classes),
classes_to_compile_(compiled_classes),
+ had_hard_verifier_failure_(false),
thread_count_(thread_count),
stats_(new AOTCompilationStats),
dedupe_enabled_(true),
@@ -616,6 +617,11 @@
Verify(class_loader, dex_files, thread_pool, timings);
VLOG(compiler) << "Verify: " << GetMemoryUsageString(false);
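+ // Abort the whole compilation if verification hit a hard failure and the driver was
+ // configured (--abort-on-hard-verifier-error) to treat that as fatal.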
+ if (had_hard_verifier_failure_ && GetCompilerOptions().AbortOnHardVerifierFailure()) {
+ LOG(FATAL) << "Had a hard failure verifying all classes, and was asked to abort in such "
+ << "situations. Please check the log.";
+ }
+
InitializeClasses(class_loader, dex_files, thread_pool, timings);
VLOG(compiler) << "InitializeClasses: " << GetMemoryUsageString(false);
@@ -871,6 +877,11 @@
const char* name = klass->GetDescriptor(&temp);
if (data->image_class_descriptors_->find(name) != data->image_class_descriptors_->end()) {
data->image_classes_.push_back(klass);
+ } else {
+ // Check whether it is initialized and has a clinit. Such classes must be kept, too.
+ if (klass->IsInitialized() && klass->FindClassInitializer() != nullptr) {
+ data->image_classes_.push_back(klass);
+ }
}
return true;
@@ -1834,6 +1845,7 @@
verifier::MethodVerifier::kHardFailure) {
LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(descriptor)
<< " because: " << error_msg;
+ manager->GetCompiler()->SetHadHardVerifierFailure();
}
} else if (!SkipClass(jclass_loader, dex_file, klass.Get())) {
CHECK(klass->IsResolved()) << PrettyClass(klass.Get());
@@ -1843,6 +1855,7 @@
// ClassLinker::VerifyClass throws, which isn't useful in the compiler.
CHECK(soa.Self()->IsExceptionPending());
soa.Self()->ClearException();
+ manager->GetCompiler()->SetHadHardVerifierFailure();
}
CHECK(klass->IsCompileTimeVerified() || klass->IsErroneous())
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 24b6f17..f949667 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -437,6 +437,10 @@
// Get memory usage during compilation.
std::string GetMemoryUsageString(bool extended) const;
+ void SetHadHardVerifierFailure() {
+ had_hard_verifier_failure_ = true;
+ }
+
private:
// These flags are internal to CompilerDriver for collecting INVOKE resolution statistics.
// The only external contract is that unresolved method has flags 0 and resolved non-0.
@@ -575,6 +579,8 @@
// included in the image.
std::unique_ptr<std::set<std::string>> classes_to_compile_;
+ bool had_hard_verifier_failure_;
+
size_t thread_count_;
class AOTCompilationStats;
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index 09ec9a2..e436f52 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -30,6 +30,7 @@
generate_gdb_information_(false),
include_patch_information_(kDefaultIncludePatchInformation),
top_k_profile_threshold_(kDefaultTopKProfileThreshold),
+ debuggable_(false),
include_debug_symbols_(kDefaultIncludeDebugSymbols),
implicit_null_checks_(true),
implicit_so_checks_(true),
@@ -37,6 +38,7 @@
compile_pic_(false),
verbose_methods_(nullptr),
pass_manager_options_(new PassManagerOptions),
+ abort_on_hard_verifier_failure_(false),
init_failure_output_(nullptr) {
}
@@ -49,6 +51,7 @@
bool generate_gdb_information,
bool include_patch_information,
double top_k_profile_threshold,
+ bool debuggable,
bool include_debug_symbols,
bool implicit_null_checks,
bool implicit_so_checks,
@@ -56,7 +59,8 @@
bool compile_pic,
const std::vector<std::string>* verbose_methods,
PassManagerOptions* pass_manager_options,
- std::ostream* init_failure_output
+ std::ostream* init_failure_output,
+ bool abort_on_hard_verifier_failure
) : // NOLINT(whitespace/parens)
compiler_filter_(compiler_filter),
huge_method_threshold_(huge_method_threshold),
@@ -67,6 +71,7 @@
generate_gdb_information_(generate_gdb_information),
include_patch_information_(include_patch_information),
top_k_profile_threshold_(top_k_profile_threshold),
+ debuggable_(debuggable),
include_debug_symbols_(include_debug_symbols),
implicit_null_checks_(implicit_null_checks),
implicit_so_checks_(implicit_so_checks),
@@ -74,6 +79,7 @@
compile_pic_(compile_pic),
verbose_methods_(verbose_methods),
pass_manager_options_(pass_manager_options),
+ abort_on_hard_verifier_failure_(abort_on_hard_verifier_failure),
init_failure_output_(init_failure_output) {
}
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 0683d18..5042c75 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -62,6 +62,7 @@
bool generate_gdb_information,
bool include_patch_information,
double top_k_profile_threshold,
+ bool debuggable,
bool include_debug_symbols,
bool implicit_null_checks,
bool implicit_so_checks,
@@ -69,7 +70,8 @@
bool compile_pic,
const std::vector<std::string>* verbose_methods,
PassManagerOptions* pass_manager_options,
- std::ostream* init_failure_output);
+ std::ostream* init_failure_output,
+ bool abort_on_hard_verifier_failure);
CompilerFilter GetCompilerFilter() const {
return compiler_filter_;
@@ -128,6 +130,10 @@
return top_k_profile_threshold_;
}
+ bool GetDebuggable() const {
+ return debuggable_;
+ }
+
bool GetIncludeDebugSymbols() const {
return include_debug_symbols_;
}
@@ -178,6 +184,10 @@
return pass_manager_options_.get();
}
+ bool AbortOnHardVerifierFailure() const {
+ return abort_on_hard_verifier_failure_;
+ }
+
private:
CompilerFilter compiler_filter_;
const size_t huge_method_threshold_;
@@ -189,6 +199,7 @@
const bool include_patch_information_;
// When using a profile file only the top K% of the profiled samples will be compiled.
const double top_k_profile_threshold_;
+ const bool debuggable_;
const bool include_debug_symbols_;
const bool implicit_null_checks_;
const bool implicit_so_checks_;
@@ -200,6 +211,10 @@
std::unique_ptr<PassManagerOptions> pass_manager_options_;
+ // Abort compilation with an error if we find a class that fails verification with a hard
+ // failure.
+ const bool abort_on_hard_verifier_failure_;
+
// Log initialization of initialization failures to this stream if not null.
std::ostream* const init_failure_output_;
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 0283791..04efa21 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -72,6 +72,7 @@
false,
false,
CompilerOptions::kDefaultTopKProfileThreshold,
+ false, // TODO: Think about debuggability of JIT-compiled code.
false,
false,
false,
@@ -79,7 +80,8 @@
false, // pic
nullptr,
pass_manager_options,
- nullptr));
+ nullptr,
+ false));
const InstructionSet instruction_set = kRuntimeISA;
instruction_set_features_.reset(InstructionSetFeatures::FromCppDefines());
cumulative_logger_.reset(new CumulativeLogger("jit times"));
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index 4ca3648..1d16794 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -944,6 +944,10 @@
};
void BoundsCheckElimination::Run() {
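+ // The graph builder sets this flag; without array accesses there are no bounds checks to eliminate.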
+ if (!graph_->HasArrayAccesses()) {
+ return;
+ }
+
BCEVisitor visitor(graph_);
// Reverse post order guarantees a node's dominators are visited first.
// We want to visit in the dominator-based order since if a value is known to
diff --git a/compiler/optimizing/bounds_check_elimination_test.cc b/compiler/optimizing/bounds_check_elimination_test.cc
index a298413..24fa583 100644
--- a/compiler/optimizing/bounds_check_elimination_test.cc
+++ b/compiler/optimizing/bounds_check_elimination_test.cc
@@ -43,6 +43,7 @@
ArenaAllocator allocator(&pool);
HGraph* graph = new (&allocator) HGraph(&allocator);
+ graph->SetHasArrayAccesses(true);
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
@@ -148,6 +149,7 @@
ArenaAllocator allocator(&pool);
HGraph* graph = new (&allocator) HGraph(&allocator);
+ graph->SetHasArrayAccesses(true);
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
@@ -221,6 +223,7 @@
ArenaAllocator allocator(&pool);
HGraph* graph = new (&allocator) HGraph(&allocator);
+ graph->SetHasArrayAccesses(true);
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
@@ -294,6 +297,7 @@
ArenaAllocator allocator(&pool);
HGraph* graph = new (&allocator) HGraph(&allocator);
+ graph->SetHasArrayAccesses(true);
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
@@ -369,6 +373,7 @@
int increment,
IfCondition cond = kCondGE) {
HGraph* graph = new (allocator) HGraph(allocator);
+ graph->SetHasArrayAccesses(true);
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
@@ -507,6 +512,7 @@
int increment = -1,
IfCondition cond = kCondLE) {
HGraph* graph = new (allocator) HGraph(allocator);
+ graph->SetHasArrayAccesses(true);
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
@@ -640,6 +646,7 @@
int increment,
IfCondition cond) {
HGraph* graph = new (allocator) HGraph(allocator);
+ graph->SetHasArrayAccesses(true);
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
@@ -752,6 +759,7 @@
int initial,
IfCondition cond = kCondGE) {
HGraph* graph = new (allocator) HGraph(allocator);
+ graph->SetHasArrayAccesses(true);
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
@@ -879,6 +887,7 @@
ArenaAllocator allocator(&pool);
HGraph* graph = new (&allocator) HGraph(&allocator);
+ graph->SetHasArrayAccesses(true);
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 20a1b03..2cac93d 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -840,6 +840,7 @@
current_block_->AddInstruction(new (arena_) HArrayGet(object, index, anticipated_type));
UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction());
}
+ graph_->SetHasArrayAccesses(true);
}
void HGraphBuilder::BuildFilledNewArray(uint32_t dex_pc,
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index ba5f7d8..ed3f949 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -216,6 +216,29 @@
}
}
+void CodeGenerator::BlockIfInRegister(Location location, bool is_out) const {
+ // The DCHECKs below verify that a register is not specified twice in
+ // the summary. The out location may overlap with an input, so it needs
+ // to be special-cased.
+ if (location.IsRegister()) {
+ DCHECK(is_out || !blocked_core_registers_[location.reg()]);
+ blocked_core_registers_[location.reg()] = true;
+ } else if (location.IsFpuRegister()) {
+ DCHECK(is_out || !blocked_fpu_registers_[location.reg()]);
+ blocked_fpu_registers_[location.reg()] = true;
+ } else if (location.IsFpuRegisterPair()) {
+ DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairLow<int>()]);
+ blocked_fpu_registers_[location.AsFpuRegisterPairLow<int>()] = true;
+ DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairHigh<int>()]);
+ blocked_fpu_registers_[location.AsFpuRegisterPairHigh<int>()] = true;
+ } else if (location.IsRegisterPair()) {
+ DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairLow<int>()]);
+ blocked_core_registers_[location.AsRegisterPairLow<int>()] = true;
+ DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairHigh<int>()]);
+ blocked_core_registers_[location.AsRegisterPairHigh<int>()] = true;
+ }
+}
+
void CodeGenerator::AllocateRegistersLocally(HInstruction* instruction) const {
LocationSummary* locations = instruction->GetLocations();
if (locations == nullptr) return;
@@ -234,46 +257,19 @@
// Mark all fixed input, temp and output registers as used.
for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
- Location loc = locations->InAt(i);
- // The DCHECKS below check that a register is not specified twice in
- // the summary.
- if (loc.IsRegister()) {
- DCHECK(!blocked_core_registers_[loc.reg()]);
- blocked_core_registers_[loc.reg()] = true;
- } else if (loc.IsFpuRegister()) {
- DCHECK(!blocked_fpu_registers_[loc.reg()]);
- blocked_fpu_registers_[loc.reg()] = true;
- } else if (loc.IsFpuRegisterPair()) {
- DCHECK(!blocked_fpu_registers_[loc.AsFpuRegisterPairLow<int>()]);
- blocked_fpu_registers_[loc.AsFpuRegisterPairLow<int>()] = true;
- DCHECK(!blocked_fpu_registers_[loc.AsFpuRegisterPairHigh<int>()]);
- blocked_fpu_registers_[loc.AsFpuRegisterPairHigh<int>()] = true;
- } else if (loc.IsRegisterPair()) {
- DCHECK(!blocked_core_registers_[loc.AsRegisterPairLow<int>()]);
- blocked_core_registers_[loc.AsRegisterPairLow<int>()] = true;
- DCHECK(!blocked_core_registers_[loc.AsRegisterPairHigh<int>()]);
- blocked_core_registers_[loc.AsRegisterPairHigh<int>()] = true;
- }
+ BlockIfInRegister(locations->InAt(i));
}
for (size_t i = 0, e = locations->GetTempCount(); i < e; ++i) {
Location loc = locations->GetTemp(i);
- // The DCHECKS below check that a register is not specified twice in
- // the summary.
- if (loc.IsRegister()) {
- DCHECK(!blocked_core_registers_[loc.reg()]);
- blocked_core_registers_[loc.reg()] = true;
- } else if (loc.IsFpuRegister()) {
- DCHECK(!blocked_fpu_registers_[loc.reg()]);
- blocked_fpu_registers_[loc.reg()] = true;
- } else {
- DCHECK(loc.GetPolicy() == Location::kRequiresRegister
- || loc.GetPolicy() == Location::kRequiresFpuRegister);
- }
+ BlockIfInRegister(loc);
+ }
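+ // When the out location is fixed and may overlap its inputs, block it too; is_out
+ // relaxes the double-booking DCHECK, since the output is allowed to share a
+ // register with an input.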
+ Location result_location = locations->Out();
+ if (locations->OutputCanOverlapWithInputs()) {
+ BlockIfInRegister(result_location, /* is_out */ true);
}
- static constexpr bool kBaseline = true;
- SetupBlockedRegisters(kBaseline);
+ SetupBlockedRegisters(/* is_baseline */ true);
// Allocate all unallocated input locations.
for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
@@ -318,7 +314,6 @@
locations->SetTempAt(i, loc);
}
}
- Location result_location = locations->Out();
if (result_location.IsUnallocated()) {
switch (result_location.GetPolicy()) {
case Location::kAny:
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index f46a36d..5146afa 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -378,6 +378,7 @@
void InitLocationsBaseline(HInstruction* instruction);
size_t GetStackOffsetOfSavedRegister(size_t index);
void CompileInternal(CodeAllocator* allocator, bool is_baseline);
+ void BlockIfInRegister(Location location, bool is_out = false) const;
HGraph* const graph_;
const CompilerOptions& compiler_options_;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 3c8f62c..4b8addd 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -37,14 +37,13 @@
static constexpr Register kRuntimeParameterCoreRegisters[] = { EAX, ECX, EDX, EBX };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
arraysize(kRuntimeParameterCoreRegisters);
+static constexpr Register kCoreCalleeSaves[] = { EBP, ESI, EDI };
static constexpr XmmRegister kRuntimeParameterFpuRegisters[] = { XMM0, XMM1, XMM2, XMM3 };
static constexpr size_t kRuntimeParameterFpuRegistersLength =
arraysize(kRuntimeParameterFpuRegisters);
static constexpr int kC2ConditionMask = 0x400;
-// Marker for places that can be updated once we don't follow the quick ABI.
-static constexpr bool kFollowsQuickABI = true;
static constexpr int kFakeReturnRegister = Register(8);
class InvokeRuntimeCallingConvention : public CallingConvention<Register, XmmRegister> {
@@ -371,8 +370,15 @@
}
CodeGeneratorX86::CodeGeneratorX86(HGraph* graph, const CompilerOptions& compiler_options)
- : CodeGenerator(graph, kNumberOfCpuRegisters, kNumberOfXmmRegisters,
- kNumberOfRegisterPairs, (1 << kFakeReturnRegister), 0, compiler_options),
+ : CodeGenerator(graph,
+ kNumberOfCpuRegisters,
+ kNumberOfXmmRegisters,
+ kNumberOfRegisterPairs,
+ ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
+ arraysize(kCoreCalleeSaves))
+ | (1 << kFakeReturnRegister),
+ 0,
+ compiler_options),
block_labels_(graph->GetArena(), 0),
location_builder_(graph, this),
instruction_visitor_(graph, this),
@@ -427,18 +433,18 @@
return Location();
}
-void CodeGeneratorX86::SetupBlockedRegisters(bool is_baseline ATTRIBUTE_UNUSED) const {
+void CodeGeneratorX86::SetupBlockedRegisters(bool is_baseline) const {
// Don't allocate the dalvik style register pair passing.
blocked_register_pairs_[ECX_EDX] = true;
// Stack register is always reserved.
blocked_core_registers_[ESP] = true;
- // TODO: We currently don't use Quick's callee saved registers.
- DCHECK(kFollowsQuickABI);
- blocked_core_registers_[EBP] = true;
- blocked_core_registers_[ESI] = true;
- blocked_core_registers_[EDI] = true;
+ if (is_baseline) {
+ blocked_core_registers_[EBP] = true;
+ blocked_core_registers_[ESI] = true;
+ blocked_core_registers_[EDI] = true;
+ }
UpdateBlockedPairRegisters();
}
@@ -470,15 +476,33 @@
RecordPcInfo(nullptr, 0);
}
- if (!HasEmptyFrame()) {
- __ subl(ESP, Immediate(GetFrameSize() - FrameEntrySpillSize()));
- __ movl(Address(ESP, kCurrentMethodStackOffset), EAX);
+ if (HasEmptyFrame()) {
+ return;
}
+
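+ // Spill the callee-save registers the allocator actually used. Pushing in reverse
+ // order lets GenerateFrameExit pop them back in forward order.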
+ for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) {
+ Register reg = kCoreCalleeSaves[i];
+ if (allocated_registers_.ContainsCoreRegister(reg)) {
+ __ pushl(reg);
+ }
+ }
+
+ __ subl(ESP, Immediate(GetFrameSize() - FrameEntrySpillSize()));
+ __ movl(Address(ESP, kCurrentMethodStackOffset), EAX);
}
void CodeGeneratorX86::GenerateFrameExit() {
- if (!HasEmptyFrame()) {
- __ addl(ESP, Immediate(GetFrameSize() - FrameEntrySpillSize()));
+ if (HasEmptyFrame()) {
+ return;
+ }
+
+ __ addl(ESP, Immediate(GetFrameSize() - FrameEntrySpillSize()));
+
+ for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
+ Register reg = kCoreCalleeSaves[i];
+ if (allocated_registers_.ContainsCoreRegister(reg)) {
+ __ popl(reg);
+ }
}
}
@@ -907,7 +931,8 @@
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::Any());
if (comp->NeedsMaterialization()) {
- locations->SetOut(Location::RequiresRegister());
+ // We need a byte register: materializing the condition uses setcc, which on
+ // x86 can only write the low byte of EAX/ECX/EDX/EBX.
+ locations->SetOut(Location::RegisterLocation(ECX));
}
}
@@ -1345,8 +1370,10 @@
case Primitive::kPrimInt:
case Primitive::kPrimChar:
// Processing a Dex `int-to-byte' instruction.
- locations->SetInAt(0, Location::Any());
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ locations->SetInAt(0, Location::ByteRegisterOrConstant(ECX, conversion->InputAt(0)));
+ // Make the output overlap to please the register allocator. This greatly simplifies
+ // the validation of the linear scan implementation.
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
break;
default:
@@ -3161,15 +3188,16 @@
}
void LocationsBuilderX86::VisitArraySet(HArraySet* instruction) {
+ // This location builder might end up asking for up to four registers, which is
+ // not currently possible for baseline. The situation in which we would need four
+ // registers cannot arise with baseline, though, because it does not run any
+ // optimizations.
+
Primitive::Type value_type = instruction->GetComponentType();
bool needs_write_barrier =
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
- DCHECK(kFollowsQuickABI);
- bool not_enough_registers = needs_write_barrier
- && !instruction->GetValue()->IsConstant()
- && !instruction->GetIndex()->IsConstant();
- bool needs_runtime_call = instruction->NeedsTypeCheck() || not_enough_registers;
+ bool needs_runtime_call = instruction->NeedsTypeCheck();
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
instruction,
@@ -3389,7 +3417,7 @@
void LocationsBuilderX86::VisitBoundsCheck(HBoundsCheck* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
locations->SetInAt(1, Location::RequiresRegister());
if (instruction->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -3398,15 +3426,20 @@
void InstructionCodeGeneratorX86::VisitBoundsCheck(HBoundsCheck* instruction) {
LocationSummary* locations = instruction->GetLocations();
- SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathX86(
- instruction, locations->InAt(0), locations->InAt(1));
+ Location index_loc = locations->InAt(0);
+ Location length_loc = locations->InAt(1);
+ SlowPathCodeX86* slow_path =
+ new (GetGraph()->GetArena()) BoundsCheckSlowPathX86(instruction, index_loc, length_loc);
codegen_->AddSlowPath(slow_path);
- Register index = locations->InAt(0).AsRegister<Register>();
- Register length = locations->InAt(1).AsRegister<Register>();
-
- __ cmpl(index, length);
- __ j(kAboveEqual, slow_path->GetEntryLabel());
+ Register length = length_loc.AsRegister<Register>();
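+ // The operands are swapped relative to the old code so that a constant index can
+ // be encoded as an immediate; the unsigned condition therefore flips from
+ // kAboveEqual to kBelowEqual (both jump to the slow path when index >= length).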
+ if (index_loc.IsConstant()) {
+ int32_t value = CodeGenerator::GetInt32ValueOf(index_loc.GetConstant());
+ __ cmpl(length, Immediate(value));
+ } else {
+ __ cmpl(length, index_loc.AsRegister<Register>());
+ }
+ __ j(kBelowEqual, slow_path->GetEntryLabel());
}
void LocationsBuilderX86::VisitTemporary(HTemporary* temp) {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 6365bca..e7e6fff 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -3164,7 +3164,7 @@
void LocationsBuilderX86_64::VisitBoundsCheck(HBoundsCheck* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
locations->SetInAt(1, Location::RequiresRegister());
if (instruction->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
@@ -3173,15 +3173,20 @@
void InstructionCodeGeneratorX86_64::VisitBoundsCheck(HBoundsCheck* instruction) {
LocationSummary* locations = instruction->GetLocations();
- SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathX86_64(
- instruction, locations->InAt(0), locations->InAt(1));
+ Location index_loc = locations->InAt(0);
+ Location length_loc = locations->InAt(1);
+ SlowPathCodeX86_64* slow_path =
+ new (GetGraph()->GetArena()) BoundsCheckSlowPathX86_64(instruction, index_loc, length_loc);
codegen_->AddSlowPath(slow_path);
- CpuRegister index = locations->InAt(0).AsRegister<CpuRegister>();
- CpuRegister length = locations->InAt(1).AsRegister<CpuRegister>();
-
- __ cmpl(index, length);
- __ j(kAboveEqual, slow_path->GetEntryLabel());
+ CpuRegister length = length_loc.AsRegister<CpuRegister>();
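+ // The operands are swapped relative to the old code so that a constant index can
+ // be encoded as an immediate; the unsigned condition therefore flips from
+ // kAboveEqual to kBelowEqual (both jump to the slow path when index >= length).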
+ if (index_loc.IsConstant()) {
+ int32_t value = CodeGenerator::GetInt32ValueOf(index_loc.GetConstant());
+ __ cmpl(length, Immediate(value));
+ } else {
+ __ cmpl(length, index_loc.AsRegister<CpuRegister>());
+ }
+ __ j(kBelowEqual, slow_path->GetEntryLabel());
}
void CodeGeneratorX86_64::MarkGCCard(CpuRegister temp,
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index d55a3ca..b34957a 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -216,6 +216,10 @@
callee_graph->InlineInto(graph_, invoke_instruction);
+ if (callee_graph->HasArrayAccesses()) {
+ graph_->SetHasArrayAccesses(true);
+ }
+
// Now that we have inlined the callee, we need to update the next
// instruction id of the caller, so that new instructions added
// after optimizations get a unique id.
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 98076a0..b7dd756 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -113,6 +113,7 @@
number_of_vregs_(0),
number_of_in_vregs_(0),
temporaries_vreg_slots_(0),
+ has_array_accesses_(false),
current_instruction_id_(start_instruction_id) {}
ArenaAllocator* GetArena() const { return arena_; }
@@ -199,6 +200,14 @@
return reverse_post_order_;
}
+ bool HasArrayAccesses() const {
+ return has_array_accesses_;
+ }
+
+ void SetHasArrayAccesses(bool value) {
+ has_array_accesses_ = value;
+ }
+
HNullConstant* GetNullConstant();
private:
@@ -236,6 +245,9 @@
// Number of vreg size slots that the temporaries use (used in baseline compiler).
size_t temporaries_vreg_slots_;
+ // Whether the graph has any array accesses. If false, BCE can be skipped entirely.
+ bool has_array_accesses_;
+
// The current id to assign to a newly added instruction. See HInstruction.id_.
int32_t current_instruction_id_;
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 54e62a5..748ab22 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -1381,6 +1381,7 @@
: Location::StackSlot(interval->GetParent()->GetSpillSlot()));
}
UsePosition* use = current->GetFirstUse();
+ size_t safepoint_index = safepoints_.Size();
// Walk over all siblings, updating locations of use positions, and
// connecting them when they are adjacent.
@@ -1422,19 +1423,27 @@
}
// At each safepoint, we record stack and register information.
- for (size_t i = 0, e = safepoints_.Size(); i < e; ++i) {
- HInstruction* safepoint = safepoints_.Get(i);
+ // safepoints_ is sorted by descending lifetime position, so we iterate backwards
+ // to test safepoints in ascending order of positions, which is what
+ // LiveInterval::Covers is optimized for.
+ for (; safepoint_index > 0; --safepoint_index) {
+ HInstruction* safepoint = safepoints_.Get(safepoint_index - 1);
size_t position = safepoint->GetLifetimePosition();
- LocationSummary* locations = safepoint->GetLocations();
- if (!current->Covers(position)) {
+
+ // Test that safepoints are ordered in the optimal way.
+ DCHECK(safepoint_index == safepoints_.Size()
+ || safepoints_.Get(safepoint_index)->GetLifetimePosition() <= position);
+
+ if (current->IsDeadAt(position)) {
+ break;
+ } else if (!current->Covers(position)) {
continue;
- }
- if (interval->GetStart() == position) {
+ } else if (interval->GetStart() == position) {
// The safepoint is for this instruction, so the location of the instruction
// does not need to be saved.
continue;
}
+ LocationSummary* locations = safepoint->GetLocations();
if ((current->GetType() == Primitive::kPrimNot) && current->GetParent()->HasSpillSlot()) {
locations->SetStackBit(current->GetParent()->GetSpillSlot() / kVRegSize);
}
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index df7bb57..8572f4d 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -487,10 +487,12 @@
// Profile file to use
double top_k_profile_threshold = CompilerOptions::kDefaultTopKProfileThreshold;
+ bool debuggable = false;
bool include_patch_information = CompilerOptions::kDefaultIncludePatchInformation;
bool include_debug_symbols = kIsDebugBuild;
bool watch_dog_enabled = true;
bool generate_gdb_information = kIsDebugBuild;
+ bool abort_on_hard_verifier_error = false;
PassManagerOptions pass_manager_options;
@@ -675,6 +677,8 @@
} else if (option == "--no-include-debug-symbols" || option == "--strip-symbols") {
include_debug_symbols = false;
generate_gdb_information = false; // Depends on debug symbols, see above.
+ } else if (option == "--debuggable") {
+ debuggable = true;
} else if (option.starts_with("--profile-file=")) {
profile_file_ = option.substr(strlen("--profile-file=")).data();
VLOG(compiler) << "dex2oat: profile file is " << profile_file_;
@@ -729,6 +733,8 @@
if (swap_fd_ < 0) {
Usage("--swap-fd passed a negative value %d", swap_fd_);
}
+ } else if (option == "--abort-on-hard-verifier-error") {
+ abort_on_hard_verifier_error = true;
} else {
Usage("Unknown argument %s", option.data());
}
@@ -915,6 +921,10 @@
break;
}
+ if (debuggable) {
+ // TODO: Consider adding CFI info and symbols here.
+ }
+
compiler_options_.reset(new CompilerOptions(compiler_filter,
huge_method_threshold,
large_method_threshold,
@@ -924,6 +934,7 @@
generate_gdb_information,
include_patch_information,
top_k_profile_threshold,
+ debuggable,
include_debug_symbols,
implicit_null_checks,
implicit_so_checks,
@@ -933,7 +944,8 @@
nullptr :
&verbose_methods_,
new PassManagerOptions(pass_manager_options),
- init_failure_output_.get()));
+ init_failure_output_.get(),
+ abort_on_hard_verifier_error));
// Done with usage checks, enable watchdog if requested
if (watch_dog_enabled) {
diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h
index 8720f0e..2d67c8b 100644
--- a/runtime/base/allocator.h
+++ b/runtime/base/allocator.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_BASE_ALLOCATOR_H_
#include <map>
+#include <set>
#include "atomic.h"
#include "base/macros.h"
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 19a4bd0..edd8bfe 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -100,7 +100,8 @@
if (IsZipMagic(magic)) {
std::unique_ptr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(fd.release(), filename, error_msg));
if (zip_archive.get() == NULL) {
- *error_msg = StringPrintf("Failed to open zip archive '%s'", file_part);
+ *error_msg = StringPrintf("Failed to open zip archive '%s' (error msg: %s)", file_part,
+ error_msg->c_str());
return false;
}
std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(zip_entry_name, error_msg));
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 057eed1..dd45eca 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -206,7 +206,7 @@
}
cc->is_marking_ = true;
if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
- CHECK(Runtime::Current()->IsCompiler());
+ CHECK(Runtime::Current()->IsAotCompiler());
TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
Runtime::Current()->VisitTransactionRoots(ConcurrentCopying::ProcessRootCallback, cc);
}
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 754602f..604e133 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -911,7 +911,7 @@
} else if (name == "java.lang.Class java.lang.Void.lookupType()") {
result->SetL(Runtime::Current()->GetClassLinker()->FindPrimitiveClass('V'));
} else if (name == "java.lang.Object java.lang.Class.newInstance()") {
- StackHandleScope<2> hs(self);
+ StackHandleScope<3> hs(self); // Class, constructor, object.
Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
Handle<Class> h_klass(hs.NewHandle(klass));
// There are two situations in which we'll abort this run.
@@ -920,13 +920,15 @@
// Note that 2) could likely be handled here, but for safety abort the transaction.
bool ok = false;
if (Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_klass, true, true)) {
- ArtMethod* c = h_klass->FindDeclaredDirectMethod("<init>", "()V");
- if (c != nullptr) {
- Handle<Object> obj(hs.NewHandle(klass->AllocObject(self)));
- CHECK(obj.Get() != nullptr); // We don't expect OOM at compile-time.
- EnterInterpreterFromInvoke(self, c, obj.Get(), nullptr, nullptr);
- result->SetL(obj.Get());
- ok = true;
+ Handle<ArtMethod> h_cons(hs.NewHandle(h_klass->FindDeclaredDirectMethod("<init>", "()V")));
+ if (h_cons.Get() != nullptr) {
+ Handle<Object> h_obj(hs.NewHandle(klass->AllocObject(self)));
+ CHECK(h_obj.Get() != nullptr); // We don't expect OOM at compile-time.
+ EnterInterpreterFromInvoke(self, h_cons.Get(), h_obj.Get(), nullptr, nullptr);
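+ // Only publish the result if the constructor did not throw.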
+ if (!self->IsExceptionPending()) {
+ result->SetL(h_obj.Get());
+ ok = true;
+ }
} else {
self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(), "Ljava/lang/InternalError;",
"Could not find default constructor for '%s'",
@@ -964,8 +966,8 @@
}
}
CHECK(found != NULL)
- << "Failed to find field in Class.getDeclaredField in un-started runtime. name="
- << name2->ToModifiedUtf8() << " class=" << PrettyDescriptor(klass);
+ << "Failed to find field in Class.getDeclaredField in un-started runtime. name="
+ << name2->ToModifiedUtf8() << " class=" << PrettyDescriptor(klass);
// TODO: getDeclaredField calls GetType once the field is found to ensure a
// NoClassDefFoundError is thrown if the field's type cannot be resolved.
Class* jlr_Field = self->DecodeJObject(WellKnownClasses::java_lang_reflect_Field)->AsClass();
@@ -1046,22 +1048,24 @@
std::string caller2(PrettyMethod(shadow_frame->GetLink()->GetLink()->GetMethod()));
if (caller2 == "java.lang.String java.lang.Double.toString(double)") {
// Allocate new object.
- mirror::Class* real_to_string_class =
- shadow_frame->GetLink()->GetMethod()->GetDeclaringClass();
- mirror::Object* real_to_string_obj = real_to_string_class->AllocObject(self);
- if (real_to_string_obj != nullptr) {
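+ // Use handles: AllocObject and the constructor invocation below may trigger GC.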
+ StackHandleScope<2> hs(self);
+ Handle<Class> h_real_to_string_class(hs.NewHandle(
+ shadow_frame->GetLink()->GetMethod()->GetDeclaringClass()));
+ Handle<Object> h_real_to_string_obj(hs.NewHandle(
+ h_real_to_string_class->AllocObject(self)));
+ if (h_real_to_string_obj.Get() != nullptr) {
mirror::ArtMethod* init_method =
- real_to_string_class->FindDirectMethod("<init>", "()V");
+ h_real_to_string_class->FindDirectMethod("<init>", "()V");
if (init_method == nullptr) {
- real_to_string_class->DumpClass(LOG(FATAL), mirror::Class::kDumpClassFullDetail);
- }
- JValue invoke_result;
- // One arg, this.
- uint32_t args = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(real_to_string_obj));
- init_method->Invoke(self, &args, 4, &invoke_result, init_method->GetShorty());
- if (!self->IsExceptionPending()) {
- result->SetL(real_to_string_obj);
- ok = true;
+ h_real_to_string_class->DumpClass(LOG(FATAL), mirror::Class::kDumpClassFullDetail);
+ } else {
+ JValue invoke_result;
+ EnterInterpreterFromInvoke(self, init_method, h_real_to_string_obj.Get(), nullptr,
+ nullptr);
+ if (!self->IsExceptionPending()) {
+ result->SetL(h_real_to_string_obj.Get());
+ ok = true;
+ }
}
}
diff --git a/runtime/jdwp/jdwp_main.cc b/runtime/jdwp/jdwp_main.cc
index b6fedd9..3d69796 100644
--- a/runtime/jdwp/jdwp_main.cc
+++ b/runtime/jdwp/jdwp_main.cc
@@ -408,7 +408,7 @@
void JdwpState::Run() {
Runtime* runtime = Runtime::Current();
CHECK(runtime->AttachCurrentThread("JDWP", true, runtime->GetSystemThreadGroup(),
- !runtime->IsCompiler()));
+ !runtime->IsAotCompiler()));
VLOG(jdwp) << "JDWP: thread running";
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 9d87ed7..607569a 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -146,8 +146,9 @@
.Define({"-XX:EnableHSpaceCompactForOOM", "-XX:DisableHSpaceCompactForOOM"})
.WithValues({true, false})
.IntoKey(M::EnableHSpaceCompactForOOM)
- .Define({"-Xjit", "-Xnojit"})
- .WithValues({true, false})
+ .Define("-Xusejit:_")
+ .WithType<bool>()
+ .WithValueMap({{"false", false}, {"true", true}})
.IntoKey(M::UseJIT)
.Define("-Xjitcodecachesize:_")
.WithType<MemoryKiB>()
@@ -642,8 +643,7 @@
UsageMessage(stream, " -Xcompiler-option dex2oat-option\n");
UsageMessage(stream, " -Ximage-compiler-option dex2oat-option\n");
UsageMessage(stream, " -Xpatchoat:filename\n");
- UsageMessage(stream, " -Xjit\n");
- UsageMessage(stream, " -Xnojit\n");
+ UsageMessage(stream, " -Xusejit:booleanvalue\n");
UsageMessage(stream, " -X[no]relocate\n");
UsageMessage(stream, " -X[no]dex2oat (Whether to invoke dex2oat on the application)\n");
UsageMessage(stream, " -X[no]image-dex2oat (Whether to create and use a boot image)\n");
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 383308c..35a9e6f 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -846,7 +846,7 @@
Dbg::ConfigureJdwp(runtime_options.GetOrDefault(Opt::JdwpOptions));
}
- if (!IsCompiler()) {
+ if (!IsAotCompiler()) {
// If we are already the compiler at this point, we must be dex2oat. Don't create the jit in
// this case.
// If runtime_options doesn't have UseJIT set to true then CreateFromRuntimeArguments returns
@@ -1551,14 +1551,14 @@
if (!IsActiveTransaction()) {
return false;
} else {
- DCHECK(IsCompiler());
+ DCHECK(IsAotCompiler());
return preinitialization_transaction_->IsAborted();
}
}
void Runtime::AbortTransactionAndThrowInternalError(Thread* self,
const std::string& abort_message) {
- DCHECK(IsCompiler());
+ DCHECK(IsAotCompiler());
DCHECK(IsActiveTransaction());
// Throwing an exception may cause its class initialization. If we mark the transaction
// aborted before that, we may warn with a false alarm. Throwing the exception before
@@ -1568,35 +1568,35 @@
}
void Runtime::ThrowInternalErrorForAbortedTransaction(Thread* self) {
- DCHECK(IsCompiler());
+ DCHECK(IsAotCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->ThrowInternalError(self, true);
}
void Runtime::RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset,
uint8_t value, bool is_volatile) const {
- DCHECK(IsCompiler());
+ DCHECK(IsAotCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordWriteFieldBoolean(obj, field_offset, value, is_volatile);
}
void Runtime::RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset,
int8_t value, bool is_volatile) const {
- DCHECK(IsCompiler());
+ DCHECK(IsAotCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordWriteFieldByte(obj, field_offset, value, is_volatile);
}
void Runtime::RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset,
uint16_t value, bool is_volatile) const {
- DCHECK(IsCompiler());
+ DCHECK(IsAotCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordWriteFieldChar(obj, field_offset, value, is_volatile);
}
void Runtime::RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset,
int16_t value, bool is_volatile) const {
- DCHECK(IsCompiler());
+ DCHECK(IsAotCompiler());
DCHECK(IsActiveTransaction());
preinitialization_transaction_->RecordWriteFieldShort(obj, field_offset, value, is_volatile);
}
@@ -1674,6 +1674,10 @@
std::string feature_string("--instruction-set-features=");
feature_string += features->GetFeatureString();
argv->push_back(feature_string);
+
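+ // A JDWP-configured runtime needs debuggable generated code; pass the flag on to dex2oat.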
+ if (Dbg::IsJdwpConfigured()) {
+ argv->push_back("--debuggable");
+ }
}
void Runtime::UpdateProfilerState(int state) {
diff --git a/test/456-baseline-array-set/expected.txt b/test/456-baseline-array-set/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/456-baseline-array-set/expected.txt
diff --git a/test/456-baseline-array-set/info.txt b/test/456-baseline-array-set/info.txt
new file mode 100644
index 0000000..cf4137a
--- /dev/null
+++ b/test/456-baseline-array-set/info.txt
@@ -0,0 +1,3 @@
+Test for optimizing on x86, where we could run out
+of available registers when using the baseline register
+allocator.
diff --git a/test/456-baseline-array-set/src/Main.java b/test/456-baseline-array-set/src/Main.java
new file mode 100644
index 0000000..5475b41
--- /dev/null
+++ b/test/456-baseline-array-set/src/Main.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ doArrayAccess(new Integer(1), 0);
+ }
+
+ public static void doArrayAccess(Integer value, int index) {
+ try {
+ Integer[] array = new Integer[2];
+ // If we were to do optimization on the baseline register
+ // allocator, generating code for the array set would fail on x86.
+ array[index] = array[index + 1];
+ array[index] = value;
+ } catch (ArrayStoreException e) {
+ throw e;
+ }
+ }
+}