diff options
| -rw-r--r-- | compiler/optimizing/code_generator_arm_vixl.cc | 12 | ||||
| -rw-r--r-- | compiler/optimizing/codegen_test.cc | 25 | ||||
| -rw-r--r-- | compiler/verifier_deps_test.cc | 10 | ||||
| -rw-r--r-- | runtime/art_method.h | 1 | ||||
| -rw-r--r-- | runtime/verifier/verifier_deps.cc | 20 |
5 files changed, 51 insertions(+), 17 deletions(-)
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc index f108595a00..00ad3e34b7 100644 --- a/compiler/optimizing/code_generator_arm_vixl.cc +++ b/compiler/optimizing/code_generator_arm_vixl.cc @@ -5681,13 +5681,13 @@ void ParallelMoveResolverARMVIXL::Exchange(vixl32::Register reg, int mem) { void ParallelMoveResolverARMVIXL::Exchange(int mem1, int mem2) { // TODO(VIXL32): Double check the performance of this implementation. UseScratchRegisterScope temps(GetAssembler()->GetVIXLAssembler()); - vixl32::Register temp = temps.Acquire(); - vixl32::SRegister temp_s = temps.AcquireS(); + vixl32::SRegister temp_1 = temps.AcquireS(); + vixl32::SRegister temp_2 = temps.AcquireS(); - __ Ldr(temp, MemOperand(sp, mem1)); - __ Vldr(temp_s, MemOperand(sp, mem2)); - __ Str(temp, MemOperand(sp, mem2)); - __ Vstr(temp_s, MemOperand(sp, mem1)); + __ Vldr(temp_1, MemOperand(sp, mem1)); + __ Vldr(temp_2, MemOperand(sp, mem2)); + __ Vstr(temp_1, MemOperand(sp, mem2)); + __ Vstr(temp_2, MemOperand(sp, mem1)); } void ParallelMoveResolverARMVIXL::EmitSwap(size_t index) { diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc index ac83bd9b0c..879b4ce59e 100644 --- a/compiler/optimizing/codegen_test.cc +++ b/compiler/optimizing/codegen_test.cc @@ -1041,6 +1041,31 @@ TEST_F(CodegenTest, ComparisonsLong) { } } +#ifdef ART_ENABLE_CODEGEN_arm +TEST_F(CodegenTest, ARMVIXLParallelMoveResolver) { + std::unique_ptr<const ArmInstructionSetFeatures> features( + ArmInstructionSetFeatures::FromCppDefines()); + ArenaPool pool; + ArenaAllocator allocator(&pool); + HGraph* graph = CreateGraph(&allocator); + arm::CodeGeneratorARMVIXL codegen(graph, *features.get(), CompilerOptions()); + + codegen.Initialize(); + + // This will result in calling EmitSwap -> void ParallelMoveResolverARMVIXL::Exchange(int mem1, + // int mem2) which was faulty (before the fix). 
So previously GPR and FP scratch registers were + // used as temps; however GPR scratch register is required for big stack offsets which don't fit + // LDR encoding. So the following code is a regression test for that situation. + HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena()); + move->AddMove(Location::StackSlot(0), Location::StackSlot(8192), Primitive::kPrimInt, nullptr); + move->AddMove(Location::StackSlot(8192), Location::StackSlot(0), Primitive::kPrimInt, nullptr); + codegen.GetMoveResolver()->EmitNativeCode(move); + + InternalCodeAllocator code_allocator; + codegen.Finalize(&code_allocator); +} +#endif + #ifdef ART_ENABLE_CODEGEN_mips TEST_F(CodegenTest, MipsClobberRA) { std::unique_ptr<const MipsInstructionSetFeatures> features_mips( diff --git a/compiler/verifier_deps_test.cc b/compiler/verifier_deps_test.cc index 85ae61f1bd..e716cdbed8 100644 --- a/compiler/verifier_deps_test.cc +++ b/compiler/verifier_deps_test.cc @@ -1101,6 +1101,16 @@ TEST_F(VerifierDepsTest, InvokeSuper_ThisNotAssignable) { "virtual", "Ljava/lang/Integer;", "intValue", "()I", true, "public", "Ljava/lang/Integer;")); } +TEST_F(VerifierDepsTest, ArgumentType_ResolvedReferenceArray) { + ASSERT_TRUE(VerifyMethod("ArgumentType_ResolvedReferenceArray")); + ASSERT_TRUE(HasClass("[Ljava/lang/Thread;", true, "public final abstract")); +} + +TEST_F(VerifierDepsTest, NewArray_Resolved) { + ASSERT_TRUE(VerifyMethod("NewArray_Resolved")); + ASSERT_TRUE(HasClass("[Ljava/lang/IllegalStateException;", true, "public final abstract")); +} + TEST_F(VerifierDepsTest, EncodeDecode) { VerifyDexFile(); diff --git a/runtime/art_method.h b/runtime/art_method.h index 963a541741..b38508b757 100644 --- a/runtime/art_method.h +++ b/runtime/art_method.h @@ -257,7 +257,6 @@ class ArtMethod FINAL { } void SetSkipAccessChecks() { - DCHECK(!SkipAccessChecks()); AddAccessFlags(kAccSkipAccessChecks); } diff --git a/runtime/verifier/verifier_deps.cc b/runtime/verifier/verifier_deps.cc index 
5f94a1bd9b..c4058d63ee 100644 --- a/runtime/verifier/verifier_deps.cc +++ b/runtime/verifier/verifier_deps.cc @@ -245,18 +245,18 @@ std::string VerifierDeps::GetStringFromId(const DexFile& dex_file, dex::StringIn bool VerifierDeps::IsInClassPath(ObjPtr<mirror::Class> klass) const { DCHECK(klass != nullptr); - ObjPtr<mirror::DexCache> dex_cache = klass->GetDexCache(); - if (dex_cache == nullptr) { - // This is a synthesized class, in this case always an array. They are not - // defined in the compiled DEX files and therefore are part of the classpath. - // We do not record dependencies on arrays with component types in - // the compiled DEX files, as the only thing that might change is their - // access flags. If we were to change these flags in a breaking way, we would - // need to enforce full verification again anyways by updating the vdex version. - DCHECK(klass->IsArrayClass()) << klass->PrettyDescriptor(); - return false; + // For array types, we return whether the non-array component type + // is in the classpath. + while (klass->IsArrayClass()) { + klass = klass->GetComponentType(); } + if (klass->IsPrimitive()) { + return true; + } + + ObjPtr<mirror::DexCache> dex_cache = klass->GetDexCache(); + DCHECK(dex_cache != nullptr); const DexFile* dex_file = dex_cache->GetDexFile(); DCHECK(dex_file != nullptr); |