-rw-r--r--  adbconnection/adbconnection.cc  25
-rw-r--r--  adbconnection/adbconnection.h  9
-rw-r--r--  compiler/jit/jit_compiler.cc  2
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc  8
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc  29
-rw-r--r--  compiler/optimizing/code_generator_mips.cc  78
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc  76
-rw-r--r--  compiler/optimizing/code_generator_vector_arm64.cc  24
-rw-r--r--  compiler/optimizing/code_generator_vector_arm_vixl.cc  20
-rw-r--r--  compiler/optimizing/code_generator_vector_mips.cc  44
-rw-r--r--  compiler/optimizing/code_generator_vector_mips64.cc  44
-rw-r--r--  compiler/optimizing/code_generator_vector_x86.cc  24
-rw-r--r--  compiler/optimizing/code_generator_vector_x86_64.cc  24
-rw-r--r--  compiler/optimizing/code_generator_x86.cc  12
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc  12
-rw-r--r--  compiler/optimizing/data_type-inl.h  2
-rw-r--r--  compiler/optimizing/data_type.cc  2
-rw-r--r--  compiler/optimizing/data_type.h  27
-rw-r--r--  compiler/optimizing/graph_visualizer.cc  11
-rw-r--r--  compiler/optimizing/loop_optimization.cc  68
-rw-r--r--  compiler/optimizing/nodes_vector.h  72
-rw-r--r--  compiler/optimizing/nodes_vector_test.cc  168
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc  2
-rw-r--r--  compiler/optimizing/register_allocation_resolver.cc  2
-rw-r--r--  compiler/optimizing/register_allocator_graph_color.cc  2
-rw-r--r--  compiler/optimizing/register_allocator_linear_scan.cc  2
-rw-r--r--  openjdkjvmti/OpenjdkJvmTi.cc  1
-rw-r--r--  openjdkjvmti/fixed_up_dex_file.cc  7
-rw-r--r--  openjdkjvmti/fixed_up_dex_file.h  3
-rw-r--r--  openjdkjvmti/ti_class_definition.cc  93
-rw-r--r--  openjdkjvmti/ti_class_definition.h  35
-rw-r--r--  openjdkjvmti/transform.cc  173
-rw-r--r--  openjdkjvmti/transform.h  2
-rw-r--r--  profman/profile_assistant.cc  21
-rw-r--r--  profman/profile_assistant.h  11
-rw-r--r--  profman/profile_assistant_test.cc  108
-rw-r--r--  profman/profman.cc  136
-rw-r--r--  runtime/arch/stub_test.cc  10
-rw-r--r--  runtime/base/mutex.cc  6
-rw-r--r--  runtime/base/mutex.h  7
-rw-r--r--  runtime/class_linker.cc  18
-rw-r--r--  runtime/class_linker.h  1
-rw-r--r--  runtime/dex/compact_dex_file.h  6
-rw-r--r--  runtime/dex/dex_file.h  7
-rw-r--r--  runtime/dex/standard_dex_file.h  4
-rw-r--r--  runtime/fault_handler.cc  26
-rw-r--r--  runtime/gc/heap-inl.h  11
-rw-r--r--  runtime/instrumentation.cc  4
-rw-r--r--  runtime/interpreter/interpreter_common.cc  7
-rw-r--r--  runtime/jit/debugger_interface.cc  62
-rw-r--r--  runtime/jit/debugger_interface.h  20
-rw-r--r--  runtime/jit/jit_code_cache.cc  4
-rw-r--r--  runtime/mem_map.cc  115
-rw-r--r--  runtime/mem_map.h  33
-rw-r--r--  runtime/mem_map_test.cc  200
-rw-r--r--  runtime/native/dalvik_system_DexFile.cc  2
-rw-r--r--  test/603-checker-instanceof/src/Main.java  2
-rw-r--r--  test/651-checker-int-simd-minmax/src/Main.java  16
-rw-r--r--  test/710-varhandle-creation/src-art/Main.java  24
-rw-r--r--  test/714-invoke-custom-lambda-metafactory/build  22
-rw-r--r--  test/714-invoke-custom-lambda-metafactory/expected.txt  4
-rw-r--r--  test/714-invoke-custom-lambda-metafactory/info.txt  1
-rw-r--r--  test/714-invoke-custom-lambda-metafactory/src/Main.java  32
-rw-r--r--  test/983-source-transform-verify/source_transform.cc  8
-rwxr-xr-x  test/etc/run-test-jar  2
-rw-r--r--  test/testrunner/target_config.py  24
-rw-r--r--  tools/prebuilt_libjdwp_art_failures.txt  2
67 files changed, 1477 insertions(+), 582 deletions(-)
diff --git a/adbconnection/adbconnection.cc b/adbconnection/adbconnection.cc
index bdcdcbe23f..6b82be6193 100644
--- a/adbconnection/adbconnection.cc
+++ b/adbconnection/adbconnection.cc
@@ -139,7 +139,8 @@ AdbConnectionState::AdbConnectionState(const std::string& agent_name)
sent_agent_fds_(false),
performed_handshake_(false),
notified_ddm_active_(false),
- next_ddm_id_(1) {
+ next_ddm_id_(1),
+ started_debugger_threads_(false) {
// Setup the addr.
control_addr_.controlAddrUn.sun_family = AF_UNIX;
control_addr_len_ = sizeof(control_addr_.controlAddrUn.sun_family) + sizeof(kJdwpControlName) - 1;
@@ -174,6 +175,7 @@ struct CallbackData {
static void* CallbackFunction(void* vdata) {
std::unique_ptr<CallbackData> data(reinterpret_cast<CallbackData*>(vdata));
+ CHECK(data->this_ == gState);
art::Thread* self = art::Thread::Attach(kAdbConnectionThreadName,
true,
data->thr_);
@@ -199,6 +201,10 @@ static void* CallbackFunction(void* vdata) {
int detach_result = art::Runtime::Current()->GetJavaVM()->DetachCurrentThread();
CHECK_EQ(detach_result, 0);
+ // Get rid of the connection state now that this thread is done with it.
+ gState = nullptr;
+ delete data->this_;
+
return nullptr;
}
@@ -247,11 +253,13 @@ void AdbConnectionState::StartDebuggerThreads() {
ScopedLocalRef<jobject> thr(soa.Env(), CreateAdbConnectionThread(soa.Self()));
pthread_t pthread;
std::unique_ptr<CallbackData> data(new CallbackData { this, soa.Env()->NewGlobalRef(thr.get()) });
+ started_debugger_threads_ = true;
int pthread_create_result = pthread_create(&pthread,
nullptr,
&CallbackFunction,
data.get());
if (pthread_create_result != 0) {
+ started_debugger_threads_ = false;
// If the create succeeded the other thread will call EndThreadBirth.
art::Runtime* runtime = art::Runtime::Current();
soa.Env()->DeleteGlobalRef(data->thr_);
@@ -545,6 +553,7 @@ bool AdbConnectionState::SetupAdbConnection() {
}
void AdbConnectionState::RunPollLoop(art::Thread* self) {
+ CHECK_NE(agent_name_, "");
CHECK_EQ(self->GetState(), art::kNative);
art::Locks::mutator_lock_->AssertNotHeld(self);
self->SetState(art::kWaitingInMainDebuggerLoop);
@@ -869,24 +878,26 @@ void AdbConnectionState::StopDebuggerThreads() {
shutting_down_ = true;
// Wakeup the poll loop.
uint64_t data = 1;
- TEMP_FAILURE_RETRY(write(sleep_event_fd_, &data, sizeof(data)));
+ if (sleep_event_fd_ != -1) {
+ TEMP_FAILURE_RETRY(write(sleep_event_fd_, &data, sizeof(data)));
+ }
}
// The plugin initialization function.
extern "C" bool ArtPlugin_Initialize() REQUIRES_SHARED(art::Locks::mutator_lock_) {
DCHECK(art::Runtime::Current()->GetJdwpProvider() == art::JdwpProvider::kAdbConnection);
// TODO Provide some way for apps to set this maybe?
+ DCHECK(gState == nullptr);
gState = new AdbConnectionState(kDefaultJdwpAgentName);
- CHECK(gState != nullptr);
return ValidateJdwpOptions(art::Runtime::Current()->GetJdwpOptions());
}
extern "C" bool ArtPlugin_Deinitialize() {
- CHECK(gState != nullptr);
- // Just do this a second time?
- // TODO I don't think this should be needed.
gState->StopDebuggerThreads();
- delete gState;
+ if (!gState->DebuggerThreadsStarted()) {
+ // If the debugger threads were started, they will delete the state themselves once they
+ // are done; otherwise we must delete it here.
+ delete gState;
+ }
return true;
}
diff --git a/adbconnection/adbconnection.h b/adbconnection/adbconnection.h
index e63a3b607d..04e39bf4ff 100644
--- a/adbconnection/adbconnection.h
+++ b/adbconnection/adbconnection.h
@@ -72,7 +72,7 @@ struct AdbConnectionDdmCallback : public art::DdmCallback {
class AdbConnectionState {
public:
- explicit AdbConnectionState(const std::string& agent_name);
+ explicit AdbConnectionState(const std::string& name);
// Called on the listening thread to start dealing with new input. thr is used to attach the new
// thread to the runtime.
@@ -85,6 +85,11 @@ class AdbConnectionState {
// Stops debugger threads during shutdown.
void StopDebuggerThreads();
+ // Returns true if StartDebuggerThreads was called successfully.
+ bool DebuggerThreadsStarted() {
+ return started_debugger_threads_;
+ }
+
private:
uint32_t NextDdmId();
@@ -161,6 +166,8 @@ class AdbConnectionState {
std::atomic<uint32_t> next_ddm_id_;
+ bool started_debugger_threads_;
+
socklen_t control_addr_len_;
union {
sockaddr_un controlAddrUn;
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 2c62095458..17b94d3bdf 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -76,7 +76,7 @@ extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t cou
const ArrayRef<mirror::Class*> types_array(types, count);
std::vector<uint8_t> elf_file = debug::WriteDebugElfFileForClasses(
kRuntimeISA, jit_compiler->GetCompilerDriver()->GetInstructionSetFeatures(), types_array);
- MutexLock mu(Thread::Current(), g_jit_debug_mutex);
+ MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
CreateJITCodeEntry(std::move(elf_file));
}
}
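
The jit_compiler.cc hunk replaces a file-local g_jit_debug_mutex with the runtime-wide Locks::native_debug_interface_lock_, so every writer to the native debug (JIT code entry) list serializes on one shared lock. A generic sketch of the idea, with placeholder names for the lock and the entry list:

#include <cstdint>
#include <mutex>
#include <vector>

// One process-wide lock guarding the native debug entry list (a placeholder
// for art::Locks::native_debug_interface_lock_).
static std::mutex g_native_debug_lock;
static std::vector<std::vector<uint8_t>> g_jit_debug_entries;

void RegisterDebugElf(std::vector<uint8_t> elf_file) {
  // Every registration site must take the same lock; a second, file-local
  // mutex would not serialize against the other writers.
  std::lock_guard<std::mutex> guard(g_native_debug_lock);
  g_jit_debug_entries.push_back(std::move(elf_file));
}
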
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 1380596ab2..3fd88e3e18 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1889,6 +1889,8 @@ void CodeGeneratorARM64::Load(DataType::Type type,
DCHECK_EQ(dst.Is64Bits(), DataType::Is64BitType(type));
__ Ldr(dst, src);
break;
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
}
@@ -1967,6 +1969,8 @@ void CodeGeneratorARM64::LoadAcquire(HInstruction* instruction,
__ Fmov(FPRegister(dst), temp);
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
}
@@ -1994,6 +1998,8 @@ void CodeGeneratorARM64::Store(DataType::Type type,
DCHECK_EQ(src.Is64Bits(), DataType::Is64BitType(type));
__ Str(src, dst);
break;
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
}
@@ -2071,6 +2077,8 @@ void CodeGeneratorARM64::StoreRelease(HInstruction* instruction,
}
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
}
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 18e7d1cc46..6d49b32dbc 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2498,8 +2498,23 @@ void CodeGeneratorARMVIXL::GenerateFrameEntry() {
}
if (!skip_overflow_check) {
+ // Using r4 instead of IP saves 2 bytes.
UseScratchRegisterScope temps(GetVIXLAssembler());
- vixl32::Register temp = temps.Acquire();
+ vixl32::Register temp;
+ // TODO: Remove this check when R4 is made a callee-save register
+ // in ART compiled code (b/72801708). Currently we need to make
+ // sure r4 is not blocked, e.g. in special purpose
+ // TestCodeGeneratorARMVIXL; also asserting that r4 is available
+ // here.
+ if (!blocked_core_registers_[R4]) {
+ for (vixl32::Register reg : kParameterCoreRegistersVIXL) {
+ DCHECK(!reg.Is(r4));
+ }
+ DCHECK(!kCoreCalleeSaves.Includes(r4));
+ temp = r4;
+ } else {
+ temp = temps.Acquire();
+ }
__ Sub(temp, sp, Operand::From(GetStackOverflowReservedBytes(InstructionSet::kArm)));
// The load must immediately precede RecordPcInfo.
ExactAssemblyScope aas(GetVIXLAssembler(),
@@ -2650,6 +2665,8 @@ Location InvokeDexCallingConventionVisitorARMVIXL::GetNextLocation(DataType::Typ
}
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unexpected parameter type " << type;
break;
@@ -2665,6 +2682,7 @@ Location InvokeDexCallingConventionVisitorARMVIXL::GetReturnLocation(DataType::T
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
+ case DataType::Type::kUint32:
case DataType::Type::kInt32: {
return LocationFrom(r0);
}
@@ -2673,6 +2691,7 @@ Location InvokeDexCallingConventionVisitorARMVIXL::GetReturnLocation(DataType::T
return LocationFrom(s0);
}
+ case DataType::Type::kUint64:
case DataType::Type::kInt64: {
return LocationFrom(r0, r1);
}
@@ -5512,6 +5531,8 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldSet(HInstruction* instruction,
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << field_type;
UNREACHABLE();
@@ -5756,6 +5777,8 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldGet(HInstruction* instruction,
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << load_type;
UNREACHABLE();
@@ -6248,6 +6271,8 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
UNREACHABLE();
@@ -6537,6 +6562,8 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << value_type;
UNREACHABLE();
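
The frame-entry hunk above prefers the low register r4 over the IP scratch register for the stack-overflow probe: a low register gets 16-bit Thumb encodings for the SUB/LDR pair, saving two bytes per method, with a fallback to the usual scratch register when r4 is blocked. The selection logic in isolation, with simplified stand-in types for the VIXL classes:

// Simplified sketch of the temp-register choice; Register and ScratchScope
// are stand-ins for the VIXL types used in the real code.
enum Register { R0, R1, R2, R3, R4, IP };

struct ScratchScope {
  Register Acquire() { return IP; }  // the usual scratch register
};

Register ChooseOverflowProbeTemp(const bool* blocked_core_registers,
                                 ScratchScope* temps) {
  // r4 is not a parameter register and not callee-save here, so if nothing
  // has blocked it we can use it and get the shorter 16-bit encodings.
  if (!blocked_core_registers[R4]) {
    return R4;
  }
  return temps->Acquire();
}
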
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 51fb4dacfd..855da2b18f 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -58,9 +58,11 @@ Location MipsReturnLocation(DataType::Type return_type) {
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
+ case DataType::Type::kUint32:
case DataType::Type::kInt32:
return Location::RegisterLocation(V0);
+ case DataType::Type::kUint64:
case DataType::Type::kInt64:
return Location::RegisterPairLocation(V0, V1);
@@ -140,6 +142,8 @@ Location InvokeDexCallingConventionVisitorMIPS::GetNextLocation(DataType::Type t
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unexpected parameter type " << type;
break;
@@ -391,7 +395,7 @@ class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
__ Bind(GetEntryLabel());
- if (!is_fatal_) {
+ if (!is_fatal_ || instruction_->CanThrowIntoCatchBlock()) {
SaveLiveRegisters(codegen, locations);
}
@@ -2821,6 +2825,8 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
@@ -3136,6 +3142,8 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
@@ -3275,26 +3283,8 @@ static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) {
}
void LocationsBuilderMIPS::VisitCheckCast(HCheckCast* instruction) {
- LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
- bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
-
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
- switch (type_check_kind) {
- case TypeCheckKind::kExactCheck:
- case TypeCheckKind::kAbstractClassCheck:
- case TypeCheckKind::kClassHierarchyCheck:
- case TypeCheckKind::kArrayObjectCheck:
- call_kind = (throws_into_catch || kEmitCompilerReadBarrier)
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall; // In fact, call on a fatal (non-returning) slow path.
- break;
- case TypeCheckKind::kArrayCheck:
- case TypeCheckKind::kUnresolvedCheck:
- case TypeCheckKind::kInterfaceCheck:
- call_kind = LocationSummary::kCallOnSlowPath;
- break;
- }
-
+ LocationSummary::CallKind call_kind = CodeGenerator::GetCheckCastCallKind(instruction);
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
@@ -3323,18 +3313,7 @@ void InstructionCodeGeneratorMIPS::VisitCheckCast(HCheckCast* instruction) {
mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
MipsLabel done;
- // Always false for read barriers since we may need to go to the entrypoint for non-fatal cases
- // from false negatives. The false negatives may come from avoiding read barriers below. Avoiding
- // read barriers is done for performance and code size reasons.
- bool is_type_check_slow_path_fatal = false;
- if (!kEmitCompilerReadBarrier) {
- is_type_check_slow_path_fatal =
- (type_check_kind == TypeCheckKind::kExactCheck ||
- type_check_kind == TypeCheckKind::kAbstractClassCheck ||
- type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
- type_check_kind == TypeCheckKind::kArrayObjectCheck) &&
- !instruction->CanThrowIntoCatchBlock();
- }
+ bool is_type_check_slow_path_fatal = CodeGenerator::IsTypeCheckSlowPathFatal(instruction);
SlowPathCodeMIPS* slow_path =
new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
instruction, is_type_check_slow_path_fatal);
@@ -6320,6 +6299,8 @@ void InstructionCodeGeneratorMIPS::HandleFieldGet(HInstruction* instruction,
case DataType::Type::kFloat64:
load_type = kLoadDoubleword;
break;
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
UNREACHABLE();
@@ -6473,6 +6454,8 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction,
case DataType::Type::kFloat64:
store_type = kStoreDoubleword;
break;
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
UNREACHABLE();
@@ -7201,11 +7184,12 @@ void LocationsBuilderMIPS::VisitInstanceOf(HInstanceOf* instruction) {
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kAbstractClassCheck:
case TypeCheckKind::kClassHierarchyCheck:
- case TypeCheckKind::kArrayObjectCheck:
- call_kind =
- kEmitCompilerReadBarrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
- baker_read_barrier_slow_path = kUseBakerReadBarrier;
+ case TypeCheckKind::kArrayObjectCheck: {
+ bool needs_read_barrier = CodeGenerator::InstanceOfNeedsReadBarrier(instruction);
+ call_kind = needs_read_barrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
+ baker_read_barrier_slow_path = kUseBakerReadBarrier && needs_read_barrier;
break;
+ }
case TypeCheckKind::kArrayCheck:
case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
@@ -7253,13 +7237,15 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// Classes must be equal for the instanceof to succeed.
__ Xor(out, out, cls);
__ Sltiu(out, out, 1);
@@ -7267,13 +7253,15 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
}
case TypeCheckKind::kAbstractClassCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
MipsLabel loop;
@@ -7283,7 +7271,7 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
out_loc,
super_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// If `out` is null, we use it for the result, and jump to `done`.
__ Beqz(out, &done);
__ Bne(out, cls, &loop);
@@ -7292,13 +7280,15 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
}
case TypeCheckKind::kClassHierarchyCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// Walk over the class hierarchy to find a match.
MipsLabel loop, success;
__ Bind(&loop);
@@ -7308,7 +7298,7 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
out_loc,
super_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
__ Bnez(out, &loop);
// If `out` is null, we use it for the result, and jump to `done`.
__ B(&done);
@@ -7318,13 +7308,15 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
}
case TypeCheckKind::kArrayObjectCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// Do an exact check.
MipsLabel success;
__ Beq(out, cls, &success);
@@ -7334,7 +7326,7 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
out_loc,
component_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// If `out` is null, we use it for the result, and jump to `done`.
__ Beqz(out, &done);
__ LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 480b9178d2..8a06061c6a 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -55,8 +55,10 @@ Location Mips64ReturnLocation(DataType::Type return_type) {
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
+ case DataType::Type::kUint32:
case DataType::Type::kInt32:
case DataType::Type::kReference:
+ case DataType::Type::kUint64:
case DataType::Type::kInt64:
return Location::RegisterLocation(V0);
@@ -350,7 +352,7 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
- if (!is_fatal_) {
+ if (!is_fatal_ || instruction_->CanThrowIntoCatchBlock()) {
SaveLiveRegisters(codegen, locations);
}
@@ -2408,6 +2410,8 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
@@ -2711,6 +2715,8 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
@@ -2830,26 +2836,8 @@ static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) {
}
void LocationsBuilderMIPS64::VisitCheckCast(HCheckCast* instruction) {
- LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
- bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
-
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
- switch (type_check_kind) {
- case TypeCheckKind::kExactCheck:
- case TypeCheckKind::kAbstractClassCheck:
- case TypeCheckKind::kClassHierarchyCheck:
- case TypeCheckKind::kArrayObjectCheck:
- call_kind = (throws_into_catch || kEmitCompilerReadBarrier)
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall; // In fact, call on a fatal (non-returning) slow path.
- break;
- case TypeCheckKind::kArrayCheck:
- case TypeCheckKind::kUnresolvedCheck:
- case TypeCheckKind::kInterfaceCheck:
- call_kind = LocationSummary::kCallOnSlowPath;
- break;
- }
-
+ LocationSummary::CallKind call_kind = CodeGenerator::GetCheckCastCallKind(instruction);
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
@@ -2878,18 +2866,7 @@ void InstructionCodeGeneratorMIPS64::VisitCheckCast(HCheckCast* instruction) {
mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
Mips64Label done;
- // Always false for read barriers since we may need to go to the entrypoint for non-fatal cases
- // from false negatives. The false negatives may come from avoiding read barriers below. Avoiding
- // read barriers is done for performance and code size reasons.
- bool is_type_check_slow_path_fatal = false;
- if (!kEmitCompilerReadBarrier) {
- is_type_check_slow_path_fatal =
- (type_check_kind == TypeCheckKind::kExactCheck ||
- type_check_kind == TypeCheckKind::kAbstractClassCheck ||
- type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
- type_check_kind == TypeCheckKind::kArrayObjectCheck) &&
- !instruction->CanThrowIntoCatchBlock();
- }
+ bool is_type_check_slow_path_fatal = CodeGenerator::IsTypeCheckSlowPathFatal(instruction);
SlowPathCodeMIPS64* slow_path =
new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS64(
instruction, is_type_check_slow_path_fatal);
@@ -4798,6 +4775,8 @@ void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction,
case DataType::Type::kReference:
load_type = kLoadUnsignedWord;
break;
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
UNREACHABLE();
@@ -4891,6 +4870,8 @@ void InstructionCodeGeneratorMIPS64::HandleFieldSet(HInstruction* instruction,
case DataType::Type::kFloat64:
store_type = kStoreDoubleword;
break;
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
UNREACHABLE();
@@ -5518,11 +5499,12 @@ void LocationsBuilderMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kAbstractClassCheck:
case TypeCheckKind::kClassHierarchyCheck:
- case TypeCheckKind::kArrayObjectCheck:
- call_kind =
- kEmitCompilerReadBarrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
- baker_read_barrier_slow_path = kUseBakerReadBarrier;
+ case TypeCheckKind::kArrayObjectCheck: {
+ bool needs_read_barrier = CodeGenerator::InstanceOfNeedsReadBarrier(instruction);
+ call_kind = needs_read_barrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
+ baker_read_barrier_slow_path = kUseBakerReadBarrier && needs_read_barrier;
break;
+ }
case TypeCheckKind::kArrayCheck:
case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
@@ -5570,13 +5552,15 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// Classes must be equal for the instanceof to succeed.
__ Xor(out, out, cls);
__ Sltiu(out, out, 1);
@@ -5584,13 +5568,15 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
}
case TypeCheckKind::kAbstractClassCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
Mips64Label loop;
@@ -5600,7 +5586,7 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
out_loc,
super_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// If `out` is null, we use it for the result, and jump to `done`.
__ Beqzc(out, &done);
__ Bnec(out, cls, &loop);
@@ -5609,13 +5595,15 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
}
case TypeCheckKind::kClassHierarchyCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// Walk over the class hierarchy to find a match.
Mips64Label loop, success;
__ Bind(&loop);
@@ -5625,7 +5613,7 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
out_loc,
super_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
__ Bnezc(out, &loop);
// If `out` is null, we use it for the result, and jump to `done`.
__ Bc(&done);
@@ -5635,13 +5623,15 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
}
case TypeCheckKind::kArrayObjectCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// Do an exact check.
Mips64Label success;
__ Beqc(out, cls, &success);
@@ -5651,7 +5641,7 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
out_loc,
component_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// If `out` is null, we use it for the result, and jump to `done`.
__ Beqzc(out, &done);
__ LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
diff --git a/compiler/optimizing/code_generator_vector_arm64.cc b/compiler/optimizing/code_generator_vector_arm64.cc
index 152a59c208..174efdf115 100644
--- a/compiler/optimizing/code_generator_vector_arm64.cc
+++ b/compiler/optimizing/code_generator_vector_arm64.cc
@@ -606,22 +606,20 @@ void InstructionCodeGeneratorARM64::VisitVecMin(HVecMin* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Smin(dst.V8H(), lhs.V8H(), rhs.V8H());
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Umin(dst.V4S(), lhs.V4S(), rhs.V4S());
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Umin(dst.V4S(), lhs.V4S(), rhs.V4S());
- } else {
- __ Smin(dst.V4S(), lhs.V4S(), rhs.V4S());
- }
+ __ Smin(dst.V4S(), lhs.V4S(), rhs.V4S());
break;
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ Fmin(dst.V4S(), lhs.V4S(), rhs.V4S());
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ Fmin(dst.V2D(), lhs.V2D(), rhs.V2D());
break;
default:
@@ -656,22 +654,20 @@ void InstructionCodeGeneratorARM64::VisitVecMax(HVecMax* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Smax(dst.V8H(), lhs.V8H(), rhs.V8H());
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Umax(dst.V4S(), lhs.V4S(), rhs.V4S());
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Umax(dst.V4S(), lhs.V4S(), rhs.V4S());
- } else {
- __ Smax(dst.V4S(), lhs.V4S(), rhs.V4S());
- }
+ __ Smax(dst.V4S(), lhs.V4S(), rhs.V4S());
break;
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ Fmax(dst.V4S(), lhs.V4S(), rhs.V4S());
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ Fmax(dst.V2D(), lhs.V2D(), rhs.V2D());
break;
default:
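
Across all of the vector backends touched by this change, the per-instruction IsUnsigned() flag on HVecMin/HVecMax is retired in favor of encoding signedness directly in the packed type (kUint32/kUint64 vs. kInt32/kInt64), so each switch case selects exactly one machine instruction. The shape of that dispatch, reduced to a sketch with placeholder emit functions standing in for the per-ISA assembler macros:

#include <cstdio>

enum class Type { kInt32, kUint32, kInt64, kUint64 };

// Placeholder emitters standing in for __ Smin / __ Umin and friends.
void EmitSignedMin32()   { std::puts("smin.4s"); }
void EmitUnsignedMin32() { std::puts("umin.4s"); }

void VisitVecMin(Type packed_type) {
  switch (packed_type) {
    case Type::kUint32:
      EmitUnsignedMin32();  // signedness now comes from the packed type
      break;
    case Type::kInt32:
      EmitSignedMin32();    // no IsUnsigned() branch needed anymore
      break;
    default:
      break;  // other element types elided in this sketch
  }
}
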
diff --git a/compiler/optimizing/code_generator_vector_arm_vixl.cc b/compiler/optimizing/code_generator_vector_arm_vixl.cc
index cc470ddb2e..7c3155ab73 100644
--- a/compiler/optimizing/code_generator_vector_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_vector_arm_vixl.cc
@@ -431,13 +431,13 @@ void InstructionCodeGeneratorARMVIXL::VisitVecMin(HVecMin* instruction) {
DCHECK_EQ(4u, instruction->GetVectorLength());
__ Vmin(DataTypeValue::S16, dst, lhs, rhs);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Vmin(DataTypeValue::U32, dst, lhs, rhs);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(2u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Vmin(DataTypeValue::U32, dst, lhs, rhs);
- } else {
- __ Vmin(DataTypeValue::S32, dst, lhs, rhs);
- }
+ __ Vmin(DataTypeValue::S32, dst, lhs, rhs);
break;
default:
LOG(FATAL) << "Unsupported SIMD type";
@@ -471,13 +471,13 @@ void InstructionCodeGeneratorARMVIXL::VisitVecMax(HVecMax* instruction) {
DCHECK_EQ(4u, instruction->GetVectorLength());
__ Vmax(DataTypeValue::S16, dst, lhs, rhs);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Vmax(DataTypeValue::U32, dst, lhs, rhs);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(2u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Vmax(DataTypeValue::U32, dst, lhs, rhs);
- } else {
- __ Vmax(DataTypeValue::S32, dst, lhs, rhs);
- }
+ __ Vmax(DataTypeValue::S32, dst, lhs, rhs);
break;
default:
LOG(FATAL) << "Unsupported SIMD type";
diff --git a/compiler/optimizing/code_generator_vector_mips.cc b/compiler/optimizing/code_generator_vector_mips.cc
index 3cf150a6b8..ed9de96496 100644
--- a/compiler/optimizing/code_generator_vector_mips.cc
+++ b/compiler/optimizing/code_generator_vector_mips.cc
@@ -613,32 +613,30 @@ void InstructionCodeGeneratorMIPS::VisitVecMin(HVecMin* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Min_sH(dst, lhs, rhs);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Min_uW(dst, lhs, rhs);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Min_uW(dst, lhs, rhs);
- } else {
- __ Min_sW(dst, lhs, rhs);
- }
+ __ Min_sW(dst, lhs, rhs);
+ break;
+ case DataType::Type::kUint64:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Min_uD(dst, lhs, rhs);
break;
case DataType::Type::kInt64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Min_uD(dst, lhs, rhs);
- } else {
- __ Min_sD(dst, lhs, rhs);
- }
+ __ Min_sD(dst, lhs, rhs);
break;
// When one of arguments is NaN, fmin.df returns other argument, but Java expects a NaN value.
// TODO: Fix min(x, NaN) cases for float and double.
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ FminW(dst, lhs, rhs);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ FminD(dst, lhs, rhs);
break;
default:
@@ -673,32 +671,30 @@ void InstructionCodeGeneratorMIPS::VisitVecMax(HVecMax* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Max_sH(dst, lhs, rhs);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Max_uW(dst, lhs, rhs);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Max_uW(dst, lhs, rhs);
- } else {
- __ Max_sW(dst, lhs, rhs);
- }
+ __ Max_sW(dst, lhs, rhs);
+ break;
+ case DataType::Type::kUint64:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Max_uD(dst, lhs, rhs);
break;
case DataType::Type::kInt64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Max_uD(dst, lhs, rhs);
- } else {
- __ Max_sD(dst, lhs, rhs);
- }
+ __ Max_sD(dst, lhs, rhs);
break;
// When one of arguments is NaN, fmax.df returns other argument, but Java expects a NaN value.
// TODO: Fix max(x, NaN) cases for float and double.
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ FmaxW(dst, lhs, rhs);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ FmaxD(dst, lhs, rhs);
break;
default:
diff --git a/compiler/optimizing/code_generator_vector_mips64.cc b/compiler/optimizing/code_generator_vector_mips64.cc
index 2d69533f21..9ea55ec8d7 100644
--- a/compiler/optimizing/code_generator_vector_mips64.cc
+++ b/compiler/optimizing/code_generator_vector_mips64.cc
@@ -612,32 +612,30 @@ void InstructionCodeGeneratorMIPS64::VisitVecMin(HVecMin* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Min_sH(dst, lhs, rhs);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Min_uW(dst, lhs, rhs);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Min_uW(dst, lhs, rhs);
- } else {
- __ Min_sW(dst, lhs, rhs);
- }
+ __ Min_sW(dst, lhs, rhs);
+ break;
+ case DataType::Type::kUint64:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Min_uD(dst, lhs, rhs);
break;
case DataType::Type::kInt64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Min_uD(dst, lhs, rhs);
- } else {
- __ Min_sD(dst, lhs, rhs);
- }
+ __ Min_sD(dst, lhs, rhs);
break;
// When one of arguments is NaN, fmin.df returns other argument, but Java expects a NaN value.
// TODO: Fix min(x, NaN) cases for float and double.
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ FminW(dst, lhs, rhs);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ FminD(dst, lhs, rhs);
break;
default:
@@ -672,32 +670,30 @@ void InstructionCodeGeneratorMIPS64::VisitVecMax(HVecMax* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Max_sH(dst, lhs, rhs);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Max_uW(dst, lhs, rhs);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Max_uW(dst, lhs, rhs);
- } else {
- __ Max_sW(dst, lhs, rhs);
- }
+ __ Max_sW(dst, lhs, rhs);
+ break;
+ case DataType::Type::kUint64:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Max_uD(dst, lhs, rhs);
break;
case DataType::Type::kInt64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Max_uD(dst, lhs, rhs);
- } else {
- __ Max_sD(dst, lhs, rhs);
- }
+ __ Max_sD(dst, lhs, rhs);
break;
// When one of arguments is NaN, fmax.df returns other argument, but Java expects a NaN value.
// TODO: Fix max(x, NaN) cases for float and double.
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ FmaxW(dst, lhs, rhs);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ FmaxD(dst, lhs, rhs);
break;
default:
diff --git a/compiler/optimizing/code_generator_vector_x86.cc b/compiler/optimizing/code_generator_vector_x86.cc
index 7b4b85d2fe..f2ffccc887 100644
--- a/compiler/optimizing/code_generator_vector_x86.cc
+++ b/compiler/optimizing/code_generator_vector_x86.cc
@@ -640,23 +640,21 @@ void InstructionCodeGeneratorX86::VisitVecMin(HVecMin* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ pminsw(dst, src);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ pminud(dst, src);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ pminud(dst, src);
- } else {
- __ pminsd(dst, src);
- }
+ __ pminsd(dst, src);
break;
// Next cases are sloppy wrt 0.0 vs -0.0.
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ minps(dst, src);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ minpd(dst, src);
break;
default:
@@ -691,23 +689,21 @@ void InstructionCodeGeneratorX86::VisitVecMax(HVecMax* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ pmaxsw(dst, src);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ pmaxud(dst, src);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ pmaxud(dst, src);
- } else {
- __ pmaxsd(dst, src);
- }
+ __ pmaxsd(dst, src);
break;
// Next cases are sloppy wrt 0.0 vs -0.0.
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ maxps(dst, src);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ maxpd(dst, src);
break;
default:
diff --git a/compiler/optimizing/code_generator_vector_x86_64.cc b/compiler/optimizing/code_generator_vector_x86_64.cc
index 107030e6c2..e2b0485f89 100644
--- a/compiler/optimizing/code_generator_vector_x86_64.cc
+++ b/compiler/optimizing/code_generator_vector_x86_64.cc
@@ -623,23 +623,21 @@ void InstructionCodeGeneratorX86_64::VisitVecMin(HVecMin* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ pminsw(dst, src);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ pminud(dst, src);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ pminud(dst, src);
- } else {
- __ pminsd(dst, src);
- }
+ __ pminsd(dst, src);
break;
// Next cases are sloppy wrt 0.0 vs -0.0.
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ minps(dst, src);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ minpd(dst, src);
break;
default:
@@ -674,23 +672,21 @@ void InstructionCodeGeneratorX86_64::VisitVecMax(HVecMax* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ pmaxsw(dst, src);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ pmaxud(dst, src);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ pmaxud(dst, src);
- } else {
- __ pmaxsd(dst, src);
- }
+ __ pmaxsd(dst, src);
break;
// Next cases are sloppy wrt 0.0 vs -0.0.
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ maxps(dst, src);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ maxpd(dst, src);
break;
default:
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index c52c7ff7f1..5fede80bc7 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1134,9 +1134,11 @@ Location InvokeDexCallingConventionVisitorX86::GetReturnLocation(DataType::Type
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
+ case DataType::Type::kUint32:
case DataType::Type::kInt32:
return Location::RegisterLocation(EAX);
+ case DataType::Type::kUint64:
case DataType::Type::kInt64:
return Location::RegisterPairLocation(EAX, EDX);
@@ -1206,6 +1208,8 @@ Location InvokeDexCallingConventionVisitorX86::GetNextLocation(DataType::Type ty
}
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unexpected parameter type " << type;
break;
@@ -4844,6 +4848,8 @@ void InstructionCodeGeneratorX86::HandleFieldGet(HInstruction* instruction,
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << load_type;
UNREACHABLE();
@@ -5017,6 +5023,8 @@ void InstructionCodeGeneratorX86::HandleFieldSet(HInstruction* instruction,
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << field_type;
UNREACHABLE();
@@ -5320,6 +5328,8 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
UNREACHABLE();
@@ -5571,6 +5581,8 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index ee5918de71..ae35ab5983 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -2273,7 +2273,9 @@ Location InvokeDexCallingConventionVisitorX86_64::GetReturnLocation(DataType::Ty
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
+ case DataType::Type::kUint32:
case DataType::Type::kInt32:
+ case DataType::Type::kUint64:
case DataType::Type::kInt64:
return Location::RegisterLocation(RAX);
@@ -2342,6 +2344,8 @@ Location InvokeDexCallingConventionVisitorX86_64::GetNextLocation(DataType::Type
}
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unexpected parameter type " << type;
break;
@@ -4307,6 +4311,8 @@ void InstructionCodeGeneratorX86_64::HandleFieldGet(HInstruction* instruction,
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << load_type;
UNREACHABLE();
@@ -4470,6 +4476,8 @@ void InstructionCodeGeneratorX86_64::HandleFieldSet(HInstruction* instruction,
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << field_type;
UNREACHABLE();
@@ -4763,6 +4771,8 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
UNREACHABLE();
@@ -5002,6 +5012,8 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
diff --git a/compiler/optimizing/data_type-inl.h b/compiler/optimizing/data_type-inl.h
index e389bad3ad..e2cf7a80fe 100644
--- a/compiler/optimizing/data_type-inl.h
+++ b/compiler/optimizing/data_type-inl.h
@@ -53,7 +53,9 @@ constexpr char DataType::TypeId(DataType::Type type) {
case DataType::Type::kInt8: return 'b'; // Java byte (B).
case DataType::Type::kUint16: return 'c'; // Java char (C).
case DataType::Type::kInt16: return 's'; // Java short (S).
+ case DataType::Type::kUint32: return 'u'; // Picked 'u' for unsigned.
case DataType::Type::kInt32: return 'i'; // Java int (I).
+ case DataType::Type::kUint64: return 'w'; // Picked 'w' for long unsigned.
case DataType::Type::kInt64: return 'j'; // Java long (J).
case DataType::Type::kFloat32: return 'f'; // Java float (F).
case DataType::Type::kFloat64: return 'd'; // Java double (D).
diff --git a/compiler/optimizing/data_type.cc b/compiler/optimizing/data_type.cc
index 3c99a76c17..cb354f46cc 100644
--- a/compiler/optimizing/data_type.cc
+++ b/compiler/optimizing/data_type.cc
@@ -25,7 +25,9 @@ static const char* kTypeNames[] = {
"Int8",
"Uint16",
"Int16",
+ "Uint32",
"Int32",
+ "Uint64",
"Int64",
"Float32",
"Float64",
diff --git a/compiler/optimizing/data_type.h b/compiler/optimizing/data_type.h
index 548fe28cee..4a6c91459f 100644
--- a/compiler/optimizing/data_type.h
+++ b/compiler/optimizing/data_type.h
@@ -34,7 +34,9 @@ class DataType {
kInt8,
kUint16,
kInt16,
+ kUint32,
kInt32,
+ kUint64,
kInt64,
kFloat32,
kFloat64,
@@ -55,9 +57,11 @@ class DataType {
case Type::kUint16:
case Type::kInt16:
return 1;
+ case Type::kUint32:
case Type::kInt32:
case Type::kFloat32:
return 2;
+ case Type::kUint64:
case Type::kInt64:
case Type::kFloat64:
return 3;
@@ -80,9 +84,11 @@ class DataType {
case Type::kUint16:
case Type::kInt16:
return 2;
+ case Type::kUint32:
case Type::kInt32:
case Type::kFloat32:
return 4;
+ case Type::kUint64:
case Type::kInt64:
case Type::kFloat64:
return 8;
@@ -107,7 +113,9 @@ class DataType {
case Type::kInt8:
case Type::kUint16:
case Type::kInt16:
+ case Type::kUint32:
case Type::kInt32:
+ case Type::kUint64:
case Type::kInt64:
return true;
default:
@@ -120,11 +128,12 @@ class DataType {
}
static bool Is64BitType(Type type) {
- return type == Type::kInt64 || type == Type::kFloat64;
+ return type == Type::kUint64 || type == Type::kInt64 || type == Type::kFloat64;
}
static bool IsUnsignedType(Type type) {
- return type == Type::kBool || type == Type::kUint8 || type == Type::kUint16;
+ return type == Type::kBool || type == Type::kUint8 || type == Type::kUint16 ||
+ type == Type::kUint32 || type == Type::kUint64;
}
// Return the general kind of `type`, fusing integer-like types as Type::kInt.
@@ -133,10 +142,14 @@ class DataType {
case Type::kBool:
case Type::kUint8:
case Type::kInt8:
- case Type::kInt16:
case Type::kUint16:
+ case Type::kInt16:
+ case Type::kUint32:
case Type::kInt32:
return Type::kInt32;
+ case Type::kUint64:
+ case Type::kInt64:
+ return Type::kInt64;
default:
return type;
}
@@ -154,8 +167,12 @@ class DataType {
return std::numeric_limits<uint16_t>::min();
case Type::kInt16:
return std::numeric_limits<int16_t>::min();
+ case Type::kUint32:
+ return std::numeric_limits<uint32_t>::min();
case Type::kInt32:
return std::numeric_limits<int32_t>::min();
+ case Type::kUint64:
+ return std::numeric_limits<uint64_t>::min();
case Type::kInt64:
return std::numeric_limits<int64_t>::min();
default:
@@ -176,8 +193,12 @@ class DataType {
return std::numeric_limits<uint16_t>::max();
case Type::kInt16:
return std::numeric_limits<int16_t>::max();
+ case Type::kUint32:
+ return std::numeric_limits<uint32_t>::max();
case Type::kInt32:
return std::numeric_limits<int32_t>::max();
+ case Type::kUint64:
+ return std::numeric_limits<uint64_t>::max();
case Type::kInt64:
return std::numeric_limits<int64_t>::max();
default:
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 12c69889ab..6144162f68 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -533,20 +533,9 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
void VisitVecHalvingAdd(HVecHalvingAdd* hadd) OVERRIDE {
VisitVecBinaryOperation(hadd);
- StartAttributeStream("unsigned") << std::boolalpha << hadd->IsUnsigned() << std::noboolalpha;
StartAttributeStream("rounded") << std::boolalpha << hadd->IsRounded() << std::noboolalpha;
}
- void VisitVecMin(HVecMin* min) OVERRIDE {
- VisitVecBinaryOperation(min);
- StartAttributeStream("unsigned") << std::boolalpha << min->IsUnsigned() << std::noboolalpha;
- }
-
- void VisitVecMax(HVecMax* max) OVERRIDE {
- VisitVecBinaryOperation(max);
- StartAttributeStream("unsigned") << std::boolalpha << max->IsUnsigned() << std::noboolalpha;
- }
-
void VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) OVERRIDE {
VisitVecOperation(instruction);
StartAttributeStream("kind") << instruction->GetOpKind();
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 3dc1ef7534..899496328e 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -30,46 +30,6 @@
namespace art {
-// TODO: Clean up the packed type detection so that we have the right type straight away
-// and do not need to go through this normalization.
-static inline void NormalizePackedType(/* inout */ DataType::Type* type,
- /* inout */ bool* is_unsigned) {
- switch (*type) {
- case DataType::Type::kBool:
- DCHECK(!*is_unsigned);
- break;
- case DataType::Type::kUint8:
- case DataType::Type::kInt8:
- if (*is_unsigned) {
- *is_unsigned = false;
- *type = DataType::Type::kUint8;
- } else {
- *type = DataType::Type::kInt8;
- }
- break;
- case DataType::Type::kUint16:
- case DataType::Type::kInt16:
- if (*is_unsigned) {
- *is_unsigned = false;
- *type = DataType::Type::kUint16;
- } else {
- *type = DataType::Type::kInt16;
- }
- break;
- case DataType::Type::kInt32:
- case DataType::Type::kInt64:
- // We do not have kUint32 and kUint64 at the moment.
- break;
- case DataType::Type::kFloat32:
- case DataType::Type::kFloat64:
- DCHECK(!*is_unsigned);
- break;
- default:
- LOG(FATAL) << "Unexpected type " << *type;
- UNREACHABLE();
- }
-}
-
// Enables vectorization (SIMDization) in the loop optimizer.
static constexpr bool kEnableVectorization = true;
@@ -1362,8 +1322,10 @@ bool HLoopOptimization::VectorizeUse(LoopNode* node,
}
if (VectorizeUse(node, r, generate_code, type, restrictions)) {
if (generate_code) {
- NormalizePackedType(&type, &is_unsigned);
- GenerateVecOp(instruction, vector_map_->Get(r), nullptr, type);
+ GenerateVecOp(instruction,
+ vector_map_->Get(r),
+ nullptr,
+ HVecOperation::ToProperType(type, is_unsigned));
}
return true;
}
@@ -1865,18 +1827,26 @@ void HLoopOptimization::GenerateVecOp(HInstruction* org,
case Intrinsics::kMathMinLongLong:
case Intrinsics::kMathMinFloatFloat:
case Intrinsics::kMathMinDoubleDouble: {
- NormalizePackedType(&type, &is_unsigned);
vector = new (global_allocator_)
- HVecMin(global_allocator_, opa, opb, type, vector_length_, is_unsigned, dex_pc);
+ HVecMin(global_allocator_,
+ opa,
+ opb,
+ HVecOperation::ToProperType(type, is_unsigned),
+ vector_length_,
+ dex_pc);
break;
}
case Intrinsics::kMathMaxIntInt:
case Intrinsics::kMathMaxLongLong:
case Intrinsics::kMathMaxFloatFloat:
case Intrinsics::kMathMaxDoubleDouble: {
- NormalizePackedType(&type, &is_unsigned);
vector = new (global_allocator_)
- HVecMax(global_allocator_, opa, opb, type, vector_length_, is_unsigned, dex_pc);
+ HVecMax(global_allocator_,
+ opa,
+ opb,
+ HVecOperation::ToProperType(type, is_unsigned),
+ vector_length_,
+ dex_pc);
break;
}
default:
@@ -1987,15 +1957,13 @@ bool HLoopOptimization::VectorizeHalvingAddIdiom(LoopNode* node,
VectorizeUse(node, s, generate_code, type, restrictions)) {
if (generate_code) {
if (vector_mode_ == kVector) {
- NormalizePackedType(&type, &is_unsigned);
vector_map_->Put(instruction, new (global_allocator_) HVecHalvingAdd(
global_allocator_,
vector_map_->Get(r),
vector_map_->Get(s),
- type,
+ HVecOperation::ToProperType(type, is_unsigned),
vector_length_,
is_rounded,
- is_unsigned,
kNoDexPc));
MaybeRecordStat(stats_, MethodCompilationStat::kLoopVectorizedIdiom);
} else {
@@ -2086,7 +2054,7 @@ bool HLoopOptimization::VectorizeSADIdiom(LoopNode* node,
VectorizeUse(node, r, generate_code, sub_type, restrictions) &&
VectorizeUse(node, s, generate_code, sub_type, restrictions)) {
if (generate_code) {
- NormalizePackedType(&reduction_type, &is_unsigned);
+ reduction_type = HVecOperation::ToProperType(reduction_type, is_unsigned);
if (vector_mode_ == kVector) {
vector_map_->Put(instruction, new (global_allocator_) HVecSADAccumulate(
global_allocator_,
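
With kUint32 and kUint64 available, the in-place NormalizePackedType(&type, &is_unsigned) normalization is replaced by the pure mapping HVecOperation::ToProperType(type, is_unsigned), which folds the signedness flag into the returned type. Its essence as a free-standing sketch (the real helpers are defined in the nodes_vector.h hunk below):

enum class Type { kBool, kUint8, kInt8, kUint16, kInt16,
                  kUint32, kInt32, kUint64, kInt64, kFloat32, kFloat64 };

static Type ToSignedType(Type type) {
  switch (type) {
    case Type::kBool:   // 1-byte storage unit
    case Type::kUint8:  return Type::kInt8;
    case Type::kUint16: return Type::kInt16;
    case Type::kUint32: return Type::kInt32;
    case Type::kUint64: return Type::kInt64;
    default:            return type;  // already signed or floating-point
  }
}

static Type ToUnsignedType(Type type) {
  switch (type) {
    case Type::kInt8:  return Type::kUint8;
    case Type::kInt16: return Type::kUint16;
    case Type::kInt32: return Type::kUint32;
    case Type::kInt64: return Type::kUint64;
    default:           return type;  // already unsigned or floating-point
  }
}

// Same contract as HVecOperation::ToProperType: fold the signedness flag
// into the type instead of carrying it on each vector instruction.
static Type ToProperType(Type type, bool is_unsigned) {
  return is_unsigned ? ToUnsignedType(type) : ToSignedType(type);
}
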
diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h
index 87dff8403b..ecabdf3b76 100644
--- a/compiler/optimizing/nodes_vector.h
+++ b/compiler/optimizing/nodes_vector.h
@@ -131,8 +131,6 @@ class HVecOperation : public HVariableInputSizeInstruction {
}
// Maps an integral type to the same-size signed type and leaves other types alone.
- // Can be used to test relaxed type consistency in which packed same-size integral
- // types can co-exist, but other type mixes are an error.
static DataType::Type ToSignedType(DataType::Type type) {
switch (type) {
case DataType::Type::kBool: // 1-byte storage unit
@@ -160,6 +158,11 @@ class HVecOperation : public HVariableInputSizeInstruction {
}
}
+ // Maps an integral type to the same-size (un)signed type. Leaves other types alone.
+ static DataType::Type ToProperType(DataType::Type type, bool is_unsigned) {
+ return is_unsigned ? ToUnsignedType(type) : ToSignedType(type);
+ }
+
// Helper method to determine if an instruction returns a SIMD value.
// TODO: This method is needed until we introduce SIMD as proper type.
static bool ReturnsSIMDValue(HInstruction* instruction) {
@@ -286,6 +289,8 @@ class HVecMemoryOperation : public HVecOperation {
};
// Packed type consistency checker ("same vector length" integral types may mix freely).
+// Tests relaxed type consistency in which packed same-size integral types can co-exist,
+// but other type mixes are an error.
inline static bool HasConsistentPackedTypes(HInstruction* input, DataType::Type type) {
if (input->IsPhi()) {
return input->GetType() == HVecOperation::kSIMDType; // carries SIMD
@@ -518,7 +523,7 @@ class HVecAdd FINAL : public HVecBinaryOperation {
// Performs halving add on every component in the two vectors, viz.
// rounded [ x1, .. , xn ] hradd [ y1, .. , yn ] = [ (x1 + y1 + 1) >> 1, .. , (xn + yn + 1) >> 1 ]
// truncated [ x1, .. , xn ] hadd [ y1, .. , yn ] = [ (x1 + y1) >> 1, .. , (xn + yn ) >> 1 ]
-// for either both signed or both unsigned operands x, y.
+// for either both signed or both unsigned operands x, y (reflected in packed_type).
class HVecHalvingAdd FINAL : public HVecBinaryOperation {
public:
HVecHalvingAdd(ArenaAllocator* allocator,
@@ -527,21 +532,13 @@ class HVecHalvingAdd FINAL : public HVecBinaryOperation {
DataType::Type packed_type,
size_t vector_length,
bool is_rounded,
- bool is_unsigned,
uint32_t dex_pc)
: HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
- // The `is_unsigned` flag should be used exclusively with the Int32 or Int64.
- // This flag is a temporary measure while we do not have the Uint32 and Uint64 data types.
- DCHECK(!is_unsigned ||
- packed_type == DataType::Type::kInt32 ||
- packed_type == DataType::Type::kInt64) << packed_type;
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
- SetPackedFlag<kFieldHAddIsUnsigned>(is_unsigned);
SetPackedFlag<kFieldHAddIsRounded>(is_rounded);
}
- bool IsUnsigned() const { return GetPackedFlag<kFieldHAddIsUnsigned>(); }
bool IsRounded() const { return GetPackedFlag<kFieldHAddIsRounded>(); }
bool CanBeMoved() const OVERRIDE { return true; }
@@ -549,9 +546,7 @@ class HVecHalvingAdd FINAL : public HVecBinaryOperation {
bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
DCHECK(other->IsVecHalvingAdd());
const HVecHalvingAdd* o = other->AsVecHalvingAdd();
- return HVecOperation::InstructionDataEquals(o) &&
- IsUnsigned() == o->IsUnsigned() &&
- IsRounded() == o->IsRounded();
+ return HVecOperation::InstructionDataEquals(o) && IsRounded() == o->IsRounded();
}
DECLARE_INSTRUCTION(VecHalvingAdd);
@@ -561,8 +556,7 @@ class HVecHalvingAdd FINAL : public HVecBinaryOperation {
private:
// Additional packed bits.
- static constexpr size_t kFieldHAddIsUnsigned = HVecOperation::kNumberOfVectorOpPackedBits;
- static constexpr size_t kFieldHAddIsRounded = kFieldHAddIsUnsigned + 1;
+ static constexpr size_t kFieldHAddIsRounded = HVecOperation::kNumberOfVectorOpPackedBits;
static constexpr size_t kNumberOfHAddPackedBits = kFieldHAddIsRounded + 1;
static_assert(kNumberOfHAddPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
};
@@ -638,7 +632,7 @@ class HVecDiv FINAL : public HVecBinaryOperation {
// Takes minimum of every component in the two vectors,
// viz. MIN( [ x1, .. , xn ] , [ y1, .. , yn ]) = [ min(x1, y1), .. , min(xn, yn) ]
-// for either both signed or both unsigned operands x, y.
+// for either both signed or both unsigned operands x, y (reflected in packed_type).
class HVecMin FINAL : public HVecBinaryOperation {
public:
HVecMin(ArenaAllocator* allocator,
@@ -646,44 +640,23 @@ class HVecMin FINAL : public HVecBinaryOperation {
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
- bool is_unsigned,
uint32_t dex_pc)
: HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
- // The `is_unsigned` flag should be used exclusively with the Int32 or Int64.
- // This flag is a temporary measure while we do not have the Uint32 and Uint64 data types.
- DCHECK(!is_unsigned ||
- packed_type == DataType::Type::kInt32 ||
- packed_type == DataType::Type::kInt64) << packed_type;
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
- SetPackedFlag<kFieldMinOpIsUnsigned>(is_unsigned);
}
- bool IsUnsigned() const { return GetPackedFlag<kFieldMinOpIsUnsigned>(); }
-
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
- DCHECK(other->IsVecMin());
- const HVecMin* o = other->AsVecMin();
- return HVecOperation::InstructionDataEquals(o) && IsUnsigned() == o->IsUnsigned();
- }
-
DECLARE_INSTRUCTION(VecMin);
protected:
DEFAULT_COPY_CONSTRUCTOR(VecMin);
-
- private:
- // Additional packed bits.
- static constexpr size_t kFieldMinOpIsUnsigned = HVecOperation::kNumberOfVectorOpPackedBits;
- static constexpr size_t kNumberOfMinOpPackedBits = kFieldMinOpIsUnsigned + 1;
- static_assert(kNumberOfMinOpPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
};
// Takes maximum of every component in the two vectors,
// viz. MAX( [ x1, .. , xn ] , [ y1, .. , yn ]) = [ max(x1, y1), .. , max(xn, yn) ]
-// for either both signed or both unsigned operands x, y.
+// for either both signed or both unsigned operands x, y (reflected in packed_type).
class HVecMax FINAL : public HVecBinaryOperation {
public:
HVecMax(ArenaAllocator* allocator,
@@ -691,39 +664,18 @@ class HVecMax FINAL : public HVecBinaryOperation {
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
- bool is_unsigned,
uint32_t dex_pc)
: HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
- // The `is_unsigned` flag should be used exclusively with the Int32 or Int64.
- // This flag is a temporary measure while we do not have the Uint32 and Uint64 data types.
- DCHECK(!is_unsigned ||
- packed_type == DataType::Type::kInt32 ||
- packed_type == DataType::Type::kInt64) << packed_type;
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
- SetPackedFlag<kFieldMaxOpIsUnsigned>(is_unsigned);
}
- bool IsUnsigned() const { return GetPackedFlag<kFieldMaxOpIsUnsigned>(); }
-
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
- DCHECK(other->IsVecMax());
- const HVecMax* o = other->AsVecMax();
- return HVecOperation::InstructionDataEquals(o) && IsUnsigned() == o->IsUnsigned();
- }
-
DECLARE_INSTRUCTION(VecMax);
protected:
DEFAULT_COPY_CONSTRUCTOR(VecMax);
-
- private:
- // Additional packed bits.
- static constexpr size_t kFieldMaxOpIsUnsigned = HVecOperation::kNumberOfVectorOpPackedBits;
- static constexpr size_t kNumberOfMaxOpPackedBits = kFieldMaxOpIsUnsigned + 1;
- static_assert(kNumberOfMaxOpPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
};
// Bitwise-ands every component in the two vectors,
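
For intuition, here is a minimal standalone sketch of the mapping the new ToProperType() helper performs. The enum and switch arms below are simplified stand-ins for DataType::Type and the helpers in data_type.h, not the real definitions:

    #include <cassert>

    // Simplified stand-in for DataType::Type.
    enum class Type { kBool, kUint8, kInt8, kUint16, kInt16,
                      kUint32, kInt32, kUint64, kInt64, kFloat32 };

    // Same-size signed counterpart; non-integral types pass through.
    static Type ToSigned(Type t) {
      switch (t) {
        case Type::kBool:
        case Type::kUint8:  return Type::kInt8;
        case Type::kUint16: return Type::kInt16;
        case Type::kUint32: return Type::kInt32;
        case Type::kUint64: return Type::kInt64;
        default:            return t;
      }
    }

    // Same-size unsigned counterpart; non-integral types pass through.
    static Type ToUnsigned(Type t) {
      switch (t) {
        case Type::kBool:
        case Type::kInt8:  return Type::kUint8;
        case Type::kInt16: return Type::kUint16;
        case Type::kInt32: return Type::kUint32;
        case Type::kInt64: return Type::kUint64;
        default:           return t;
      }
    }

    // Mirrors HVecOperation::ToProperType: pick the signedness the caller wants.
    static Type ToProperType(Type t, bool is_unsigned) {
      return is_unsigned ? ToUnsigned(t) : ToSigned(t);
    }

    int main() {
      assert(ToProperType(Type::kInt32, /*is_unsigned=*/ true) == Type::kUint32);
      assert(ToProperType(Type::kUint16, /*is_unsigned=*/ false) == Type::kInt16);
      assert(ToProperType(Type::kFloat32, /*is_unsigned=*/ false) == Type::kFloat32);
      return 0;
    }

This is what lets HVecMin, HVecMax, and HVecHalvingAdd drop their is_unsigned packed bit above: the signedness now travels in packed_type itself.
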
diff --git a/compiler/optimizing/nodes_vector_test.cc b/compiler/optimizing/nodes_vector_test.cc
index ab9d7594d9..af13449646 100644
--- a/compiler/optimizing/nodes_vector_test.cc
+++ b/compiler/optimizing/nodes_vector_test.cc
@@ -282,143 +282,53 @@ TEST_F(NodesVectorTest, VectorAlignmentMattersOnStore) {
EXPECT_FALSE(v0->Equals(v1)); // no longer equal
}
-TEST_F(NodesVectorTest, VectorSignMattersOnMin) {
- HVecOperation* p0 = new (GetAllocator())
- HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
- HVecOperation* p1 = new (GetAllocator())
- HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc);
- HVecOperation* p2 = new (GetAllocator())
- HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
-
- HVecMin* v0 = new (GetAllocator()) HVecMin(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ true, kNoDexPc);
- HVecMin* v1 = new (GetAllocator()) HVecMin(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ false, kNoDexPc);
- HVecMin* v2 = new (GetAllocator()) HVecMin(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 2, /*is_unsigned*/ true, kNoDexPc);
- HVecMin* v3 = new (GetAllocator()) HVecMin(
- GetAllocator(), p1, p1, DataType::Type::kUint8, 16, /*is_unsigned*/ false, kNoDexPc);
- HVecMin* v4 = new (GetAllocator()) HVecMin(
- GetAllocator(), p1, p1, DataType::Type::kInt8, 16, /*is_unsigned*/ false, kNoDexPc);
- HVecMin* v5 = new (GetAllocator()) HVecMin(
- GetAllocator(), p2, p2, DataType::Type::kUint16, 8, /*is_unsigned*/ false, kNoDexPc);
- HVecMin* v6 = new (GetAllocator()) HVecMin(
- GetAllocator(), p2, p2, DataType::Type::kInt16, 8, /*is_unsigned*/ false, kNoDexPc);
- HVecMin* min_insns[] = { v0, v1, v2, v3, v4, v5, v6 };
-
- EXPECT_FALSE(p0->CanBeMoved());
- EXPECT_FALSE(p1->CanBeMoved());
- EXPECT_FALSE(p2->CanBeMoved());
-
- for (HVecMin* min_insn : min_insns) {
- EXPECT_TRUE(min_insn->CanBeMoved());
- }
-
- // Deprecated; IsUnsigned() should be removed with the introduction of Uint32 and Uint64.
- EXPECT_TRUE(v0->IsUnsigned());
- EXPECT_FALSE(v1->IsUnsigned());
- EXPECT_TRUE(v2->IsUnsigned());
-
- for (HVecMin* min_insn1 : min_insns) {
- for (HVecMin* min_insn2 : min_insns) {
- EXPECT_EQ(min_insn1 == min_insn2, min_insn1->Equals(min_insn2));
- }
- }
-}
-
-TEST_F(NodesVectorTest, VectorSignMattersOnMax) {
- HVecOperation* p0 = new (GetAllocator())
- HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
- HVecOperation* p1 = new (GetAllocator())
- HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc);
- HVecOperation* p2 = new (GetAllocator())
- HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
-
- HVecMax* v0 = new (GetAllocator()) HVecMax(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ true, kNoDexPc);
- HVecMax* v1 = new (GetAllocator()) HVecMax(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ false, kNoDexPc);
- HVecMax* v2 = new (GetAllocator()) HVecMax(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 2, /*is_unsigned*/ true, kNoDexPc);
- HVecMax* v3 = new (GetAllocator()) HVecMax(
- GetAllocator(), p1, p1, DataType::Type::kUint8, 16, /*is_unsigned*/ false, kNoDexPc);
- HVecMax* v4 = new (GetAllocator()) HVecMax(
- GetAllocator(), p1, p1, DataType::Type::kInt8, 16, /*is_unsigned*/ false, kNoDexPc);
- HVecMax* v5 = new (GetAllocator()) HVecMax(
- GetAllocator(), p2, p2, DataType::Type::kUint16, 8, /*is_unsigned*/ false, kNoDexPc);
- HVecMax* v6 = new (GetAllocator()) HVecMax(
- GetAllocator(), p2, p2, DataType::Type::kInt16, 8, /*is_unsigned*/ false, kNoDexPc);
- HVecMax* max_insns[] = { v0, v1, v2, v3, v4, v5, v6 };
-
- EXPECT_FALSE(p0->CanBeMoved());
- EXPECT_FALSE(p1->CanBeMoved());
- EXPECT_FALSE(p2->CanBeMoved());
-
- for (HVecMax* max_insn : max_insns) {
- EXPECT_TRUE(max_insn->CanBeMoved());
- }
-
- // Deprecated; IsUnsigned() should be removed with the introduction of Uint32 and Uint64.
- EXPECT_TRUE(v0->IsUnsigned());
- EXPECT_FALSE(v1->IsUnsigned());
- EXPECT_TRUE(v2->IsUnsigned());
-
- for (HVecMax* max_insn1 : max_insns) {
- for (HVecMax* max_insn2 : max_insns) {
- EXPECT_EQ(max_insn1 == max_insn2, max_insn1->Equals(max_insn2));
- }
- }
-}
-
TEST_F(NodesVectorTest, VectorAttributesMatterOnHalvingAdd) {
+ HVecOperation* u0 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kUint32, 4, kNoDexPc);
+ HVecOperation* u1 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kUint16, 8, kNoDexPc);
+ HVecOperation* u2 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kUint8, 16, kNoDexPc);
+
HVecOperation* p0 = new (GetAllocator())
HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
HVecOperation* p1 = new (GetAllocator())
- HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc);
+ HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kInt16, 8, kNoDexPc);
HVecOperation* p2 = new (GetAllocator())
- HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
+ HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kInt8, 16, kNoDexPc);
HVecHalvingAdd* v0 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 4,
- /*is_rounded*/ true, /*is_unsigned*/ true, kNoDexPc);
+ GetAllocator(), u0, u0, DataType::Type::kUint32, 4, /*is_rounded*/ true, kNoDexPc);
HVecHalvingAdd* v1 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 4,
- /*is_rounded*/ false, /*is_unsigned*/ true, kNoDexPc);
+ GetAllocator(), u0, u0, DataType::Type::kUint32, 4, /*is_rounded*/ false, kNoDexPc);
HVecHalvingAdd* v2 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 4,
- /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_rounded*/ true, kNoDexPc);
HVecHalvingAdd* v3 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 4,
- /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_rounded*/ false, kNoDexPc);
+
HVecHalvingAdd* v4 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 2,
- /*is_rounded*/ true, /*is_unsigned*/ true, kNoDexPc);
+ GetAllocator(), u1, u1, DataType::Type::kUint16, 8, /*is_rounded*/ true, kNoDexPc);
HVecHalvingAdd* v5 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p1, p1, DataType::Type::kUint8, 16,
- /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
+ GetAllocator(), u1, u1, DataType::Type::kUint16, 8, /*is_rounded*/ false, kNoDexPc);
HVecHalvingAdd* v6 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p1, p1, DataType::Type::kUint8, 16,
- /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
+ GetAllocator(), p1, p1, DataType::Type::kInt16, 8, /*is_rounded*/ true, kNoDexPc);
HVecHalvingAdd* v7 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p1, p1, DataType::Type::kInt8, 16,
- /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
+ GetAllocator(), p1, p1, DataType::Type::kInt16, 8, /*is_rounded*/ false, kNoDexPc);
+
HVecHalvingAdd* v8 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p1, p1, DataType::Type::kInt8, 16,
- /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
+ GetAllocator(), u2, u2, DataType::Type::kUint8, 16, /*is_rounded*/ true, kNoDexPc);
HVecHalvingAdd* v9 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p2, p2, DataType::Type::kUint16, 8,
- /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
+ GetAllocator(), u2, u2, DataType::Type::kUint8, 16, /*is_rounded*/ false, kNoDexPc);
HVecHalvingAdd* v10 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p2, p2, DataType::Type::kUint16, 8,
- /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
+ GetAllocator(), p2, p2, DataType::Type::kInt8, 16, /*is_rounded*/ true, kNoDexPc);
HVecHalvingAdd* v11 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p2, p2, DataType::Type::kInt16, 2,
- /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
- HVecHalvingAdd* v12 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p2, p2, DataType::Type::kInt16, 2,
- /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
- HVecHalvingAdd* hadd_insns[] = { v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12 };
+ GetAllocator(), p2, p2, DataType::Type::kInt8, 16, /*is_rounded*/ false, kNoDexPc);
+ HVecHalvingAdd* hadd_insns[] = { v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11 };
+
+ EXPECT_FALSE(u0->CanBeMoved());
+ EXPECT_FALSE(u1->CanBeMoved());
+ EXPECT_FALSE(u2->CanBeMoved());
EXPECT_FALSE(p0->CanBeMoved());
EXPECT_FALSE(p1->CanBeMoved());
EXPECT_FALSE(p2->CanBeMoved());
@@ -427,26 +337,18 @@ TEST_F(NodesVectorTest, VectorAttributesMatterOnHalvingAdd) {
EXPECT_TRUE(hadd_insn->CanBeMoved());
}
- // Deprecated; IsUnsigned() should be removed with the introduction of Uint32 and Uint64.
- EXPECT_TRUE(v0->IsUnsigned());
- EXPECT_TRUE(v1->IsUnsigned());
- EXPECT_TRUE(!v2->IsUnsigned());
- EXPECT_TRUE(!v3->IsUnsigned());
- EXPECT_TRUE(v4->IsUnsigned());
-
EXPECT_TRUE(v0->IsRounded());
EXPECT_TRUE(!v1->IsRounded());
EXPECT_TRUE(v2->IsRounded());
EXPECT_TRUE(!v3->IsRounded());
EXPECT_TRUE(v4->IsRounded());
- EXPECT_TRUE(v5->IsRounded());
- EXPECT_TRUE(!v6->IsRounded());
- EXPECT_TRUE(v7->IsRounded());
- EXPECT_TRUE(!v8->IsRounded());
- EXPECT_TRUE(v9->IsRounded());
- EXPECT_TRUE(!v10->IsRounded());
- EXPECT_TRUE(v11->IsRounded());
- EXPECT_TRUE(!v12->IsRounded());
+ EXPECT_TRUE(!v5->IsRounded());
+ EXPECT_TRUE(v6->IsRounded());
+ EXPECT_TRUE(!v7->IsRounded());
+ EXPECT_TRUE(v8->IsRounded());
+ EXPECT_TRUE(!v9->IsRounded());
+ EXPECT_TRUE(v10->IsRounded());
+ EXPECT_TRUE(!v11->IsRounded());
for (HVecHalvingAdd* hadd_insn1 : hadd_insns) {
for (HVecHalvingAdd* hadd_insn2 : hadd_insns) {
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 47ef194574..b3f23a0dcd 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1410,7 +1410,7 @@ void OptimizingCompiler::GenerateJitDebugInfo(ArtMethod* method, debug::MethodDe
GetCompilerDriver()->GetInstructionSetFeatures(),
mini_debug_info,
ArrayRef<const debug::MethodDebugInfo>(&info, 1));
- MutexLock mu(Thread::Current(), g_jit_debug_mutex);
+ MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
JITCodeEntry* entry = CreateJITCodeEntry(elf_file);
IncrementJITCodeEntryRefcount(entry, info.code_address);
diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc
index 1d3fe0334d..27f9ac3990 100644
--- a/compiler/optimizing/register_allocation_resolver.cc
+++ b/compiler/optimizing/register_allocation_resolver.cc
@@ -103,6 +103,7 @@ void RegisterAllocationResolver::Resolve(ArrayRef<HInstruction* const> safepoint
case DataType::Type::kFloat64:
slot += long_spill_slots;
FALLTHROUGH_INTENDED;
+ case DataType::Type::kUint64:
case DataType::Type::kInt64:
slot += float_spill_slots;
FALLTHROUGH_INTENDED;
@@ -110,6 +111,7 @@ void RegisterAllocationResolver::Resolve(ArrayRef<HInstruction* const> safepoint
slot += int_spill_slots;
FALLTHROUGH_INTENDED;
case DataType::Type::kReference:
+ case DataType::Type::kUint32:
case DataType::Type::kInt32:
case DataType::Type::kUint16:
case DataType::Type::kUint8:
diff --git a/compiler/optimizing/register_allocator_graph_color.cc b/compiler/optimizing/register_allocator_graph_color.cc
index ad5248e982..fa7ad82316 100644
--- a/compiler/optimizing/register_allocator_graph_color.cc
+++ b/compiler/optimizing/register_allocator_graph_color.cc
@@ -1972,6 +1972,8 @@ void RegisterAllocatorGraphColor::AllocateSpillSlots(ArrayRef<InterferenceNode*
case DataType::Type::kInt16:
int_intervals.push_back(parent);
break;
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unexpected type for interval " << node->GetInterval()->GetType();
UNREACHABLE();
diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc
index cfe63bd758..216fb57a96 100644
--- a/compiler/optimizing/register_allocator_linear_scan.cc
+++ b/compiler/optimizing/register_allocator_linear_scan.cc
@@ -1131,6 +1131,8 @@ void RegisterAllocatorLinearScan::AllocateSpillSlotFor(LiveInterval* interval) {
case DataType::Type::kInt16:
spill_slots = &int_spill_slots_;
break;
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unexpected type for interval " << interval->GetType();
}
diff --git a/openjdkjvmti/OpenjdkJvmTi.cc b/openjdkjvmti/OpenjdkJvmTi.cc
index 027635bbb5..a0c7f40b6f 100644
--- a/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/openjdkjvmti/OpenjdkJvmTi.cc
@@ -1535,6 +1535,7 @@ extern "C" bool ArtPlugin_Initialize() {
MethodUtil::Register(&gEventHandler);
SearchUtil::Register();
HeapUtil::Register();
+ Transformer::Setup();
{
// Make sure we can deopt anything we need to.
diff --git a/openjdkjvmti/fixed_up_dex_file.cc b/openjdkjvmti/fixed_up_dex_file.cc
index a8d2a37fa6..e9522b3984 100644
--- a/openjdkjvmti/fixed_up_dex_file.cc
+++ b/openjdkjvmti/fixed_up_dex_file.cc
@@ -45,14 +45,13 @@
namespace openjdkjvmti {
-static void RecomputeDexChecksum(art::DexFile* dex_file)
- REQUIRES_SHARED(art::Locks::mutator_lock_) {
+static void RecomputeDexChecksum(art::DexFile* dex_file) {
reinterpret_cast<art::DexFile::Header*>(const_cast<uint8_t*>(dex_file->Begin()))->checksum_ =
dex_file->CalculateChecksum();
}
-static void DoDexUnquicken(const art::DexFile& new_dex_file, const art::DexFile& original_dex_file)
- REQUIRES_SHARED(art::Locks::mutator_lock_) {
+static void DoDexUnquicken(const art::DexFile& new_dex_file,
+ const art::DexFile& original_dex_file) {
const art::OatDexFile* oat_dex = original_dex_file.GetOatDexFile();
if (oat_dex == nullptr) {
return;
diff --git a/openjdkjvmti/fixed_up_dex_file.h b/openjdkjvmti/fixed_up_dex_file.h
index 7f05a2930a..594e8a7358 100644
--- a/openjdkjvmti/fixed_up_dex_file.h
+++ b/openjdkjvmti/fixed_up_dex_file.h
@@ -50,8 +50,7 @@ namespace openjdkjvmti {
class FixedUpDexFile {
public:
static std::unique_ptr<FixedUpDexFile> Create(const art::DexFile& original,
- const char* descriptor)
- REQUIRES_SHARED(art::Locks::mutator_lock_);
+ const char* descriptor);
const art::DexFile& GetDexFile() {
return *dex_file_;
diff --git a/openjdkjvmti/ti_class_definition.cc b/openjdkjvmti/ti_class_definition.cc
index 7f2f80009a..1b641cd905 100644
--- a/openjdkjvmti/ti_class_definition.cc
+++ b/openjdkjvmti/ti_class_definition.cc
@@ -45,6 +45,31 @@
namespace openjdkjvmti {
+void ArtClassDefinition::InitializeMemory() const {
+ DCHECK(art::MemMap::kCanReplaceMapping);
+ VLOG(signals) << "Initializing de-quickened memory for dex file of " << name_;
+ CHECK(dex_data_mmap_ != nullptr);
+ CHECK(temp_mmap_ != nullptr);
+ CHECK_EQ(dex_data_mmap_->GetProtect(), PROT_NONE);
+ CHECK_EQ(temp_mmap_->GetProtect(), PROT_READ | PROT_WRITE);
+
+ std::string desc = std::string("L") + name_ + ";";
+ std::unique_ptr<FixedUpDexFile>
+ fixed_dex_file(FixedUpDexFile::Create(*initial_dex_file_unquickened_, desc.c_str()));
+ CHECK(fixed_dex_file.get() != nullptr);
+ CHECK_LE(fixed_dex_file->Size(), temp_mmap_->Size());
+ CHECK_EQ(temp_mmap_->Size(), dex_data_mmap_->Size());
+ // Copy the data to the temp mmap.
+ memcpy(temp_mmap_->Begin(), fixed_dex_file->Begin(), fixed_dex_file->Size());
+
+ // Move the mmap atomically.
+ art::MemMap* source = temp_mmap_.release();
+ std::string error;
+ CHECK(dex_data_mmap_->ReplaceWith(&source, &error)) << "Failed to replace mmap for "
+ << name_ << " because " << error;
+ CHECK(dex_data_mmap_->Protect(PROT_READ));
+}
+
bool ArtClassDefinition::IsModified() const {
// RedefineClasses calls always are 'modified' since they need to change the current_dex_file of
// the class.
@@ -58,6 +83,27 @@ bool ArtClassDefinition::IsModified() const {
return false;
}
+  // The agents never touched the dex_data_ (the mmap is still PROT_NONE).
+ if (dex_data_mmap_ != nullptr && dex_data_mmap_->GetProtect() == PROT_NONE) {
+ if (current_dex_file_.data() == dex_data_mmap_->Begin()) {
+      // The dex_data_ looks like it changed (it is not equal to current_dex_file_) but we never
+      // initialized the dex_data_mmap_. This means the new dex data was filled in without looking
+      // at the initial dex_data_.
+ return true;
+ } else if (dex_data_.data() == dex_data_mmap_->Begin()) {
+ // The dex file used to have modifications but they were not added again.
+ return true;
+ } else {
+      // It's not clear what happened. It's possible that the agent got the current dex file data
+      // from some other source, so we need to initialize everything to see if it is the same.
+      VLOG(signals) << "Lazy dex file for " << name_ << " was never touched but the dex_data_ has "
+                    << "changed! Need to initialize the memory to see if anything changed.";
+ InitializeMemory();
+ }
+ }
+
+  // We can definitely read current_dex_file_ and dex_data_ without causing page faults.
+
// Check if the dex file we want to set is the same as the current one.
// Unfortunately we need to do this check even if no modifications have been done since it could
// be that agents were removed in the mean-time so we still have a different dex file. The dex
@@ -194,6 +240,53 @@ void ArtClassDefinition::InitWithDex(GetOriginalDexFile get_original,
const art::DexFile* quick_dex) {
art::Thread* self = art::Thread::Current();
DCHECK(quick_dex != nullptr);
+ if (art::MemMap::kCanReplaceMapping && kEnableOnDemandDexDequicken) {
+ size_t dequick_size = quick_dex->GetDequickenedSize();
+ std::string mmap_name("anon-mmap-for-redefine: ");
+ mmap_name += name_;
+ std::string error;
+ dex_data_mmap_.reset(art::MemMap::MapAnonymous(mmap_name.c_str(),
+ nullptr,
+ dequick_size,
+ PROT_NONE,
+ /*low_4gb*/ false,
+ /*reuse*/ false,
+ &error));
+ mmap_name += "-TEMP";
+ temp_mmap_.reset(art::MemMap::MapAnonymous(mmap_name.c_str(),
+ nullptr,
+ dequick_size,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb*/ false,
+ /*reuse*/ false,
+ &error));
+ if (UNLIKELY(dex_data_mmap_ != nullptr && temp_mmap_ != nullptr)) {
+ // Need to save the initial dexfile so we don't need to search for it in the fault-handler.
+ initial_dex_file_unquickened_ = quick_dex;
+ dex_data_ = art::ArrayRef<const unsigned char>(dex_data_mmap_->Begin(),
+ dex_data_mmap_->Size());
+ if (from_class_ext_) {
+        // We got the initial dex data from class_ext, so the current dex file must have undergone
+        // redefinition and therefore contains no cdex or quickening data.
+ // We can only do this if it's not a first load.
+ DCHECK(klass_ != nullptr);
+ const art::DexFile& cur_dex = self->DecodeJObject(klass_)->AsClass()->GetDexFile();
+ current_dex_file_ = art::ArrayRef<const unsigned char>(cur_dex.Begin(), cur_dex.Size());
+ } else {
+        // This class hasn't been redefined before. The dequickened current data will be the same
+        // as the dex_data_mmap_ once it is filled in. We don't need to copy anything because the
+        // mmap will not be cleared until after everything is done.
+ current_dex_file_ = art::ArrayRef<const unsigned char>(dex_data_mmap_->Begin(),
+ dequick_size);
+ }
+ return;
+ }
+ }
+ dex_data_mmap_.reset(nullptr);
+ temp_mmap_.reset(nullptr);
+  // Failed to mmap a large enough area (or on-demand dequickening was disabled). This is
+  // unfortunate. Since the size is currently just a guess anyway, we might as well fall back to
+  // fetching the original dex data manually.
get_original(/*out*/&dex_data_memory_);
dex_data_ = art::ArrayRef<const unsigned char>(dex_data_memory_);
if (from_class_ext_) {
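
The InitializeMemory() added above builds the fixed-up dex bytes in a writable temp mapping and then publishes them atomically over the PROT_NONE reservation. A minimal sketch of that replace-a-mapping idiom, assuming raw Linux mmap/mremap semantics rather than the actual MemMap::ReplaceWith implementation:

    // Sketch only: publish initialized bytes over a PROT_NONE reservation so
    // readers never observe a partially written buffer. Linux-specific; g++
    // defines _GNU_SOURCE, which mremap's MREMAP_* flags require.
    #include <sys/mman.h>
    #include <cassert>
    #include <cstddef>
    #include <cstring>

    int main() {
      const size_t kSize = 4096;
      // Target: reserved but unreadable until initialized (like dex_data_mmap_).
      void* target = mmap(nullptr, kSize, PROT_NONE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      // Temp: writable scratch space (like temp_mmap_).
      void* temp = mmap(nullptr, kSize, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      assert(target != MAP_FAILED && temp != MAP_FAILED);

      memset(temp, 0xDE, kSize);  // Stand-in for copying the fixed-up dex file.

      // Atomically move the temp mapping over the target address.
      void* moved = mremap(temp, kSize, kSize,
                           MREMAP_MAYMOVE | MREMAP_FIXED, target);
      assert(moved == target);

      // Drop write access, as InitializeMemory() does with Protect(PROT_READ).
      assert(mprotect(target, kSize, PROT_READ) == 0);
      assert(static_cast<unsigned char*>(target)[0] == 0xDE);

      munmap(target, kSize);
      return 0;
    }
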
diff --git a/openjdkjvmti/ti_class_definition.h b/openjdkjvmti/ti_class_definition.h
index 00847394e7..31c3611e72 100644
--- a/openjdkjvmti/ti_class_definition.h
+++ b/openjdkjvmti/ti_class_definition.h
@@ -32,9 +32,14 @@
#ifndef ART_OPENJDKJVMTI_TI_CLASS_DEFINITION_H_
#define ART_OPENJDKJVMTI_TI_CLASS_DEFINITION_H_
+#include <stddef.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+
#include "art_jvmti.h"
#include "base/array_ref.h"
+#include "mem_map.h"
namespace openjdkjvmti {
@@ -43,13 +48,20 @@ namespace openjdkjvmti {
// redefinition/retransformation function that created it.
class ArtClassDefinition {
public:
+  // Whether we support on-demand dex dequickening using signal handlers.
+ static constexpr bool kEnableOnDemandDexDequicken = true;
+
ArtClassDefinition()
: klass_(nullptr),
loader_(nullptr),
name_(),
protection_domain_(nullptr),
+ dex_data_mmap_(nullptr),
+ temp_mmap_(nullptr),
dex_data_memory_(),
+ initial_dex_file_unquickened_(nullptr),
dex_data_(),
+ current_dex_memory_(),
current_dex_file_(),
redefined_(false),
from_class_ext_(false),
@@ -87,6 +99,12 @@ class ArtClassDefinition {
}
}
+ bool ContainsAddress(uintptr_t ptr) const {
+ return dex_data_mmap_ != nullptr &&
+ reinterpret_cast<uintptr_t>(dex_data_mmap_->Begin()) <= ptr &&
+ reinterpret_cast<uintptr_t>(dex_data_mmap_->End()) > ptr;
+ }
+
bool IsModified() const REQUIRES_SHARED(art::Locks::mutator_lock_);
bool IsInitialized() const {
@@ -108,6 +126,13 @@ class ArtClassDefinition {
return name_;
}
+ bool IsLazyDefinition() const {
+ DCHECK(IsInitialized());
+ return dex_data_mmap_ != nullptr &&
+ dex_data_.data() == dex_data_mmap_->Begin() &&
+ dex_data_mmap_->GetProtect() == PROT_NONE;
+ }
+
jobject GetProtectionDomain() const {
DCHECK(IsInitialized());
return protection_domain_;
@@ -118,6 +143,8 @@ class ArtClassDefinition {
return dex_data_;
}
+ void InitializeMemory() const;
+
private:
jvmtiError InitCommon(art::Thread* self, jclass klass);
@@ -130,9 +157,17 @@ class ArtClassDefinition {
std::string name_;
jobject protection_domain_;
+  // Mmap that will be lazily filled with the original dex file if it needs to be de-quickened or
+  // de-compact-dex'd.
+ mutable std::unique_ptr<art::MemMap> dex_data_mmap_;
+ // This is a temporary mmap we will use to be able to fill the dex file data atomically.
+ mutable std::unique_ptr<art::MemMap> temp_mmap_;
+
// A unique_ptr to the current dex_data if it needs to be cleaned up.
std::vector<unsigned char> dex_data_memory_;
+ const art::DexFile* initial_dex_file_unquickened_;
+
// A ref to the current dex data. This is either dex_data_memory_, or current_dex_file_. This is
// what the dex file will be turned into.
art::ArrayRef<const unsigned char> dex_data_;
diff --git a/openjdkjvmti/transform.cc b/openjdkjvmti/transform.cc
index af838d62b9..7b19a24ba4 100644
--- a/openjdkjvmti/transform.cc
+++ b/openjdkjvmti/transform.cc
@@ -29,6 +29,9 @@
* questions.
*/
+#include <stddef.h>
+#include <sys/types.h>
+
#include <unordered_map>
#include <unordered_set>
@@ -41,6 +44,7 @@
#include "dex/dex_file_types.h"
#include "dex/utf.h"
#include "events-inl.h"
+#include "fault_handler.h"
#include "gc_root-inl.h"
#include "globals.h"
#include "jni_env_ext-inl.h"
@@ -63,6 +67,174 @@
namespace openjdkjvmti {
+// A FaultHandler that will deal with initializing ClassDefinitions when they are actually needed.
+class TransformationFaultHandler FINAL : public art::FaultHandler {
+ public:
+ explicit TransformationFaultHandler(art::FaultManager* manager)
+ : art::FaultHandler(manager),
+ uninitialized_class_definitions_lock_("JVMTI Initialized class definitions lock",
+ art::LockLevel::kSignalHandlingLock),
+ class_definition_initialized_cond_("JVMTI Initialized class definitions condition",
+ uninitialized_class_definitions_lock_) {
+ manager->AddHandler(this, /* generated_code */ false);
+ }
+
+ ~TransformationFaultHandler() {
+ art::MutexLock mu(art::Thread::Current(), uninitialized_class_definitions_lock_);
+ uninitialized_class_definitions_.clear();
+ }
+
+ bool Action(int sig, siginfo_t* siginfo, void* context ATTRIBUTE_UNUSED) OVERRIDE {
+ DCHECK_EQ(sig, SIGSEGV);
+ art::Thread* self = art::Thread::Current();
+ if (UNLIKELY(uninitialized_class_definitions_lock_.IsExclusiveHeld(self))) {
+ if (self != nullptr) {
+ LOG(FATAL) << "Recursive call into Transformation fault handler!";
+ UNREACHABLE();
+ } else {
+ LOG(ERROR) << "Possible deadlock due to recursive signal delivery of segv.";
+ }
+ }
+ uintptr_t ptr = reinterpret_cast<uintptr_t>(siginfo->si_addr);
+ ArtClassDefinition* res = nullptr;
+
+ {
+      // NB Technically using a mutex and condition variables here is non-POSIX compliant, but
+      // everything should be fine since both the glibc and bionic implementations of mutexes and
+      // condition variables work fine so long as the thread was not interrupted during a
+      // lock/unlock (which it wasn't) on all architectures we care about.
+ art::MutexLock mu(self, uninitialized_class_definitions_lock_);
+ auto it = std::find_if(uninitialized_class_definitions_.begin(),
+ uninitialized_class_definitions_.end(),
+ [&](const auto op) { return op->ContainsAddress(ptr); });
+ if (it != uninitialized_class_definitions_.end()) {
+ res = *it;
+ // Remove the class definition.
+ uninitialized_class_definitions_.erase(it);
+        // Put it in the initializing list.
+ initializing_class_definitions_.push_back(res);
+ } else {
+ // Wait for the ptr to be initialized (if it is currently initializing).
+ while (DefinitionIsInitializing(ptr)) {
+ WaitForClassInitializationToFinish();
+ }
+ // Return true (continue with user code) if we find that the definition has been
+ // initialized. Return false (continue on to next signal handler) if the definition is not
+ // initialized or found.
+ return std::find_if(initialized_class_definitions_.begin(),
+ initialized_class_definitions_.end(),
+ [&](const auto op) { return op->ContainsAddress(ptr); }) !=
+            initialized_class_definitions_.end();
+ }
+ }
+
+ VLOG(signals) << "Lazy initialization of dex file for transformation of " << res->GetName()
+ << " during SEGV";
+ res->InitializeMemory();
+
+ {
+ art::MutexLock mu(self, uninitialized_class_definitions_lock_);
+ // Move to initialized state and notify waiters.
+ initializing_class_definitions_.erase(std::find(initializing_class_definitions_.begin(),
+ initializing_class_definitions_.end(),
+ res));
+ initialized_class_definitions_.push_back(res);
+ class_definition_initialized_cond_.Broadcast(self);
+ }
+
+ return true;
+ }
+
+ void RemoveDefinition(ArtClassDefinition* def) REQUIRES(!uninitialized_class_definitions_lock_) {
+ art::MutexLock mu(art::Thread::Current(), uninitialized_class_definitions_lock_);
+ auto it = std::find(uninitialized_class_definitions_.begin(),
+ uninitialized_class_definitions_.end(),
+ def);
+ if (it != uninitialized_class_definitions_.end()) {
+ uninitialized_class_definitions_.erase(it);
+ return;
+ }
+ while (std::find(initializing_class_definitions_.begin(),
+ initializing_class_definitions_.end(),
+ def) != initializing_class_definitions_.end()) {
+ WaitForClassInitializationToFinish();
+ }
+ it = std::find(initialized_class_definitions_.begin(),
+ initialized_class_definitions_.end(),
+ def);
+ CHECK(it != initialized_class_definitions_.end()) << "Could not find class definition for "
+ << def->GetName();
+ initialized_class_definitions_.erase(it);
+ }
+
+ void AddArtDefinition(ArtClassDefinition* def) REQUIRES(!uninitialized_class_definitions_lock_) {
+ DCHECK(def->IsLazyDefinition());
+ art::MutexLock mu(art::Thread::Current(), uninitialized_class_definitions_lock_);
+ uninitialized_class_definitions_.push_back(def);
+ }
+
+ private:
+ bool DefinitionIsInitializing(uintptr_t ptr) REQUIRES(uninitialized_class_definitions_lock_) {
+ return std::find_if(initializing_class_definitions_.begin(),
+ initializing_class_definitions_.end(),
+ [&](const auto op) { return op->ContainsAddress(ptr); }) !=
+ initializing_class_definitions_.end();
+ }
+
+ void WaitForClassInitializationToFinish() REQUIRES(uninitialized_class_definitions_lock_) {
+ class_definition_initialized_cond_.Wait(art::Thread::Current());
+ }
+
+ art::Mutex uninitialized_class_definitions_lock_ ACQUIRED_BEFORE(art::Locks::abort_lock_);
+ art::ConditionVariable class_definition_initialized_cond_
+ GUARDED_BY(uninitialized_class_definitions_lock_);
+
+ // A list of the class definitions that have a non-readable map.
+ std::vector<ArtClassDefinition*> uninitialized_class_definitions_
+ GUARDED_BY(uninitialized_class_definitions_lock_);
+
+  // A list of class definitions that are currently undergoing unquickening. Threads should wait
+  // until the definition is no longer in this list before returning.
+ std::vector<ArtClassDefinition*> initializing_class_definitions_
+ GUARDED_BY(uninitialized_class_definitions_lock_);
+
+  // A list of class definitions that are already unquickened. Threads should return immediately
+  // if the definition they are looking for is in this list.
+ std::vector<ArtClassDefinition*> initialized_class_definitions_
+ GUARDED_BY(uninitialized_class_definitions_lock_);
+};
+
+static TransformationFaultHandler* gTransformFaultHandler = nullptr;
+
+void Transformer::Setup() {
+  // Although we create the fault handler here, it is actually owned by 'art::fault_manager',
+  // which will take care of destroying it.
+ if (art::MemMap::kCanReplaceMapping && ArtClassDefinition::kEnableOnDemandDexDequicken) {
+ gTransformFaultHandler = new TransformationFaultHandler(&art::fault_manager);
+ }
+}
+
+// Simple helper to add and remove the class definition from the fault handler.
+class ScopedDefinitionHandler {
+ public:
+ explicit ScopedDefinitionHandler(ArtClassDefinition* def)
+ : def_(def), is_lazy_(def_->IsLazyDefinition()) {
+ if (is_lazy_) {
+ gTransformFaultHandler->AddArtDefinition(def_);
+ }
+ }
+
+ ~ScopedDefinitionHandler() {
+ if (is_lazy_) {
+ gTransformFaultHandler->RemoveDefinition(def_);
+ }
+ }
+
+ private:
+ ArtClassDefinition* def_;
+ bool is_lazy_;
+};
+
// Initialize templates.
template
void Transformer::TransformSingleClassDirect<ArtJvmtiEvent::kClassFileLoadHookNonRetransformable>(
@@ -78,6 +250,7 @@ void Transformer::TransformSingleClassDirect(EventHandler* event_handler,
static_assert(kEvent == ArtJvmtiEvent::kClassFileLoadHookNonRetransformable ||
kEvent == ArtJvmtiEvent::kClassFileLoadHookRetransformable,
"bad event type");
+ ScopedDefinitionHandler handler(def);
jint new_len = -1;
unsigned char* new_data = nullptr;
art::ArrayRef<const unsigned char> dex_data = def->GetDexData();
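
TransformationFaultHandler above is SIGSEGV-driven lazy initialization: reserve unreadable memory, let the first touch fault, materialize the contents inside the handler, and resume the faulting access. A stripped-down, single-threaded sketch of the pattern using plain sigaction (not the art::FaultManager API; Linux-specific, and memset/mprotect in a signal handler are not strictly async-signal-safe):

    #include <signal.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static void* g_lazy = nullptr;
    static const size_t kPageSize = 4096;

    static void Handler(int, siginfo_t* info, void*) {
      uintptr_t addr = reinterpret_cast<uintptr_t>(info->si_addr);
      uintptr_t base = reinterpret_cast<uintptr_t>(g_lazy);
      if (addr < base || addr >= base + kPageSize) {
        _exit(1);  // Not our mapping; a real handler would chain to the next one.
      }
      // First touch: materialize the contents, then let the access retry.
      mprotect(g_lazy, kPageSize, PROT_READ | PROT_WRITE);
      memset(g_lazy, 42, kPageSize);  // Stand-in for the dex unquickening work.
      mprotect(g_lazy, kPageSize, PROT_READ);
    }

    int main() {
      g_lazy = mmap(nullptr, kPageSize, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

      struct sigaction sa;
      memset(&sa, 0, sizeof(sa));
      sa.sa_sigaction = Handler;
      sa.sa_flags = SA_SIGINFO;
      sigaction(SIGSEGV, &sa, nullptr);

      // This read faults once; the handler fills the page and the load retries.
      printf("lazy[0] = %d\n", static_cast<unsigned char*>(g_lazy)[0]);
      return 0;
    }

The lists and condition variable in the real handler exist because several threads can fault on the same definition concurrently; only one initializes it while the rest wait.
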
diff --git a/openjdkjvmti/transform.h b/openjdkjvmti/transform.h
index f43af174f0..8bbeda4b09 100644
--- a/openjdkjvmti/transform.h
+++ b/openjdkjvmti/transform.h
@@ -48,6 +48,8 @@ jvmtiError GetClassLocation(ArtJvmTiEnv* env, jclass klass, /*out*/std::string*
class Transformer {
public:
+ static void Setup();
+
template<ArtJvmtiEvent kEvent>
static void TransformSingleClassDirect(
EventHandler* event_handler,
diff --git a/profman/profile_assistant.cc b/profman/profile_assistant.cc
index ff02b5d59f..a00b1fa5bd 100644
--- a/profman/profile_assistant.cc
+++ b/profman/profile_assistant.cc
@@ -31,12 +31,13 @@ static constexpr const uint32_t kMinNewClassesPercentChangeForCompilation = 2;
ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfilesInternal(
const std::vector<ScopedFlock>& profile_files,
- const ScopedFlock& reference_profile_file) {
+ const ScopedFlock& reference_profile_file,
+ const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn) {
DCHECK(!profile_files.empty());
ProfileCompilationInfo info;
// Load the reference profile.
- if (!info.Load(reference_profile_file->Fd())) {
+ if (!info.Load(reference_profile_file->Fd(), /*merge_classes*/ true, filter_fn)) {
LOG(WARNING) << "Could not load reference profile file";
return kErrorBadProfiles;
}
@@ -48,7 +49,7 @@ ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfilesInternal(
// Merge all current profiles.
for (size_t i = 0; i < profile_files.size(); i++) {
ProfileCompilationInfo cur_info;
- if (!cur_info.Load(profile_files[i]->Fd())) {
+ if (!cur_info.Load(profile_files[i]->Fd(), /*merge_classes*/ true, filter_fn)) {
LOG(WARNING) << "Could not load profile file at index " << i;
return kErrorBadProfiles;
}
@@ -122,7 +123,8 @@ class ScopedFlockList {
ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfiles(
const std::vector<int>& profile_files_fd,
- int reference_profile_file_fd) {
+ int reference_profile_file_fd,
+ const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn) {
DCHECK_GE(reference_profile_file_fd, 0);
std::string error;
@@ -143,12 +145,15 @@ ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfiles(
return kErrorCannotLock;
}
- return ProcessProfilesInternal(profile_files.Get(), reference_profile_file);
+ return ProcessProfilesInternal(profile_files.Get(),
+ reference_profile_file,
+ filter_fn);
}
ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfiles(
const std::vector<std::string>& profile_files,
- const std::string& reference_profile_file) {
+ const std::string& reference_profile_file,
+ const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn) {
std::string error;
ScopedFlockList profile_files_list(profile_files.size());
@@ -164,7 +169,9 @@ ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfiles(
return kErrorCannotLock;
}
- return ProcessProfilesInternal(profile_files_list.Get(), locked_reference_profile_file);
+ return ProcessProfilesInternal(profile_files_list.Get(),
+ locked_reference_profile_file,
+ filter_fn);
}
} // namespace art
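
The ProfileLoadFilterFn threaded through ProcessProfiles here is just a predicate over (dex_location, checksum) pairs that Load() consults per dex file. A standalone sketch of that shape; the alias and test keys below are illustrative, not code from this change:

    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <set>
    #include <string>
    #include <utility>

    // Same shape as ProfileCompilationInfo::ProfileLoadFilterFn.
    using ProfileLoadFilterFn = std::function<bool(const std::string&, uint32_t)>;

    int main() {
      // Keys of the dex files we want to keep: (location, checksum).
      std::set<std::pair<std::string, uint32_t>> wanted = {
          {"base.apk", 0x1234u}, {"base.apk!classes2.dex", 0x5678u}};

      ProfileLoadFilterFn filter = [&wanted](const std::string& loc, uint32_t cs) {
        return wanted.count({loc, cs}) != 0;
      };

      std::cout << filter("base.apk", 0x1234u) << "\n";   // 1: kept
      std::cout << filter("other.apk", 0x9999u) << "\n";  // 0: filtered out
      return 0;
    }
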
diff --git a/profman/profile_assistant.h b/profman/profile_assistant.h
index be703abda8..ee555840d7 100644
--- a/profman/profile_assistant.h
+++ b/profman/profile_assistant.h
@@ -53,16 +53,21 @@ class ProfileAssistant {
//
static ProcessingResult ProcessProfiles(
const std::vector<std::string>& profile_files,
- const std::string& reference_profile_file);
+ const std::string& reference_profile_file,
+ const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn
+ = ProfileCompilationInfo::ProfileFilterFnAcceptAll);
static ProcessingResult ProcessProfiles(
const std::vector<int>& profile_files_fd_,
- int reference_profile_file_fd);
+ int reference_profile_file_fd,
+ const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn
+ = ProfileCompilationInfo::ProfileFilterFnAcceptAll);
private:
static ProcessingResult ProcessProfilesInternal(
const std::vector<ScopedFlock>& profile_files,
- const ScopedFlock& reference_profile_file);
+ const ScopedFlock& reference_profile_file,
+ const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn);
DISALLOW_COPY_AND_ASSIGN(ProfileAssistant);
};
diff --git a/profman/profile_assistant_test.cc b/profman/profile_assistant_test.cc
index c75f3e9688..79310ac166 100644
--- a/profman/profile_assistant_test.cc
+++ b/profman/profile_assistant_test.cc
@@ -16,6 +16,7 @@
#include <gtest/gtest.h>
+#include "android-base/strings.h"
#include "art_method-inl.h"
#include "base/unix_file/fd_file.h"
#include "common_runtime_test.h"
@@ -51,6 +52,28 @@ class ProfileAssistantTest : public CommonRuntimeTest {
uint32_t dex_location_checksum1 = checksum;
std::string dex_location2 = "location2" + id;
uint32_t dex_location_checksum2 = 10 * checksum;
+ SetupProfile(dex_location1,
+ dex_location_checksum1,
+ dex_location2,
+ dex_location_checksum2,
+ number_of_methods,
+ number_of_classes,
+ profile,
+ info,
+ start_method_index,
+ reverse_dex_write_order);
+ }
+
+ void SetupProfile(const std::string& dex_location1,
+ uint32_t dex_location_checksum1,
+ const std::string& dex_location2,
+ uint32_t dex_location_checksum2,
+ uint16_t number_of_methods,
+ uint16_t number_of_classes,
+ const ScratchFile& profile,
+ ProfileCompilationInfo* info,
+ uint16_t start_method_index = 0,
+ bool reverse_dex_write_order = false) {
for (uint16_t i = start_method_index; i < start_method_index + number_of_methods; i++) {
// reverse_dex_write_order controls the order in which the dex files will be added to
// the profile and thus written to disk.
@@ -1128,4 +1151,89 @@ TEST_F(ProfileAssistantTest, DumpOnly) {
}
}
+TEST_F(ProfileAssistantTest, MergeProfilesWithFilter) {
+ ScratchFile profile1;
+ ScratchFile profile2;
+ ScratchFile reference_profile;
+
+ std::vector<int> profile_fds({
+ GetFd(profile1),
+ GetFd(profile2)});
+ int reference_profile_fd = GetFd(reference_profile);
+
+ // Use a real dex file to generate profile test data.
+ // The file will be used during merging to filter unwanted data.
+ std::vector<std::unique_ptr<const DexFile>> dex_files = OpenTestDexFiles("ProfileTestMultiDex");
+ const DexFile& d1 = *dex_files[0];
+ const DexFile& d2 = *dex_files[1];
+ // The new profile info will contain the methods with indices 0-100.
+ const uint16_t kNumberOfMethodsToEnableCompilation = 100;
+ ProfileCompilationInfo info1;
+ SetupProfile(d1.GetLocation(), d1.GetLocationChecksum(), "p1", 1,
+ kNumberOfMethodsToEnableCompilation, 0, profile1, &info1);
+ ProfileCompilationInfo info2;
+ SetupProfile(d2.GetLocation(), d2.GetLocationChecksum(), "p2", 2,
+ kNumberOfMethodsToEnableCompilation, 0, profile2, &info2);
+
+
+ // The reference profile info will contain the methods with indices 50-150.
+ const uint16_t kNumberOfMethodsAlreadyCompiled = 100;
+ ProfileCompilationInfo reference_info;
+ SetupProfile(d1.GetLocation(), d1.GetLocationChecksum(), "p1", 1,
+ kNumberOfMethodsAlreadyCompiled, 0, reference_profile,
+ &reference_info, kNumberOfMethodsToEnableCompilation / 2);
+
+ // Run profman and pass the dex file with --apk-fd.
+ android::base::unique_fd apk_fd(
+ open(GetTestDexFileName("ProfileTestMultiDex").c_str(), O_RDONLY));
+ ASSERT_GE(apk_fd.get(), 0);
+
+ std::string profman_cmd = GetProfmanCmd();
+ std::vector<std::string> argv_str;
+ argv_str.push_back(profman_cmd);
+ argv_str.push_back("--profile-file-fd=" + std::to_string(profile1.GetFd()));
+ argv_str.push_back("--profile-file-fd=" + std::to_string(profile2.GetFd()));
+ argv_str.push_back("--reference-profile-file-fd=" + std::to_string(reference_profile.GetFd()));
+ argv_str.push_back("--apk-fd=" + std::to_string(apk_fd.get()));
+ std::string error;
+
+ EXPECT_EQ(ExecAndReturnCode(argv_str, &error), 0) << error;
+
+ // Verify that we can load the result.
+
+ ProfileCompilationInfo result;
+ ASSERT_TRUE(reference_profile.GetFile()->ResetOffset());
+ ASSERT_TRUE(result.Load(reference_profile_fd));
+
+
+ ASSERT_TRUE(profile1.GetFile()->ResetOffset());
+ ASSERT_TRUE(profile2.GetFile()->ResetOffset());
+ ASSERT_TRUE(reference_profile.GetFile()->ResetOffset());
+
+ // Verify that the result filtered out data not belonging to the dex file.
+ // This is equivalent to checking that the result is equal to the merging of
+ // all profiles while filtering out data not belonging to the dex file.
+
+ ProfileCompilationInfo::ProfileLoadFilterFn filter_fn =
+ [&d1, &d2](const std::string& dex_location, uint32_t checksum) -> bool {
+ return (dex_location == ProfileCompilationInfo::GetProfileDexFileKey(d1.GetLocation())
+ && checksum == d1.GetLocationChecksum())
+ || (dex_location == ProfileCompilationInfo::GetProfileDexFileKey(d2.GetLocation())
+ && checksum == d2.GetLocationChecksum());
+ };
+
+ ProfileCompilationInfo info1_filter;
+ ProfileCompilationInfo info2_filter;
+ ProfileCompilationInfo expected;
+
+  info1_filter.Load(profile1.GetFd(), /*merge_classes*/ true, filter_fn);
+ info2_filter.Load(profile2.GetFd(), /*merge_classes*/ true, filter_fn);
+ expected.Load(reference_profile.GetFd(), /*merge_classes*/ true, filter_fn);
+
+ ASSERT_TRUE(expected.MergeWith(info1_filter));
+ ASSERT_TRUE(expected.MergeWith(info2_filter));
+
+ ASSERT_TRUE(expected.Equals(result));
+}
+
} // namespace art
diff --git a/profman/profman.cc b/profman/profman.cc
index ea6c382a81..387ce8dfae 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -18,6 +18,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <sys/file.h>
+#include <sys/param.h>
#include <sys/stat.h>
#include <unistd.h>
@@ -297,6 +298,22 @@ class ProfMan FINAL {
}
}
+ struct ProfileFilterKey {
+ ProfileFilterKey(const std::string& dex_location, uint32_t checksum)
+ : dex_location_(dex_location), checksum_(checksum) {}
+ const std::string dex_location_;
+ uint32_t checksum_;
+
+ bool operator==(const ProfileFilterKey& other) const {
+ return checksum_ == other.checksum_ && dex_location_ == other.dex_location_;
+ }
+ bool operator<(const ProfileFilterKey& other) const {
+ return checksum_ == other.checksum_
+ ? dex_location_ < other.dex_location_
+ : checksum_ < other.checksum_;
+ }
+ };
+
ProfileAssistant::ProcessingResult ProcessProfiles() {
// Validate that at least one profile file was passed, as well as a reference profile.
if (profile_files_.empty() && profile_files_fd_.empty()) {
@@ -310,6 +327,27 @@ class ProfMan FINAL {
Usage("Options --profile-file-fd and --reference-profile-file-fd "
"should only be used together");
}
+
+ // Check if we have any apks which we should use to filter the profile data.
+ std::set<ProfileFilterKey> profile_filter_keys;
+ if (!GetProfileFilterKeyFromApks(&profile_filter_keys)) {
+ return ProfileAssistant::kErrorIO;
+ }
+
+ // Build the profile filter function. If the set of keys is empty it means we
+ // don't have any apks; as such we do not filter anything.
+ const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn =
+ [profile_filter_keys](const std::string& dex_location, uint32_t checksum) {
+ if (profile_filter_keys.empty()) {
+ // No --apk was specified. Accept all dex files.
+ return true;
+ } else {
+ bool res = profile_filter_keys.find(
+ ProfileFilterKey(dex_location, checksum)) != profile_filter_keys.end();
+ return res;
+ }
+ };
+
ProfileAssistant::ProcessingResult result;
if (profile_files_.empty()) {
@@ -317,10 +355,13 @@ class ProfMan FINAL {
// so don't check the usage.
File file(reference_profile_file_fd_, false);
result = ProfileAssistant::ProcessProfiles(profile_files_fd_,
- reference_profile_file_fd_);
+ reference_profile_file_fd_,
+ filter_fn);
CloseAllFds(profile_files_fd_, "profile_files_fd_");
} else {
- result = ProfileAssistant::ProcessProfiles(profile_files_, reference_profile_file_);
+ result = ProfileAssistant::ProcessProfiles(profile_files_,
+ reference_profile_file_,
+ filter_fn);
}
return result;
}
@@ -329,18 +370,48 @@ class ProfMan FINAL {
return skip_apk_verification_;
}
- void OpenApkFilesFromLocations(std::vector<std::unique_ptr<const DexFile>>* dex_files) const {
+ bool GetProfileFilterKeyFromApks(std::set<ProfileFilterKey>* profile_filter_keys) {
+ auto process_fn = [profile_filter_keys](std::unique_ptr<const DexFile>&& dex_file) {
+ // Store the profile key of the location instead of the location itself.
+ // This will make the matching in the profile filter method much easier.
+ profile_filter_keys->emplace(ProfileCompilationInfo::GetProfileDexFileKey(
+ dex_file->GetLocation()), dex_file->GetLocationChecksum());
+ };
+ return OpenApkFilesFromLocations(process_fn);
+ }
+
+ bool OpenApkFilesFromLocations(std::vector<std::unique_ptr<const DexFile>>* dex_files) {
+ auto process_fn = [dex_files](std::unique_ptr<const DexFile>&& dex_file) {
+ dex_files->emplace_back(std::move(dex_file));
+ };
+ return OpenApkFilesFromLocations(process_fn);
+ }
+
+ bool OpenApkFilesFromLocations(
+ std::function<void(std::unique_ptr<const DexFile>&&)> process_fn) {
bool use_apk_fd_list = !apks_fd_.empty();
if (use_apk_fd_list) {
// Get the APKs from the collection of FDs.
- CHECK_EQ(dex_locations_.size(), apks_fd_.size());
+ if (dex_locations_.empty()) {
+        // Try to compute the dex locations from the file paths of the file descriptors.
+        // This will make it easier to invoke profman with --apk-fd without
+        // being forced to pass --dex-location when the location would be the apk path.
+ if (!ComputeDexLocationsFromApkFds()) {
+ return false;
+ }
+ } else {
+ if (dex_locations_.size() != apks_fd_.size()) {
+ Usage("The number of apk-fds must match the number of dex-locations.");
+ }
+ }
} else if (!apk_files_.empty()) {
- // Get the APKs from the collection of filenames.
- CHECK_EQ(dex_locations_.size(), apk_files_.size());
+ if (dex_locations_.size() != apk_files_.size()) {
+ Usage("The number of apk-fds must match the number of dex-locations.");
+ }
} else {
// No APKs were specified.
CHECK(dex_locations_.empty());
- return;
+ return true;
}
static constexpr bool kVerifyChecksum = true;
for (size_t i = 0; i < dex_locations_.size(); ++i) {
@@ -355,8 +426,8 @@ class ProfMan FINAL {
&error_msg,
&dex_files_for_location)) {
} else {
- LOG(WARNING) << "OpenZip failed for '" << dex_locations_[i] << "' " << error_msg;
- continue;
+ LOG(ERROR) << "OpenZip failed for '" << dex_locations_[i] << "' " << error_msg;
+ return false;
}
} else {
if (dex_file_loader.Open(apk_files_[i].c_str(),
@@ -366,14 +437,36 @@ class ProfMan FINAL {
&error_msg,
&dex_files_for_location)) {
} else {
- LOG(WARNING) << "Open failed for '" << dex_locations_[i] << "' " << error_msg;
- continue;
+ LOG(ERROR) << "Open failed for '" << dex_locations_[i] << "' " << error_msg;
+ return false;
}
}
for (std::unique_ptr<const DexFile>& dex_file : dex_files_for_location) {
- dex_files->emplace_back(std::move(dex_file));
+ process_fn(std::move(dex_file));
}
}
+ return true;
+ }
+
+ // Get the dex locations from the apk fds.
+  // The method reads the links from /proc/self/fd/ to find the original apk paths
+ // and puts them in the dex_locations_ vector.
+ bool ComputeDexLocationsFromApkFds() {
+ // We can't use a char array of PATH_MAX size without exceeding the frame size.
+ // So we use a vector as the buffer for the path.
+ std::vector<char> buffer(PATH_MAX, 0);
+ for (size_t i = 0; i < apks_fd_.size(); ++i) {
+ std::string fd_path = "/proc/self/fd/" + std::to_string(apks_fd_[i]);
+ ssize_t len = readlink(fd_path.c_str(), buffer.data(), buffer.size() - 1);
+ if (len == -1) {
+ PLOG(ERROR) << "Could not open path from fd";
+ return false;
+ }
+
+ buffer[len] = '\0';
+ dex_locations_.push_back(buffer.data());
+ }
+ return true;
}
std::unique_ptr<const ProfileCompilationInfo> LoadProfile(const std::string& filename, int fd) {
@@ -416,8 +509,6 @@ class ProfMan FINAL {
static const char* kOrdinaryProfile = "=== profile ===";
static const char* kReferenceProfile = "=== reference profile ===";
- // Open apk/zip files and and read dex files.
- MemMap::Init(); // for ZipArchive::OpenFromFd
std::vector<std::unique_ptr<const DexFile>> dex_files;
OpenApkFilesFromLocations(&dex_files);
std::string dump;
@@ -553,8 +644,7 @@ class ProfMan FINAL {
reference_profile_file_.empty() && !FdIsValid(reference_profile_file_fd_)) {
Usage("No profile files or reference profile specified.");
}
- // Open apk/zip files and and read dex files.
- MemMap::Init(); // for ZipArchive::OpenFromFd
+
// Open the dex files to get the names for classes.
std::vector<std::unique_ptr<const DexFile>> dex_files;
OpenApkFilesFromLocations(&dex_files);
@@ -948,8 +1038,6 @@ class ProfMan FINAL {
Usage("Profile must be specified with --reference-profile-file or "
"--reference-profile-file-fd");
}
- // for ZipArchive::OpenFromFd
- MemMap::Init();
// Open the profile output file if needed.
int fd = OpenReferenceProfile();
if (!FdIsValid(fd)) {
@@ -984,8 +1072,6 @@ class ProfMan FINAL {
}
int CreateBootProfile() {
- // Initialize memmap since it's required to open dex files.
- MemMap::Init();
// Open the profile output file.
const int reference_fd = OpenReferenceProfile();
if (!FdIsValid(reference_fd)) {
@@ -1065,8 +1151,6 @@ class ProfMan FINAL {
test_profile_class_percentage_,
test_profile_seed_);
} else {
- // Initialize MemMap for ZipArchive::OpenFromFd.
- MemMap::Init();
// Open the dex files to look up classes and methods.
std::vector<std::unique_ptr<const DexFile>> dex_files;
OpenApkFilesFromLocations(&dex_files);
@@ -1089,7 +1173,7 @@ class ProfMan FINAL {
return copy_and_update_profile_key_;
}
- bool CopyAndUpdateProfileKey() const {
+ bool CopyAndUpdateProfileKey() {
// Validate that at least one profile file was passed, as well as a reference profile.
if (!(profile_files_.size() == 1 ^ profile_files_fd_.size() == 1)) {
Usage("Only one profile file should be specified.");
@@ -1133,7 +1217,8 @@ class ProfMan FINAL {
static void CloseAllFds(const std::vector<int>& fds, const char* descriptor) {
for (size_t i = 0; i < fds.size(); i++) {
if (close(fds[i]) < 0) {
- PLOG(WARNING) << "Failed to close descriptor for " << descriptor << " at index " << i;
+ PLOG(WARNING) << "Failed to close descriptor for "
+ << descriptor << " at index " << i << ": " << fds[i];
}
}
}
@@ -1176,6 +1261,9 @@ static int profman(int argc, char** argv) {
// Parse arguments. Argument mistakes will lead to exit(EXIT_FAILURE) in UsageError.
profman.ParseArgs(argc, argv);
+ // Initialize MemMap for ZipArchive::OpenFromFd.
+ MemMap::Init();
+
if (profman.ShouldGenerateTestProfile()) {
return profman.GenerateTestProfile();
}
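
ComputeDexLocationsFromApkFds() above recovers each apk path by resolving the fd's /proc/self/fd symlink. A standalone sketch of that resolution step (Linux-only; the file opened in main() is just an example):

    #include <fcntl.h>
    #include <unistd.h>
    #include <climits>
    #include <iostream>
    #include <string>
    #include <vector>

    // Returns the target of /proc/self/fd/<fd>, or "" on failure.
    static std::string PathFromFd(int fd) {
      std::string link = "/proc/self/fd/" + std::to_string(fd);
      std::vector<char> buf(PATH_MAX, 0);  // heap buffer, as in the change above
      ssize_t len = readlink(link.c_str(), buf.data(), buf.size() - 1);
      if (len < 0) {
        return "";
      }
      buf[len] = '\0';  // readlink() does not NUL-terminate
      return std::string(buf.data());
    }

    int main() {
      int fd = open("/etc/hosts", O_RDONLY);
      if (fd >= 0) {
        std::cout << PathFromFd(fd) << "\n";  // prints /etc/hosts
        close(fd);
      }
      return 0;
    }

Note this yields the path the fd was opened with, which is why the change can only infer --dex-location when the location really is the apk path.
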
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index bd51809c22..4be4b12611 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -186,10 +186,9 @@ class StubTest : public CommonRuntimeTest {
"stp x2, x3, [sp, #16]\n\t"
"stp x4, x5, [sp, #32]\n\t"
"stp x6, x7, [sp, #48]\n\t"
- // To be extra defensive, store x20. We do this because some of the stubs might make a
+      // To be extra defensive, store x20 and x21. We do this because some of the stubs might make a
// transition into the runtime via the blr instruction below and *not* save x20.
- "str x20, [sp, #64]\n\t"
- // 8 byte buffer
+ "stp x20, x21, [sp, #64]\n\t"
"sub sp, sp, #16\n\t" // Reserve stack space, 16B aligned
".cfi_adjust_cfa_offset 16\n\t"
@@ -288,7 +287,7 @@ class StubTest : public CommonRuntimeTest {
"ldp x2, x3, [sp, #16]\n\t"
"ldp x4, x5, [sp, #32]\n\t"
"ldp x6, x7, [sp, #48]\n\t"
- "ldr x20, [sp, #64]\n\t"
+ "ldp x20, x21, [sp, #64]\n\t"
"add sp, sp, #80\n\t" // Free stack space, now sp as on entry
".cfi_adjust_cfa_offset -80\n\t"
@@ -312,8 +311,9 @@ class StubTest : public CommonRuntimeTest {
// -fstack-protector-strong. According to AAPCS64 registers x9-x15 are caller-saved,
// which means we should unclobber one of the callee-saved registers that are unused.
// Here we use x20.
+ // http://b/72613441, Clang 7.0 asks for one more register, so we do not reserve x21.
: "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19",
- "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x30",
+ "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x30",
"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
"d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
"d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 9f17ad051c..a4c32dd814 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -74,6 +74,7 @@ Uninterruptible Roles::uninterruptible_;
ReaderWriterMutex* Locks::jni_globals_lock_ = nullptr;
Mutex* Locks::jni_weak_globals_lock_ = nullptr;
ReaderWriterMutex* Locks::dex_lock_ = nullptr;
+Mutex* Locks::native_debug_interface_lock_ = nullptr;
std::vector<BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_;
Atomic<const BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_guard_;
@@ -1073,6 +1074,7 @@ void Locks::Init() {
DCHECK(unexpected_signal_lock_ != nullptr);
DCHECK(user_code_suspension_lock_ != nullptr);
DCHECK(dex_lock_ != nullptr);
+ DCHECK(native_debug_interface_lock_ != nullptr);
} else {
// Create global locks in level order from highest lock level to lowest.
LockLevel current_lock_level = kInstrumentEntrypointsLock;
@@ -1228,6 +1230,10 @@ void Locks::Init() {
DCHECK(unexpected_signal_lock_ == nullptr);
unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);
+ UPDATE_CURRENT_LOCK_LEVEL(kNativeDebugInterfaceLock);
+ DCHECK(native_debug_interface_lock_ == nullptr);
+ native_debug_interface_lock_ = new Mutex("Native debug interface lock", current_lock_level);
+
UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
DCHECK(logging_lock_ == nullptr);
logging_lock_ = new Mutex("logging lock", current_lock_level, true);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index d541b79a98..bf27b7f17c 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -58,10 +58,12 @@ class Thread;
// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
enum LockLevel {
kLoggingLock = 0,
+ kNativeDebugInterfaceLock,
kSwapMutexesLock,
kUnexpectedSignalLock,
kThreadSuspendCountLock,
kAbortLock,
+ kSignalHandlingLock,
kJdwpAdbStateLock,
kJdwpSocketLock,
kRegionSpaceRegionLock,
@@ -745,8 +747,11 @@ class Locks {
// One unexpected signal at a time lock.
static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);
+ // Guards the magic global variables used by native tools (e.g. libunwind).
+ static Mutex* native_debug_interface_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
+
// Have an exclusive logging thread.
- static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
+ static Mutex* logging_lock_ ACQUIRED_AFTER(native_debug_interface_lock_);
// List of mutexes that we expect a thread may hold when accessing weak refs. This is used to
// avoid a deadlock in the empty checkpoint while weak ref access is disabled (b/34964016). If we
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 32d304073c..800427d6ab 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -73,6 +73,7 @@
#include "intern_table.h"
#include "interpreter/interpreter.h"
#include "java_vm_ext.h"
+#include "jit/debugger_interface.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jit/profile_compilation_info.h"
@@ -3432,6 +3433,7 @@ void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file,
data.weak_root = dex_cache_jweak;
data.dex_file = dex_cache->GetDexFile();
data.class_table = ClassTableForClassLoader(class_loader);
+ RegisterDexFileForNative(self, data.dex_file->Begin());
DCHECK(data.class_table != nullptr);
// Make sure to hold the dex cache live in the class table. This case happens for the boot class
// path dex caches without an image.
@@ -8368,7 +8370,6 @@ mirror::MethodHandle* ClassLinker::ResolveMethodHandleForField(
mirror::MethodHandle* ClassLinker::ResolveMethodHandleForMethod(
Thread* self,
- const DexFile* const dex_file,
const DexFile::MethodHandleItem& method_handle,
ArtMethod* referrer) {
DexFile::MethodHandleType handle_type =
@@ -8492,19 +8493,20 @@ mirror::MethodHandle* ClassLinker::ResolveMethodHandleForMethod(
return nullptr;
}
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(referrer->GetDexCache()));
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(referrer->GetClassLoader()));
int32_t index = 0;
-
if (receiver_count != 0) {
// Insert receiver
method_params->Set(index++, target_method->GetDeclaringClass());
}
-
- DexFileParameterIterator it(*dex_file, target_method->GetPrototype());
+ DexFileParameterIterator it(*target_method->GetDexFile(), target_method->GetPrototype());
+ Handle<mirror::DexCache> target_method_dex_cache(hs.NewHandle(target_method->GetDexCache()));
+ Handle<mirror::ClassLoader> target_method_class_loader(hs.NewHandle(target_method->GetClassLoader()));
while (it.HasNext()) {
+ DCHECK_LT(index, num_params);
const dex::TypeIndex type_idx = it.GetTypeIdx();
- ObjPtr<mirror::Class> klass = ResolveType(type_idx, dex_cache, class_loader);
+ ObjPtr<mirror::Class> klass = ResolveType(type_idx,
+ target_method_dex_cache,
+ target_method_class_loader);
if (nullptr == klass) {
DCHECK(self->IsExceptionPending());
return nullptr;
@@ -8554,7 +8556,7 @@ ObjPtr<mirror::MethodHandle> ClassLinker::ResolveMethodHandle(Thread* self,
case DexFile::MethodHandleType::kInvokeConstructor:
case DexFile::MethodHandleType::kInvokeDirect:
case DexFile::MethodHandleType::kInvokeInterface:
- return ResolveMethodHandleForMethod(self, dex_file, method_handle, referrer);
+ return ResolveMethodHandleForMethod(self, method_handle, referrer);
}
}
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 3e3425f5ac..16fa1ce801 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -979,7 +979,6 @@ class ClassLinker {
REQUIRES_SHARED(Locks::mutator_lock_);
mirror::MethodHandle* ResolveMethodHandleForMethod(Thread* self,
- const DexFile* const dex_file,
const DexFile::MethodHandleItem& method_handle,
ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/dex/compact_dex_file.h b/runtime/dex/compact_dex_file.h
index 1ecff04cba..31aeb27872 100644
--- a/runtime/dex/compact_dex_file.h
+++ b/runtime/dex/compact_dex_file.h
@@ -245,6 +245,12 @@ class CompactDexFile : public DexFile {
static bool IsVersionValid(const uint8_t* magic);
virtual bool IsVersionValid() const OVERRIDE;
+ // TODO This is completely a guess. We really need to do better. b/72402467
+ // We ask for 64 megabytes, which should be big enough for any realistic dex file.
+ virtual size_t GetDequickenedSize() const OVERRIDE {
+ return 64 * MB;
+ }
+
const Header& GetHeader() const {
return down_cast<const Header&>(DexFile::GetHeader());
}
diff --git a/runtime/dex/dex_file.h b/runtime/dex/dex_file.h
index 7e2fe98923..cf8c840b59 100644
--- a/runtime/dex/dex_file.h
+++ b/runtime/dex/dex_file.h
@@ -456,6 +456,13 @@ class DexFile {
// Returns true if the dex file supports default methods.
virtual bool SupportsDefaultMethods() const = 0;
+ // Returns the maximum size in bytes needed to store an equivalent dex file strictly conforming to
+ // the dex file specification. That is the size if we wanted to get rid of all the
+ // quickening/compact-dexing/etc.
+ //
+ // TODO This should really be an exact size! b/72402467
+ virtual size_t GetDequickenedSize() const = 0;
+
// Returns the number of string identifiers in the .dex file.
size_t NumStringIds() const {
DCHECK(header_ != nullptr) << GetLocation();
diff --git a/runtime/dex/standard_dex_file.h b/runtime/dex/standard_dex_file.h
index 94ef1f2a8e..e0e9f2f11c 100644
--- a/runtime/dex/standard_dex_file.h
+++ b/runtime/dex/standard_dex_file.h
@@ -83,6 +83,10 @@ class StandardDexFile : public DexFile {
uint32_t GetCodeItemSize(const DexFile::CodeItem& item) const OVERRIDE;
+ virtual size_t GetDequickenedSize() const OVERRIDE {
+ return Size();
+ }
+
private:
StandardDexFile(const uint8_t* base,
size_t size,
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 9d6e5de803..3015b10103 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -17,6 +17,7 @@
#include "fault_handler.h"
#include <setjmp.h>
+#include <string.h>
#include <sys/mman.h>
#include <sys/ucontext.h>
@@ -183,8 +184,31 @@ bool FaultManager::HandleFaultByOtherHandlers(int sig, siginfo_t* info, void* co
return false;
}
+static const char* SignalCodeName(int sig, int code) {
+ if (sig != SIGSEGV) {
+ return "UNKNOWN";
+ } else {
+ switch (code) {
+ case SEGV_MAPERR: return "SEGV_MAPERR";
+ case SEGV_ACCERR: return "SEGV_ACCERR";
+ default: return "UNKNOWN";
+ }
+ }
+}
+
+static std::ostream& PrintSignalInfo(std::ostream& os, siginfo_t* info) {
+ os << " si_signo: " << info->si_signo << " (" << strsignal(info->si_signo) << ")\n"
+ << " si_code: " << info->si_code
+ << " (" << SignalCodeName(info->si_signo, info->si_code) << ")";
+ if (info->si_signo == SIGSEGV) {
+ os << "\n" << " si_addr: " << info->si_addr;
+ }
+ return os;
+}
+
bool FaultManager::HandleFault(int sig, siginfo_t* info, void* context) {
- VLOG(signals) << "Handling fault";
+ if (VLOG_IS_ON(signals)) {
+ PrintSignalInfo(VLOG_STREAM(signals) << "Handling fault:" << "\n", info);
+ }
#ifdef TEST_NESTED_SIGNAL
// Simulate a crash in a handler.
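With verbose signal logging enabled, the new PrintSignalInfo path emits output of roughly this shape for a SIGSEGV with SEGV_MAPERR. This is an illustration assembled from the stream operators above; the strsignal() text is libc-dependent and the address is made up:

    // Handling fault:
    //  si_signo: 11 (Segmentation fault)
    //  si_code: 1 (SEGV_MAPERR)
    //  si_addr: 0x7f00dead0000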
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 52dd104ac8..6735961591 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -106,8 +106,8 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
pre_fence_visitor(obj, usable_size);
QuasiAtomic::ThreadFenceForConstructor();
} else {
- // bytes allocated that takes bulk thread-local buffer allocations into account.
- size_t bytes_tl_bulk_allocated = 0;
+ // Bytes allocated that takes bulk thread-local buffer allocations into account.
+ size_t bytes_tl_bulk_allocated = 0u;
obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
&usable_size, &bytes_tl_bulk_allocated);
if (UNLIKELY(obj == nullptr)) {
@@ -154,12 +154,13 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
}
pre_fence_visitor(obj, usable_size);
QuasiAtomic::ThreadFenceForConstructor();
- new_num_bytes_allocated = num_bytes_allocated_.FetchAndAddRelaxed(bytes_tl_bulk_allocated) +
- bytes_tl_bulk_allocated;
+ size_t num_bytes_allocated_before =
+ num_bytes_allocated_.FetchAndAddRelaxed(bytes_tl_bulk_allocated);
+ new_num_bytes_allocated = num_bytes_allocated_before + bytes_tl_bulk_allocated;
if (bytes_tl_bulk_allocated > 0) {
// Only trace when we get an increase in the number of bytes allocated. This happens when
// obtaining a new TLAB and isn't often enough to hurt performance according to golem.
- TraceHeapSize(new_num_bytes_allocated + bytes_tl_bulk_allocated);
+ TraceHeapSize(new_num_bytes_allocated);
}
}
if (kIsDebugBuild && Runtime::Current()->IsStarted()) {
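The TraceHeapSize change above fixes a double count: new_num_bytes_allocated already includes bytes_tl_bulk_allocated. A worked example with assumed numbers:

    // Assume num_bytes_allocated_ == 100 and a TLAB bulk allocation of 10 bytes.
    // num_bytes_allocated_before = 100            (FetchAndAddRelaxed returns the old value)
    // new_num_bytes_allocated    = 100 + 10 = 110 (the correct current heap size)
    // The old code traced 110 + 10 = 120, counting the TLAB bytes twice; the new code traces 110.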
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 2101f6837b..24cedb093b 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -1384,8 +1384,8 @@ TwoWordReturn Instrumentation::PopInstrumentationStackFrame(Thread* self,
reinterpret_cast<uintptr_t>(GetQuickDeoptimizationEntryPoint()));
} else {
if (deoptimize && !Runtime::Current()->IsAsyncDeoptimizeable(*return_pc)) {
- LOG(WARNING) << "Got a deoptimization request on un-deoptimizable " << method->PrettyMethod()
- << " at PC " << reinterpret_cast<void*>(*return_pc);
+ VLOG(deopt) << "Got a deoptimization request on un-deoptimizable " << method->PrettyMethod()
+ << " at PC " << reinterpret_cast<void*>(*return_pc);
}
if (kVerboseInstrumentation) {
LOG(INFO) << "Returning from " << method->PrettyMethod()
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index d53da215a2..12b8c38bbb 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -1183,10 +1183,9 @@ static ObjPtr<mirror::CallSite> InvokeBootstrapMethod(Thread* self,
}
Handle<mirror::Object> object(hs.NewHandle(result.GetL()));
-
- // Check the result is not null.
if (UNLIKELY(object.IsNull())) {
- ThrowNullPointerException("CallSite == null");
+ // This will typically be for LambdaMetafactory, which is not supported.
+ ThrowNullPointerException("Bootstrap method returned null");
return nullptr;
}
@@ -1202,7 +1201,7 @@ static ObjPtr<mirror::CallSite> InvokeBootstrapMethod(Thread* self,
// Check the call site target is not null as we're going to invoke it.
Handle<mirror::MethodHandle> target = hs.NewHandle(call_site->GetTarget());
if (UNLIKELY(target.IsNull())) {
- ThrowNullPointerException("CallSite target == null");
+ ThrowNullPointerException("Target for call-site is null");
return nullptr;
}
diff --git a/runtime/jit/debugger_interface.cc b/runtime/jit/debugger_interface.cc
index 0e295e2442..d60f70a54f 100644
--- a/runtime/jit/debugger_interface.cc
+++ b/runtime/jit/debugger_interface.cc
@@ -68,11 +68,65 @@ extern "C" {
// Static initialization is necessary to prevent GDB from seeing
// uninitialized descriptor.
JITDescriptor __jit_debug_descriptor = { 1, JIT_NOACTION, nullptr, nullptr };
+
+ // Incremented whenever __jit_debug_descriptor is modified.
+ uint32_t __jit_debug_descriptor_timestamp = 0;
+
+ struct DEXFileEntry {
+ DEXFileEntry* next_;
+ DEXFileEntry* prev_;
+ const void* dexfile_;
+ };
+
+ DEXFileEntry* __art_debug_dexfiles = nullptr;
+
+ // Incremented whenever __art_debug_dexfiles is modified.
+ uint32_t __art_debug_dexfiles_timestamp = 0;
}
-Mutex g_jit_debug_mutex("JIT debug interface lock", kJitDebugInterfaceLock);
+static size_t g_jit_debug_mem_usage
+ GUARDED_BY(Locks::native_debug_interface_lock_) = 0;
+
+static std::unordered_map<const void*, DEXFileEntry*> g_dexfile_entries
+ GUARDED_BY(Locks::native_debug_interface_lock_);
+
+void RegisterDexFileForNative(Thread* current_thread, const void* dexfile_header) {
+ MutexLock mu(current_thread, *Locks::native_debug_interface_lock_);
+ if (g_dexfile_entries.count(dexfile_header) == 0) {
+ DEXFileEntry* entry = new DEXFileEntry();
+ CHECK(entry != nullptr);
+ entry->dexfile_ = dexfile_header;
+ entry->prev_ = nullptr;
+ entry->next_ = __art_debug_dexfiles;
+ if (entry->next_ != nullptr) {
+ entry->next_->prev_ = entry;
+ }
+ __art_debug_dexfiles = entry;
+ __art_debug_dexfiles_timestamp++;
+ g_dexfile_entries.emplace(dexfile_header, entry);
+ }
+}
-static size_t g_jit_debug_mem_usage = 0;
+void DeregisterDexFileForNative(Thread* current_thread, const void* dexfile_header) {
+ MutexLock mu(current_thread, *Locks::native_debug_interface_lock_);
+ auto it = g_dexfile_entries.find(dexfile_header);
+ // We register dex files in the class linker and free them in DexFile_closeDexFile,
+ // but there might be cases where we load the dex file without using it in the class linker.
+ if (it != g_dexfile_entries.end()) {
+ DEXFileEntry* entry = it->second;
+ if (entry->prev_ != nullptr) {
+ entry->prev_->next_ = entry->next_;
+ } else {
+ __art_debug_dexfiles = entry->next_;
+ }
+ if (entry->next_ != nullptr) {
+ entry->next_->prev_ = entry->prev_;
+ }
+ __art_debug_dexfiles_timestamp++;
+ delete entry;
+ g_dexfile_entries.erase(it);
+ }
+}
JITCodeEntry* CreateJITCodeEntry(const std::vector<uint8_t>& symfile) {
DCHECK_NE(symfile.size(), 0u);
@@ -96,6 +150,7 @@ JITCodeEntry* CreateJITCodeEntry(const std::vector<uint8_t>& symfile) {
__jit_debug_descriptor.first_entry_ = entry;
__jit_debug_descriptor.relevant_entry_ = entry;
__jit_debug_descriptor.action_flag_ = JIT_REGISTER_FN;
+ __jit_debug_descriptor_timestamp++;
(*__jit_debug_register_code_ptr)();
return entry;
}
@@ -114,6 +169,7 @@ void DeleteJITCodeEntry(JITCodeEntry* entry) {
g_jit_debug_mem_usage -= sizeof(JITCodeEntry) + entry->symfile_size_;
__jit_debug_descriptor.relevant_entry_ = entry;
__jit_debug_descriptor.action_flag_ = JIT_UNREGISTER_FN;
+ __jit_debug_descriptor_timestamp++;
(*__jit_debug_register_code_ptr)();
delete[] entry->symfile_addr_;
delete entry;
@@ -121,7 +177,7 @@ void DeleteJITCodeEntry(JITCodeEntry* entry) {
// Mapping from code address to entry. Used to manage life-time of the entries.
static std::unordered_map<uintptr_t, JITCodeEntry*> g_jit_code_entries
- GUARDED_BY(g_jit_debug_mutex);
+ GUARDED_BY(Locks::native_debug_interface_lock_);
void IncrementJITCodeEntryRefcount(JITCodeEntry* entry, uintptr_t code_address) {
DCHECK(entry != nullptr);
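The exported __art_debug_dexfiles list is meant to be read by tooling, typically from a stopped process via a debugger. A minimal in-process sketch of a traversal, assuming the reader holds Locks::native_debug_interface_lock_ or has otherwise stopped mutation; the visitor callback is hypothetical:

    struct DEXFileEntry {  // Mirrors the definition in debugger_interface.cc above.
      DEXFileEntry* next_;
      DEXFileEntry* prev_;
      const void* dexfile_;
    };
    extern "C" DEXFileEntry* __art_debug_dexfiles;
    extern "C" uint32_t __art_debug_dexfiles_timestamp;

    void VisitRegisteredDexFiles(void (*visit)(const void* dex_header)) {
      // The timestamp lets a tool detect concurrent modification and re-read the list.
      uint32_t stamp = __art_debug_dexfiles_timestamp;
      for (DEXFileEntry* entry = __art_debug_dexfiles; entry != nullptr; entry = entry->next_) {
        visit(entry->dexfile_);
      }
      CHECK_EQ(stamp, __art_debug_dexfiles_timestamp) << "List changed during traversal";
    }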
diff --git a/runtime/jit/debugger_interface.h b/runtime/jit/debugger_interface.h
index 9aec988f67..8c4bb3fdf4 100644
--- a/runtime/jit/debugger_interface.h
+++ b/runtime/jit/debugger_interface.h
@@ -30,36 +30,42 @@ extern "C" {
struct JITCodeEntry;
}
-extern Mutex g_jit_debug_mutex;
+// Notify native tools (e.g. libunwind) that DEX file has been opened.
+// The pointer needs to point the start of the dex data (not the DexFile* object).
+void RegisterDexFileForNative(Thread* current_thread, const void* dexfile_header);
+
+// Notify native tools (e.g. libunwind) that DEX file has been closed.
+// The pointer needs to point the start of the dex data (not the DexFile* object).
+void DeregisterDexFileForNative(Thread* current_thread, const void* dexfile_header);
// Notify native debugger about new JITed code by passing in-memory ELF.
// It takes ownership of the in-memory ELF file.
JITCodeEntry* CreateJITCodeEntry(const std::vector<uint8_t>& symfile)
- REQUIRES(g_jit_debug_mutex);
+ REQUIRES(Locks::native_debug_interface_lock_);
// Notify native debugger that JITed code has been removed.
// It also releases the associated in-memory ELF file.
void DeleteJITCodeEntry(JITCodeEntry* entry)
- REQUIRES(g_jit_debug_mutex);
+ REQUIRES(Locks::native_debug_interface_lock_);
// Helper method to track life-time of JITCodeEntry.
// It registers given code address as being described by the given entry.
void IncrementJITCodeEntryRefcount(JITCodeEntry* entry, uintptr_t code_address)
- REQUIRES(g_jit_debug_mutex);
+ REQUIRES(Locks::native_debug_interface_lock_);
// Helper method to track life-time of JITCodeEntry.
// It de-registers given code address as being described by the given entry.
void DecrementJITCodeEntryRefcount(JITCodeEntry* entry, uintptr_t code_address)
- REQUIRES(g_jit_debug_mutex);
+ REQUIRES(Locks::native_debug_interface_lock_);
// Find the registered JITCodeEntry for given code address.
// There can be only one entry per address at any given time.
JITCodeEntry* GetJITCodeEntry(uintptr_t code_address)
- REQUIRES(g_jit_debug_mutex);
+ REQUIRES(Locks::native_debug_interface_lock_);
// Returns approximate memory used by all JITCodeEntries.
size_t GetJITCodeEntryMemUsage()
- REQUIRES(g_jit_debug_mutex);
+ REQUIRES(Locks::native_debug_interface_lock_);
} // namespace art
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 7f0447732e..c8c13cb20f 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -549,7 +549,7 @@ void JitCodeCache::FreeCode(const void* code_ptr) {
uintptr_t allocation = FromCodeToAllocation(code_ptr);
// Notify native debugger that we are about to remove the code.
// It does nothing if we are not using native debugger.
- MutexLock mu(Thread::Current(), g_jit_debug_mutex);
+ MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
JITCodeEntry* entry = GetJITCodeEntry(reinterpret_cast<uintptr_t>(code_ptr));
if (entry != nullptr) {
DecrementJITCodeEntryRefcount(entry, reinterpret_cast<uintptr_t>(code_ptr));
@@ -1825,7 +1825,7 @@ void JitCodeCache::FreeData(uint8_t* data) {
void JitCodeCache::Dump(std::ostream& os) {
MutexLock mu(Thread::Current(), lock_);
- MutexLock mu2(Thread::Current(), g_jit_debug_mutex);
+ MutexLock mu2(Thread::Current(), *Locks::native_debug_interface_lock_);
os << "Current JIT code cache size: " << PrettySize(used_memory_for_code_) << "\n"
<< "Current JIT data cache size: " << PrettySize(used_memory_for_data_) << "\n"
<< "Current JIT mini-debug-info size: " << PrettySize(GetJITCodeEntryMemUsage()) << "\n"
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 55e9c390cd..26acef06d6 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -396,6 +396,91 @@ MemMap* MemMap::MapDummy(const char* name, uint8_t* addr, size_t byte_count) {
return new MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, true /* reuse */);
}
+template<typename A, typename B>
+static ptrdiff_t PointerDiff(A* a, B* b) {
+ return static_cast<ptrdiff_t>(reinterpret_cast<intptr_t>(a) - reinterpret_cast<intptr_t>(b));
+}
+
+bool MemMap::ReplaceWith(MemMap** source_ptr, /*out*/std::string* error) {
+#if !HAVE_MREMAP_SYSCALL
+ UNUSED(source_ptr);
+ *error = "Cannot perform atomic replace because we are missing the required mremap syscall";
+ return false;
+#else // !HAVE_MREMAP_SYSCALL
+ CHECK(source_ptr != nullptr);
+ CHECK(*source_ptr != nullptr);
+ if (!MemMap::kCanReplaceMapping) {
+ *error = "Unable to perform atomic replace due to runtime environment!";
+ return false;
+ }
+ MemMap* source = *source_ptr;
+ // Neither mapping may be a 'reuse' mapping; both must own their pages.
+ if (source->reuse_ || reuse_) {
+ *error = "One or both mappings is not a real mmap!";
+ return false;
+ }
+ // TODO Support redzones.
+ if (source->redzone_size_ != 0 || redzone_size_ != 0) {
+ *error = "source and dest have different redzone sizes";
+ return false;
+ }
+ // Make sure they have the same offset from the actual mmap'd address.
+ if (PointerDiff(BaseBegin(), Begin()) != PointerDiff(source->BaseBegin(), source->Begin())) {
+ *error =
+ "source starts at a different offset from the mmap. Cannot atomically replace mappings";
+ return false;
+ }
+ // mremap doesn't allow the destination [start, end] to overlap the source [start, end] (it's
+ // like memcpy, but here the check is explicit and actually enforced).
+ if (source->BaseBegin() > BaseBegin() &&
+ reinterpret_cast<uint8_t*>(BaseBegin()) + source->BaseSize() >
+ reinterpret_cast<uint8_t*>(source->BaseBegin())) {
+ *error = "destination memory pages overlap with source memory pages";
+ return false;
+ }
+ // Change the protection to match the new location.
+ int old_prot = source->GetProtect();
+ if (!source->Protect(GetProtect())) {
+ *error = "Could not change protections for source to those required for dest.";
+ return false;
+ }
+
+ // Do the mremap.
+ void* res = mremap(/*old_address*/source->BaseBegin(),
+ /*old_size*/source->BaseSize(),
+ /*new_size*/source->BaseSize(),
+ /*flags*/MREMAP_MAYMOVE | MREMAP_FIXED,
+ /*new_address*/BaseBegin());
+ if (res == MAP_FAILED) {
+ int saved_errno = errno;
+ // Wasn't able to move mapping. Change the protection of source back to the original one and
+ // return.
+ source->Protect(old_prot);
+ *error = std::string("Failed to mremap source to dest. Error was ") + strerror(saved_errno);
+ return false;
+ }
+ CHECK(res == BaseBegin());
+
+ // The new base_size is all the pages of the 'source' plus any remaining dest pages; the extra
+ // dest pages are unmapped below via SetSize.
+ size_t new_base_size = std::max(source->base_size_, base_size_);
+
+ // Delete the old source, don't unmap it though (set reuse) since it is already gone.
+ *source_ptr = nullptr;
+ size_t source_size = source->size_;
+ source->already_unmapped_ = true;
+ delete source;
+ source = nullptr;
+
+ size_ = source_size;
+ base_size_ = new_base_size;
+ // Reduce base_size if needed (this will unmap the extra pages).
+ SetSize(source_size);
+
+ return true;
+#endif // !HAVE_MREMAP_SYSCALL
+}
+
MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr,
size_t byte_count,
int prot,
@@ -499,9 +584,11 @@ MemMap::~MemMap() {
if (!reuse_) {
MEMORY_TOOL_MAKE_UNDEFINED(base_begin_, base_size_);
- int result = munmap(base_begin_, base_size_);
- if (result == -1) {
- PLOG(FATAL) << "munmap failed";
+ if (!already_unmapped_) {
+ int result = munmap(base_begin_, base_size_);
+ if (result == -1) {
+ PLOG(FATAL) << "munmap failed";
+ }
}
}
@@ -523,7 +610,7 @@ MemMap::~MemMap() {
MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin,
size_t base_size, int prot, bool reuse, size_t redzone_size)
: name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
- prot_(prot), reuse_(reuse), redzone_size_(redzone_size) {
+ prot_(prot), reuse_(reuse), already_unmapped_(false), redzone_size_(redzone_size) {
if (size_ == 0) {
CHECK(begin_ == nullptr);
CHECK(base_begin_ == nullptr);
@@ -794,19 +881,21 @@ void MemMap::Shutdown() {
}
void MemMap::SetSize(size_t new_size) {
- if (new_size == base_size_) {
+ CHECK_LE(new_size, size_);
+ size_t new_base_size = RoundUp(new_size + static_cast<size_t>(PointerDiff(Begin(), BaseBegin())),
+ kPageSize);
+ if (new_base_size == base_size_) {
+ size_ = new_size;
return;
}
- CHECK_ALIGNED(new_size, kPageSize);
- CHECK_EQ(base_size_, size_) << "Unsupported";
- CHECK_LE(new_size, base_size_);
+ CHECK_LT(new_base_size, base_size_);
MEMORY_TOOL_MAKE_UNDEFINED(
reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) +
- new_size),
- base_size_ - new_size);
- CHECK_EQ(munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) + new_size),
- base_size_ - new_size), 0) << new_size << " " << base_size_;
- base_size_ = new_size;
+ new_base_size),
+ base_size_ - new_base_size);
+ CHECK_EQ(munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) + new_base_size),
+ base_size_ - new_base_size), 0) << new_base_size << " " << base_size_;
+ base_size_ = new_base_size;
size_ = new_size;
}
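The reworked SetSize supports a Begin() that is offset from BaseBegin() and non-page-aligned sizes. A worked example with assumed values and 4 KiB pages:

    // Assume Begin() == BaseBegin() + 0x100 and base_size_ == 0x4000 (4 pages).
    // SetSize(0x1000):
    //   new_base_size = RoundUp(0x1000 + 0x100, kPageSize) = 0x2000 (2 pages)
    //   => munmap(BaseBegin() + 0x2000, 0x2000), base_size_ = 0x2000, size_ = 0x1000.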
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 5603963eac..0ecb414614 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -39,8 +39,12 @@ namespace art {
#ifdef __linux__
static constexpr bool kMadviseZeroes = true;
+#define HAVE_MREMAP_SYSCALL true
#else
static constexpr bool kMadviseZeroes = false;
+// We cannot ever perform MemMap::ReplaceWith on non-linux hosts since the syscall is not
+// present.
+#define HAVE_MREMAP_SYSCALL false
#endif
// Used to keep track of mmap segments.
@@ -52,6 +56,32 @@ static constexpr bool kMadviseZeroes = false;
// Otherwise, calls might see uninitialized values.
class MemMap {
public:
+ static constexpr bool kCanReplaceMapping = HAVE_MREMAP_SYSCALL;
+
+ // Replace the data in this memmap with the data in the memmap pointed to by source. The caller
+ // relinquishes ownership of the source mmap.
+ //
+ // For the call to be successful:
+ // * The range [dest->Begin(), dest->Begin() + source->Size()] must not overlap with
+ // [source->Begin(), source->End()].
+ // * Neither source nor dest may be 'reused' mappings (they must own all the pages associated
+ // with them).
+ // * kCanReplaceMapping must be true.
+ // * Neither source nor dest may use manual redzones.
+ // * Both source and dest must have the same offset from the nearest page boundary.
+ // * mremap must succeed when called on the mappings.
+ //
+ // If this call succeeds it will return true and:
+ // * *source is deallocated and set to nullptr.
+ // * The protection of this will remain the same.
+ // * The size of this will be the size of the source.
+ // * The data in this will be the data from source.
+ //
+ // If this call fails it will return false and make no changes to *source or this; the ownership
+ // of the source mmap remains with the caller.
+ bool ReplaceWith(/*in-out*/MemMap** source, /*out*/std::string* error);
+
// Request an anonymous region of length 'byte_count' and a requested base address.
// Use null as the requested base address if you don't care.
// "reuse" allows re-mapping an address range from an existing mapping.
@@ -246,6 +276,9 @@ class MemMap {
// unmapping.
const bool reuse_;
+ // When already_unmapped_ is true the destructor will not call munmap.
+ bool already_unmapped_;
+
const size_t redzone_size_;
#if USE_ART_LOW_4G_ALLOCATOR
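A minimal usage sketch of ReplaceWith under the contract above; the map names and single-page sizes are illustrative, and the MapAnonymous argument order follows the tests below:

    std::string error;
    std::unique_ptr<MemMap> dest(MemMap::MapAnonymous(
        "replace-dest", nullptr, kPageSize, PROT_READ, false, false, &error));
    MemMap* source = MemMap::MapAnonymous(
        "replace-source", nullptr, kPageSize, PROT_READ | PROT_WRITE, false, false, &error);
    if (dest->ReplaceWith(&source, &error)) {
      // Success: *source was deallocated and set to nullptr; dest now holds the
      // source's data at dest->Begin() and keeps its original PROT_READ protection.
    } else {
      // Failure: no changes were made; ownership of source stays with the caller.
      delete source;
    }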
diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc
index a4ebb16d09..3adbf18a7a 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -19,6 +19,7 @@
#include <sys/mman.h>
#include <memory>
+#include <random>
#include "base/memory_tool.h"
#include "base/unix_file/fd_file.h"
@@ -36,6 +37,25 @@ class MemMapTest : public CommonRuntimeTest {
return mem_map->base_size_;
}
+ static bool IsAddressMapped(void* addr) {
+ bool res = msync(addr, 1, MS_SYNC) == 0;
+ if (!res && errno != ENOMEM) {
+ PLOG(FATAL) << "Unexpected error occurred on msync";
+ }
+ return res;
+ }
+
+ static std::vector<uint8_t> RandomData(size_t size) {
+ std::random_device rd;
+ std::uniform_int_distribution<uint8_t> dist;
+ std::vector<uint8_t> res;
+ res.resize(size);
+ for (size_t i = 0; i < size; i++) {
+ res[i] = dist(rd);
+ }
+ return res;
+ }
+
static uint8_t* GetValidMapAddress(size_t size, bool low_4gb) {
// Find a valid map address and unmap it before returning.
std::string error_msg;
@@ -143,6 +163,186 @@ TEST_F(MemMapTest, Start) {
}
#endif
+// We need mremap to be able to test ReplaceMapping at all.
+#if HAVE_MREMAP_SYSCALL
+TEST_F(MemMapTest, ReplaceMapping_SameSize) {
+ std::string error_msg;
+ std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
+ nullptr,
+ kPageSize,
+ PROT_READ,
+ false,
+ false,
+ &error_msg));
+ ASSERT_TRUE(dest != nullptr);
+ MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+ nullptr,
+ kPageSize,
+ PROT_WRITE | PROT_READ,
+ false,
+ false,
+ &error_msg);
+ ASSERT_TRUE(source != nullptr);
+ void* source_addr = source->Begin();
+ void* dest_addr = dest->Begin();
+ ASSERT_TRUE(IsAddressMapped(source_addr));
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+
+ std::vector<uint8_t> data = RandomData(kPageSize);
+ memcpy(source->Begin(), data.data(), data.size());
+
+ ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+
+ ASSERT_FALSE(IsAddressMapped(source_addr));
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_TRUE(source == nullptr);
+
+ ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+
+ ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
+}
+
+TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
+ std::string error_msg;
+ std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
+ nullptr,
+ 5 * kPageSize, // Need to make it larger
+ // initially so we know
+ // there won't be mappings
+ // in the way when we
+ // move source.
+ PROT_READ,
+ false,
+ false,
+ &error_msg));
+ ASSERT_TRUE(dest != nullptr);
+ MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+ nullptr,
+ 3 * kPageSize,
+ PROT_WRITE | PROT_READ,
+ false,
+ false,
+ &error_msg);
+ ASSERT_TRUE(source != nullptr);
+ uint8_t* source_addr = source->Begin();
+ uint8_t* dest_addr = dest->Begin();
+ ASSERT_TRUE(IsAddressMapped(source_addr));
+
+ // Fill the source with random data.
+ std::vector<uint8_t> data = RandomData(3 * kPageSize);
+ memcpy(source->Begin(), data.data(), data.size());
+
+ // Make the dest smaller so that we know we'll have space.
+ dest->SetSize(kPageSize);
+
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
+ ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+
+ ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+
+ ASSERT_FALSE(IsAddressMapped(source_addr));
+ ASSERT_EQ(dest->Size(), static_cast<size_t>(3 * kPageSize));
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
+ ASSERT_TRUE(source == nullptr);
+
+ ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
+}
+
+TEST_F(MemMapTest, ReplaceMapping_MakeSmaller) {
+ std::string error_msg;
+ std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
+ nullptr,
+ 3 * kPageSize,
+ PROT_READ,
+ false,
+ false,
+ &error_msg));
+ ASSERT_TRUE(dest != nullptr);
+ MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+ nullptr,
+ kPageSize,
+ PROT_WRITE | PROT_READ,
+ false,
+ false,
+ &error_msg);
+ ASSERT_TRUE(source != nullptr);
+ uint8_t* source_addr = source->Begin();
+ uint8_t* dest_addr = dest->Begin();
+ ASSERT_TRUE(IsAddressMapped(source_addr));
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
+ ASSERT_EQ(dest->Size(), static_cast<size_t>(3 * kPageSize));
+
+ std::vector<uint8_t> data = RandomData(kPageSize);
+ memcpy(source->Begin(), data.data(), kPageSize);
+
+ ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+
+ ASSERT_FALSE(IsAddressMapped(source_addr));
+ ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
+ ASSERT_TRUE(source == nullptr);
+
+ ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
+}
+
+TEST_F(MemMapTest, ReplaceMapping_FailureOverlap) {
+ std::string error_msg;
+ std::unique_ptr<MemMap> dest(
+ MemMap::MapAnonymous(
+ "MapAnonymousEmpty-atomic-replace-dest",
+ nullptr,
+ 3 * kPageSize, // Need to make it larger initially so we know there won't be mappings in
+ // the way when we move source.
+ PROT_READ | PROT_WRITE,
+ false,
+ false,
+ &error_msg));
+ ASSERT_TRUE(dest != nullptr);
+ // Resize down to 1 page so we can remap the rest.
+ dest->SetSize(kPageSize);
+ // Create source from the last 2 pages
+ MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+ dest->Begin() + kPageSize,
+ 2 * kPageSize,
+ PROT_WRITE | PROT_READ,
+ false,
+ false,
+ &error_msg);
+ ASSERT_TRUE(source != nullptr);
+ MemMap* orig_source = source;
+ ASSERT_EQ(dest->Begin() + kPageSize, source->Begin());
+ uint8_t* source_addr = source->Begin();
+ uint8_t* dest_addr = dest->Begin();
+ ASSERT_TRUE(IsAddressMapped(source_addr));
+
+ // Fill the source and dest with random data.
+ std::vector<uint8_t> data = RandomData(2 * kPageSize);
+ memcpy(source->Begin(), data.data(), data.size());
+ std::vector<uint8_t> dest_data = RandomData(kPageSize);
+ memcpy(dest->Begin(), dest_data.data(), dest_data.size());
+
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+
+ ASSERT_FALSE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+
+ ASSERT_TRUE(source == orig_source);
+ ASSERT_TRUE(IsAddressMapped(source_addr));
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_EQ(source->Size(), data.size());
+ ASSERT_EQ(dest->Size(), dest_data.size());
+
+ ASSERT_EQ(memcmp(source->Begin(), data.data(), data.size()), 0);
+ ASSERT_EQ(memcmp(dest->Begin(), dest_data.data(), dest_data.size()), 0);
+
+ delete source;
+}
+#endif // HAVE_MREMAP_SYSCALL
+
TEST_F(MemMapTest, MapAnonymousEmpty) {
CommonInit();
std::string error_msg;
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 0f430874cf..6ea9a7ad62 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -30,6 +30,7 @@
#include "dex/art_dex_file_loader.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_loader.h"
+#include "jit/debugger_interface.h"
#include "jni_internal.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
@@ -331,6 +332,7 @@ static jboolean DexFile_closeDexFile(JNIEnv* env, jclass, jobject cookie) {
int32_t i = kDexFileIndexStart; // Oat file is at index 0.
for (const DexFile* dex_file : dex_files) {
if (dex_file != nullptr) {
+ DeregisterDexFileForNative(soa.Self(), dex_file->Begin());
// Only delete the dex file if the dex cache is not found to prevent runtime crashes if there
// are calls to DexFile.close while the ART DexFile is still in use.
if (!class_linker->IsDexFileRegistered(soa.Self(), *dex_file)) {
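Paired with the ClassLinker::RegisterDexFileLocked hunk earlier, the native-debug registration now brackets a dex file's lifetime. A summary of the pairing, with call sites as in the hunks above:

    // ClassLinker::RegisterDexFileLocked:
    //   RegisterDexFileForNative(self, data.dex_file->Begin());
    // DexFile_closeDexFile:
    //   DeregisterDexFileForNative(soa.Self(), dex_file->Begin());
    // Both are keyed by the dex data pointer, and deregistration tolerates
    // dex files that were never registered with the class linker.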
diff --git a/test/603-checker-instanceof/src/Main.java b/test/603-checker-instanceof/src/Main.java
index 1487969c03..2c97bedbaa 100644
--- a/test/603-checker-instanceof/src/Main.java
+++ b/test/603-checker-instanceof/src/Main.java
@@ -59,7 +59,7 @@ public class Main {
/// CHECK: InstanceOf check_kind:exact_check
/// CHECK-NOT: {{.*gs:.*}}
- /// CHECK-START-{ARM,ARM64}: boolean Main.$noinline$instanceOfString(java.lang.Object) disassembly (after)
+ /// CHECK-START-{ARM,ARM64,MIPS,MIPS64}: boolean Main.$noinline$instanceOfString(java.lang.Object) disassembly (after)
/// CHECK: InstanceOf check_kind:exact_check
// For ARM and ARM64, the marking register (r8 and x20, respectively) can be used in
// non-CC configs for any other purpose, so we'd need a config-specific checker test.
diff --git a/test/651-checker-int-simd-minmax/src/Main.java b/test/651-checker-int-simd-minmax/src/Main.java
index 66343adaa8..cfa0ae7dca 100644
--- a/test/651-checker-int-simd-minmax/src/Main.java
+++ b/test/651-checker-int-simd-minmax/src/Main.java
@@ -27,10 +27,10 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-{ARM,ARM64,MIPS64}: void Main.doitMin(int[], int[], int[]) loop_optimization (after)
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Int32 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin(int[] x, int[] y, int[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -46,10 +46,10 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-{ARM,ARM64,MIPS64}: void Main.doitMax(int[], int[], int[]) loop_optimization (after)
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Int32 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMax(int[] x, int[] y, int[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
diff --git a/test/710-varhandle-creation/src-art/Main.java b/test/710-varhandle-creation/src-art/Main.java
index a737b5ba9e..246aac6900 100644
--- a/test/710-varhandle-creation/src-art/Main.java
+++ b/test/710-varhandle-creation/src-art/Main.java
@@ -1,21 +1,17 @@
/*
- * Copyright 2017 Google Inc.
+ * Copyright (C) 2018 The Android Open Source Project
*
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Google designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Google in the LICENSE file that accompanied this code.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*/
import java.lang.invoke.MethodHandles;
diff --git a/test/714-invoke-custom-lambda-metafactory/build b/test/714-invoke-custom-lambda-metafactory/build
new file mode 100644
index 0000000000..b5002ba79c
--- /dev/null
+++ b/test/714-invoke-custom-lambda-metafactory/build
@@ -0,0 +1,22 @@
+#!/bin/bash
+#
+# Copyright 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Make us exit on a failure
+set -e
+
+# Opt out of desugaring to ensure the offending lambda is in the DEX.
+export USE_DESUGAR=false
+./default-build "$@" --experimental method-handles
diff --git a/test/714-invoke-custom-lambda-metafactory/expected.txt b/test/714-invoke-custom-lambda-metafactory/expected.txt
new file mode 100644
index 0000000000..cbe98404c5
--- /dev/null
+++ b/test/714-invoke-custom-lambda-metafactory/expected.txt
@@ -0,0 +1,4 @@
+Exception in thread "main" java.lang.BootstrapMethodError: Exception from call site #0 bootstrap method
+ at Main.main(Main.java:25)
+Caused by: java.lang.NullPointerException: Bootstrap method returned null
+ ... 1 more
diff --git a/test/714-invoke-custom-lambda-metafactory/info.txt b/test/714-invoke-custom-lambda-metafactory/info.txt
new file mode 100644
index 0000000000..4ef117b7e3
--- /dev/null
+++ b/test/714-invoke-custom-lambda-metafactory/info.txt
@@ -0,0 +1 @@
+Checks that ART doesn't crash when it encounters LambdaMetafactory.
diff --git a/test/714-invoke-custom-lambda-metafactory/src/Main.java b/test/714-invoke-custom-lambda-metafactory/src/Main.java
new file mode 100644
index 0000000000..74e0ad4370
--- /dev/null
+++ b/test/714-invoke-custom-lambda-metafactory/src/Main.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Optional;
+
+public class Main {
+ public static void main(String[] args) {
+ int requiredLength = 3;
+ List<String> list = Arrays.asList("A", "B", "C", "D", "EEE");
+ Optional<String> result = list.stream().filter(x -> x.length() >= requiredLength).findAny();
+ if (result.isPresent()) {
+ System.out.println("Result is " + result.get());
+ } else {
+ System.out.println("Result is not there.");
+ }
+ }
+}
diff --git a/test/983-source-transform-verify/source_transform.cc b/test/983-source-transform-verify/source_transform.cc
index c076d1521f..dfefce207b 100644
--- a/test/983-source-transform-verify/source_transform.cc
+++ b/test/983-source-transform-verify/source_transform.cc
@@ -67,6 +67,14 @@ void JNICALL CheckDexFileHook(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
if (IsJVM()) {
return;
}
+
+ // Due to b/72402467, class_data_len might just be an estimate.
+ CHECK_GE(static_cast<size_t>(class_data_len), sizeof(DexFile::Header));
+ const DexFile::Header* header = reinterpret_cast<const DexFile::Header*>(class_data);
+ uint32_t header_file_size = header->file_size_;
+ CHECK_LE(static_cast<jint>(header_file_size), class_data_len);
+ class_data_len = static_cast<jint>(header_file_size);
+
const ArtDexFileLoader dex_file_loader;
std::string error;
std::unique_ptr<const DexFile> dex(dex_file_loader.Open(class_data,
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index ea2d464d24..02438701b8 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -849,7 +849,7 @@ if [ "$HOST" = "n" ]; then
fi
# System libraries needed by libarttestd.so
- PUBLIC_LIBS=libart.so:libartd.so:libc++.so:libbacktrace.so:libbase.so:libnativehelper.so
+ PUBLIC_LIBS=libart.so:libartd.so:libc++.so:libbacktrace.so:libdexfile.so:libbase.so:libnativehelper.so
# Create a script with the command. The command can get longer than the longest
# allowed adb command and there is no way to get the exit status from a adb shell
diff --git a/test/testrunner/target_config.py b/test/testrunner/target_config.py
index 414e3e5037..3064c76ffd 100644
--- a/test/testrunner/target_config.py
+++ b/test/testrunner/target_config.py
@@ -327,27 +327,20 @@ target_config = {
'ART_USE_READ_BARRIER' : 'false'
}
},
- 'art-gtest-heap-poisoning': {
- 'make' : 'valgrind-test-art-host64',
- 'env' : {
- 'ART_HEAP_POISONING' : 'true',
- 'ART_USE_READ_BARRIER' : 'false'
- }
- },
# ASAN (host) configurations.
# These configurations need detect_leaks=0 to work in non-setup environments like build bots,
# as our build tools leak. b/37751350
- 'art-gtest-asan': {
+ 'art-gtest-asan': {
'make' : 'test-art-host-gtest',
'env': {
'SANITIZE_HOST' : 'address',
'ASAN_OPTIONS' : 'detect_leaks=0'
}
- },
- 'art-asan': {
+ },
+ 'art-asan': {
'run-test' : ['--interpreter',
'--optimizing',
'--jit'],
@@ -355,7 +348,16 @@ target_config = {
'SANITIZE_HOST' : 'address',
'ASAN_OPTIONS' : 'detect_leaks=0'
}
- },
+ },
+ 'art-gtest-heap-poisoning': {
+ 'make' : 'test-art-host-gtest',
+ 'env' : {
+ 'ART_HEAP_POISONING' : 'true',
+ 'ART_USE_READ_BARRIER' : 'false',
+ 'SANITIZE_HOST' : 'address',
+ 'ASAN_OPTIONS' : 'detect_leaks=0'
+ }
+ },
# ART Golem build targets used by go/lem (continuous ART benchmarking),
# (art-opt-cc is used by default since it mimics the default preopt config),
diff --git a/tools/prebuilt_libjdwp_art_failures.txt b/tools/prebuilt_libjdwp_art_failures.txt
index eaa99a3bf8..7a8a4dd35f 100644
--- a/tools/prebuilt_libjdwp_art_failures.txt
+++ b/tools/prebuilt_libjdwp_art_failures.txt
@@ -124,6 +124,6 @@
result: EXEC_FAILED,
bug: 70958370,
names: [ "org.apache.harmony.jpda.tests.jdwp.ObjectReference.EnableCollectionTest#testEnableCollection001",
- "org.apache.harmony.jpda.tests.jdwp.MultiSession.EnableCollectionTest.testEnableCollection001" ]
+ "org.apache.harmony.jpda.tests.jdwp.MultiSession.EnableCollectionTest#testEnableCollection001" ]
}
]