summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--  compiler/optimizing/code_generator_mips.cc    93
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc  93
-rw-r--r--  openjdkjvmti/events.cc                        36
-rw-r--r--  openjdkjvmti/events.h                         12
4 files changed, 196 insertions, 38 deletions
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index d6922d2f3f..6376f03b26 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -3103,23 +3103,92 @@ void LocationsBuilderMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
+
+ HInstruction* index = instruction->InputAt(0);
+ HInstruction* length = instruction->InputAt(1);
+
+ bool const_index = false;
+ bool const_length = false;
+
+ if (index->IsConstant()) {
+ if (length->IsConstant()) {
+ const_index = true;
+ const_length = true;
+ } else {
+ int32_t index_value = index->AsIntConstant()->GetValue();
+ if (index_value < 0 || IsInt<16>(index_value + 1)) {
+ const_index = true;
+ }
+ }
+ } else if (length->IsConstant()) {
+ int32_t length_value = length->AsIntConstant()->GetValue();
+ if (IsUint<15>(length_value)) {
+ const_length = true;
+ }
+ }
+
+ locations->SetInAt(0, const_index
+ ? Location::ConstantLocation(index->AsConstant())
+ : Location::RequiresRegister());
+ locations->SetInAt(1, const_length
+ ? Location::ConstantLocation(length->AsConstant())
+ : Location::RequiresRegister());
}
void InstructionCodeGeneratorMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
LocationSummary* locations = instruction->GetLocations();
- BoundsCheckSlowPathMIPS* slow_path =
- new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS(instruction);
- codegen_->AddSlowPath(slow_path);
-
- Register index = locations->InAt(0).AsRegister<Register>();
- Register length = locations->InAt(1).AsRegister<Register>();
+ Location index_loc = locations->InAt(0);
+ Location length_loc = locations->InAt(1);
+
+ if (length_loc.IsConstant()) {
+ int32_t length = length_loc.GetConstant()->AsIntConstant()->GetValue();
+ if (index_loc.IsConstant()) {
+ int32_t index = index_loc.GetConstant()->AsIntConstant()->GetValue();
+ if (index < 0 || index >= length) {
+ BoundsCheckSlowPathMIPS* slow_path =
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS(instruction);
+ codegen_->AddSlowPath(slow_path);
+ __ B(slow_path->GetEntryLabel());
+ } else {
+ // Nothing to be done.
+ }
+ return;
+ }
- // length is limited by the maximum positive signed 32-bit integer.
- // Unsigned comparison of length and index checks for index < 0
- // and for length <= index simultaneously.
- __ Bgeu(index, length, slow_path->GetEntryLabel());
+ BoundsCheckSlowPathMIPS* slow_path =
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS(instruction);
+ codegen_->AddSlowPath(slow_path);
+ Register index = index_loc.AsRegister<Register>();
+ if (length == 0) {
+ __ B(slow_path->GetEntryLabel());
+ } else if (length == 1) {
+ __ Bnez(index, slow_path->GetEntryLabel());
+ } else {
+ DCHECK(IsUint<15>(length)) << length;
+ __ Sltiu(TMP, index, length);
+ __ Beqz(TMP, slow_path->GetEntryLabel());
+ }
+ } else {
+ Register length = length_loc.AsRegister<Register>();
+ BoundsCheckSlowPathMIPS* slow_path =
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS(instruction);
+ codegen_->AddSlowPath(slow_path);
+ if (index_loc.IsConstant()) {
+ int32_t index = index_loc.GetConstant()->AsIntConstant()->GetValue();
+ if (index < 0) {
+ __ B(slow_path->GetEntryLabel());
+ } else if (index == 0) {
+ __ Blez(length, slow_path->GetEntryLabel());
+ } else {
+ DCHECK(IsInt<16>(index + 1)) << index;
+ __ Sltiu(TMP, length, index + 1);
+ __ Bnez(TMP, slow_path->GetEntryLabel());
+ }
+ } else {
+ Register index = index_loc.AsRegister<Register>();
+ __ Bgeu(index, length, slow_path->GetEntryLabel());
+ }
+ }
}
// Temp is used for read barrier.
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index ee33b3f335..03a719f445 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -2614,23 +2614,92 @@ void LocationsBuilderMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
+
+ HInstruction* index = instruction->InputAt(0);
+ HInstruction* length = instruction->InputAt(1);
+
+ bool const_index = false;
+ bool const_length = false;
+
+ if (index->IsConstant()) {
+ if (length->IsConstant()) {
+ const_index = true;
+ const_length = true;
+ } else {
+ int32_t index_value = index->AsIntConstant()->GetValue();
+ if (index_value < 0 || IsInt<16>(index_value + 1)) {
+ const_index = true;
+ }
+ }
+ } else if (length->IsConstant()) {
+ int32_t length_value = length->AsIntConstant()->GetValue();
+ if (IsUint<15>(length_value)) {
+ const_length = true;
+ }
+ }
+
+ locations->SetInAt(0, const_index
+ ? Location::ConstantLocation(index->AsConstant())
+ : Location::RequiresRegister());
+ locations->SetInAt(1, const_length
+ ? Location::ConstantLocation(length->AsConstant())
+ : Location::RequiresRegister());
}
void InstructionCodeGeneratorMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
LocationSummary* locations = instruction->GetLocations();
- BoundsCheckSlowPathMIPS64* slow_path =
- new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS64(instruction);
- codegen_->AddSlowPath(slow_path);
-
- GpuRegister index = locations->InAt(0).AsRegister<GpuRegister>();
- GpuRegister length = locations->InAt(1).AsRegister<GpuRegister>();
+ Location index_loc = locations->InAt(0);
+ Location length_loc = locations->InAt(1);
+
+ if (length_loc.IsConstant()) {
+ int32_t length = length_loc.GetConstant()->AsIntConstant()->GetValue();
+ if (index_loc.IsConstant()) {
+ int32_t index = index_loc.GetConstant()->AsIntConstant()->GetValue();
+ if (index < 0 || index >= length) {
+ BoundsCheckSlowPathMIPS64* slow_path =
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS64(instruction);
+ codegen_->AddSlowPath(slow_path);
+ __ Bc(slow_path->GetEntryLabel());
+ } else {
+ // Nothing to be done.
+ }
+ return;
+ }
- // length is limited by the maximum positive signed 32-bit integer.
- // Unsigned comparison of length and index checks for index < 0
- // and for length <= index simultaneously.
- __ Bgeuc(index, length, slow_path->GetEntryLabel());
+ BoundsCheckSlowPathMIPS64* slow_path =
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS64(instruction);
+ codegen_->AddSlowPath(slow_path);
+ GpuRegister index = index_loc.AsRegister<GpuRegister>();
+ if (length == 0) {
+ __ Bc(slow_path->GetEntryLabel());
+ } else if (length == 1) {
+ __ Bnezc(index, slow_path->GetEntryLabel());
+ } else {
+ DCHECK(IsUint<15>(length)) << length;
+ __ Sltiu(TMP, index, length);
+ __ Beqzc(TMP, slow_path->GetEntryLabel());
+ }
+ } else {
+ GpuRegister length = length_loc.AsRegister<GpuRegister>();
+ BoundsCheckSlowPathMIPS64* slow_path =
+ new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS64(instruction);
+ codegen_->AddSlowPath(slow_path);
+ if (index_loc.IsConstant()) {
+ int32_t index = index_loc.GetConstant()->AsIntConstant()->GetValue();
+ if (index < 0) {
+ __ Bc(slow_path->GetEntryLabel());
+ } else if (index == 0) {
+ __ Blezc(length, slow_path->GetEntryLabel());
+ } else {
+ DCHECK(IsInt<16>(index + 1)) << index;
+ __ Sltiu(TMP, length, index + 1);
+ __ Bnezc(TMP, slow_path->GetEntryLabel());
+ }
+ } else {
+ GpuRegister index = index_loc.AsRegister<GpuRegister>();
+ __ Bgeuc(index, length, slow_path->GetEntryLabel());
+ }
+ }
}
// Temp is used for read barrier.
diff --git a/openjdkjvmti/events.cc b/openjdkjvmti/events.cc
index 05f9125df9..912e7548db 100644
--- a/openjdkjvmti/events.cc
+++ b/openjdkjvmti/events.cc
@@ -139,7 +139,9 @@ EventMask* EventMasks::GetEventMaskOrNull(art::Thread* thread) {
}
-void EventMasks::EnableEvent(art::Thread* thread, ArtJvmtiEvent event) {
+void EventMasks::EnableEvent(ArtJvmTiEnv* env, art::Thread* thread, ArtJvmtiEvent event) {
+ DCHECK_EQ(&env->event_masks, this);
+ env->event_info_mutex_.AssertExclusiveHeld(art::Thread::Current());
DCHECK(EventMask::EventIsInRange(event));
GetEventMask(thread).Set(event);
if (thread != nullptr) {
@@ -147,7 +149,9 @@ void EventMasks::EnableEvent(art::Thread* thread, ArtJvmtiEvent event) {
}
}
-void EventMasks::DisableEvent(art::Thread* thread, ArtJvmtiEvent event) {
+void EventMasks::DisableEvent(ArtJvmTiEnv* env, art::Thread* thread, ArtJvmtiEvent event) {
+ DCHECK_EQ(&env->event_masks, this);
+ env->event_info_mutex_.AssertExclusiveHeld(art::Thread::Current());
DCHECK(EventMask::EventIsInRange(event));
GetEventMask(thread).Set(event, false);
if (thread != nullptr) {
@@ -1134,20 +1138,28 @@ jvmtiError EventHandler::SetEvent(ArtJvmTiEnv* env,
return ERR(MUST_POSSESS_CAPABILITY);
}
- bool old_state = global_mask.Test(event);
+ bool old_state;
+ bool new_state;
- if (mode == JVMTI_ENABLE) {
- env->event_masks.EnableEvent(thread, event);
- global_mask.Set(event);
- } else {
- DCHECK_EQ(mode, JVMTI_DISABLE);
+ {
+ // Change the event masks atomically.
+ art::Thread* self = art::Thread::Current();
+ art::MutexLock mu(self, envs_lock_);
+ art::WriterMutexLock mu_env_info(self, env->event_info_mutex_);
+ old_state = global_mask.Test(event);
+ if (mode == JVMTI_ENABLE) {
+ env->event_masks.EnableEvent(env, thread, event);
+ global_mask.Set(event);
+ new_state = true;
+ } else {
+ DCHECK_EQ(mode, JVMTI_DISABLE);
- env->event_masks.DisableEvent(thread, event);
- RecalculateGlobalEventMask(event);
+ env->event_masks.DisableEvent(env, thread, event);
+ RecalculateGlobalEventMaskLocked(event);
+ new_state = global_mask.Test(event);
+ }
}
- bool new_state = global_mask.Test(event);
-
// Handle any special work required for the event type.
if (new_state != old_state) {
HandleEventType(event, mode == JVMTI_ENABLE);
diff --git a/openjdkjvmti/events.h b/openjdkjvmti/events.h
index b05136661b..7bdd9a58ec 100644
--- a/openjdkjvmti/events.h
+++ b/openjdkjvmti/events.h
@@ -149,8 +149,16 @@ struct EventMasks {
EventMask& GetEventMask(art::Thread* thread);
EventMask* GetEventMaskOrNull(art::Thread* thread);
- void EnableEvent(art::Thread* thread, ArtJvmtiEvent event);
- void DisableEvent(art::Thread* thread, ArtJvmtiEvent event);
+ // Circular dependencies mean we cannot see the definition of ArtJvmTiEnv so the mutex is simply
+ // asserted in the function.
+ // Note that the 'env' passed in must be the same env this EventMasks is associated with.
+ void EnableEvent(ArtJvmTiEnv* env, art::Thread* thread, ArtJvmtiEvent event);
+ // REQUIRES(env->event_info_mutex_);
+ // Circular dependencies mean we cannot see the definition of ArtJvmTiEnv so the mutex is simply
+ // asserted in the function.
+ // Note that the 'env' passed in must be the same env this EventMasks is associated with.
+ void DisableEvent(ArtJvmTiEnv* env, art::Thread* thread, ArtJvmtiEvent event);
+ // REQUIRES(env->event_info_mutex_);
bool IsEnabledAnywhere(ArtJvmtiEvent event);
// Make any changes to event masks needed for the given capability changes. If caps_added is true
// then caps is all the newly set capabilities of the jvmtiEnv. If it is false then caps is the