-rw-r--r--  build/Android.common_build.mk                              |  12
-rw-r--r--  build/Android.executable.mk                                |   5
-rw-r--r--  build/Android.gtest.mk                                     |   2
-rw-r--r--  compiler/Android.mk                                        |   1
-rw-r--r--  compiler/optimizing/builder.cc                             |   5
-rw-r--r--  compiler/optimizing/code_generator.cc                      |  15
-rw-r--r--  compiler/optimizing/code_generator_arm.cc                  |  60
-rw-r--r--  compiler/optimizing/code_generator_arm.h                   |   2
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc                |  62
-rw-r--r--  compiler/optimizing/code_generator_arm64.h                 |   2
-rw-r--r--  compiler/optimizing/code_generator_x86.cc                  |  51
-rw-r--r--  compiler/optimizing/code_generator_x86.h                   |   2
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc               |  52
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h                |   2
-rw-r--r--  compiler/optimizing/intrinsics_arm.cc                      |   3
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc                    |   3
-rw-r--r--  compiler/optimizing/intrinsics_x86.cc                      |   6
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.cc                   |   6
-rw-r--r--  compiler/optimizing/locations.h                            |   2
-rw-r--r--  compiler/optimizing/nodes.h                                |   5
-rw-r--r--  compiler/optimizing/register_allocator.cc                  |   8
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.cc               |   2
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.h                |   2
-rw-r--r--  compiler/optimizing/stack_map_stream.cc                    |  48
-rw-r--r--  compiler/optimizing/stack_map_stream.h                     |   1
-rw-r--r--  compiler/optimizing/stack_map_test.cc                      | 318
-rw-r--r--  disassembler/Android.mk                                    |   5
-rw-r--r--  oatdump/oatdump.cc                                         |   6
-rw-r--r--  runtime/Android.mk                                         |   1
-rw-r--r--  runtime/art_method.cc                                      |   5
-rw-r--r--  runtime/base/time_utils.h                                  |   4
-rw-r--r--  runtime/check_reference_map_visitor.h                      |  13
-rw-r--r--  runtime/class_linker.cc                                    |   7
-rw-r--r--  runtime/entrypoints/entrypoint_utils-inl.h                 |   7
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc |   9
-rw-r--r--  runtime/gc/heap.cc                                         |  40
-rw-r--r--  runtime/gc/heap.h                                          |   4
-rw-r--r--  runtime/java_vm_ext.cc                                     |   7
-rw-r--r--  runtime/jni_internal.cc                                    |   2
-rw-r--r--  runtime/jni_internal_test.cc                               |   2
-rw-r--r--  runtime/mem_map.cc                                         |  24
-rw-r--r--  runtime/mirror/class.h                                     |   7
-rw-r--r--  runtime/runtime.cc                                         |  19
-rw-r--r--  runtime/stack.cc                                           |  41
-rw-r--r--  runtime/stack_map.cc                                       | 211
-rw-r--r--  runtime/stack_map.h                                        | 409
-rw-r--r--  runtime/thread.cc                                          |   7
-rw-r--r--  runtime/verifier/method_verifier.cc                        |   5
-rw-r--r--  runtime/well_known_classes.cc                              |   5
-rw-r--r--  runtime/well_known_classes.h                               |   2
-rw-r--r--  sigchainlib/Android.mk                                     |   4
-rw-r--r--  sigchainlib/version-script.txt                             |   1
-rw-r--r--  test/004-NativeAllocations/src/Main.java                   |  30
-rw-r--r--  test/135-MirandaDispatch/expected.txt                      |   1
-rw-r--r--  test/135-MirandaDispatch/smali/b_21646347.smali            |  15
-rw-r--r--  test/135-MirandaDispatch/src/Main.java                     |   9
-rw-r--r--  test/441-checker-inliner/src/Main.java                     |  12
-rw-r--r--  test/478-checker-clinit-check-pruning/src/Main.java        |   6
-rw-r--r--  test/Android.libarttest.mk                                 |   1
-rw-r--r--  test/Android.libnativebridgetest.mk                        |   1
-rw-r--r--  test/Android.run-test.mk                                   |   5
61 files changed, 859 insertions, 745 deletions
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index b84154b307..ace6a73654 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -68,6 +68,9 @@ art_default_gc_type_cflags := -DART_DEFAULT_GC_TYPE_IS_$(ART_DEFAULT_GC_TYPE)
ART_HOST_CFLAGS :=
ART_TARGET_CFLAGS :=
+ART_HOST_ASFLAGS :=
+ART_TARGET_ASFLAGS :=
+
# Clang build support.
# Host.
@@ -199,6 +202,9 @@ art_cflags := \
-fvisibility=protected \
$(art_default_gc_type_cflags)
+# Base set of asflags used by all things ART.
+art_asflags :=
+
# Missing declarations: too many at the moment, as we use "extern" quite a bit.
# -Wmissing-declarations \
@@ -217,10 +223,12 @@ endif
ifeq ($(ART_HEAP_POISONING),true)
art_cflags += -DART_HEAP_POISONING=1
+ art_asflags += -DART_HEAP_POISONING=1
endif
ifeq ($(ART_USE_READ_BARRIER),true)
art_cflags += -DART_USE_READ_BARRIER=1
+ art_asflags += -DART_USE_READ_BARRIER=1
endif
ifeq ($(ART_USE_TLAB),true)
@@ -258,11 +266,13 @@ ifndef LIBART_IMG_HOST_BASE_ADDRESS
endif
ART_HOST_CFLAGS += $(art_cflags) -DART_BASE_ADDRESS=$(LIBART_IMG_HOST_BASE_ADDRESS)
ART_HOST_CFLAGS += -DART_DEFAULT_INSTRUCTION_SET_FEATURES=default
+ART_HOST_ASFLAGS += $(art_asflags)
ifndef LIBART_IMG_TARGET_BASE_ADDRESS
$(error LIBART_IMG_TARGET_BASE_ADDRESS unset)
endif
ART_TARGET_CFLAGS += $(art_cflags) -DART_TARGET -DART_BASE_ADDRESS=$(LIBART_IMG_TARGET_BASE_ADDRESS)
+ART_TARGET_ASFLAGS += $(art_asflags)
ART_HOST_NON_DEBUG_CFLAGS := $(art_host_non_debug_cflags)
ART_TARGET_NON_DEBUG_CFLAGS := $(art_target_non_debug_cflags)
@@ -292,6 +302,7 @@ ART_TARGET_CFLAGS += -DART_BASE_ADDRESS_MAX_DELTA=$(LIBART_IMG_TARGET_MAX_BASE_A
# Clear locals now they've served their purpose.
art_cflags :=
+art_asflags :=
art_debug_cflags :=
art_non_debug_cflags :=
art_host_non_debug_cflags :=
@@ -311,6 +322,7 @@ ART_TARGET_LDFLAGS :=
define set-target-local-cflags-vars
LOCAL_CFLAGS += $(ART_TARGET_CFLAGS)
LOCAL_CFLAGS_x86 += $(ART_TARGET_CFLAGS_x86)
+ LOCAL_ASFLAGS += $(ART_TARGET_ASFLAGS)
LOCAL_LDFLAGS += $(ART_TARGET_LDFLAGS)
art_target_cflags_ndebug_or_debug := $(1)
ifeq ($$(art_target_cflags_ndebug_or_debug),debug)
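Note on the asflags plumbing above: assembly sources built with ART_HOST_ASFLAGS/ART_TARGET_ASFLAGS now see the same ART_HEAP_POISONING and ART_USE_READ_BARRIER defines as C++ sources, so .S and .cc code can key behavior off one preprocessor symbol. A minimal C++ sketch of how such a define typically becomes a compile-time constant; the constant name is an assumption for illustration, not taken from this diff:

    // Sketch (assumed name): the -D flag added via art_asflags/art_cflags
    // drives a constant that heap-reference code branches on.
    #ifdef ART_HEAP_POISONING
    static constexpr bool kPoisonHeapReferences = true;   // refs stored poisoned
    #else
    static constexpr bool kPoisonHeapReferences = false;  // refs stored as-is
    #endif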
diff --git a/build/Android.executable.mk b/build/Android.executable.mk
index dfea6e191e..7b036825e7 100644
--- a/build/Android.executable.mk
+++ b/build/Android.executable.mk
@@ -70,13 +70,14 @@ define build-art-executable
endif
ifeq ($$(art_target_or_host),target)
- $(call set-target-local-clang-vars)
- $(call set-target-local-cflags-vars,$(6))
+ $(call set-target-local-clang-vars)
+ $(call set-target-local-cflags-vars,$(6))
LOCAL_SHARED_LIBRARIES += libdl
else # host
LOCAL_CLANG := $(ART_HOST_CLANG)
LOCAL_LDLIBS := $(ART_HOST_LDLIBS)
LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
+ LOCAL_ASFLAGS += $(ART_HOST_ASFLAGS)
ifeq ($$(art_ndebug_or_debug),debug)
LOCAL_CFLAGS += $(ART_HOST_DEBUG_CFLAGS)
else
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 5052187794..4fc184ecd9 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -291,6 +291,7 @@ LOCAL_MODULE := libart-gtest
LOCAL_MODULE_TAGS := optional
LOCAL_CPP_EXTENSION := cc
LOCAL_CFLAGS := $(ART_HOST_CFLAGS)
+LOCAL_ASFLAGS := $(ART_HOST_ASFLAGS)
LOCAL_SRC_FILES := runtime/common_runtime_test.cc compiler/common_compiler_test.cc
LOCAL_C_INCLUDES := $(ART_C_INCLUDES) art/runtime art/compiler
LOCAL_SHARED_LIBRARIES := libartd libartd-compiler
@@ -489,6 +490,7 @@ test-art-target-gtest-$$(art_gtest_name): $$(ART_TEST_TARGET_GTEST_$$(art_gtest_
else # host
LOCAL_CLANG := $$(ART_HOST_CLANG)
LOCAL_CFLAGS += $$(ART_HOST_CFLAGS) $$(ART_HOST_DEBUG_CFLAGS)
+ LOCAL_ASFLAGS += $$(ART_HOST_ASFLAGS)
LOCAL_SHARED_LIBRARIES += libicuuc-host libicui18n-host libnativehelper libziparchive-host libz-host libvixld
LOCAL_LDLIBS := $(ART_HOST_LDLIBS) -lpthread -ldl
LOCAL_IS_HOST_MODULE := true
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 3f5271d31f..67536f00b9 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -234,6 +234,7 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT
else # host
LOCAL_CLANG := $(ART_HOST_CLANG)
LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
+ LOCAL_ASFLAGS += $(ART_HOST_ASFLAGS)
LOCAL_LDLIBS := $(ART_HOST_LDLIBS)
ifeq ($$(art_ndebug_or_debug),debug)
LOCAL_CFLAGS += $(ART_HOST_DEBUG_CFLAGS)
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index dbda63bae4..f98029da03 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -763,11 +763,6 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
}
DCHECK_EQ(argument_index, number_of_arguments);
- if (invoke->IsInvokeStaticOrDirect()) {
- invoke->SetArgumentAt(argument_index, graph_->GetCurrentMethod());
- argument_index++;
- }
-
if (clinit_check_requirement == HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit) {
// Add the class initialization check as last input of `invoke`.
DCHECK(clinit_check != nullptr);
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 049b3e3a40..08c0351eab 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -292,6 +292,7 @@ void CodeGenerator::CreateCommonInvokeLocationSummary(
HInvoke* invoke, InvokeDexCallingConventionVisitor* visitor) {
ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetArena();
LocationSummary* locations = new (allocator) LocationSummary(invoke, LocationSummary::kCall);
+ locations->AddTemp(visitor->GetMethodLocation());
for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
HInstruction* input = invoke->InputAt(i);
@@ -299,20 +300,6 @@ void CodeGenerator::CreateCommonInvokeLocationSummary(
}
locations->SetOut(visitor->GetReturnLocation(invoke->GetType()));
-
- if (invoke->IsInvokeStaticOrDirect()) {
- HInvokeStaticOrDirect* call = invoke->AsInvokeStaticOrDirect();
- if (call->IsStringInit()) {
- locations->AddTemp(visitor->GetMethodLocation());
- } else if (call->IsRecursive()) {
- locations->SetInAt(call->GetCurrentMethodInputIndex(), visitor->GetMethodLocation());
- } else {
- locations->AddTemp(visitor->GetMethodLocation());
- locations->SetInAt(call->GetCurrentMethodInputIndex(), Location::RequiresRegister());
- }
- } else {
- locations->AddTemp(visitor->GetMethodLocation());
- }
}
void CodeGenerator::BlockIfInRegister(Location location, bool is_out) const {
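With the ArtMethod* temp now added unconditionally at index 0, back ends no longer need the HasTemps() guard (deleted from locations.h further down); call sites collapse to the two-line pattern the ARM hunk below uses verbatim:

    Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
    codegen_->GenerateStaticOrDirectCall(invoke, temp);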
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index f4544ea70d..f7731069fb 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1254,10 +1254,6 @@ void LocationsBuilderARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invok
IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
codegen_->GetInstructionSetFeatures());
if (intrinsic.TryDispatch(invoke)) {
- LocationSummary* locations = invoke->GetLocations();
- if (locations->CanCall()) {
- locations->SetInAt(invoke->GetCurrentMethodInputIndex(), Location::RequiresRegister());
- }
return;
}
@@ -1287,9 +1283,9 @@ void InstructionCodeGeneratorARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirec
return;
}
- LocationSummary* locations = invoke->GetLocations();
- codegen_->GenerateStaticOrDirectCall(
- invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
+ Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
+
+ codegen_->GenerateStaticOrDirectCall(invoke, temp);
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
@@ -1320,8 +1316,12 @@ void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
Location receiver = locations->InAt(0);
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
// temp = object->GetClass();
- DCHECK(receiver.IsRegister());
- __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
+ if (receiver.IsStackSlot()) {
+ __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
+ __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
+ } else {
+ __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
+ }
codegen_->MaybeRecordImplicitNullCheck(invoke);
// temp = temp->GetMethodAt(method_offset);
uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
@@ -4206,7 +4206,9 @@ void InstructionCodeGeneratorARM::HandleBitwiseOperation(HBinaryOperation* instr
}
}
-void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
+void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Register temp) {
+ DCHECK_EQ(temp, kArtMethodRegister);
+
// TODO: Implement all kinds of calls:
// 1) boot -> boot
// 2) app -> boot
@@ -4215,32 +4217,32 @@ void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
// Currently we implement the app -> app logic, which looks up in the resolve cache.
if (invoke->IsStringInit()) {
- Register reg = temp.AsRegister<Register>();
// temp = thread->string_init_entrypoint
- __ LoadFromOffset(kLoadWord, reg, TR, invoke->GetStringInitOffset());
+ __ LoadFromOffset(kLoadWord, temp, TR, invoke->GetStringInitOffset());
// LR = temp[offset_of_quick_compiled_code]
- __ LoadFromOffset(kLoadWord, LR, reg,
+ __ LoadFromOffset(kLoadWord, LR, temp,
ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kArmWordSize).Int32Value());
// LR()
__ blx(LR);
- } else if (invoke->IsRecursive()) {
- __ bl(GetFrameEntryLabel());
} else {
- Register current_method =
- invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex()).AsRegister<Register>();
- Register reg = temp.AsRegister<Register>();
- // reg = current_method->dex_cache_resolved_methods_;
- __ LoadFromOffset(
- kLoadWord, reg, current_method, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
- // reg = reg[index_in_cache]
- __ LoadFromOffset(
- kLoadWord, reg, reg, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex()));
- // LR = reg[offset_of_quick_compiled_code]
- __ LoadFromOffset(kLoadWord, LR, reg, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kArmWordSize).Int32Value());
- // LR()
- __ blx(LR);
+ // temp = method;
+ LoadCurrentMethod(temp);
+ if (!invoke->IsRecursive()) {
+ // temp = temp->dex_cache_resolved_methods_;
+ __ LoadFromOffset(
+ kLoadWord, temp, temp, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
+ // temp = temp[index_in_cache]
+ __ LoadFromOffset(
+ kLoadWord, temp, temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex()));
+ // LR = temp[offset_of_quick_compiled_code]
+ __ LoadFromOffset(kLoadWord, LR, temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArmWordSize).Int32Value());
+ // LR()
+ __ blx(LR);
+ } else {
+ __ bl(GetFrameEntryLabel());
+ }
}
DCHECK(!IsLeafMethod());
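The restored dispatch has three shapes, repeated with minor variations in every back end this diff touches: string-init calls load an entrypoint off the current Thread, recursive calls branch straight to the frame entry label, and everything else reloads the current method and indexes dex_cache_resolved_methods_. A self-contained toy model of that decision, illustrative only and not ART code:

    #include <cstdio>

    // Toy model of the three static/direct call paths (names invented).
    enum class CallPath { kStringInit, kRecursive, kDexCacheLookup };

    static const char* Lowering(CallPath path) {
      switch (path) {
        case CallPath::kStringInit:
          return "temp = thread->string_init_entrypoint; call temp->code";
        case CallPath::kRecursive:
          return "branch to this method's own frame entry label";
        case CallPath::kDexCacheLookup:
          return "temp = current method; temp = temp->dex_cache[index]; call temp->code";
      }
      return "";
    }

    int main() {
      for (CallPath p : {CallPath::kStringInit, CallPath::kRecursive,
                         CallPath::kDexCacheLookup}) {
        std::printf("%s\n", Lowering(p));
      }
      return 0;
    }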
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index b871acd0ae..d84f2d3d20 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -301,7 +301,7 @@ class CodeGeneratorARM : public CodeGenerator {
Label* GetFrameEntryLabel() { return &frame_entry_label_; }
- void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
+ void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Register temp);
private:
// Labels for each block that will be compiled.
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index ac99d56676..2f607f70a3 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -484,7 +484,7 @@ Location InvokeDexCallingConventionVisitorARM64::GetNextLocation(Primitive::Type
}
Location InvokeDexCallingConventionVisitorARM64::GetMethodLocation() const {
- return LocationFrom(kArtMethodRegister);
+ return LocationFrom(x0);
}
CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph,
@@ -2227,10 +2227,6 @@ void LocationsBuilderARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* inv
IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena());
if (intrinsic.TryDispatch(invoke)) {
- LocationSummary* locations = invoke->GetLocations();
- if (locations->CanCall()) {
- locations->SetInAt(invoke->GetCurrentMethodInputIndex(), Location::RequiresRegister());
- }
return;
}
@@ -2246,8 +2242,9 @@ static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM64* codege
return false;
}
-void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
+void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Register temp) {
// Make sure that ArtMethod* is passed in kArtMethodRegister as per the calling convention.
+ DCHECK(temp.Is(kArtMethodRegister));
size_t index_in_cache = GetCachePointerOffset(invoke->GetDexMethodIndex());
// TODO: Implement all kinds of calls:
@@ -2258,30 +2255,30 @@ void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invok
// Currently we implement the app -> app logic, which looks up in the resolve cache.
if (invoke->IsStringInit()) {
- Register reg = XRegisterFrom(temp);
// temp = thread->string_init_entrypoint
- __ Ldr(reg.X(), MemOperand(tr, invoke->GetStringInitOffset()));
+ __ Ldr(temp.X(), MemOperand(tr, invoke->GetStringInitOffset()));
// LR = temp->entry_point_from_quick_compiled_code_;
__ Ldr(lr, MemOperand(
- reg, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize).Int32Value()));
+ temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize).Int32Value()));
// lr()
__ Blr(lr);
- } else if (invoke->IsRecursive()) {
- __ Bl(&frame_entry_label_);
} else {
- Register current_method =
- XRegisterFrom(invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex()));
- Register reg = XRegisterFrom(temp);
- // temp = current_method->dex_cache_resolved_methods_;
- __ Ldr(reg.W(), MemOperand(current_method.X(),
- ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
- // temp = temp[index_in_cache];
- __ Ldr(reg.X(), MemOperand(reg, index_in_cache));
- // lr = temp->entry_point_from_quick_compiled_code_;
- __ Ldr(lr, MemOperand(reg.X(), ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kArm64WordSize).Int32Value()));
- // lr();
- __ Blr(lr);
+ // temp = method;
+ LoadCurrentMethod(temp.X());
+ if (!invoke->IsRecursive()) {
+ // temp = temp->dex_cache_resolved_methods_;
+ __ Ldr(temp.W(), MemOperand(temp.X(),
+ ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
+ // temp = temp[index_in_cache];
+ __ Ldr(temp.X(), MemOperand(temp, index_in_cache));
+ // lr = temp->entry_point_from_quick_compiled_code_;
+ __ Ldr(lr, MemOperand(temp.X(), ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArm64WordSize).Int32Value()));
+ // lr();
+ __ Blr(lr);
+ } else {
+ __ Bl(&frame_entry_label_);
+ }
}
DCHECK(!IsLeafMethod());
@@ -2297,9 +2294,8 @@ void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDir
}
BlockPoolsScope block_pools(GetVIXLAssembler());
- LocationSummary* locations = invoke->GetLocations();
- codegen_->GenerateStaticOrDirectCall(
- invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
+ Register temp = XRegisterFrom(invoke->GetLocations()->GetTemp(0));
+ codegen_->GenerateStaticOrDirectCall(invoke, temp);
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
@@ -2318,8 +2314,14 @@ void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
BlockPoolsScope block_pools(GetVIXLAssembler());
- DCHECK(receiver.IsRegister());
- __ Ldr(temp.W(), HeapOperandFrom(receiver, class_offset));
+ // temp = object->GetClass();
+ if (receiver.IsStackSlot()) {
+ __ Ldr(temp.W(), MemOperand(sp, receiver.GetStackIndex()));
+ __ Ldr(temp.W(), HeapOperand(temp.W(), class_offset));
+ } else {
+ DCHECK(receiver.IsRegister());
+ __ Ldr(temp.W(), HeapOperandFrom(receiver, class_offset));
+ }
codegen_->MaybeRecordImplicitNullCheck(invoke);
// temp = temp->GetMethodAt(method_offset);
__ Ldr(temp, MemOperand(temp, method_offset));
@@ -2672,7 +2674,7 @@ void InstructionCodeGeneratorARM64::VisitParameterValue(
void LocationsBuilderARM64::VisitCurrentMethod(HCurrentMethod* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
- locations->SetOut(LocationFrom(kArtMethodRegister));
+ locations->SetOut(LocationFrom(x0));
}
void InstructionCodeGeneratorARM64::VisitCurrentMethod(
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 32466485ad..c62ba951cd 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -344,7 +344,7 @@ class CodeGeneratorARM64 : public CodeGenerator {
return false;
}
- void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
+ void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, vixl::Register temp);
private:
// Labels for each block that will be compiled.
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 4065c443fd..8a7b52e549 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1231,10 +1231,6 @@ void LocationsBuilderX86::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invok
IntrinsicLocationsBuilderX86 intrinsic(codegen_);
if (intrinsic.TryDispatch(invoke)) {
- LocationSummary* locations = invoke->GetLocations();
- if (locations->CanCall()) {
- locations->SetInAt(invoke->GetCurrentMethodInputIndex(), Location::RequiresRegister());
- }
return;
}
@@ -1259,9 +1255,8 @@ void InstructionCodeGeneratorX86::VisitInvokeStaticOrDirect(HInvokeStaticOrDirec
return;
}
- LocationSummary* locations = invoke->GetLocations();
codegen_->GenerateStaticOrDirectCall(
- invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
+ invoke, invoke->GetLocations()->GetTemp(0).AsRegister<Register>());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
@@ -1281,8 +1276,13 @@ void InstructionCodeGeneratorX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
LocationSummary* locations = invoke->GetLocations();
Location receiver = locations->InAt(0);
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- DCHECK(receiver.IsRegister());
- __ movl(temp, Address(receiver.AsRegister<Register>(), class_offset));
+ // temp = object->GetClass();
+ if (receiver.IsStackSlot()) {
+ __ movl(temp, Address(ESP, receiver.GetStackIndex()));
+ __ movl(temp, Address(temp, class_offset));
+ } else {
+ __ movl(temp, Address(receiver.AsRegister<Register>(), class_offset));
+ }
codegen_->MaybeRecordImplicitNullCheck(invoke);
// temp = temp->GetMethodAt(method_offset);
__ movl(temp, Address(temp, method_offset));
@@ -3201,7 +3201,7 @@ void InstructionCodeGeneratorX86::GenerateMemoryBarrier(MemBarrierKind kind) {
void CodeGeneratorX86::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
- Location temp) {
+ Register temp) {
// TODO: Implement all kinds of calls:
// 1) boot -> boot
// 2) app -> boot
@@ -3211,26 +3211,25 @@ void CodeGeneratorX86::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
if (invoke->IsStringInit()) {
// temp = thread->string_init_entrypoint
- Register reg = temp.AsRegister<Register>();
- __ fs()->movl(reg, Address::Absolute(invoke->GetStringInitOffset()));
+ __ fs()->movl(temp, Address::Absolute(invoke->GetStringInitOffset()));
// (temp + offset_of_quick_compiled_code)()
__ call(Address(
- reg, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
- } else if (invoke->IsRecursive()) {
- __ call(GetFrameEntryLabel());
+ temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
} else {
- Register current_method =
- invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex()).AsRegister<Register>();
- Register reg = temp.AsRegister<Register>();
- // temp = temp->dex_cache_resolved_methods_;
- __ movl(reg, Address(
- current_method, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
- // temp = temp[index_in_cache]
- __ movl(reg, Address(reg,
- CodeGenerator::GetCachePointerOffset(invoke->GetDexMethodIndex())));
- // (temp + offset_of_quick_compiled_code)()
- __ call(Address(reg,
- ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
+ // temp = method;
+ LoadCurrentMethod(temp);
+ if (!invoke->IsRecursive()) {
+ // temp = temp->dex_cache_resolved_methods_;
+ __ movl(temp, Address(temp, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
+ // temp = temp[index_in_cache]
+ __ movl(temp, Address(temp,
+ CodeGenerator::GetCachePointerOffset(invoke->GetDexMethodIndex())));
+ // (temp + offset_of_quick_compiled_code)()
+ __ call(Address(temp,
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
+ } else {
+ __ call(GetFrameEntryLabel());
+ }
}
DCHECK(!IsLeafMethod());
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index b8553d266d..61827a45ab 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -263,7 +263,7 @@ class CodeGeneratorX86 : public CodeGenerator {
void Move64(Location destination, Location source);
// Generate a call to a static or direct method.
- void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
+ void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Register temp);
// Emit a write barrier.
void MarkGCCard(Register temp,
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index c9fe813f20..a2a3cf523c 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -360,7 +360,7 @@ inline Condition X86_64Condition(IfCondition cond) {
}
void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
- Location temp) {
+ CpuRegister temp) {
// All registers are assumed to be correctly set up.
// TODO: Implement all kinds of calls:
@@ -371,28 +371,26 @@ void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invo
// Currently we implement the app -> app logic, which looks up in the resolve cache.
if (invoke->IsStringInit()) {
- CpuRegister reg = temp.AsRegister<CpuRegister>();
// temp = thread->string_init_entrypoint
- __ gs()->movl(reg, Address::Absolute(invoke->GetStringInitOffset()));
+ __ gs()->movl(temp, Address::Absolute(invoke->GetStringInitOffset()));
// (temp + offset_of_quick_compiled_code)()
- __ call(Address(reg, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ __ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kX86_64WordSize).SizeValue()));
- } else if (invoke->IsRecursive()) {
- __ call(&frame_entry_label_);
} else {
- LocationSummary* locations = invoke->GetLocations();
- CpuRegister reg = temp.AsRegister<CpuRegister>();
- CpuRegister current_method =
- locations->InAt(invoke->GetCurrentMethodInputIndex()).AsRegister<CpuRegister>();
- // temp = temp->dex_cache_resolved_methods_;
- __ movl(reg, Address(
- current_method, ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
- // temp = temp[index_in_cache]
- __ movq(reg, Address(
- reg, CodeGenerator::GetCachePointerOffset(invoke->GetDexMethodIndex())));
- // (temp + offset_of_quick_compiled_code)()
- __ call(Address(reg, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kX86_64WordSize).SizeValue()));
+ // temp = method;
+ LoadCurrentMethod(temp);
+ if (!invoke->IsRecursive()) {
+ // temp = temp->dex_cache_resolved_methods_;
+ __ movl(temp, Address(temp, ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
+ // temp = temp[index_in_cache]
+ __ movq(temp, Address(
+ temp, CodeGenerator::GetCachePointerOffset(invoke->GetDexMethodIndex())));
+ // (temp + offset_of_quick_compiled_code)()
+ __ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kX86_64WordSize).SizeValue()));
+ } else {
+ __ call(&frame_entry_label_);
+ }
}
DCHECK(!IsLeafMethod());
@@ -1336,10 +1334,6 @@ void LocationsBuilderX86_64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* in
IntrinsicLocationsBuilderX86_64 intrinsic(codegen_);
if (intrinsic.TryDispatch(invoke)) {
- LocationSummary* locations = invoke->GetLocations();
- if (locations->CanCall()) {
- locations->SetInAt(invoke->GetCurrentMethodInputIndex(), Location::RequiresRegister());
- }
return;
}
@@ -1364,9 +1358,9 @@ void InstructionCodeGeneratorX86_64::VisitInvokeStaticOrDirect(HInvokeStaticOrDi
return;
}
- LocationSummary* locations = invoke->GetLocations();
codegen_->GenerateStaticOrDirectCall(
- invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
+ invoke,
+ invoke->GetLocations()->GetTemp(0).AsRegister<CpuRegister>());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
@@ -1396,8 +1390,12 @@ void InstructionCodeGeneratorX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke)
Location receiver = locations->InAt(0);
size_t class_offset = mirror::Object::ClassOffset().SizeValue();
// temp = object->GetClass();
- DCHECK(receiver.IsRegister());
- __ movl(temp, Address(receiver.AsRegister<CpuRegister>(), class_offset));
+ if (receiver.IsStackSlot()) {
+ __ movl(temp, Address(CpuRegister(RSP), receiver.GetStackIndex()));
+ __ movl(temp, Address(temp, class_offset));
+ } else {
+ __ movl(temp, Address(receiver.AsRegister<CpuRegister>(), class_offset));
+ }
codegen_->MaybeRecordImplicitNullCheck(invoke);
// temp = temp->GetMethodAt(method_offset);
__ movq(temp, Address(temp, method_offset));
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 61f863c162..c19e686c10 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -277,7 +277,7 @@ class CodeGeneratorX86_64 : public CodeGenerator {
return false;
}
- void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
+ void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, CpuRegister temp);
const X86_64InstructionSetFeatures& GetInstructionSetFeatures() const {
return isa_features_;
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 749bedf99e..5436ec2dd9 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -101,8 +101,7 @@ class IntrinsicSlowPathARM : public SlowPathCodeARM {
MoveArguments(invoke_, codegen);
if (invoke_->IsInvokeStaticOrDirect()) {
- codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(),
- Location::RegisterLocation(kArtMethodRegister));
+ codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), kArtMethodRegister);
RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());
} else {
UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index c108ad5daa..d1dc5b3843 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -110,8 +110,7 @@ class IntrinsicSlowPathARM64 : public SlowPathCodeARM64 {
MoveArguments(invoke_, codegen);
if (invoke_->IsInvokeStaticOrDirect()) {
- codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(),
- LocationFrom(kArtMethodRegister));
+ codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), kArtMethodRegister);
RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());
} else {
UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 424ac7c855..5bbbc72020 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -138,8 +138,7 @@ class IntrinsicSlowPathX86 : public SlowPathCodeX86 {
MoveArguments(invoke_, codegen);
if (invoke_->IsInvokeStaticOrDirect()) {
- codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(),
- Location::RegisterLocation(EAX));
+ codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), EAX);
RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());
} else {
UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
@@ -733,8 +732,7 @@ static void InvokeOutOfLineIntrinsic(CodeGeneratorX86* codegen, HInvoke* invoke)
MoveArguments(invoke, codegen);
DCHECK(invoke->IsInvokeStaticOrDirect());
- codegen->GenerateStaticOrDirectCall(invoke->AsInvokeStaticOrDirect(),
- Location::RegisterLocation(EAX));
+ codegen->GenerateStaticOrDirectCall(invoke->AsInvokeStaticOrDirect(), EAX);
codegen->RecordPcInfo(invoke, invoke->GetDexPc());
// Copy the result back to the expected output.
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 891531435e..d6c90ff510 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -129,8 +129,7 @@ class IntrinsicSlowPathX86_64 : public SlowPathCodeX86_64 {
MoveArguments(invoke_, codegen);
if (invoke_->IsInvokeStaticOrDirect()) {
- codegen->GenerateStaticOrDirectCall(
- invoke_->AsInvokeStaticOrDirect(), Location::RegisterLocation(RDI));
+ codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), CpuRegister(RDI));
RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());
} else {
UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
@@ -610,8 +609,7 @@ static void InvokeOutOfLineIntrinsic(CodeGeneratorX86_64* codegen, HInvoke* invo
MoveArguments(invoke, codegen);
DCHECK(invoke->IsInvokeStaticOrDirect());
- codegen->GenerateStaticOrDirectCall(
- invoke->AsInvokeStaticOrDirect(), Location::RegisterLocation(RDI));
+ codegen->GenerateStaticOrDirectCall(invoke->AsInvokeStaticOrDirect(), CpuRegister(RDI));
codegen->RecordPcInfo(invoke, invoke->GetDexPc());
// Copy the result back to the expected output.
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index 66c5fb1102..09bbb33042 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -525,8 +525,6 @@ class LocationSummary : public ArenaObject<kArenaAllocMisc> {
return temps_.Size();
}
- bool HasTemps() const { return !temps_.IsEmpty(); }
-
Location Out() const { return output_; }
bool CanCall() const { return call_kind_ != kNoCall; }
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index d914363688..47927340f4 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -2528,9 +2528,7 @@ class HInvokeStaticOrDirect : public HInvoke {
ClinitCheckRequirement clinit_check_requirement)
: HInvoke(arena,
number_of_arguments,
- // There is one extra argument for the HCurrentMethod node, and
- // potentially one other if the clinit check is explicit.
- clinit_check_requirement == ClinitCheckRequirement::kExplicit ? 2u : 1u,
+ clinit_check_requirement == ClinitCheckRequirement::kExplicit ? 1u : 0u,
return_type,
dex_pc,
dex_method_index,
@@ -2552,7 +2550,6 @@ class HInvokeStaticOrDirect : public HInvoke {
bool NeedsDexCache() const OVERRIDE { return !IsRecursive(); }
bool IsStringInit() const { return string_init_offset_ != 0; }
int32_t GetStringInitOffset() const { return string_init_offset_; }
- uint32_t GetCurrentMethodInputIndex() const { return GetNumberOfArguments(); }
// Is this instruction a call to a static method?
bool IsStatic() const {
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index e38e49cd19..a381315bac 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -714,15 +714,13 @@ bool RegisterAllocator::TryAllocateFreeReg(LiveInterval* current) {
if (defined_by != nullptr && !current->IsSplit()) {
LocationSummary* locations = defined_by->GetLocations();
if (!locations->OutputCanOverlapWithInputs() && locations->Out().IsUnallocated()) {
- for (size_t i = 0, e = defined_by->InputCount(); i < e; ++i) {
+ for (HInputIterator it(defined_by); !it.Done(); it.Advance()) {
// Take the last interval of the input. It is the location of that interval
// that will be used at `defined_by`.
- LiveInterval* interval = defined_by->InputAt(i)->GetLiveInterval()->GetLastSibling();
+ LiveInterval* interval = it.Current()->GetLiveInterval()->GetLastSibling();
// Note that interval may have not been processed yet.
// TODO: Handle non-split intervals last in the work list.
- if (locations->InAt(i).IsValid()
- && interval->HasRegister()
- && interval->SameRegisterKind(*current)) {
+ if (interval->HasRegister() && interval->SameRegisterKind(*current)) {
// The input must be live until the end of `defined_by`, to comply to
// the linear scan algorithm. So we use `defined_by`'s end lifetime
// position to check whether the input is dead or is inactive after
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 701dbb019b..d5f977feec 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -242,7 +242,7 @@ void SsaLivenessAnalysis::ComputeLiveRanges() {
HInstruction* input = current->InputAt(i);
// Some instructions 'inline' their inputs, that is they do not need
// to be materialized.
- if (input->HasSsaIndex() && current->GetLocations()->InAt(i).IsValid()) {
+ if (input->HasSsaIndex()) {
live_in->SetBit(input->GetSsaIndex());
input->GetLiveInterval()->AddUse(current, /* environment */ nullptr, i);
}
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 220ee6a8d0..4667825a62 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -394,7 +394,7 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
first_range_->start_ = from;
} else {
// Instruction without uses.
- DCHECK(first_use_ == nullptr);
+ DCHECK(!defined_by_->HasNonEnvironmentUses());
DCHECK(from == defined_by_->GetLifetimePosition());
first_range_ = last_range_ = range_search_start_ =
new (allocator_) LiveRange(from, from + 2, nullptr);
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 5663e3973d..42b9182d55 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -133,13 +133,13 @@ size_t StackMapStream::PrepareForFillIn() {
stack_mask_size_ = RoundUp(stack_mask_number_of_bits, kBitsPerByte) / kBitsPerByte;
inline_info_size_ = ComputeInlineInfoSize();
dex_register_maps_size_ = ComputeDexRegisterMapsSize();
- stack_maps_size_ = stack_maps_.Size()
- * StackMap::ComputeStackMapSize(stack_mask_size_,
- inline_info_size_,
- dex_register_maps_size_,
- dex_pc_max_,
- native_pc_offset_max_,
- register_mask_max_);
+ stack_map_encoding_ = StackMapEncoding::CreateFromSizes(stack_mask_size_,
+ inline_info_size_,
+ dex_register_maps_size_,
+ dex_pc_max_,
+ native_pc_offset_max_,
+ register_mask_max_);
+ stack_maps_size_ = stack_maps_.Size() * stack_map_encoding_.ComputeStackMapSize();
dex_register_location_catalog_size_ = ComputeDexRegisterLocationCatalogSize();
// Note: use RoundUp to word-size here if you want CodeInfo objects to be word aligned.
@@ -235,14 +235,9 @@ void StackMapStream::FillIn(MemoryRegion region) {
MemoryRegion inline_infos_region = region.Subregion(
inline_infos_start_, inline_info_size_);
- code_info.SetEncoding(inline_info_size_,
- dex_register_maps_size_,
- dex_pc_max_,
- native_pc_offset_max_,
- register_mask_max_);
+ code_info.SetEncoding(stack_map_encoding_);
code_info.SetNumberOfStackMaps(stack_maps_.Size());
- code_info.SetStackMaskSize(stack_mask_size_);
- DCHECK_EQ(code_info.GetStackMapsSize(), stack_maps_size_);
+ DCHECK_EQ(code_info.GetStackMapsSize(code_info.ExtractEncoding()), stack_maps_size_);
// Set the Dex register location catalog.
code_info.SetNumberOfDexRegisterLocationCatalogEntries(location_catalog_entries_.Size());
@@ -263,26 +258,27 @@ void StackMapStream::FillIn(MemoryRegion region) {
uintptr_t next_dex_register_map_offset = 0;
uintptr_t next_inline_info_offset = 0;
for (size_t i = 0, e = stack_maps_.Size(); i < e; ++i) {
- StackMap stack_map = code_info.GetStackMapAt(i);
+ StackMap stack_map = code_info.GetStackMapAt(i, stack_map_encoding_);
StackMapEntry entry = stack_maps_.Get(i);
- stack_map.SetDexPc(code_info, entry.dex_pc);
- stack_map.SetNativePcOffset(code_info, entry.native_pc_offset);
- stack_map.SetRegisterMask(code_info, entry.register_mask);
+ stack_map.SetDexPc(stack_map_encoding_, entry.dex_pc);
+ stack_map.SetNativePcOffset(stack_map_encoding_, entry.native_pc_offset);
+ stack_map.SetRegisterMask(stack_map_encoding_, entry.register_mask);
if (entry.sp_mask != nullptr) {
- stack_map.SetStackMask(code_info, *entry.sp_mask);
+ stack_map.SetStackMask(stack_map_encoding_, *entry.sp_mask);
}
if (entry.num_dex_registers == 0) {
// No dex map available.
- stack_map.SetDexRegisterMapOffset(code_info, StackMap::kNoDexRegisterMap);
+ stack_map.SetDexRegisterMapOffset(stack_map_encoding_, StackMap::kNoDexRegisterMap);
} else {
// Search for an entry with the same dex map.
if (entry.same_dex_register_map_as_ != kNoSameDexMapFound) {
// If we have a hit reuse the offset.
- stack_map.SetDexRegisterMapOffset(code_info,
- code_info.GetStackMapAt(entry.same_dex_register_map_as_)
- .GetDexRegisterMapOffset(code_info));
+ stack_map.SetDexRegisterMapOffset(
+ stack_map_encoding_,
+ code_info.GetStackMapAt(entry.same_dex_register_map_as_, stack_map_encoding_)
+ .GetDexRegisterMapOffset(stack_map_encoding_));
} else {
// New dex registers maps should be added to the stack map.
MemoryRegion register_region = dex_register_locations_region.Subregion(
@@ -291,7 +287,7 @@ void StackMapStream::FillIn(MemoryRegion region) {
next_dex_register_map_offset += register_region.size();
DexRegisterMap dex_register_map(register_region);
stack_map.SetDexRegisterMapOffset(
- code_info, register_region.start() - dex_register_locations_region.start());
+ stack_map_encoding_, register_region.start() - dex_register_locations_region.start());
// Set the dex register location.
FillInDexRegisterMap(dex_register_map,
@@ -311,7 +307,7 @@ void StackMapStream::FillIn(MemoryRegion region) {
// Currently relative to the dex register map.
stack_map.SetInlineDescriptorOffset(
- code_info, inline_region.start() - dex_register_locations_region.start());
+ stack_map_encoding_, inline_region.start() - dex_register_locations_region.start());
inline_info.SetDepth(entry.inlining_depth);
for (size_t depth = 0; depth < entry.inlining_depth; ++depth) {
@@ -341,7 +337,7 @@ void StackMapStream::FillIn(MemoryRegion region) {
}
} else {
if (inline_info_size_ != 0) {
- stack_map.SetInlineDescriptorOffset(code_info, StackMap::kNoInlineInfo);
+ stack_map.SetInlineDescriptorOffset(stack_map_encoding_, StackMap::kNoInlineInfo);
}
}
}
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 0af983b1bf..274d573350 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -171,6 +171,7 @@ class StackMapStream : public ValueObject {
StackMapEntry current_entry_;
InlineInfoEntry current_inline_info_;
+ StackMapEncoding stack_map_encoding_;
size_t stack_mask_size_;
size_t inline_info_size_;
size_t dex_register_maps_size_;
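The shape of the stack-map refactor, which the test changes below exercise: field widths are computed once into a StackMapEncoding, and every StackMap accessor takes that encoding as a parameter instead of re-deriving byte sizes from the whole CodeInfo on each read. A self-contained toy of the pattern, with all names hypothetical:

    #include <cstdint>
    #include <cstdio>

    // Toy "encoding": byte widths computed once, then threaded through reads.
    struct Encoding {
      uint8_t dex_pc_bytes;
    };

    struct StackMapView {
      const uint8_t* data;
      // The accessor takes the encoding rather than recomputing widths per call.
      uint32_t DexPc(const Encoding& e) const {
        uint32_t value = 0;
        for (uint8_t i = 0; i < e.dex_pc_bytes; ++i) {
          value |= static_cast<uint32_t>(data[i]) << (8 * i);
        }
        return value;
      }
    };

    int main() {
      const uint8_t raw[] = {0x2A, 0x00};  // dex pc 42, encoded in 2 bytes
      Encoding encoding{2};                // the "ExtractEncoding()" step, done once
      StackMapView stack_map{raw};
      std::printf("dex pc = %u\n", stack_map.DexPc(encoding));
      return 0;
    }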
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 666fb604c0..b4ac1b4d1a 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -51,32 +51,33 @@ TEST(StackMapTest, Test1) {
stream.FillIn(region);
CodeInfo code_info(region);
- ASSERT_EQ(0u, code_info.GetStackMaskSize());
+ StackMapEncoding encoding = code_info.ExtractEncoding();
+ ASSERT_EQ(0u, encoding.NumberOfBytesForStackMask());
ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
uint32_t number_of_location_catalog_entries =
code_info.GetNumberOfDexRegisterLocationCatalogEntries();
ASSERT_EQ(2u, number_of_location_catalog_entries);
- DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog();
+ DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding);
// The Dex register location catalog contains:
// - one 1-byte short Dex register location, and
// - one 5-byte large Dex register location.
size_t expected_location_catalog_size = 1u + 5u;
ASSERT_EQ(expected_location_catalog_size, location_catalog.Size());
- StackMap stack_map = code_info.GetStackMapAt(0);
- ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0)));
- ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64)));
- ASSERT_EQ(0u, stack_map.GetDexPc(code_info));
- ASSERT_EQ(64u, stack_map.GetNativePcOffset(code_info));
- ASSERT_EQ(0x3u, stack_map.GetRegisterMask(code_info));
+ StackMap stack_map = code_info.GetStackMapAt(0, encoding);
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
+ ASSERT_EQ(0u, stack_map.GetDexPc(encoding));
+ ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding));
+ ASSERT_EQ(0x3u, stack_map.GetRegisterMask(encoding));
- MemoryRegion stack_mask = stack_map.GetStackMask(code_info);
+ MemoryRegion stack_mask = stack_map.GetStackMask(encoding);
ASSERT_TRUE(SameBits(stack_mask, sp_mask));
- ASSERT_TRUE(stack_map.HasDexRegisterMap(code_info));
+ ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding));
DexRegisterMap dex_register_map =
- code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+ code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters(number_of_dex_registers));
@@ -86,16 +87,17 @@ TEST(StackMapTest, Test1) {
size_t expected_dex_register_map_size = 1u + 1u;
ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size());
- ASSERT_EQ(Kind::kInStack,
- dex_register_map.GetLocationKind(0, number_of_dex_registers, code_info));
- ASSERT_EQ(Kind::kConstant,
- dex_register_map.GetLocationKind(1, number_of_dex_registers, code_info));
- ASSERT_EQ(Kind::kInStack,
- dex_register_map.GetLocationInternalKind(0, number_of_dex_registers, code_info));
- ASSERT_EQ(Kind::kConstantLargeValue,
- dex_register_map.GetLocationInternalKind(1, number_of_dex_registers, code_info));
- ASSERT_EQ(0, dex_register_map.GetStackOffsetInBytes(0, number_of_dex_registers, code_info));
- ASSERT_EQ(-2, dex_register_map.GetConstant(1, number_of_dex_registers, code_info));
+ ASSERT_EQ(Kind::kInStack, dex_register_map.GetLocationKind(
+ 0, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(Kind::kConstant, dex_register_map.GetLocationKind(
+ 1, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(Kind::kInStack, dex_register_map.GetLocationInternalKind(
+ 0, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(Kind::kConstantLargeValue, dex_register_map.GetLocationInternalKind(
+ 1, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(0, dex_register_map.GetStackOffsetInBytes(
+ 0, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(-2, dex_register_map.GetConstant(1, number_of_dex_registers, code_info, encoding));
size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
0, number_of_dex_registers, number_of_location_catalog_entries);
@@ -112,7 +114,7 @@ TEST(StackMapTest, Test1) {
ASSERT_EQ(0, location0.GetValue());
ASSERT_EQ(-2, location1.GetValue());
- ASSERT_FALSE(stack_map.HasInlineInfo(code_info));
+ ASSERT_FALSE(stack_map.HasInlineInfo(encoding));
}
TEST(StackMapTest, Test2) {
@@ -148,13 +150,14 @@ TEST(StackMapTest, Test2) {
stream.FillIn(region);
CodeInfo code_info(region);
- ASSERT_EQ(2u, code_info.GetStackMaskSize());
+ StackMapEncoding encoding = code_info.ExtractEncoding();
+ ASSERT_EQ(2u, encoding.NumberOfBytesForStackMask());
ASSERT_EQ(2u, code_info.GetNumberOfStackMaps());
uint32_t number_of_location_catalog_entries =
code_info.GetNumberOfDexRegisterLocationCatalogEntries();
ASSERT_EQ(4u, number_of_location_catalog_entries);
- DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog();
+ DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding);
// The Dex register location catalog contains:
// - three 1-byte short Dex register locations, and
// - one 5-byte large Dex register location.
@@ -163,19 +166,19 @@ TEST(StackMapTest, Test2) {
// First stack map.
{
- StackMap stack_map = code_info.GetStackMapAt(0);
- ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0)));
- ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64)));
- ASSERT_EQ(0u, stack_map.GetDexPc(code_info));
- ASSERT_EQ(64u, stack_map.GetNativePcOffset(code_info));
- ASSERT_EQ(0x3u, stack_map.GetRegisterMask(code_info));
-
- MemoryRegion stack_mask = stack_map.GetStackMask(code_info);
+ StackMap stack_map = code_info.GetStackMapAt(0, encoding);
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
+ ASSERT_EQ(0u, stack_map.GetDexPc(encoding));
+ ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding));
+ ASSERT_EQ(0x3u, stack_map.GetRegisterMask(encoding));
+
+ MemoryRegion stack_mask = stack_map.GetStackMask(encoding);
ASSERT_TRUE(SameBits(stack_mask, sp_mask1));
- ASSERT_TRUE(stack_map.HasDexRegisterMap(code_info));
+ ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding));
DexRegisterMap dex_register_map =
- code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+ code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters(number_of_dex_registers));
@@ -185,16 +188,17 @@ TEST(StackMapTest, Test2) {
size_t expected_dex_register_map_size = 1u + 1u;
ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size());
- ASSERT_EQ(Kind::kInStack,
- dex_register_map.GetLocationKind(0, number_of_dex_registers, code_info));
- ASSERT_EQ(Kind::kConstant,
- dex_register_map.GetLocationKind(1, number_of_dex_registers, code_info));
- ASSERT_EQ(Kind::kInStack,
- dex_register_map.GetLocationInternalKind(0, number_of_dex_registers, code_info));
- ASSERT_EQ(Kind::kConstantLargeValue,
- dex_register_map.GetLocationInternalKind(1, number_of_dex_registers, code_info));
- ASSERT_EQ(0, dex_register_map.GetStackOffsetInBytes(0, number_of_dex_registers, code_info));
- ASSERT_EQ(-2, dex_register_map.GetConstant(1, number_of_dex_registers, code_info));
+ ASSERT_EQ(Kind::kInStack, dex_register_map.GetLocationKind(
+ 0, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(Kind::kConstant, dex_register_map.GetLocationKind(
+ 1, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(Kind::kInStack, dex_register_map.GetLocationInternalKind(
+ 0, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(Kind::kConstantLargeValue, dex_register_map.GetLocationInternalKind(
+ 1, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(0, dex_register_map.GetStackOffsetInBytes(
+ 0, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(-2, dex_register_map.GetConstant(1, number_of_dex_registers, code_info, encoding));
size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
0, number_of_dex_registers, number_of_location_catalog_entries);
@@ -211,8 +215,8 @@ TEST(StackMapTest, Test2) {
ASSERT_EQ(0, location0.GetValue());
ASSERT_EQ(-2, location1.GetValue());
- ASSERT_TRUE(stack_map.HasInlineInfo(code_info));
- InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
+ ASSERT_TRUE(stack_map.HasInlineInfo(encoding));
+ InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
ASSERT_EQ(2u, inline_info.GetDepth());
ASSERT_EQ(82u, inline_info.GetMethodIndexAtDepth(0));
ASSERT_EQ(42u, inline_info.GetMethodIndexAtDepth(1));
@@ -224,19 +228,19 @@ TEST(StackMapTest, Test2) {
// Second stack map.
{
- StackMap stack_map = code_info.GetStackMapAt(1);
- ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(1u)));
- ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(128u)));
- ASSERT_EQ(1u, stack_map.GetDexPc(code_info));
- ASSERT_EQ(128u, stack_map.GetNativePcOffset(code_info));
- ASSERT_EQ(0xFFu, stack_map.GetRegisterMask(code_info));
-
- MemoryRegion stack_mask = stack_map.GetStackMask(code_info);
+ StackMap stack_map = code_info.GetStackMapAt(1, encoding);
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(1u, encoding)));
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(128u, encoding)));
+ ASSERT_EQ(1u, stack_map.GetDexPc(encoding));
+ ASSERT_EQ(128u, stack_map.GetNativePcOffset(encoding));
+ ASSERT_EQ(0xFFu, stack_map.GetRegisterMask(encoding));
+
+ MemoryRegion stack_mask = stack_map.GetStackMask(encoding);
ASSERT_TRUE(SameBits(stack_mask, sp_mask2));
- ASSERT_TRUE(stack_map.HasDexRegisterMap(code_info));
+ ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding));
DexRegisterMap dex_register_map =
- code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+ code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters(number_of_dex_registers));
@@ -246,16 +250,18 @@ TEST(StackMapTest, Test2) {
size_t expected_dex_register_map_size = 1u + 1u;
ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size());
- ASSERT_EQ(Kind::kInRegister,
- dex_register_map.GetLocationKind(0, number_of_dex_registers, code_info));
- ASSERT_EQ(Kind::kInFpuRegister,
- dex_register_map.GetLocationKind(1, number_of_dex_registers, code_info));
- ASSERT_EQ(Kind::kInRegister,
- dex_register_map.GetLocationInternalKind(0, number_of_dex_registers, code_info));
- ASSERT_EQ(Kind::kInFpuRegister,
- dex_register_map.GetLocationInternalKind(1, number_of_dex_registers, code_info));
- ASSERT_EQ(18, dex_register_map.GetMachineRegister(0, number_of_dex_registers, code_info));
- ASSERT_EQ(3, dex_register_map.GetMachineRegister(1, number_of_dex_registers, code_info));
+ ASSERT_EQ(Kind::kInRegister, dex_register_map.GetLocationKind(
+ 0, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(Kind::kInFpuRegister, dex_register_map.GetLocationKind(
+ 1, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(Kind::kInRegister, dex_register_map.GetLocationInternalKind(
+ 0, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(Kind::kInFpuRegister, dex_register_map.GetLocationInternalKind(
+ 1, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(18, dex_register_map.GetMachineRegister(
+ 0, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(3, dex_register_map.GetMachineRegister(
+ 1, number_of_dex_registers, code_info, encoding));
size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
0, number_of_dex_registers, number_of_location_catalog_entries);
@@ -272,7 +278,7 @@ TEST(StackMapTest, Test2) {
ASSERT_EQ(18, location0.GetValue());
ASSERT_EQ(3, location1.GetValue());
- ASSERT_FALSE(stack_map.HasInlineInfo(code_info));
+ ASSERT_FALSE(stack_map.HasInlineInfo(encoding));
}
}
@@ -294,28 +300,29 @@ TEST(StackMapTest, TestNonLiveDexRegisters) {
stream.FillIn(region);
CodeInfo code_info(region);
- ASSERT_EQ(0u, code_info.GetStackMaskSize());
+ StackMapEncoding encoding = code_info.ExtractEncoding();
+ ASSERT_EQ(0u, encoding.NumberOfBytesForStackMask());
ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
uint32_t number_of_location_catalog_entries =
code_info.GetNumberOfDexRegisterLocationCatalogEntries();
ASSERT_EQ(1u, number_of_location_catalog_entries);
- DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog();
+ DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding);
// The Dex register location catalog contains:
// - one 5-byte large Dex register location.
size_t expected_location_catalog_size = 5u;
ASSERT_EQ(expected_location_catalog_size, location_catalog.Size());
- StackMap stack_map = code_info.GetStackMapAt(0);
- ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0)));
- ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64)));
- ASSERT_EQ(0u, stack_map.GetDexPc(code_info));
- ASSERT_EQ(64u, stack_map.GetNativePcOffset(code_info));
- ASSERT_EQ(0x3u, stack_map.GetRegisterMask(code_info));
+ StackMap stack_map = code_info.GetStackMapAt(0, encoding);
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
+ ASSERT_EQ(0u, stack_map.GetDexPc(encoding));
+ ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding));
+ ASSERT_EQ(0x3u, stack_map.GetRegisterMask(encoding));
- ASSERT_TRUE(stack_map.HasDexRegisterMap(code_info));
+ ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding));
DexRegisterMap dex_register_map =
- code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+ code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
ASSERT_FALSE(dex_register_map.IsDexRegisterLive(0));
ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
ASSERT_EQ(1u, dex_register_map.GetNumberOfLiveDexRegisters(number_of_dex_registers));
@@ -325,15 +332,15 @@ TEST(StackMapTest, TestNonLiveDexRegisters) {
size_t expected_dex_register_map_size = 1u + 0u;
ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size());
- ASSERT_EQ(Kind::kNone,
- dex_register_map.GetLocationKind(0, number_of_dex_registers, code_info));
- ASSERT_EQ(Kind::kConstant,
- dex_register_map.GetLocationKind(1, number_of_dex_registers, code_info));
- ASSERT_EQ(Kind::kNone,
- dex_register_map.GetLocationInternalKind(0, number_of_dex_registers, code_info));
- ASSERT_EQ(Kind::kConstantLargeValue,
- dex_register_map.GetLocationInternalKind(1, number_of_dex_registers, code_info));
- ASSERT_EQ(-2, dex_register_map.GetConstant(1, number_of_dex_registers, code_info));
+ ASSERT_EQ(Kind::kNone, dex_register_map.GetLocationKind(
+ 0, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(Kind::kConstant, dex_register_map.GetLocationKind(
+ 1, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(Kind::kNone, dex_register_map.GetLocationInternalKind(
+ 0, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(Kind::kConstantLargeValue, dex_register_map.GetLocationInternalKind(
+ 1, number_of_dex_registers, code_info, encoding));
+ ASSERT_EQ(-2, dex_register_map.GetConstant(1, number_of_dex_registers, code_info, encoding));
size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
0, number_of_dex_registers, number_of_location_catalog_entries);
@@ -350,7 +357,7 @@ TEST(StackMapTest, TestNonLiveDexRegisters) {
ASSERT_EQ(0, location0.GetValue());
ASSERT_EQ(-2, location1.GetValue());
- ASSERT_FALSE(stack_map.HasInlineInfo(code_info));
+ ASSERT_FALSE(stack_map.HasInlineInfo(encoding));
}
// Generate a stack map whose dex register offset is
@@ -387,6 +394,7 @@ TEST(StackMapTest, DexRegisterMapOffsetOverflow) {
stream.FillIn(region);
CodeInfo code_info(region);
+ StackMapEncoding encoding = code_info.ExtractEncoding();
// The location catalog contains two entries (DexRegisterLocation(kConstant, 0)
// and DexRegisterLocation(kConstant, 1)), therefore the location catalog index
// has a size of 1 bit.
@@ -402,20 +410,20 @@ TEST(StackMapTest, DexRegisterMapOffsetOverflow) {
// locations (that is, 127 bytes of data).
// Hence it has a size of 255 bytes, and therefore...
ASSERT_EQ(128u, DexRegisterMap::GetLiveBitMaskSize(number_of_dex_registers));
- StackMap stack_map0 = code_info.GetStackMapAt(0);
+ StackMap stack_map0 = code_info.GetStackMapAt(0, encoding);
DexRegisterMap dex_register_map0 =
- code_info.GetDexRegisterMapOf(stack_map0, number_of_dex_registers);
+ code_info.GetDexRegisterMapOf(stack_map0, encoding, number_of_dex_registers);
ASSERT_EQ(127u, dex_register_map0.GetLocationMappingDataSize(number_of_dex_registers,
number_of_location_catalog_entries));
ASSERT_EQ(255u, dex_register_map0.Size());
- StackMap stack_map1 = code_info.GetStackMapAt(1);
- ASSERT_TRUE(stack_map1.HasDexRegisterMap(code_info));
+ StackMap stack_map1 = code_info.GetStackMapAt(1, encoding);
+ ASSERT_TRUE(stack_map1.HasDexRegisterMap(encoding));
// ...the offset of the second Dex register map (relative to the
// beginning of the Dex register maps region) is 255 (i.e.,
// kNoDexRegisterMapSmallEncoding).
- ASSERT_NE(stack_map1.GetDexRegisterMapOffset(code_info), StackMap::kNoDexRegisterMap);
- ASSERT_EQ(stack_map1.GetDexRegisterMapOffset(code_info), 0xFFu);
+ ASSERT_NE(stack_map1.GetDexRegisterMapOffset(encoding), StackMap::kNoDexRegisterMap);
+ ASSERT_EQ(stack_map1.GetDexRegisterMapOffset(encoding), 0xFFu);
}
TEST(StackMapTest, TestShareDexRegisterMap) {
@@ -447,28 +455,30 @@ TEST(StackMapTest, TestShareDexRegisterMap) {
stream.FillIn(region);
CodeInfo ci(region);
+ StackMapEncoding encoding = ci.ExtractEncoding();
+
// Verify first stack map.
- StackMap sm0 = ci.GetStackMapAt(0);
- DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm0, number_of_dex_registers);
- ASSERT_EQ(0, dex_registers0.GetMachineRegister(0, number_of_dex_registers, ci));
- ASSERT_EQ(-2, dex_registers0.GetConstant(1, number_of_dex_registers, ci));
+ StackMap sm0 = ci.GetStackMapAt(0, encoding);
+ DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm0, encoding, number_of_dex_registers);
+ ASSERT_EQ(0, dex_registers0.GetMachineRegister(0, number_of_dex_registers, ci, encoding));
+ ASSERT_EQ(-2, dex_registers0.GetConstant(1, number_of_dex_registers, ci, encoding));
// Verify second stack map.
- StackMap sm1 = ci.GetStackMapAt(1);
- DexRegisterMap dex_registers1 = ci.GetDexRegisterMapOf(sm1, number_of_dex_registers);
- ASSERT_EQ(0, dex_registers1.GetMachineRegister(0, number_of_dex_registers, ci));
- ASSERT_EQ(-2, dex_registers1.GetConstant(1, number_of_dex_registers, ci));
+ StackMap sm1 = ci.GetStackMapAt(1, encoding);
+ DexRegisterMap dex_registers1 = ci.GetDexRegisterMapOf(sm1, encoding, number_of_dex_registers);
+ ASSERT_EQ(0, dex_registers1.GetMachineRegister(0, number_of_dex_registers, ci, encoding));
+ ASSERT_EQ(-2, dex_registers1.GetConstant(1, number_of_dex_registers, ci, encoding));
// Verify third stack map.
- StackMap sm2 = ci.GetStackMapAt(2);
- DexRegisterMap dex_registers2 = ci.GetDexRegisterMapOf(sm2, number_of_dex_registers);
- ASSERT_EQ(2, dex_registers2.GetMachineRegister(0, number_of_dex_registers, ci));
- ASSERT_EQ(-2, dex_registers2.GetConstant(1, number_of_dex_registers, ci));
+ StackMap sm2 = ci.GetStackMapAt(2, encoding);
+ DexRegisterMap dex_registers2 = ci.GetDexRegisterMapOf(sm2, encoding, number_of_dex_registers);
+ ASSERT_EQ(2, dex_registers2.GetMachineRegister(0, number_of_dex_registers, ci, encoding));
+ ASSERT_EQ(-2, dex_registers2.GetConstant(1, number_of_dex_registers, ci, encoding));
// Verify dex register map offsets.
- ASSERT_EQ(sm0.GetDexRegisterMapOffset(ci), sm1.GetDexRegisterMapOffset(ci));
- ASSERT_NE(sm0.GetDexRegisterMapOffset(ci), sm2.GetDexRegisterMapOffset(ci));
- ASSERT_NE(sm1.GetDexRegisterMapOffset(ci), sm2.GetDexRegisterMapOffset(ci));
+ ASSERT_EQ(sm0.GetDexRegisterMapOffset(encoding), sm1.GetDexRegisterMapOffset(encoding));
+ ASSERT_NE(sm0.GetDexRegisterMapOffset(encoding), sm2.GetDexRegisterMapOffset(encoding));
+ ASSERT_NE(sm1.GetDexRegisterMapOffset(encoding), sm2.GetDexRegisterMapOffset(encoding));
}
TEST(StackMapTest, TestNoDexRegisterMap) {
@@ -487,24 +497,25 @@ TEST(StackMapTest, TestNoDexRegisterMap) {
stream.FillIn(region);
CodeInfo code_info(region);
- ASSERT_EQ(0u, code_info.GetStackMaskSize());
+ StackMapEncoding encoding = code_info.ExtractEncoding();
+ ASSERT_EQ(0u, encoding.NumberOfBytesForStackMask());
ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
uint32_t number_of_location_catalog_entries =
code_info.GetNumberOfDexRegisterLocationCatalogEntries();
ASSERT_EQ(0u, number_of_location_catalog_entries);
- DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog();
+ DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding);
ASSERT_EQ(0u, location_catalog.Size());
- StackMap stack_map = code_info.GetStackMapAt(0);
- ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0)));
- ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64)));
- ASSERT_EQ(0u, stack_map.GetDexPc(code_info));
- ASSERT_EQ(64u, stack_map.GetNativePcOffset(code_info));
- ASSERT_EQ(0x3u, stack_map.GetRegisterMask(code_info));
+ StackMap stack_map = code_info.GetStackMapAt(0, encoding);
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
+ ASSERT_EQ(0u, stack_map.GetDexPc(encoding));
+ ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding));
+ ASSERT_EQ(0x3u, stack_map.GetRegisterMask(encoding));
- ASSERT_FALSE(stack_map.HasDexRegisterMap(code_info));
- ASSERT_FALSE(stack_map.HasInlineInfo(code_info));
+ ASSERT_FALSE(stack_map.HasDexRegisterMap(encoding));
+ ASSERT_FALSE(stack_map.HasInlineInfo(encoding));
}
TEST(StackMapTest, InlineTest) {
@@ -579,16 +590,17 @@ TEST(StackMapTest, InlineTest) {
stream.FillIn(region);
CodeInfo ci(region);
+ StackMapEncoding encoding = ci.ExtractEncoding();
{
// Verify first stack map.
- StackMap sm0 = ci.GetStackMapAt(0);
+ StackMap sm0 = ci.GetStackMapAt(0, encoding);
- DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm0, 2);
- ASSERT_EQ(0, dex_registers0.GetStackOffsetInBytes(0, 2, ci));
- ASSERT_EQ(4, dex_registers0.GetConstant(1, 2, ci));
+ DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm0, encoding, 2);
+ ASSERT_EQ(0, dex_registers0.GetStackOffsetInBytes(0, 2, ci, encoding));
+ ASSERT_EQ(4, dex_registers0.GetConstant(1, 2, ci, encoding));
- InlineInfo if0 = ci.GetInlineInfoOf(sm0);
+ InlineInfo if0 = ci.GetInlineInfoOf(sm0, encoding);
ASSERT_EQ(2u, if0.GetDepth());
ASSERT_EQ(2u, if0.GetDexPcAtDepth(0));
ASSERT_EQ(42u, if0.GetMethodIndexAtDepth(0));
@@ -597,24 +609,24 @@ TEST(StackMapTest, InlineTest) {
ASSERT_EQ(82u, if0.GetMethodIndexAtDepth(1));
ASSERT_EQ(kStatic, if0.GetInvokeTypeAtDepth(1));
- DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(0, if0, 1);
- ASSERT_EQ(8, dex_registers1.GetStackOffsetInBytes(0, 1, ci));
+ DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(0, if0, encoding, 1);
+ ASSERT_EQ(8, dex_registers1.GetStackOffsetInBytes(0, 1, ci, encoding));
- DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(1, if0, 3);
- ASSERT_EQ(16, dex_registers2.GetStackOffsetInBytes(0, 3, ci));
- ASSERT_EQ(20, dex_registers2.GetConstant(1, 3, ci));
- ASSERT_EQ(15, dex_registers2.GetMachineRegister(2, 3, ci));
+ DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(1, if0, encoding, 3);
+ ASSERT_EQ(16, dex_registers2.GetStackOffsetInBytes(0, 3, ci, encoding));
+ ASSERT_EQ(20, dex_registers2.GetConstant(1, 3, ci, encoding));
+ ASSERT_EQ(15, dex_registers2.GetMachineRegister(2, 3, ci, encoding));
}
{
// Verify second stack map.
- StackMap sm1 = ci.GetStackMapAt(1);
+ StackMap sm1 = ci.GetStackMapAt(1, encoding);
- DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm1, 2);
- ASSERT_EQ(56, dex_registers0.GetStackOffsetInBytes(0, 2, ci));
- ASSERT_EQ(0, dex_registers0.GetConstant(1, 2, ci));
+ DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm1, encoding, 2);
+ ASSERT_EQ(56, dex_registers0.GetStackOffsetInBytes(0, 2, ci, encoding));
+ ASSERT_EQ(0, dex_registers0.GetConstant(1, 2, ci, encoding));
- InlineInfo if1 = ci.GetInlineInfoOf(sm1);
+ InlineInfo if1 = ci.GetInlineInfoOf(sm1, encoding);
ASSERT_EQ(3u, if1.GetDepth());
ASSERT_EQ(2u, if1.GetDexPcAtDepth(0));
ASSERT_EQ(42u, if1.GetMethodIndexAtDepth(0));
@@ -626,36 +638,36 @@ TEST(StackMapTest, InlineTest) {
ASSERT_EQ(52u, if1.GetMethodIndexAtDepth(2));
ASSERT_EQ(kVirtual, if1.GetInvokeTypeAtDepth(2));
- DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(0, if1, 1);
- ASSERT_EQ(12, dex_registers1.GetStackOffsetInBytes(0, 1, ci));
+ DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(0, if1, encoding, 1);
+ ASSERT_EQ(12, dex_registers1.GetStackOffsetInBytes(0, 1, ci, encoding));
- DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(1, if1, 3);
- ASSERT_EQ(80, dex_registers2.GetStackOffsetInBytes(0, 3, ci));
- ASSERT_EQ(10, dex_registers2.GetConstant(1, 3, ci));
- ASSERT_EQ(5, dex_registers2.GetMachineRegister(2, 3, ci));
+ DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(1, if1, encoding, 3);
+ ASSERT_EQ(80, dex_registers2.GetStackOffsetInBytes(0, 3, ci, encoding));
+ ASSERT_EQ(10, dex_registers2.GetConstant(1, 3, ci, encoding));
+ ASSERT_EQ(5, dex_registers2.GetMachineRegister(2, 3, ci, encoding));
ASSERT_FALSE(if1.HasDexRegisterMapAtDepth(2));
}
{
// Verify third stack map.
- StackMap sm2 = ci.GetStackMapAt(2);
+ StackMap sm2 = ci.GetStackMapAt(2, encoding);
- DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm2, 2);
+ DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm2, encoding, 2);
ASSERT_FALSE(dex_registers0.IsDexRegisterLive(0));
- ASSERT_EQ(4, dex_registers0.GetConstant(1, 2, ci));
- ASSERT_FALSE(sm2.HasInlineInfo(ci));
+ ASSERT_EQ(4, dex_registers0.GetConstant(1, 2, ci, encoding));
+ ASSERT_FALSE(sm2.HasInlineInfo(encoding));
}
{
// Verify fourth stack map.
- StackMap sm3 = ci.GetStackMapAt(3);
+ StackMap sm3 = ci.GetStackMapAt(3, encoding);
- DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm3, 2);
- ASSERT_EQ(56, dex_registers0.GetStackOffsetInBytes(0, 2, ci));
- ASSERT_EQ(0, dex_registers0.GetConstant(1, 2, ci));
+ DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm3, encoding, 2);
+ ASSERT_EQ(56, dex_registers0.GetStackOffsetInBytes(0, 2, ci, encoding));
+ ASSERT_EQ(0, dex_registers0.GetConstant(1, 2, ci, encoding));
- InlineInfo if2 = ci.GetInlineInfoOf(sm3);
+ InlineInfo if2 = ci.GetInlineInfoOf(sm3, encoding);
ASSERT_EQ(3u, if2.GetDepth());
ASSERT_EQ(2u, if2.GetDexPcAtDepth(0));
ASSERT_EQ(42u, if2.GetMethodIndexAtDepth(0));
@@ -669,12 +681,12 @@ TEST(StackMapTest, InlineTest) {
ASSERT_FALSE(if2.HasDexRegisterMapAtDepth(0));
- DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(1, if2, 1);
- ASSERT_EQ(2, dex_registers1.GetMachineRegister(0, 1, ci));
+ DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(1, if2, encoding, 1);
+ ASSERT_EQ(2, dex_registers1.GetMachineRegister(0, 1, ci, encoding));
- DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(2, if2, 2);
+ DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(2, if2, encoding, 2);
ASSERT_FALSE(dex_registers2.IsDexRegisterLive(0));
- ASSERT_EQ(3, dex_registers2.GetMachineRegister(1, 2, ci));
+ ASSERT_EQ(3, dex_registers2.GetMachineRegister(1, 2, ci, encoding));
}
}
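
All of the test churn above is the same mechanical refactoring: accessors that used to re-decode the CodeInfo header on every call now take a StackMapEncoding that is extracted once. A minimal sketch of the resulting lookup pattern, using the calls that appear in this patch (the wrapping function itself is hypothetical):

    // Decode the CodeInfo header once, then thread the encoding through
    // every accessor instead of recomputing byte widths per call.
    uint32_t DexPcForNativePc(const CodeInfo& code_info, uint32_t native_pc_offset) {
      StackMapEncoding encoding = code_info.ExtractEncoding();
      StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
      DCHECK(stack_map.IsValid());
      return stack_map.GetDexPc(encoding);
    }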
diff --git a/disassembler/Android.mk b/disassembler/Android.mk
index 691c43f7a6..039986ce2b 100644
--- a/disassembler/Android.mk
+++ b/disassembler/Android.mk
@@ -59,12 +59,13 @@ define build-libart-disassembler
LOCAL_SRC_FILES := $$(LIBART_DISASSEMBLER_SRC_FILES)
ifeq ($$(art_target_or_host),target)
- $(call set-target-local-clang-vars)
- $(call set-target-local-cflags-vars,$(2))
+ $(call set-target-local-clang-vars)
+ $(call set-target-local-cflags-vars,$(2))
else # host
LOCAL_CLANG := $(ART_HOST_CLANG)
LOCAL_LDLIBS := $(ART_HOST_LDLIBS)
LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
+ LOCAL_ASFLAGS += $(ART_HOST_ASFLAGS)
ifeq ($$(art_ndebug_or_debug),debug)
LOCAL_CFLAGS += $(ART_HOST_DEBUG_CFLAGS)
else
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index e039d10583..96d5654d65 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1346,9 +1346,11 @@ class OatDumper {
const void* raw_code_info = oat_method.GetVmapTable();
if (raw_code_info != nullptr) {
CodeInfo code_info(raw_code_info);
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(offset);
+ StackMapEncoding encoding = code_info.ExtractEncoding();
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(offset, encoding);
if (stack_map.IsValid()) {
- stack_map.Dump(os, code_info, oat_method.GetCodeOffset(), code_item->registers_size_);
+ stack_map.Dump(
+ os, code_info, encoding, oat_method.GetCodeOffset(), code_item->registers_size_);
}
}
}
diff --git a/runtime/Android.mk b/runtime/Android.mk
index b38f9bc9a5..c1e6e09728 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -451,6 +451,7 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT
endif
LOCAL_CFLAGS += $$(ART_HOST_CFLAGS)
LOCAL_CFLAGS += -DART_DEFAULT_INSTRUCTION_SET_FEATURES="$(LIBART_HOST_DEFAULT_INSTRUCTION_SET_FEATURES)"
+ LOCAL_ASFLAGS += $$(ART_HOST_ASFLAGS)
ifeq ($$(art_ndebug_or_debug),debug)
LOCAL_CFLAGS += $$(ART_HOST_DEBUG_CFLAGS)
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 349013cfd0..fe26438eac 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -182,9 +182,10 @@ uint32_t ArtMethod::ToDexPc(const uintptr_t pc, bool abort_on_failure) {
uint32_t sought_offset = pc - reinterpret_cast<uintptr_t>(entry_point);
if (IsOptimized(sizeof(void*))) {
CodeInfo code_info = GetOptimizedCodeInfo();
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(sought_offset);
+ StackMapEncoding encoding = code_info.ExtractEncoding();
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(sought_offset, encoding);
if (stack_map.IsValid()) {
- return stack_map.GetDexPc(code_info);
+ return stack_map.GetDexPc(encoding);
}
} else {
MappingTable table(entry_point != nullptr ?
diff --git a/runtime/base/time_utils.h b/runtime/base/time_utils.h
index f58c22a7cc..55d2764576 100644
--- a/runtime/base/time_utils.h
+++ b/runtime/base/time_utils.h
@@ -68,8 +68,8 @@ static constexpr inline uint64_t NsToMs(uint64_t ns) {
}
// Converts the given number of milliseconds to nanoseconds
-static constexpr inline uint64_t MsToNs(uint64_t ns) {
- return ns * 1000 * 1000;
+static constexpr inline uint64_t MsToNs(uint64_t ms) {
+ return ms * 1000 * 1000;
}
#if defined(__APPLE__)
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index d323379e4c..504b7536f6 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -65,17 +65,18 @@ class CheckReferenceMapVisitor : public StackVisitor {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
CodeInfo code_info = m->GetOptimizedCodeInfo();
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
+ StackMapEncoding encoding = code_info.ExtractEncoding();
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
uint16_t number_of_dex_registers = m->GetCodeItem()->registers_size_;
DexRegisterMap dex_register_map =
- code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
- MemoryRegion stack_mask = stack_map.GetStackMask(code_info);
- uint32_t register_mask = stack_map.GetRegisterMask(code_info);
+ code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
+ MemoryRegion stack_mask = stack_map.GetStackMask(encoding);
+ uint32_t register_mask = stack_map.GetRegisterMask(encoding);
for (int i = 0; i < number_of_references; ++i) {
int reg = registers[i];
CHECK(reg < m->GetCodeItem()->registers_size_);
- DexRegisterLocation location =
- dex_register_map.GetDexRegisterLocation(reg, number_of_dex_registers, code_info);
+ DexRegisterLocation location = dex_register_map.GetDexRegisterLocation(
+ reg, number_of_dex_registers, code_info, encoding);
switch (location.GetKind()) {
case DexRegisterLocation::Kind::kNone:
// Not set, should not be a reference.
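
The hunk is cut off inside the switch, but the remaining arms follow directly from the masks fetched above: a reference spilled to the stack must have its bit set in the stack mask, and a reference held in a register must be flagged in the register mask. A hedged sketch (this helper is hypothetical; the real checks live inline in the visitor):

    static void CheckLocation(const DexRegisterLocation& location,
                              const MemoryRegion& stack_mask,
                              uint32_t register_mask) {
      switch (location.GetKind()) {
        case DexRegisterLocation::Kind::kInStack:
          // Stack offsets are in bytes; the stack mask has one bit per slot.
          CHECK(stack_mask.LoadBit(location.GetValue() / kFrameSlotSize));
          break;
        case DexRegisterLocation::Kind::kInRegister:
          CHECK_NE(register_mask & (1u << location.GetValue()), 0u);
          break;
        default:
          break;
      }
    }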
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 31140a83fc..429fa5bfe0 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -85,6 +85,9 @@ namespace art {
static constexpr bool kSanityCheckObjects = kIsDebugBuild;
+// For b/21333911.
+static constexpr bool kDuplicateClassesCheck = false;
+
static void ThrowNoClassDefFoundError(const char* fmt, ...)
__attribute__((__format__(__printf__, 1, 2)))
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -840,6 +843,10 @@ const OatFile* ClassLinker::GetPrimaryOatFile() {
// the two elements agree on whether their dex file was from an already-loaded oat-file or the
// new oat file. Any disagreement indicates a collision.
bool ClassLinker::HasCollisions(const OatFile* oat_file, std::string* error_msg) {
+ if (!kDuplicateClassesCheck) {
+ return false;
+ }
+
// Dex files are registered late - once a class is actually being loaded. We have to compare
// against the open oat files. Take the dex_lock_ that protects oat_files_ accesses.
ReaderMutexLock mu(Thread::Current(), dex_lock_);
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index dbd24a276f..b0cbd02880 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -77,10 +77,11 @@ inline ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp,
(reinterpret_cast<uint8_t*>(sp) + callee_return_pc_offset));
uintptr_t native_pc_offset = outer_method->NativeQuickPcOffset(caller_pc);
CodeInfo code_info = outer_method->GetOptimizedCodeInfo();
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
+ StackMapEncoding encoding = code_info.ExtractEncoding();
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
DCHECK(stack_map.IsValid());
- if (stack_map.HasInlineInfo(code_info)) {
- InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
+ if (stack_map.HasInlineInfo(encoding)) {
+ InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
uint32_t method_index = inline_info.GetMethodIndexAtDepth(inline_info.GetDepth() - 1);
InvokeType invoke_type = static_cast<InvokeType>(
inline_info.GetInvokeTypeAtDepth(inline_info.GetDepth() - 1));
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 042c33f7ff..cc83db1e59 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -314,13 +314,14 @@ class QuickArgumentVisitor {
if (outer_method->IsOptimized(sizeof(void*))) {
CodeInfo code_info = outer_method->GetOptimizedCodeInfo();
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset);
+ StackMapEncoding encoding = code_info.ExtractEncoding();
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset, encoding);
DCHECK(stack_map.IsValid());
- if (stack_map.HasInlineInfo(code_info)) {
- InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
+ if (stack_map.HasInlineInfo(encoding)) {
+ InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
return inline_info.GetDexPcAtDepth(inline_info.GetDepth() - 1);
} else {
- return stack_map.GetDexPc(code_info);
+ return stack_map.GetDexPc(encoding);
}
} else {
return outer_method->ToDexPc(outer_pc);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 20e791d9f2..aeab7d80b4 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -110,6 +110,9 @@ static constexpr size_t kVerifyObjectAllocationStackSize = 16 * KB /
sizeof(mirror::HeapReference<mirror::Object>);
static constexpr size_t kDefaultAllocationStackSize = 8 * MB /
sizeof(mirror::HeapReference<mirror::Object>);
+// System.runFinalization can deadlock with native allocations. To deal with this, we have a
+// timeout on how long we wait for finalizers to run. b/21544853
+static constexpr uint64_t kNativeAllocationFinalizeTimeout = MsToNs(250u);
Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
double target_utilization, double foreground_heap_growth_multiplier,
@@ -232,10 +235,11 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
requested_alloc_space_begin = reinterpret_cast<uint8_t*>(300 * MB) - non_moving_space_capacity;
}
if (!image_file_name.empty()) {
+ ATRACE_BEGIN("ImageSpace::Create");
std::string error_msg;
- space::ImageSpace* image_space = space::ImageSpace::Create(image_file_name.c_str(),
- image_instruction_set,
- &error_msg);
+ auto* image_space = space::ImageSpace::Create(image_file_name.c_str(), image_instruction_set,
+ &error_msg);
+ ATRACE_END();
if (image_space != nullptr) {
AddSpace(image_space);
// Oat files referenced by image files immediately follow them in memory, ensure alloc space
@@ -287,6 +291,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
}
std::string error_str;
std::unique_ptr<MemMap> non_moving_space_mem_map;
+ ATRACE_BEGIN("Create heap maps");
if (separate_non_moving_space) {
// If we are the zygote, the non moving space becomes the zygote space when we run
// PreZygoteFork the first time. In this case, call the map "zygote space" since we can't
@@ -323,6 +328,8 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
capacity_, &error_str));
CHECK(main_mem_map_2.get() != nullptr) << error_str;
}
+ ATRACE_END();
+ ATRACE_BEGIN("Create spaces");
// Create the non moving space first so that bitmaps don't take up the address range.
if (separate_non_moving_space) {
// Non moving space is always dlmalloc since we currently don't have support for multiple
@@ -340,7 +347,8 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
if (foreground_collector_type_ == kCollectorTypeCC) {
region_space_ = space::RegionSpace::Create("Region space", capacity_ * 2, request_begin);
AddSpace(region_space_);
- } else if (IsMovingGc(foreground_collector_type_) && foreground_collector_type_ != kCollectorTypeGSS) {
+ } else if (IsMovingGc(foreground_collector_type_) &&
+ foreground_collector_type_ != kCollectorTypeGSS) {
// Create bump pointer spaces.
// We only create the bump pointer spaces if the foreground collector is a compacting GC.
// TODO: Place bump-pointer spaces somewhere to minimize size of card table.
@@ -411,10 +419,12 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
if (main_space_backup_.get() != nullptr) {
RemoveSpace(main_space_backup_.get());
}
+ ATRACE_END();
// Allocate the card table.
+ ATRACE_BEGIN("Create card table");
card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
CHECK(card_table_.get() != nullptr) << "Failed to create card table";
-
+ ATRACE_END();
if (foreground_collector_type_ == kCollectorTypeCC && kUseTableLookupReadBarrier) {
rb_table_.reset(new accounting::ReadBarrierTable());
DCHECK(rb_table_->IsAllCleared());
@@ -3531,22 +3541,16 @@ bool Heap::IsGCRequestPending() const {
return concurrent_gc_pending_.LoadRelaxed();
}
-void Heap::RunFinalization(JNIEnv* env) {
- // Can't do this in WellKnownClasses::Init since System is not properly set up at that point.
- if (WellKnownClasses::java_lang_System_runFinalization == nullptr) {
- CHECK(WellKnownClasses::java_lang_System != nullptr);
- WellKnownClasses::java_lang_System_runFinalization =
- CacheMethod(env, WellKnownClasses::java_lang_System, true, "runFinalization", "()V");
- CHECK(WellKnownClasses::java_lang_System_runFinalization != nullptr);
- }
- env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
- WellKnownClasses::java_lang_System_runFinalization);
+void Heap::RunFinalization(JNIEnv* env, uint64_t timeout) {
+ env->CallStaticVoidMethod(WellKnownClasses::dalvik_system_VMRuntime,
+ WellKnownClasses::dalvik_system_VMRuntime_runFinalization,
+ static_cast<jlong>(timeout));
}
void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
Thread* self = ThreadForEnv(env);
if (native_need_to_run_finalization_) {
- RunFinalization(env);
+ RunFinalization(env, kNativeAllocationFinalizeTimeout);
UpdateMaxNativeFootprint();
native_need_to_run_finalization_ = false;
}
@@ -3562,7 +3566,7 @@ void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
if (new_native_bytes_allocated > growth_limit_) {
if (WaitForGcToComplete(kGcCauseForNativeAlloc, self) != collector::kGcTypeNone) {
// Just finished a GC, attempt to run finalizers.
- RunFinalization(env);
+ RunFinalization(env, kNativeAllocationFinalizeTimeout);
CHECK(!env->ExceptionCheck());
// Native bytes allocated may be updated by finalization, refresh it.
new_native_bytes_allocated = native_bytes_allocated_.LoadRelaxed();
@@ -3570,7 +3574,7 @@ void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
// If we still are over the watermark, attempt a GC for alloc and run finalizers.
if (new_native_bytes_allocated > growth_limit_) {
CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
- RunFinalization(env);
+ RunFinalization(env, kNativeAllocationFinalizeTimeout);
native_need_to_run_finalization_ = false;
CHECK(!env->ExceptionCheck());
}
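
For reference, the timeout plumbed through the three call sites above is built from the MsToNs helper fixed earlier in this patch:

    // Compile-time arithmetic, values taken from the hunks above:
    // kNativeAllocationFinalizeTimeout = MsToNs(250u)
    //                                  = 250 * 1000 * 1000
    //                                  = 250,000,000 ns,
    // passed to VMRuntime.runFinalization(long) as a jlong.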
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index c72414a1ab..81a97414ba 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -776,8 +776,8 @@ class Heap {
bool IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Run the finalizers.
- void RunFinalization(JNIEnv* env);
+  // Run the finalizers. If the timeout is non-zero, then we use the VMRuntime version.
+ void RunFinalization(JNIEnv* env, uint64_t timeout);
// Blocks the caller until the garbage collector becomes idle and returns the type of GC we
// waited for.
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 2d3d19ce3e..eb9c32d7ad 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -16,6 +16,8 @@
#include "jni_internal.h"
+#define ATRACE_TAG ATRACE_TAG_DALVIK
+#include <cutils/trace.h>
#include <dlfcn.h>
#include "art_method.h"
@@ -788,9 +790,11 @@ void JavaVMExt::VisitRoots(RootVisitor* visitor) {
// JNI Invocation interface.
extern "C" jint JNI_CreateJavaVM(JavaVM** p_vm, JNIEnv** p_env, void* vm_args) {
+ ATRACE_BEGIN(__FUNCTION__);
const JavaVMInitArgs* args = static_cast<JavaVMInitArgs*>(vm_args);
if (IsBadJniVersion(args->version)) {
LOG(ERROR) << "Bad JNI version passed to CreateJavaVM: " << args->version;
+ ATRACE_END();
return JNI_EVERSION;
}
RuntimeOptions options;
@@ -800,6 +804,7 @@ extern "C" jint JNI_CreateJavaVM(JavaVM** p_vm, JNIEnv** p_env, void* vm_args) {
}
bool ignore_unrecognized = args->ignoreUnrecognized;
if (!Runtime::Create(options, ignore_unrecognized)) {
+ ATRACE_END();
return JNI_ERR;
}
Runtime* runtime = Runtime::Current();
@@ -808,10 +813,12 @@ extern "C" jint JNI_CreateJavaVM(JavaVM** p_vm, JNIEnv** p_env, void* vm_args) {
delete Thread::Current()->GetJniEnv();
delete runtime->GetJavaVM();
LOG(WARNING) << "CreateJavaVM failed";
+ ATRACE_END();
return JNI_ERR;
}
*p_env = Thread::Current()->GetJniEnv();
*p_vm = runtime->GetJavaVM();
+ ATRACE_END();
return JNI_OK;
}
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 6ab44551d0..cb36183977 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -406,7 +406,7 @@ class JNI {
CHECK_NON_NULL_ARGUMENT(java_class);
ScopedObjectAccess soa(env);
mirror::Class* c = soa.Decode<mirror::Class*>(java_class);
- return soa.AddLocalReference<jclass>(c->GetSuperClass());
+ return soa.AddLocalReference<jclass>(c->IsInterface() ? nullptr : c->GetSuperClass());
}
// Note: java_class1 should be safely castable to java_class2, and
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 99eb365094..2a0cb28f0c 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -1238,7 +1238,7 @@ TEST_F(JniInternalTest, GetSuperclass) {
ASSERT_NE(runnable_interface, nullptr);
ASSERT_TRUE(env_->IsSameObject(object_class, env_->GetSuperclass(string_class)));
ASSERT_EQ(env_->GetSuperclass(object_class), nullptr);
- ASSERT_TRUE(env_->IsSameObject(object_class, env_->GetSuperclass(runnable_interface)));
+ ASSERT_EQ(env_->GetSuperclass(runnable_interface), nullptr);
// Null as class should fail.
CheckJniAbortCatcher jni_abort_catcher;
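
The updated assertion pins down the JNI contract: GetSuperclass returns null for interfaces (and for java.lang.Object), even though ART internally records java.lang.Object as an interface's superclass, as the mirror::Class comment below spells out. A hypothetical caller-side sketch:

    // JNI usage sketch (hypothetical): before this patch, ART wrongly
    // returned java.lang.Object for an interface class here.
    jclass runnable = env->FindClass("java/lang/Runnable");
    jclass super_class = env->GetSuperclass(runnable);
    CHECK(super_class == nullptr);  // interfaces have no JNI superclass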
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index d8c1ec1508..6b9b5d1015 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -314,7 +314,31 @@ MemMap* MemMap::MapAnonymous(const char* name, uint8_t* expected_ptr, size_t byt
if (low_4gb && expected_ptr == nullptr) {
bool first_run = true;
+ MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
+ // Use maps_ as an optimization to skip over large maps.
+      // Find the first map whose start address is > ptr.
+ auto it = maps_->upper_bound(reinterpret_cast<void*>(ptr));
+ if (it != maps_->begin()) {
+ auto before_it = it;
+ --before_it;
+ // Start at the end of the map before the upper bound.
+ ptr = std::max(ptr, reinterpret_cast<uintptr_t>(before_it->second->BaseEnd()));
+ CHECK_ALIGNED(ptr, kPageSize);
+ }
+ while (it != maps_->end()) {
+ // How much space do we have until the next map?
+ size_t delta = reinterpret_cast<uintptr_t>(it->first) - ptr;
+ // If the space may be sufficient, break out of the loop.
+ if (delta >= page_aligned_byte_count) {
+ break;
+ }
+ // Otherwise, skip to the end of the map.
+ ptr = reinterpret_cast<uintptr_t>(it->second->BaseEnd());
+ CHECK_ALIGNED(ptr, kPageSize);
+ ++it;
+ }
+
if (4U * GB - ptr < page_aligned_byte_count) {
// Not enough memory until 4GB.
if (first_run) {
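
A worked example of the skip logic just added (addresses hypothetical): suppose maps_ holds [0x10000000, 0x50000000) and [0x50001000, 0x51000000), and the scan is at ptr = 0x20000000. upper_bound finds the second map, before_it is the first, so ptr jumps straight to 0x50000000. The gap to the next map is a single page; unless the request fits in it, ptr jumps again to 0x51000000. The page-by-page probing below therefore only runs over address ranges not already covered by our own maps.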
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index ba8a693bdb..551e7e290a 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -1207,7 +1207,12 @@ class MANAGED Class FINAL : public Object {
// Descriptor for the class such as "java.lang.Class" or "[C". Lazily initialized by ComputeName
HeapReference<String> name_;
- // The superclass, or null if this is java.lang.Object, an interface or primitive type.
+ // The superclass, or null if this is java.lang.Object or a primitive type.
+ //
+ // Note that interfaces have java.lang.Object as their
+ // superclass. This doesn't match the expectations in JNI
+  // GetSuperclass or java.lang.Class.getSuperclass() which need to
+ // check for interfaces and return null.
HeapReference<Class> super_class_;
// If class verify fails, we must return same error on subsequent tries.
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 65ea77ad29..9d651bf6a4 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -22,6 +22,8 @@
#include <linux/fs.h>
#endif
+#define ATRACE_TAG ATRACE_TAG_DALVIK
+#include <cutils/trace.h>
#include <signal.h>
#include <sys/syscall.h>
#include <valgrind.h>
@@ -492,8 +494,12 @@ bool Runtime::Start() {
ScopedObjectAccess soa(self);
gc::space::ImageSpace* image_space = heap_->GetImageSpace();
if (image_space != nullptr) {
+ ATRACE_BEGIN("AddImageStringsToTable");
GetInternTable()->AddImageStringsToTable(image_space);
+ ATRACE_END();
+ ATRACE_BEGIN("MoveImageClassesToClassTable");
GetClassLinker()->MoveImageClassesToClassTable();
+ ATRACE_END();
}
}
@@ -512,7 +518,9 @@ bool Runtime::Start() {
// InitNativeMethods needs to be after started_ so that the classes
// it touches will have methods linked to the oat file if necessary.
+ ATRACE_BEGIN("InitNativeMethods");
InitNativeMethods();
+ ATRACE_END();
// Initialize well known thread group values that may be accessed by threads while attaching.
InitThreadGroups(self);
@@ -533,7 +541,9 @@ bool Runtime::Start() {
GetInstructionSetString(kRuntimeISA));
}
+ ATRACE_BEGIN("StartDaemonThreads");
StartDaemonThreads();
+ ATRACE_END();
{
ScopedObjectAccess soa(self);
@@ -763,6 +773,7 @@ static size_t OpenDexFiles(const std::vector<std::string>& dex_filenames,
}
bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized) {
+ ATRACE_BEGIN("Runtime::Init");
CHECK_EQ(sysconf(_SC_PAGE_SIZE), kPageSize);
MemMap::Init();
@@ -773,6 +784,7 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
ParsedOptions::Create(raw_options, ignore_unrecognized, &runtime_options));
if (parsed_options.get() == nullptr) {
LOG(ERROR) << "Failed to parse options";
+ ATRACE_END();
return false;
}
VLOG(startup) << "Runtime::Init -verbose:startup enabled";
@@ -826,6 +838,7 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
zygote_max_failed_boots_ = runtime_options.GetOrDefault(Opt::ZygoteMaxFailedBoots);
XGcOption xgc_option = runtime_options.GetOrDefault(Opt::GcOption);
+ ATRACE_BEGIN("CreateHeap");
heap_ = new gc::Heap(runtime_options.GetOrDefault(Opt::MemoryInitialSize),
runtime_options.GetOrDefault(Opt::HeapGrowthLimit),
runtime_options.GetOrDefault(Opt::HeapMinFree),
@@ -855,9 +868,11 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
xgc_option.verify_post_gc_rosalloc_,
runtime_options.GetOrDefault(Opt::EnableHSpaceCompactForOOM),
runtime_options.GetOrDefault(Opt::HSpaceCompactForOOMMinIntervalsMs));
+ ATRACE_END();
if (heap_->GetImageSpace() == nullptr && !allow_dex_file_fallback_) {
LOG(ERROR) << "Dex file fallback disabled, cannot continue without image.";
+ ATRACE_END();
return false;
}
@@ -957,7 +972,9 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
CHECK_GE(GetHeap()->GetContinuousSpaces().size(), 1U);
class_linker_ = new ClassLinker(intern_table_);
if (GetHeap()->HasImageSpace()) {
+ ATRACE_BEGIN("InitFromImage");
class_linker_->InitFromImage();
+ ATRACE_END();
if (kIsDebugBuild) {
GetHeap()->GetImageSpace()->VerifyImageAllocations();
}
@@ -1090,6 +1107,8 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
VLOG(startup) << "Runtime::Init exiting";
+ ATRACE_END();
+
return true;
}
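
Every early return in the hunks above needs its own explicit ATRACE_END() to balance the ATRACE_BEGIN() at the top, which is easy to get wrong. A scope guard would make the pairing automatic; a minimal sketch, assuming only the <cutils/trace.h> macros already included by this patch (this class is hypothetical here, not part of the change):

    class ScopedTrace {
     public:
      explicit ScopedTrace(const char* name) { ATRACE_BEGIN(name); }
      ~ScopedTrace() { ATRACE_END(); }  // runs on every exit path, early returns included
     private:
      ScopedTrace(const ScopedTrace&) = delete;
      void operator=(const ScopedTrace&) = delete;
    };

    // Usage sketch: the three ATRACE_END() calls before the early
    // returns in Runtime::Init would collapse into one declaration:
    //   bool Runtime::Init(...) {
    //     ScopedTrace trace("Runtime::Init");
    //     ...
    //   }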
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 411088794a..5aeca98a88 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -113,9 +113,10 @@ InlineInfo StackVisitor::GetCurrentInlineInfo() const {
ArtMethod* outer_method = *GetCurrentQuickFrame();
uint32_t native_pc_offset = outer_method->NativeQuickPcOffset(cur_quick_frame_pc_);
CodeInfo code_info = outer_method->GetOptimizedCodeInfo();
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
+ StackMapEncoding encoding = code_info.ExtractEncoding();
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
DCHECK(stack_map.IsValid());
- return code_info.GetInlineInfoOf(stack_map);
+ return code_info.GetInlineInfoOf(stack_map, encoding);
}
ArtMethod* StackVisitor::GetMethod() const {
@@ -274,34 +275,40 @@ bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKin
const void* code_pointer = outer_method->GetQuickOatCodePointer(sizeof(void*));
DCHECK(code_pointer != nullptr);
CodeInfo code_info = outer_method->GetOptimizedCodeInfo();
+ StackMapEncoding encoding = code_info.ExtractEncoding();
uint32_t native_pc_offset = outer_method->NativeQuickPcOffset(cur_quick_frame_pc_);
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
DCHECK(stack_map.IsValid());
size_t depth_in_stack_map = current_inlining_depth_ - 1;
DexRegisterMap dex_register_map = IsInInlinedFrame()
- ? code_info.GetDexRegisterMapAtDepth(
- depth_in_stack_map, code_info.GetInlineInfoOf(stack_map), number_of_dex_registers)
- : code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+ ? code_info.GetDexRegisterMapAtDepth(depth_in_stack_map,
+ code_info.GetInlineInfoOf(stack_map, encoding),
+ encoding,
+ number_of_dex_registers)
+ : code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
DexRegisterLocation::Kind location_kind =
- dex_register_map.GetLocationKind(vreg, number_of_dex_registers, code_info);
+ dex_register_map.GetLocationKind(vreg, number_of_dex_registers, code_info, encoding);
switch (location_kind) {
case DexRegisterLocation::Kind::kInStack: {
- const int32_t offset =
- dex_register_map.GetStackOffsetInBytes(vreg, number_of_dex_registers, code_info);
+ const int32_t offset = dex_register_map.GetStackOffsetInBytes(vreg,
+ number_of_dex_registers,
+ code_info,
+ encoding);
const uint8_t* addr = reinterpret_cast<const uint8_t*>(cur_quick_frame_) + offset;
*val = *reinterpret_cast<const uint32_t*>(addr);
return true;
}
case DexRegisterLocation::Kind::kInRegister:
case DexRegisterLocation::Kind::kInFpuRegister: {
- uint32_t reg = dex_register_map.GetMachineRegister(vreg, number_of_dex_registers, code_info);
+ uint32_t reg =
+ dex_register_map.GetMachineRegister(vreg, number_of_dex_registers, code_info, encoding);
return GetRegisterIfAccessible(reg, kind, val);
}
case DexRegisterLocation::Kind::kConstant:
- *val = dex_register_map.GetConstant(vreg, number_of_dex_registers, code_info);
+ *val = dex_register_map.GetConstant(vreg, number_of_dex_registers, code_info, encoding);
return true;
case DexRegisterLocation::Kind::kNone:
return false;
@@ -309,7 +316,10 @@ bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKin
LOG(FATAL)
<< "Unexpected location kind"
<< DexRegisterLocation::PrettyDescriptor(
- dex_register_map.GetLocationInternalKind(vreg, number_of_dex_registers, code_info));
+ dex_register_map.GetLocationInternalKind(vreg,
+ number_of_dex_registers,
+ code_info,
+ encoding));
UNREACHABLE();
}
}
@@ -782,10 +792,11 @@ void StackVisitor::WalkStack(bool include_transitions) {
if ((walk_kind_ == StackWalkKind::kIncludeInlinedFrames)
&& method->IsOptimized(sizeof(void*))) {
CodeInfo code_info = method->GetOptimizedCodeInfo();
+ StackMapEncoding encoding = code_info.ExtractEncoding();
uint32_t native_pc_offset = method->NativeQuickPcOffset(cur_quick_frame_pc_);
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
- if (stack_map.IsValid() && stack_map.HasInlineInfo(code_info)) {
- InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+ if (stack_map.IsValid() && stack_map.HasInlineInfo(encoding)) {
+ InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
DCHECK_EQ(current_inlining_depth_, 0u);
for (current_inlining_depth_ = inline_info.GetDepth();
current_inlining_depth_ != 0;
diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc
index a64071ff05..f8fc2a9e90 100644
--- a/runtime/stack_map.cc
+++ b/runtime/stack_map.cc
@@ -28,9 +28,10 @@ constexpr uint32_t StackMap::kNoInlineInfo;
DexRegisterLocation::Kind DexRegisterMap::GetLocationInternalKind(uint16_t dex_register_number,
uint16_t number_of_dex_registers,
- const CodeInfo& code_info) const {
+ const CodeInfo& code_info,
+ const StackMapEncoding& enc) const {
DexRegisterLocationCatalog dex_register_location_catalog =
- code_info.GetDexRegisterLocationCatalog();
+ code_info.GetDexRegisterLocationCatalog(enc);
size_t location_catalog_entry_index = GetLocationCatalogEntryIndex(
dex_register_number,
number_of_dex_registers,
@@ -40,9 +41,10 @@ DexRegisterLocation::Kind DexRegisterMap::GetLocationInternalKind(uint16_t dex_r
DexRegisterLocation DexRegisterMap::GetDexRegisterLocation(uint16_t dex_register_number,
uint16_t number_of_dex_registers,
- const CodeInfo& code_info) const {
+ const CodeInfo& code_info,
+ const StackMapEncoding& enc) const {
DexRegisterLocationCatalog dex_register_location_catalog =
- code_info.GetDexRegisterLocationCatalog();
+ code_info.GetDexRegisterLocationCatalog(enc);
size_t location_catalog_entry_index = GetLocationCatalogEntryIndex(
dex_register_number,
number_of_dex_registers,
@@ -50,156 +52,43 @@ DexRegisterLocation DexRegisterMap::GetDexRegisterLocation(uint16_t dex_register
return dex_register_location_catalog.GetDexRegisterLocation(location_catalog_entry_index);
}
-// Loads `number_of_bytes` at the given `offset` and assemble a uint32_t. If `check_max` is true,
-// this method converts a maximum value of size `number_of_bytes` into a uint32_t 0xFFFFFFFF.
-static uint32_t LoadAt(MemoryRegion region,
- size_t number_of_bytes,
- size_t offset,
- bool check_max = false) {
+uint32_t StackMap::LoadAt(size_t number_of_bytes, size_t offset, bool check_max) const {
if (number_of_bytes == 0u) {
DCHECK(!check_max);
return 0;
} else if (number_of_bytes == 1u) {
- uint8_t value = region.LoadUnaligned<uint8_t>(offset);
- if (check_max && value == 0xFF) {
- return -1;
- } else {
- return value;
- }
+ uint8_t value = region_.LoadUnaligned<uint8_t>(offset);
+ return (check_max && value == 0xFF) ? -1 : value;
} else if (number_of_bytes == 2u) {
- uint16_t value = region.LoadUnaligned<uint16_t>(offset);
- if (check_max && value == 0xFFFF) {
- return -1;
- } else {
- return value;
- }
+ uint16_t value = region_.LoadUnaligned<uint16_t>(offset);
+ return (check_max && value == 0xFFFF) ? -1 : value;
} else if (number_of_bytes == 3u) {
- uint16_t low = region.LoadUnaligned<uint16_t>(offset);
- uint16_t high = region.LoadUnaligned<uint8_t>(offset + sizeof(uint16_t));
+ uint16_t low = region_.LoadUnaligned<uint16_t>(offset);
+ uint16_t high = region_.LoadUnaligned<uint8_t>(offset + sizeof(uint16_t));
uint32_t value = (high << 16) + low;
- if (check_max && value == 0xFFFFFF) {
- return -1;
- } else {
- return value;
- }
+ return (check_max && value == 0xFFFFFF) ? -1 : value;
} else {
DCHECK_EQ(number_of_bytes, 4u);
- return region.LoadUnaligned<uint32_t>(offset);
+ return region_.LoadUnaligned<uint32_t>(offset);
}
}
-static void StoreAt(MemoryRegion region, size_t number_of_bytes, size_t offset, uint32_t value) {
+void StackMap::StoreAt(size_t number_of_bytes, size_t offset, uint32_t value) const {
if (number_of_bytes == 0u) {
DCHECK_EQ(value, 0u);
} else if (number_of_bytes == 1u) {
- region.StoreUnaligned<uint8_t>(offset, value);
+ region_.StoreUnaligned<uint8_t>(offset, value);
} else if (number_of_bytes == 2u) {
- region.StoreUnaligned<uint16_t>(offset, value);
+ region_.StoreUnaligned<uint16_t>(offset, value);
} else if (number_of_bytes == 3u) {
- region.StoreUnaligned<uint16_t>(offset, Low16Bits(value));
- region.StoreUnaligned<uint8_t>(offset + sizeof(uint16_t), High16Bits(value));
+ region_.StoreUnaligned<uint16_t>(offset, Low16Bits(value));
+ region_.StoreUnaligned<uint8_t>(offset + sizeof(uint16_t), High16Bits(value));
} else {
- region.StoreUnaligned<uint32_t>(offset, value);
+ region_.StoreUnaligned<uint32_t>(offset, value);
DCHECK_EQ(number_of_bytes, 4u);
}
}
-uint32_t StackMap::GetDexPc(const CodeInfo& info) const {
- return LoadAt(region_, info.NumberOfBytesForDexPc(), info.ComputeStackMapDexPcOffset());
-}
-
-void StackMap::SetDexPc(const CodeInfo& info, uint32_t dex_pc) {
- StoreAt(region_, info.NumberOfBytesForDexPc(), info.ComputeStackMapDexPcOffset(), dex_pc);
-}
-
-uint32_t StackMap::GetNativePcOffset(const CodeInfo& info) const {
- return LoadAt(region_, info.NumberOfBytesForNativePc(), info.ComputeStackMapNativePcOffset());
-}
-
-void StackMap::SetNativePcOffset(const CodeInfo& info, uint32_t native_pc_offset) {
- StoreAt(region_, info.NumberOfBytesForNativePc(), info.ComputeStackMapNativePcOffset(), native_pc_offset);
-}
-
-uint32_t StackMap::GetDexRegisterMapOffset(const CodeInfo& info) const {
- return LoadAt(region_,
- info.NumberOfBytesForDexRegisterMap(),
- info.ComputeStackMapDexRegisterMapOffset(),
- /* check_max */ true);
-}
-
-void StackMap::SetDexRegisterMapOffset(const CodeInfo& info, uint32_t offset) {
- StoreAt(region_,
- info.NumberOfBytesForDexRegisterMap(),
- info.ComputeStackMapDexRegisterMapOffset(),
- offset);
-}
-
-uint32_t StackMap::GetInlineDescriptorOffset(const CodeInfo& info) const {
- if (!info.HasInlineInfo()) return kNoInlineInfo;
- return LoadAt(region_,
- info.NumberOfBytesForInlineInfo(),
- info.ComputeStackMapInlineInfoOffset(),
- /* check_max */ true);
-}
-
-void StackMap::SetInlineDescriptorOffset(const CodeInfo& info, uint32_t offset) {
- DCHECK(info.HasInlineInfo());
- StoreAt(region_,
- info.NumberOfBytesForInlineInfo(),
- info.ComputeStackMapInlineInfoOffset(),
- offset);
-}
-
-uint32_t StackMap::GetRegisterMask(const CodeInfo& info) const {
- return LoadAt(region_,
- info.NumberOfBytesForRegisterMask(),
- info.ComputeStackMapRegisterMaskOffset());
-}
-
-void StackMap::SetRegisterMask(const CodeInfo& info, uint32_t mask) {
- StoreAt(region_,
- info.NumberOfBytesForRegisterMask(),
- info.ComputeStackMapRegisterMaskOffset(),
- mask);
-}
-
-size_t StackMap::ComputeStackMapSizeInternal(size_t stack_mask_size,
- size_t number_of_bytes_for_inline_info,
- size_t number_of_bytes_for_dex_map,
- size_t number_of_bytes_for_dex_pc,
- size_t number_of_bytes_for_native_pc,
- size_t number_of_bytes_for_register_mask) {
- return stack_mask_size
- + number_of_bytes_for_inline_info
- + number_of_bytes_for_dex_map
- + number_of_bytes_for_dex_pc
- + number_of_bytes_for_native_pc
- + number_of_bytes_for_register_mask;
-}
-
-size_t StackMap::ComputeStackMapSize(size_t stack_mask_size,
- size_t inline_info_size,
- size_t dex_register_map_size,
- size_t dex_pc_max,
- size_t native_pc_max,
- size_t register_mask_max) {
- return ComputeStackMapSizeInternal(
- stack_mask_size,
- inline_info_size == 0
- ? 0
- // + 1 to also encode kNoInlineInfo.
- : CodeInfo::EncodingSizeInBytes(inline_info_size + dex_register_map_size + 1),
- // + 1 to also encode kNoDexRegisterMap.
- CodeInfo::EncodingSizeInBytes(dex_register_map_size + 1),
- CodeInfo::EncodingSizeInBytes(dex_pc_max),
- CodeInfo::EncodingSizeInBytes(native_pc_max),
- CodeInfo::EncodingSizeInBytes(register_mask_max));
-}
-
-MemoryRegion StackMap::GetStackMask(const CodeInfo& info) const {
- return region_.Subregion(info.ComputeStackMapStackMaskOffset(), info.GetStackMaskSize());
-}
-
static void DumpRegisterMapping(std::ostream& os,
size_t dex_register_num,
DexRegisterLocation location,
@@ -216,6 +105,7 @@ void CodeInfo::Dump(std::ostream& os,
uint32_t code_offset,
uint16_t number_of_dex_registers,
bool dump_stack_maps) const {
+ StackMapEncoding encoding = ExtractEncoding();
uint32_t code_info_size = GetOverallSize();
size_t number_of_stack_maps = GetNumberOfStackMaps();
Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
@@ -223,21 +113,26 @@ void CodeInfo::Dump(std::ostream& os,
indented_os << "Optimized CodeInfo (size=" << code_info_size
<< ", number_of_dex_registers=" << number_of_dex_registers
<< ", number_of_stack_maps=" << number_of_stack_maps
- << ", has_inline_info=" << HasInlineInfo()
- << ", number_of_bytes_for_inline_info=" << NumberOfBytesForInlineInfo()
- << ", number_of_bytes_for_dex_register_map=" << NumberOfBytesForDexRegisterMap()
- << ", number_of_bytes_for_dex_pc=" << NumberOfBytesForDexPc()
- << ", number_of_bytes_for_native_pc=" << NumberOfBytesForNativePc()
- << ", number_of_bytes_for_register_mask=" << NumberOfBytesForRegisterMask()
+ << ", has_inline_info=" << encoding.HasInlineInfo()
+ << ", number_of_bytes_for_inline_info=" << encoding.NumberOfBytesForInlineInfo()
+ << ", number_of_bytes_for_dex_register_map="
+ << encoding.NumberOfBytesForDexRegisterMap()
+ << ", number_of_bytes_for_dex_pc=" << encoding.NumberOfBytesForDexPc()
+ << ", number_of_bytes_for_native_pc=" << encoding.NumberOfBytesForNativePc()
+ << ", number_of_bytes_for_register_mask=" << encoding.NumberOfBytesForRegisterMask()
<< ")\n";
// Display the Dex register location catalog.
- GetDexRegisterLocationCatalog().Dump(indented_os, *this);
+ GetDexRegisterLocationCatalog(encoding).Dump(indented_os, *this);
// Display stack maps along with (live) Dex register maps.
if (dump_stack_maps) {
for (size_t i = 0; i < number_of_stack_maps; ++i) {
- StackMap stack_map = GetStackMapAt(i);
- stack_map.Dump(
- indented_os, *this, code_offset, number_of_dex_registers, " " + std::to_string(i));
+ StackMap stack_map = GetStackMapAt(i, encoding);
+ stack_map.Dump(indented_os,
+ *this,
+ encoding,
+ code_offset,
+ number_of_dex_registers,
+ " " + std::to_string(i));
}
}
// TODO: Dump the stack map's inline information? We need to know more from the caller:
@@ -245,9 +140,10 @@ void CodeInfo::Dump(std::ostream& os,
}
void DexRegisterLocationCatalog::Dump(std::ostream& os, const CodeInfo& code_info) {
+ StackMapEncoding encoding = code_info.ExtractEncoding();
size_t number_of_location_catalog_entries =
code_info.GetNumberOfDexRegisterLocationCatalogEntries();
- size_t location_catalog_size_in_bytes = code_info.GetDexRegisterLocationCatalogSize();
+ size_t location_catalog_size_in_bytes = code_info.GetDexRegisterLocationCatalogSize(encoding);
Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
std::ostream indented_os(&indent_filter);
indented_os
@@ -262,6 +158,7 @@ void DexRegisterLocationCatalog::Dump(std::ostream& os, const CodeInfo& code_inf
void DexRegisterMap::Dump(std::ostream& os,
const CodeInfo& code_info,
uint16_t number_of_dex_registers) const {
+ StackMapEncoding encoding = code_info.ExtractEncoding();
size_t number_of_location_catalog_entries =
code_info.GetNumberOfDexRegisterLocationCatalogEntries();
Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
@@ -271,7 +168,10 @@ void DexRegisterMap::Dump(std::ostream& os,
if (IsDexRegisterLive(j)) {
size_t location_catalog_entry_index = GetLocationCatalogEntryIndex(
j, number_of_dex_registers, number_of_location_catalog_entries);
- DexRegisterLocation location = GetDexRegisterLocation(j, number_of_dex_registers, code_info);
+ DexRegisterLocation location = GetDexRegisterLocation(j,
+ number_of_dex_registers,
+ code_info,
+ encoding);
DumpRegisterMapping(
indented_os, j, location, "v",
"\t[entry " + std::to_string(static_cast<int>(location_catalog_entry_index)) + "]");
@@ -281,6 +181,7 @@ void DexRegisterMap::Dump(std::ostream& os,
void StackMap::Dump(std::ostream& os,
const CodeInfo& code_info,
+ const StackMapEncoding& encoding,
uint32_t code_offset,
uint16_t number_of_dex_registers,
const std::string& header_suffix) const {
@@ -288,21 +189,22 @@ void StackMap::Dump(std::ostream& os,
std::ostream indented_os(&indent_filter);
indented_os << "StackMap" << header_suffix
<< std::hex
- << " [native_pc=0x" << code_offset + GetNativePcOffset(code_info) << "]"
- << " (dex_pc=0x" << GetDexPc(code_info)
- << ", native_pc_offset=0x" << GetNativePcOffset(code_info)
- << ", dex_register_map_offset=0x" << GetDexRegisterMapOffset(code_info)
- << ", inline_info_offset=0x" << GetInlineDescriptorOffset(code_info)
- << ", register_mask=0x" << GetRegisterMask(code_info)
+ << " [native_pc=0x" << code_offset + GetNativePcOffset(encoding) << "]"
+ << " (dex_pc=0x" << GetDexPc(encoding)
+ << ", native_pc_offset=0x" << GetNativePcOffset(encoding)
+ << ", dex_register_map_offset=0x" << GetDexRegisterMapOffset(encoding)
+ << ", inline_info_offset=0x" << GetInlineDescriptorOffset(encoding)
+ << ", register_mask=0x" << GetRegisterMask(encoding)
<< std::dec
<< ", stack_mask=0b";
- MemoryRegion stack_mask = GetStackMask(code_info);
+ MemoryRegion stack_mask = GetStackMask(encoding);
for (size_t i = 0, e = stack_mask.size_in_bits(); i < e; ++i) {
indented_os << stack_mask.LoadBit(e - i - 1);
}
indented_os << ")\n";
- if (HasDexRegisterMap(code_info)) {
- DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(*this, number_of_dex_registers);
+ if (HasDexRegisterMap(encoding)) {
+ DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(
+ *this, encoding, number_of_dex_registers);
dex_register_map.Dump(os, code_info, number_of_dex_registers);
}
}
@@ -321,8 +223,9 @@ void InlineInfo::Dump(std::ostream& os,
<< ", method_index=0x" << GetMethodIndexAtDepth(i)
<< ")\n";
if (HasDexRegisterMapAtDepth(i)) {
+ StackMapEncoding encoding = code_info.ExtractEncoding();
DexRegisterMap dex_register_map =
- code_info.GetDexRegisterMapAtDepth(i, *this, number_of_dex_registers[i]);
+ code_info.GetDexRegisterMapAtDepth(i, *this, encoding, number_of_dex_registers[i]);
dex_register_map.Dump(indented_os, code_info, number_of_dex_registers[i]);
}
}
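
LoadAt and StoreAt, now members operating on the stack map's own region_, read and write values at widths of 0 to 4 bytes; with check_max set, the all-ones pattern of each width decodes to -1, which is how a one-byte dex register map offset of 0xFF comes back as StackMap::kNoDexRegisterMap in the overflow test earlier in this patch. A worked example for the three-byte case, assuming a little-endian target (byte values hypothetical):

    // region_ bytes at `offset`: 0x34 0x12 0x56
    // low   = LoadUnaligned<uint16_t>(offset)      = 0x1234
    // high  = LoadUnaligned<uint8_t>(offset + 2)   = 0x56
    // value = (0x56 << 16) + 0x1234 = 0x561234
    // With check_max: a stored 0xFFFFFF decodes as 0xFFFFFFFF (-1).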
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 7f33e6d090..4e420084d1 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -32,6 +32,7 @@ static constexpr ssize_t kFrameSlotSize = 4;
static constexpr size_t kVRegSize = 4;
class CodeInfo;
+class StackMapEncoding;
/**
* Classes in the following file are wrapper on stack map information backed
@@ -437,26 +438,30 @@ class DexRegisterMap {
// Get the surface kind of Dex register `dex_register_number`.
DexRegisterLocation::Kind GetLocationKind(uint16_t dex_register_number,
uint16_t number_of_dex_registers,
- const CodeInfo& code_info) const {
+ const CodeInfo& code_info,
+ const StackMapEncoding& enc) const {
return DexRegisterLocation::ConvertToSurfaceKind(
- GetLocationInternalKind(dex_register_number, number_of_dex_registers, code_info));
+ GetLocationInternalKind(dex_register_number, number_of_dex_registers, code_info, enc));
}
// Get the internal kind of Dex register `dex_register_number`.
DexRegisterLocation::Kind GetLocationInternalKind(uint16_t dex_register_number,
uint16_t number_of_dex_registers,
- const CodeInfo& code_info) const;
+ const CodeInfo& code_info,
+ const StackMapEncoding& enc) const;
// Get the Dex register location `dex_register_number`.
DexRegisterLocation GetDexRegisterLocation(uint16_t dex_register_number,
uint16_t number_of_dex_registers,
- const CodeInfo& code_info) const;
+ const CodeInfo& code_info,
+ const StackMapEncoding& enc) const;
int32_t GetStackOffsetInBytes(uint16_t dex_register_number,
uint16_t number_of_dex_registers,
- const CodeInfo& code_info) const {
+ const CodeInfo& code_info,
+ const StackMapEncoding& enc) const {
DexRegisterLocation location =
- GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info);
+ GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info, enc);
DCHECK(location.GetKind() == DexRegisterLocation::Kind::kInStack);
// GetDexRegisterLocation returns the offset in bytes.
return location.GetValue();
@@ -464,9 +469,10 @@ class DexRegisterMap {
int32_t GetConstant(uint16_t dex_register_number,
uint16_t number_of_dex_registers,
- const CodeInfo& code_info) const {
+ const CodeInfo& code_info,
+ const StackMapEncoding& enc) const {
DexRegisterLocation location =
- GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info);
+ GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info, enc);
DCHECK(location.GetKind() == DexRegisterLocation::Kind::kConstant)
<< DexRegisterLocation::PrettyDescriptor(location.GetKind());
return location.GetValue();
@@ -474,9 +480,10 @@ class DexRegisterMap {
int32_t GetMachineRegister(uint16_t dex_register_number,
uint16_t number_of_dex_registers,
- const CodeInfo& code_info) const {
+ const CodeInfo& code_info,
+ const StackMapEncoding& enc) const {
DexRegisterLocation location =
- GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info);
+ GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info, enc);
DCHECK(location.GetInternalKind() == DexRegisterLocation::Kind::kInRegister
|| location.GetInternalKind() == DexRegisterLocation::Kind::kInFpuRegister)
<< DexRegisterLocation::PrettyDescriptor(location.GetInternalKind());
@@ -628,6 +635,111 @@ class DexRegisterMap {
friend class StackMapStream;
};
+class StackMapEncoding {
+ public:
+ StackMapEncoding() {}
+
+ StackMapEncoding(size_t stack_mask_size,
+ size_t bytes_for_inline_info,
+ size_t bytes_for_dex_register_map,
+ size_t bytes_for_dex_pc,
+ size_t bytes_for_native_pc,
+ size_t bytes_for_register_mask)
+ : bytes_for_stack_mask_(stack_mask_size),
+ bytes_for_inline_info_(bytes_for_inline_info),
+ bytes_for_dex_register_map_(bytes_for_dex_register_map),
+ bytes_for_dex_pc_(bytes_for_dex_pc),
+ bytes_for_native_pc_(bytes_for_native_pc),
+ bytes_for_register_mask_(bytes_for_register_mask) {}
+
+ static StackMapEncoding CreateFromSizes(size_t stack_mask_size,
+ size_t inline_info_size,
+ size_t dex_register_map_size,
+ size_t dex_pc_max,
+ size_t native_pc_max,
+ size_t register_mask_max) {
+ return StackMapEncoding(
+ stack_mask_size,
+ // + 1 to also encode kNoInlineInfo: if an inline info offset
+ // is at 0xFF, we want to overflow to a larger encoding, because it will
+ // conflict with kNoInlineInfo.
+ // The offset is relative to the dex register map. TODO: Change this.
+ inline_info_size == 0
+ ? 0
+ : EncodingSizeInBytes(dex_register_map_size + inline_info_size + 1),
+ // + 1 to also encode kNoDexRegisterMap: if a dex register map offset
+ // is at 0xFF, we want to overflow to a larger encoding, because it will
+ // conflict with kNoDexRegisterMap.
+ EncodingSizeInBytes(dex_register_map_size + 1),
+ EncodingSizeInBytes(dex_pc_max),
+ EncodingSizeInBytes(native_pc_max),
+ EncodingSizeInBytes(register_mask_max));
+ }
+
+  // Get the size of one stack map with this encoding, in bytes.
+  // All stack maps of a CodeInfo share the same encoding, and hence the same size.
+ size_t ComputeStackMapSize() const {
+ return bytes_for_register_mask_
+ + bytes_for_stack_mask_
+ + bytes_for_inline_info_
+ + bytes_for_dex_register_map_
+ + bytes_for_dex_pc_
+ + bytes_for_native_pc_;
+ }
+
+ bool HasInlineInfo() const { return bytes_for_inline_info_ > 0; }
+
+ size_t NumberOfBytesForStackMask() const { return bytes_for_stack_mask_; }
+ size_t NumberOfBytesForInlineInfo() const { return bytes_for_inline_info_; }
+ size_t NumberOfBytesForDexRegisterMap() const { return bytes_for_dex_register_map_; }
+ size_t NumberOfBytesForDexPc() const { return bytes_for_dex_pc_; }
+ size_t NumberOfBytesForNativePc() const { return bytes_for_native_pc_; }
+ size_t NumberOfBytesForRegisterMask() const { return bytes_for_register_mask_; }
+
+ size_t ComputeStackMapRegisterMaskOffset() const {
+ return kRegisterMaskOffset;
+ }
+
+ size_t ComputeStackMapStackMaskOffset() const {
+ return ComputeStackMapRegisterMaskOffset() + bytes_for_register_mask_;
+ }
+
+ size_t ComputeStackMapDexPcOffset() const {
+ return ComputeStackMapStackMaskOffset() + bytes_for_stack_mask_;
+ }
+
+ size_t ComputeStackMapNativePcOffset() const {
+ return ComputeStackMapDexPcOffset() + bytes_for_dex_pc_;
+ }
+
+ size_t ComputeStackMapDexRegisterMapOffset() const {
+ return ComputeStackMapNativePcOffset() + bytes_for_native_pc_;
+ }
+
+ size_t ComputeStackMapInlineInfoOffset() const {
+ return ComputeStackMapDexRegisterMapOffset() + bytes_for_dex_register_map_;
+ }
+
+ private:
+ static size_t EncodingSizeInBytes(size_t max_element) {
+ DCHECK(IsUint<32>(max_element));
+ return (max_element == 0) ? 0
+ : IsUint<8>(max_element) ? 1
+ : IsUint<16>(max_element) ? 2
+ : IsUint<24>(max_element) ? 3
+ : 4;
+ }
+
+ static constexpr int kRegisterMaskOffset = 0;
+
+ size_t bytes_for_stack_mask_;
+ size_t bytes_for_inline_info_;
+ size_t bytes_for_dex_register_map_;
+ size_t bytes_for_dex_pc_;
+ size_t bytes_for_native_pc_;
+ size_t bytes_for_register_mask_;
+};
+
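As a worked example of how CreateFromSizes picks per-field byte widths (illustrative values only, not part of this change; the names match the declarations in this hunk and the arithmetic follows EncodingSizeInBytes):

// Illustrative sketch with made-up maxima.
StackMapEncoding encoding = StackMapEncoding::CreateFromSizes(
    /* stack_mask_size */ 1,
    /* inline_info_size */ 0,         // No inline info -> 0 bytes, HasInlineInfo() == false.
    /* dex_register_map_size */ 254,  // 254 + 1 == 255 still fits in one byte -> 1 byte.
    /* dex_pc_max */ 300,             // > 0xFF -> 2 bytes.
    /* native_pc_max */ 0xFFFF,       // 2 bytes.
    /* register_mask_max */ 0xFF);    // 1 byte.
// Per-map size: 1 (register mask) + 1 (stack mask) + 0 (inline info)
//             + 1 (dex register map) + 2 (dex pc) + 2 (native pc) == 7.
DCHECK_EQ(encoding.ComputeStackMapSize(), 7u);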
/**
* A Stack Map holds compilation information for a specific PC necessary for:
* - Mapping it to a dex PC,
@@ -642,44 +754,82 @@ class DexRegisterMap {
*/
class StackMap {
public:
- explicit StackMap(MemoryRegion region) : region_(region) {}
StackMap() {}
+ explicit StackMap(MemoryRegion region) : region_(region) {}
bool IsValid() const { return region_.pointer() != nullptr; }
- uint32_t GetDexPc(const CodeInfo& info) const;
+ uint32_t GetDexPc(const StackMapEncoding& encoding) const {
+ return LoadAt(encoding.NumberOfBytesForDexPc(), encoding.ComputeStackMapDexPcOffset());
+ }
- void SetDexPc(const CodeInfo& info, uint32_t dex_pc);
+ void SetDexPc(const StackMapEncoding& encoding, uint32_t dex_pc) {
+ StoreAt(encoding.NumberOfBytesForDexPc(), encoding.ComputeStackMapDexPcOffset(), dex_pc);
+ }
- uint32_t GetNativePcOffset(const CodeInfo& info) const;
+ uint32_t GetNativePcOffset(const StackMapEncoding& encoding) const {
+ return LoadAt(encoding.NumberOfBytesForNativePc(), encoding.ComputeStackMapNativePcOffset());
+ }
- void SetNativePcOffset(const CodeInfo& info, uint32_t native_pc_offset);
+ void SetNativePcOffset(const StackMapEncoding& encoding, uint32_t native_pc_offset) {
+ StoreAt(encoding.NumberOfBytesForNativePc(),
+ encoding.ComputeStackMapNativePcOffset(),
+ native_pc_offset);
+ }
- uint32_t GetDexRegisterMapOffset(const CodeInfo& info) const;
+ uint32_t GetDexRegisterMapOffset(const StackMapEncoding& encoding) const {
+ return LoadAt(encoding.NumberOfBytesForDexRegisterMap(),
+ encoding.ComputeStackMapDexRegisterMapOffset(),
+ /* check_max */ true);
+ }
- void SetDexRegisterMapOffset(const CodeInfo& info, uint32_t offset);
+ void SetDexRegisterMapOffset(const StackMapEncoding& encoding, uint32_t offset) {
+ StoreAt(encoding.NumberOfBytesForDexRegisterMap(),
+ encoding.ComputeStackMapDexRegisterMapOffset(),
+ offset);
+ }
- uint32_t GetInlineDescriptorOffset(const CodeInfo& info) const;
+ uint32_t GetInlineDescriptorOffset(const StackMapEncoding& encoding) const {
+ if (!encoding.HasInlineInfo()) return kNoInlineInfo;
+ return LoadAt(encoding.NumberOfBytesForInlineInfo(),
+ encoding.ComputeStackMapInlineInfoOffset(),
+ /* check_max */ true);
+ }
- void SetInlineDescriptorOffset(const CodeInfo& info, uint32_t offset);
+ void SetInlineDescriptorOffset(const StackMapEncoding& encoding, uint32_t offset) {
+ DCHECK(encoding.HasInlineInfo());
+ StoreAt(encoding.NumberOfBytesForInlineInfo(),
+ encoding.ComputeStackMapInlineInfoOffset(),
+ offset);
+ }
- uint32_t GetRegisterMask(const CodeInfo& info) const;
+ uint32_t GetRegisterMask(const StackMapEncoding& encoding) const {
+ return LoadAt(encoding.NumberOfBytesForRegisterMask(),
+ encoding.ComputeStackMapRegisterMaskOffset());
+ }
- void SetRegisterMask(const CodeInfo& info, uint32_t mask);
+ void SetRegisterMask(const StackMapEncoding& encoding, uint32_t mask) {
+ StoreAt(encoding.NumberOfBytesForRegisterMask(),
+ encoding.ComputeStackMapRegisterMaskOffset(),
+ mask);
+ }
- MemoryRegion GetStackMask(const CodeInfo& info) const;
+ MemoryRegion GetStackMask(const StackMapEncoding& encoding) const {
+ return region_.Subregion(encoding.ComputeStackMapStackMaskOffset(),
+ encoding.NumberOfBytesForStackMask());
+ }
- void SetStackMask(const CodeInfo& info, const BitVector& sp_map) {
- MemoryRegion region = GetStackMask(info);
+ void SetStackMask(const StackMapEncoding& encoding, const BitVector& sp_map) {
+ MemoryRegion region = GetStackMask(encoding);
sp_map.CopyTo(region.start(), region.size());
}
- bool HasDexRegisterMap(const CodeInfo& info) const {
- return GetDexRegisterMapOffset(info) != kNoDexRegisterMap;
+ bool HasDexRegisterMap(const StackMapEncoding& encoding) const {
+ return GetDexRegisterMapOffset(encoding) != kNoDexRegisterMap;
}
- bool HasInlineInfo(const CodeInfo& info) const {
- return GetInlineDescriptorOffset(info) != kNoInlineInfo;
+ bool HasInlineInfo(const StackMapEncoding& encoding) const {
+ return GetInlineDescriptorOffset(encoding) != kNoInlineInfo;
}
bool Equals(const StackMap& other) const {
@@ -687,15 +837,9 @@ class StackMap {
&& region_.size() == other.region_.size();
}
- static size_t ComputeStackMapSize(size_t stack_mask_size,
- size_t inline_info_size,
- size_t dex_register_map_size,
- size_t dex_pc_max,
- size_t native_pc_max,
- size_t register_mask_max);
-
void Dump(std::ostream& os,
const CodeInfo& code_info,
+ const StackMapEncoding& encoding,
uint32_t code_offset,
uint16_t number_of_dex_registers,
const std::string& header_suffix = "") const;
@@ -709,21 +853,17 @@ class StackMap {
static constexpr uint32_t kNoInlineInfo = -1;
private:
- static size_t ComputeStackMapSizeInternal(size_t stack_mask_size,
- size_t number_of_bytes_for_inline_info,
- size_t number_of_bytes_for_dex_map,
- size_t number_of_bytes_for_dex_pc,
- size_t number_of_bytes_for_native_pc,
- size_t number_of_bytes_for_register_mask);
-
// TODO: Instead of plain types such as "uint32_t", introduce
// typedefs (and document the memory layout of StackMap).
- static constexpr int kRegisterMaskOffset = 0;
static constexpr int kFixedSize = 0;
+  // Loads `number_of_bytes` at the given `offset` and assembles them into a uint32_t. If
+  // `check_max` is true, this method converts the maximum value representable in
+  // `number_of_bytes` bytes into the uint32_t 0xFFFFFFFF.
+ uint32_t LoadAt(size_t number_of_bytes, size_t offset, bool check_max = false) const;
+ void StoreAt(size_t number_of_bytes, size_t offset, uint32_t value) const;
+
MemoryRegion region_;
- friend class CodeInfo;
friend class StackMapStream;
};
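LoadAt and StoreAt are only declared here; the definitions live in runtime/stack_map.cc and are not part of this hunk. A minimal sketch of what the load side could look like, assuming little-endian byte assembly through MemoryRegion::LoadUnaligned, purely to illustrate the check_max contract documented above:

// Sketch under the stated assumptions, not the committed implementation.
uint32_t StackMap::LoadAt(size_t number_of_bytes, size_t offset, bool check_max) const {
  uint32_t value = 0;
  for (size_t i = 0; i < number_of_bytes; ++i) {
    value |= region_.LoadUnaligned<uint8_t>(offset + i) << (i * kBitsPerByte);
  }
  if (check_max && number_of_bytes < sizeof(uint32_t)) {
    // The all-ones pattern of a narrow field stands for kNoDexRegisterMap or
    // kNoInlineInfo; widen it so callers can compare against the uint32_t constants.
    uint32_t field_max = (1u << (number_of_bytes * kBitsPerByte)) - 1u;
    if (value == field_max) {
      value = 0xFFFFFFFF;
    }
  }
  return value;
}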
@@ -827,39 +967,23 @@ class CodeInfo {
region_ = MemoryRegion(const_cast<void*>(data), size);
}
- static size_t EncodingSizeInBytes(size_t max_element) {
- DCHECK(IsUint<32>(max_element));
- return (max_element == 0) ? 0
- : IsUint<8>(max_element) ? 1
- : IsUint<16>(max_element) ? 2
- : IsUint<24>(max_element) ? 3
- : 4;
+ StackMapEncoding ExtractEncoding() const {
+ return StackMapEncoding(region_.LoadUnaligned<uint32_t>(kStackMaskSizeOffset),
+ GetNumberOfBytesForEncoding(kInlineInfoBitOffset),
+ GetNumberOfBytesForEncoding(kDexRegisterMapBitOffset),
+ GetNumberOfBytesForEncoding(kDexPcBitOffset),
+ GetNumberOfBytesForEncoding(kNativePcBitOffset),
+ GetNumberOfBytesForEncoding(kRegisterMaskBitOffset));
}
- void SetEncoding(size_t inline_info_size,
- size_t dex_register_map_size,
- size_t dex_pc_max,
- size_t native_pc_max,
- size_t register_mask_max) {
- if (inline_info_size != 0) {
- region_.StoreBit(kHasInlineInfoBitOffset, 1);
- // + 1 to also encode kNoInlineInfo: if an inline info offset
- // is at 0xFF, we want to overflow to a larger encoding, because it will
- // conflict with kNoInlineInfo.
- // The offset is relative to the dex register map. TODO: Change this.
- SetEncodingAt(kInlineInfoBitOffset,
- EncodingSizeInBytes(dex_register_map_size + inline_info_size + 1));
- } else {
- region_.StoreBit(kHasInlineInfoBitOffset, 0);
- SetEncodingAt(kInlineInfoBitOffset, 0);
- }
- // + 1 to also encode kNoDexRegisterMap: if a dex register map offset
- // is at 0xFF, we want to overflow to a larger encoding, because it will
- // conflict with kNoDexRegisterMap.
- SetEncodingAt(kDexRegisterMapBitOffset, EncodingSizeInBytes(dex_register_map_size + 1));
- SetEncodingAt(kDexPcBitOffset, EncodingSizeInBytes(dex_pc_max));
- SetEncodingAt(kNativePcBitOffset, EncodingSizeInBytes(native_pc_max));
- SetEncodingAt(kRegisterMaskBitOffset, EncodingSizeInBytes(register_mask_max));
+ void SetEncoding(const StackMapEncoding& encoding) {
+ region_.StoreUnaligned<uint32_t>(kStackMaskSizeOffset, encoding.NumberOfBytesForStackMask());
+ region_.StoreBit(kHasInlineInfoBitOffset, encoding.NumberOfBytesForInlineInfo() != 0);
+ SetEncodingAt(kInlineInfoBitOffset, encoding.NumberOfBytesForInlineInfo());
+ SetEncodingAt(kDexRegisterMapBitOffset, encoding.NumberOfBytesForDexRegisterMap());
+ SetEncodingAt(kDexPcBitOffset, encoding.NumberOfBytesForDexPc());
+ SetEncodingAt(kNativePcBitOffset, encoding.NumberOfBytesForNativePc());
+ SetEncodingAt(kRegisterMaskBitOffset, encoding.NumberOfBytesForRegisterMask());
}
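The writer side (StackMapStream) and the reader side are now symmetric: the stream computes a StackMapEncoding once, persists it with SetEncoding, and every consumer recovers it with ExtractEncoding. A hypothetical round trip, where the size/max variables stand for whatever the stream measured:

// Hypothetical producer/consumer round trip; all sizes are placeholders.
StackMapEncoding encoding = StackMapEncoding::CreateFromSizes(
    stack_mask_size, inline_info_size, dex_register_map_size,
    dex_pc_max, native_pc_max, register_mask_max);
code_info.SetEncoding(encoding);  // Writer (StackMapStream) stores the widths.
// Stack maps are then emitted using this same encoding.
StackMapEncoding read_back = code_info.ExtractEncoding();  // Reader side.
DCHECK_EQ(read_back.ComputeStackMapSize(), encoding.ComputeStackMapSize());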
void SetEncodingAt(size_t bit_offset, size_t number_of_bytes) {
@@ -880,64 +1004,15 @@ class CodeInfo {
return region_.LoadBit(kHasInlineInfoBitOffset);
}
- size_t NumberOfBytesForInlineInfo() const {
- return GetNumberOfBytesForEncoding(kInlineInfoBitOffset);
- }
-
- size_t NumberOfBytesForDexRegisterMap() const {
- return GetNumberOfBytesForEncoding(kDexRegisterMapBitOffset);
- }
-
- size_t NumberOfBytesForRegisterMask() const {
- return GetNumberOfBytesForEncoding(kRegisterMaskBitOffset);
- }
-
- size_t NumberOfBytesForNativePc() const {
- return GetNumberOfBytesForEncoding(kNativePcBitOffset);
- }
-
- size_t NumberOfBytesForDexPc() const {
- return GetNumberOfBytesForEncoding(kDexPcBitOffset);
- }
-
- size_t ComputeStackMapRegisterMaskOffset() const {
- return StackMap::kRegisterMaskOffset;
- }
-
- size_t ComputeStackMapStackMaskOffset() const {
- return ComputeStackMapRegisterMaskOffset()
- + (NumberOfBytesForRegisterMask() * sizeof(uint8_t));
- }
-
- size_t ComputeStackMapDexPcOffset() const {
- return ComputeStackMapStackMaskOffset() + GetStackMaskSize();
- }
-
- size_t ComputeStackMapNativePcOffset() const {
- return ComputeStackMapDexPcOffset()
- + (NumberOfBytesForDexPc() * sizeof(uint8_t));
- }
-
- size_t ComputeStackMapDexRegisterMapOffset() const {
- return ComputeStackMapNativePcOffset()
- + (NumberOfBytesForNativePc() * sizeof(uint8_t));
- }
-
- size_t ComputeStackMapInlineInfoOffset() const {
- CHECK(HasInlineInfo());
- return ComputeStackMapDexRegisterMapOffset()
- + (NumberOfBytesForDexRegisterMap() * sizeof(uint8_t));
- }
-
- DexRegisterLocationCatalog GetDexRegisterLocationCatalog() const {
+ DexRegisterLocationCatalog GetDexRegisterLocationCatalog(const StackMapEncoding& encoding) const {
return DexRegisterLocationCatalog(region_.Subregion(
- GetDexRegisterLocationCatalogOffset(),
- GetDexRegisterLocationCatalogSize()));
+ GetDexRegisterLocationCatalogOffset(encoding),
+ GetDexRegisterLocationCatalogSize(encoding)));
}
- StackMap GetStackMapAt(size_t i) const {
- size_t size = StackMapSize();
- return StackMap(GetStackMaps().Subregion(i * size, size));
+ StackMap GetStackMapAt(size_t i, const StackMapEncoding& encoding) const {
+ size_t stack_map_size = encoding.ComputeStackMapSize();
+ return StackMap(GetStackMaps(encoding).Subregion(i * stack_map_size, stack_map_size));
}
uint32_t GetOverallSize() const {
@@ -956,19 +1031,11 @@ class CodeInfo {
region_.StoreUnaligned<uint32_t>(kNumberOfDexRegisterLocationCatalogEntriesOffset, num_entries);
}
- uint32_t GetDexRegisterLocationCatalogSize() const {
- return ComputeDexRegisterLocationCatalogSize(GetDexRegisterLocationCatalogOffset(),
+ uint32_t GetDexRegisterLocationCatalogSize(const StackMapEncoding& encoding) const {
+ return ComputeDexRegisterLocationCatalogSize(GetDexRegisterLocationCatalogOffset(encoding),
GetNumberOfDexRegisterLocationCatalogEntries());
}
- uint32_t GetStackMaskSize() const {
- return region_.LoadUnaligned<uint32_t>(kStackMaskSizeOffset);
- }
-
- void SetStackMaskSize(uint32_t size) {
- region_.StoreUnaligned<uint32_t>(kStackMaskSizeOffset, size);
- }
-
size_t GetNumberOfStackMaps() const {
return region_.LoadUnaligned<uint32_t>(kNumberOfStackMapsOffset);
}
@@ -977,37 +1044,30 @@ class CodeInfo {
region_.StoreUnaligned<uint32_t>(kNumberOfStackMapsOffset, number_of_stack_maps);
}
- // Get the size of one stack map of this CodeInfo object, in bytes.
- // All stack maps of a CodeInfo have the same size.
- size_t StackMapSize() const {
- return StackMap::ComputeStackMapSizeInternal(GetStackMaskSize(),
- NumberOfBytesForInlineInfo(),
- NumberOfBytesForDexRegisterMap(),
- NumberOfBytesForDexPc(),
- NumberOfBytesForNativePc(),
- NumberOfBytesForRegisterMask());
- }
-
  // Get the size of all the stack maps of this CodeInfo object, in bytes.
- size_t GetStackMapsSize() const {
- return StackMapSize() * GetNumberOfStackMaps();
+ size_t GetStackMapsSize(const StackMapEncoding& encoding) const {
+ return encoding.ComputeStackMapSize() * GetNumberOfStackMaps();
}
- uint32_t GetDexRegisterLocationCatalogOffset() const {
- return GetStackMapsOffset() + GetStackMapsSize();
+ uint32_t GetDexRegisterLocationCatalogOffset(const StackMapEncoding& encoding) const {
+ return GetStackMapsOffset() + GetStackMapsSize(encoding);
}
- size_t GetDexRegisterMapsOffset() const {
- return GetDexRegisterLocationCatalogOffset() + GetDexRegisterLocationCatalogSize();
+ size_t GetDexRegisterMapsOffset(const StackMapEncoding& encoding) const {
+ return GetDexRegisterLocationCatalogOffset(encoding)
+ + GetDexRegisterLocationCatalogSize(encoding);
}
uint32_t GetStackMapsOffset() const {
return kFixedSize;
}
- DexRegisterMap GetDexRegisterMapOf(StackMap stack_map, uint32_t number_of_dex_registers) const {
- DCHECK(stack_map.HasDexRegisterMap(*this));
- uint32_t offset = GetDexRegisterMapsOffset() + stack_map.GetDexRegisterMapOffset(*this);
+ DexRegisterMap GetDexRegisterMapOf(StackMap stack_map,
+ const StackMapEncoding& encoding,
+ uint32_t number_of_dex_registers) const {
+ DCHECK(stack_map.HasDexRegisterMap(encoding));
+ uint32_t offset = GetDexRegisterMapsOffset(encoding)
+ + stack_map.GetDexRegisterMapOffset(encoding);
size_t size = ComputeDexRegisterMapSizeOf(offset, number_of_dex_registers);
return DexRegisterMap(region_.Subregion(offset, size));
}
@@ -1015,37 +1075,40 @@ class CodeInfo {
  // Return the `DexRegisterMap` pointed to by `inline_info` at depth `depth`.
DexRegisterMap GetDexRegisterMapAtDepth(uint8_t depth,
InlineInfo inline_info,
+ const StackMapEncoding& encoding,
uint32_t number_of_dex_registers) const {
DCHECK(inline_info.HasDexRegisterMapAtDepth(depth));
- uint32_t offset =
- GetDexRegisterMapsOffset() + inline_info.GetDexRegisterMapOffsetAtDepth(depth);
+ uint32_t offset = GetDexRegisterMapsOffset(encoding)
+ + inline_info.GetDexRegisterMapOffsetAtDepth(depth);
size_t size = ComputeDexRegisterMapSizeOf(offset, number_of_dex_registers);
return DexRegisterMap(region_.Subregion(offset, size));
}
- InlineInfo GetInlineInfoOf(StackMap stack_map) const {
- DCHECK(stack_map.HasInlineInfo(*this));
- uint32_t offset = stack_map.GetInlineDescriptorOffset(*this) + GetDexRegisterMapsOffset();
+ InlineInfo GetInlineInfoOf(StackMap stack_map, const StackMapEncoding& encoding) const {
+ DCHECK(stack_map.HasInlineInfo(encoding));
+ uint32_t offset = stack_map.GetInlineDescriptorOffset(encoding)
+ + GetDexRegisterMapsOffset(encoding);
uint8_t depth = region_.LoadUnaligned<uint8_t>(offset);
return InlineInfo(region_.Subregion(offset,
InlineInfo::kFixedSize + depth * InlineInfo::SingleEntrySize()));
}
- StackMap GetStackMapForDexPc(uint32_t dex_pc) const {
+ StackMap GetStackMapForDexPc(uint32_t dex_pc, const StackMapEncoding& encoding) const {
for (size_t i = 0, e = GetNumberOfStackMaps(); i < e; ++i) {
- StackMap stack_map = GetStackMapAt(i);
- if (stack_map.GetDexPc(*this) == dex_pc) {
+ StackMap stack_map = GetStackMapAt(i, encoding);
+ if (stack_map.GetDexPc(encoding) == dex_pc) {
return stack_map;
}
}
return StackMap();
}
- StackMap GetStackMapForNativePcOffset(uint32_t native_pc_offset) const {
+ StackMap GetStackMapForNativePcOffset(uint32_t native_pc_offset,
+ const StackMapEncoding& encoding) const {
    // TODO: stack maps are sorted by native pc; we can do a binary search (see the sketch below).
for (size_t i = 0, e = GetNumberOfStackMaps(); i < e; ++i) {
- StackMap stack_map = GetStackMapAt(i);
- if (stack_map.GetNativePcOffset(*this) == native_pc_offset) {
+ StackMap stack_map = GetStackMapAt(i, encoding);
+ if (stack_map.GetNativePcOffset(encoding) == native_pc_offset) {
return stack_map;
}
}
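On the TODO above: once the sorted-by-native-pc invariant is enforced, the linear scan can become a binary search. A hypothetical sketch (the method name is invented here), valid only under that sortedness assumption:

// Hypothetical replacement for the linear scan; assumes stack maps are
// sorted by native pc offset, as the TODO states.
StackMap GetStackMapForNativePcOffsetBinary(uint32_t native_pc_offset,
                                            const StackMapEncoding& encoding) const {
  size_t lo = 0;
  size_t hi = GetNumberOfStackMaps();
  while (lo < hi) {
    size_t mid = lo + (hi - lo) / 2;
    StackMap stack_map = GetStackMapAt(mid, encoding);
    uint32_t pc = stack_map.GetNativePcOffset(encoding);
    if (pc == native_pc_offset) {
      return stack_map;
    } else if (pc < native_pc_offset) {
      lo = mid + 1;
    } else {
      hi = mid;
    }
  }
  return StackMap();  // Not found, mirroring the linear version.
}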
@@ -1081,10 +1144,10 @@ class CodeInfo {
static constexpr int kNativePcBitOffset = kDexPcBitOffset + 3;
static constexpr int kRegisterMaskBitOffset = kNativePcBitOffset + 3;
- MemoryRegion GetStackMaps() const {
+ MemoryRegion GetStackMaps(const StackMapEncoding& encoding) const {
return region_.size() == 0
? MemoryRegion()
- : region_.Subregion(GetStackMapsOffset(), GetStackMapsSize());
+ : region_.Subregion(GetStackMapsOffset(), GetStackMapsSize(encoding));
}
// Compute the size of the Dex register map associated to the stack map at
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 67f611e422..4203b96f24 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2273,9 +2273,10 @@ class ReferenceMapVisitor : public StackVisitor {
const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m, sizeof(void*));
uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point);
CodeInfo code_info = m->GetOptimizedCodeInfo();
- StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
+ StackMapEncoding encoding = code_info.ExtractEncoding();
+ StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
DCHECK(map.IsValid());
- MemoryRegion mask = map.GetStackMask(code_info);
+ MemoryRegion mask = map.GetStackMask(encoding);
// Visit stack entries that hold pointers.
for (size_t i = 0; i < mask.size_in_bits(); ++i) {
if (mask.LoadBit(i)) {
@@ -2291,7 +2292,7 @@ class ReferenceMapVisitor : public StackVisitor {
}
}
// Visit callee-save registers that hold pointers.
- uint32_t register_mask = map.GetRegisterMask(code_info);
+ uint32_t register_mask = map.GetRegisterMask(encoding);
for (size_t i = 0; i < BitSizeOf<uint32_t>(); ++i) {
if (register_mask & (1 << i)) {
mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(i));
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 9faaa4a57e..4d88227956 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -3318,7 +3318,10 @@ ArtMethod* MethodVerifier::VerifyInvocationArgsFromIterator(
}
if (method_type != METHOD_INTERFACE && !actual_arg_type.IsZero()) {
const RegType* res_method_class;
- if (res_method != nullptr) {
+ // Miranda methods have the declaring interface as their declaring class, not the abstract
+ // class. It would be wrong to use this for the type check (interface type checks are
+ // postponed to runtime).
+ if (res_method != nullptr && !res_method->IsMiranda()) {
mirror::Class* klass = res_method->GetDeclaringClass();
std::string temp;
res_method_class = &reg_types_.FromClass(klass->GetDescriptor(&temp), klass,
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index 3dbfe1b516..e7857a0602 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -34,6 +34,7 @@ jclass WellKnownClasses::dalvik_system_DexFile;
jclass WellKnownClasses::dalvik_system_DexPathList;
jclass WellKnownClasses::dalvik_system_DexPathList__Element;
jclass WellKnownClasses::dalvik_system_PathClassLoader;
+jclass WellKnownClasses::dalvik_system_VMRuntime;
jclass WellKnownClasses::java_lang_BootClassLoader;
jclass WellKnownClasses::java_lang_ClassLoader;
jclass WellKnownClasses::java_lang_ClassNotFoundException;
@@ -63,6 +64,7 @@ jclass WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk;
jclass WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer;
jmethodID WellKnownClasses::com_android_dex_Dex_create;
+jmethodID WellKnownClasses::dalvik_system_VMRuntime_runFinalization;
jmethodID WellKnownClasses::java_lang_Boolean_valueOf;
jmethodID WellKnownClasses::java_lang_Byte_valueOf;
jmethodID WellKnownClasses::java_lang_Character_valueOf;
@@ -209,6 +211,8 @@ void WellKnownClasses::Init(JNIEnv* env) {
dalvik_system_DexPathList = CacheClass(env, "dalvik/system/DexPathList");
dalvik_system_DexPathList__Element = CacheClass(env, "dalvik/system/DexPathList$Element");
dalvik_system_PathClassLoader = CacheClass(env, "dalvik/system/PathClassLoader");
+ dalvik_system_VMRuntime = CacheClass(env, "dalvik/system/VMRuntime");
+
java_lang_BootClassLoader = CacheClass(env, "java/lang/BootClassLoader");
java_lang_ClassLoader = CacheClass(env, "java/lang/ClassLoader");
java_lang_ClassNotFoundException = CacheClass(env, "java/lang/ClassNotFoundException");
@@ -238,6 +242,7 @@ void WellKnownClasses::Init(JNIEnv* env) {
org_apache_harmony_dalvik_ddmc_Chunk = CacheClass(env, "org/apache/harmony/dalvik/ddmc/Chunk");
org_apache_harmony_dalvik_ddmc_DdmServer = CacheClass(env, "org/apache/harmony/dalvik/ddmc/DdmServer");
+ dalvik_system_VMRuntime_runFinalization = CacheMethod(env, dalvik_system_VMRuntime, true, "runFinalization", "(J)V");
com_android_dex_Dex_create = CacheMethod(env, com_android_dex_Dex, true, "create", "(Ljava/nio/ByteBuffer;)Lcom/android/dex/Dex;");
java_lang_ClassNotFoundException_init = CacheMethod(env, java_lang_ClassNotFoundException, false, "<init>", "(Ljava/lang/String;Ljava/lang/Throwable;)V");
java_lang_ClassLoader_loadClass = CacheMethod(env, java_lang_ClassLoader, false, "loadClass", "(Ljava/lang/String;)Ljava/lang/Class;");
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index d25d1c3f7d..66b9abece7 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -45,6 +45,7 @@ struct WellKnownClasses {
static jclass dalvik_system_DexPathList;
static jclass dalvik_system_DexPathList__Element;
static jclass dalvik_system_PathClassLoader;
+ static jclass dalvik_system_VMRuntime;
static jclass java_lang_BootClassLoader;
static jclass java_lang_ClassLoader;
static jclass java_lang_ClassNotFoundException;
@@ -74,6 +75,7 @@ struct WellKnownClasses {
static jclass org_apache_harmony_dalvik_ddmc_DdmServer;
static jmethodID com_android_dex_Dex_create;
+ static jmethodID dalvik_system_VMRuntime_runFinalization;
static jmethodID java_lang_Boolean_valueOf;
static jmethodID java_lang_Byte_valueOf;
static jmethodID java_lang_Character_valueOf;
diff --git a/sigchainlib/Android.mk b/sigchainlib/Android.mk
index e1aae11f06..11f44fec83 100644
--- a/sigchainlib/Android.mk
+++ b/sigchainlib/Android.mk
@@ -22,6 +22,7 @@ include $(CLEAR_VARS)
LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
LOCAL_MODULE_TAGS := optional
LOCAL_CFLAGS += $(ART_TARGET_CFLAGS)
+LOCAL_ASFLAGS += $(ART_TARGET_ASFLAGS)
LOCAL_SRC_FILES := sigchain_dummy.cc
LOCAL_CLANG = $(ART_TARGET_CLANG)
LOCAL_MODULE:= libsigchain
@@ -36,6 +37,7 @@ include $(CLEAR_VARS)
LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
LOCAL_MODULE_TAGS := optional
LOCAL_CFLAGS += $(ART_TARGET_CFLAGS)
+LOCAL_ASFLAGS += $(ART_TARGET_ASFLAGS)
LOCAL_SRC_FILES := sigchain.cc
LOCAL_CLANG = $(ART_TARGET_CLANG)
LOCAL_MODULE:= libsigchain
@@ -51,6 +53,7 @@ LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
LOCAL_MODULE_TAGS := optional
LOCAL_IS_HOST_MODULE := true
LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
+LOCAL_ASFLAGS += $(ART_HOST_ASFLAGS)
LOCAL_CLANG = $(ART_HOST_CLANG)
LOCAL_SRC_FILES := sigchain_dummy.cc
LOCAL_MODULE:= libsigchain
@@ -65,6 +68,7 @@ LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
LOCAL_MODULE_TAGS := optional
LOCAL_IS_HOST_MODULE := true
LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
+LOCAL_ASFLAGS += $(ART_HOST_ASFLAGS)
LOCAL_CLANG = $(ART_HOST_CLANG)
LOCAL_SRC_FILES := sigchain.cc
LOCAL_MODULE:= libsigchain
diff --git a/sigchainlib/version-script.txt b/sigchainlib/version-script.txt
index ce1505490b..08c312e7e2 100644
--- a/sigchainlib/version-script.txt
+++ b/sigchainlib/version-script.txt
@@ -5,6 +5,7 @@ global:
InvokeUserSignalHandler;
InitializeSignalChain;
EnsureFrontOfChain;
+ SetSpecialSignalHandlerFn;
sigaction;
signal;
sigprocmask;
diff --git a/test/004-NativeAllocations/src/Main.java b/test/004-NativeAllocations/src/Main.java
index a99fe92081..92f4e21f40 100644
--- a/test/004-NativeAllocations/src/Main.java
+++ b/test/004-NativeAllocations/src/Main.java
@@ -19,6 +19,8 @@ import java.lang.Runtime;
public class Main {
static Object nativeLock = new Object();
+ static Object deadlockLock = new Object();
+ static boolean aboutToDeadlockLock = false;
static int nativeBytes = 0;
static Object runtime;
static Method register_native_allocation;
@@ -28,13 +30,15 @@ public class Main {
static class NativeAllocation {
private int bytes;
- NativeAllocation(int bytes) throws Exception {
+ NativeAllocation(int bytes, boolean testingDeadlock) throws Exception {
this.bytes = bytes;
register_native_allocation.invoke(runtime, bytes);
synchronized (nativeLock) {
- nativeBytes += bytes;
- if (nativeBytes > maxMem) {
- throw new OutOfMemoryError();
+ if (!testingDeadlock) {
+ nativeBytes += bytes;
+ if (nativeBytes > maxMem) {
+ throw new OutOfMemoryError();
+ }
}
}
}
@@ -44,6 +48,9 @@ public class Main {
nativeBytes -= bytes;
}
register_native_free.invoke(runtime, bytes);
+ aboutToDeadlockLock = true;
+ synchronized (deadlockLock) {
+ }
}
}
@@ -59,7 +66,20 @@ public class Main {
int allocation_count = 256;
NativeAllocation[] allocations = new NativeAllocation[count];
for (int i = 0; i < allocation_count; ++i) {
- allocations[i % count] = new NativeAllocation(size);
+ allocations[i % count] = new NativeAllocation(size, false);
+ }
+      // Test that we don't get a deadlock while holding deadlockLock, which blocks the
+      // finalizer. If there is no timeout, then we will get a finalizer timeout exception.
+ aboutToDeadlockLock = false;
+ synchronized (deadlockLock) {
+        for (int i = 0; !aboutToDeadlockLock; ++i) {
+ allocations[i % count] = new NativeAllocation(size, true);
+ }
+ // Do more allocations now that the finalizer thread is deadlocked so that we force
+ // finalization and timeout.
+ for (int i = 0; i < 10; ++i) {
+ allocations[i % count] = new NativeAllocation(size, true);
+ }
}
System.out.println("Test complete");
}
diff --git a/test/135-MirandaDispatch/expected.txt b/test/135-MirandaDispatch/expected.txt
index 134d8d0b47..5b098e5fac 100644
--- a/test/135-MirandaDispatch/expected.txt
+++ b/test/135-MirandaDispatch/expected.txt
@@ -1 +1,2 @@
+b/21646347
Finishing
diff --git a/test/135-MirandaDispatch/smali/b_21646347.smali b/test/135-MirandaDispatch/smali/b_21646347.smali
new file mode 100644
index 0000000000..b4979a5357
--- /dev/null
+++ b/test/135-MirandaDispatch/smali/b_21646347.smali
@@ -0,0 +1,15 @@
+.class public LB21646347;
+
+# If an invoke-virtual dispatches to a miranda method, ensure that we check that the receiver
+# is a subclass of the abstract class, and do not postpone the check just because the miranda
+# method's declaring class is an interface.
+
+.super Ljava/lang/Object;
+
+.method public static run(LB21646347;)V
+ .registers 1
+    # Invoke the miranda method on an object of this class. This should fail type-checking,
+    # instead of being let through because the declaring class is an interface.
+ invoke-virtual {v0}, LMain$AbstractClass;->m()V
+ return-void
+.end method
diff --git a/test/135-MirandaDispatch/src/Main.java b/test/135-MirandaDispatch/src/Main.java
index bb005b0103..ada8cefead 100644
--- a/test/135-MirandaDispatch/src/Main.java
+++ b/test/135-MirandaDispatch/src/Main.java
@@ -46,6 +46,15 @@ public class Main {
if (counter != loopIterations * loopIterations) {
System.out.println("Expected " + loopIterations * loopIterations + " got " + counter);
}
+
+ try {
+ Class<?> b21646347 = Class.forName("B21646347");
+ throw new RuntimeException("Expected a VerifyError");
+ } catch (VerifyError expected) {
+ System.out.println("b/21646347");
+ } catch (Throwable t) {
+ t.printStackTrace();
+ }
System.out.println("Finishing");
}
}
diff --git a/test/441-checker-inliner/src/Main.java b/test/441-checker-inliner/src/Main.java
index 3899d7fb26..df969a488e 100644
--- a/test/441-checker-inliner/src/Main.java
+++ b/test/441-checker-inliner/src/Main.java
@@ -19,7 +19,7 @@ public class Main {
/// CHECK-START: void Main.InlineVoid() inliner (before)
/// CHECK-DAG: <<Const42:i\d+>> IntConstant 42
/// CHECK-DAG: InvokeStaticOrDirect
- /// CHECK-DAG: InvokeStaticOrDirect [<<Const42>>,{{[ij]\d+}}]
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Const42>>]
/// CHECK-START: void Main.InlineVoid() inliner (after)
/// CHECK-NOT: InvokeStaticOrDirect
@@ -31,7 +31,7 @@ public class Main {
/// CHECK-START: int Main.InlineParameter(int) inliner (before)
/// CHECK-DAG: <<Param:i\d+>> ParameterValue
- /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<Param>>,{{[ij]\d+}}]
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<Param>>]
/// CHECK-DAG: Return [<<Result>>]
/// CHECK-START: int Main.InlineParameter(int) inliner (after)
@@ -44,7 +44,7 @@ public class Main {
/// CHECK-START: long Main.InlineWideParameter(long) inliner (before)
/// CHECK-DAG: <<Param:j\d+>> ParameterValue
- /// CHECK-DAG: <<Result:j\d+>> InvokeStaticOrDirect [<<Param>>,{{[ij]\d+}}]
+ /// CHECK-DAG: <<Result:j\d+>> InvokeStaticOrDirect [<<Param>>]
/// CHECK-DAG: Return [<<Result>>]
/// CHECK-START: long Main.InlineWideParameter(long) inliner (after)
@@ -57,7 +57,7 @@ public class Main {
/// CHECK-START: java.lang.Object Main.InlineReferenceParameter(java.lang.Object) inliner (before)
/// CHECK-DAG: <<Param:l\d+>> ParameterValue
- /// CHECK-DAG: <<Result:l\d+>> InvokeStaticOrDirect [<<Param>>,{{[ij]\d+}}]
+ /// CHECK-DAG: <<Result:l\d+>> InvokeStaticOrDirect [<<Param>>]
/// CHECK-DAG: Return [<<Result>>]
/// CHECK-START: java.lang.Object Main.InlineReferenceParameter(java.lang.Object) inliner (after)
@@ -130,8 +130,8 @@ public class Main {
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
/// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
/// CHECK-DAG: <<Const5:i\d+>> IntConstant 5
- /// CHECK-DAG: <<Add:i\d+>> InvokeStaticOrDirect [<<Const1>>,<<Const3>>,{{[ij]\d+}}]
- /// CHECK-DAG: <<Sub:i\d+>> InvokeStaticOrDirect [<<Const5>>,<<Const3>>,{{[ij]\d+}}]
+ /// CHECK-DAG: <<Add:i\d+>> InvokeStaticOrDirect [<<Const1>>,<<Const3>>]
+ /// CHECK-DAG: <<Sub:i\d+>> InvokeStaticOrDirect [<<Const5>>,<<Const3>>]
/// CHECK-DAG: <<Phi:i\d+>> Phi [<<Add>>,<<Sub>>]
/// CHECK-DAG: Return [<<Phi>>]
diff --git a/test/478-checker-clinit-check-pruning/src/Main.java b/test/478-checker-clinit-check-pruning/src/Main.java
index e6aab630f9..a2c98c9363 100644
--- a/test/478-checker-clinit-check-pruning/src/Main.java
+++ b/test/478-checker-clinit-check-pruning/src/Main.java
@@ -26,7 +26,7 @@ public class Main {
/// CHECK-START: void Main.invokeStaticInlined() builder (after)
/// CHECK-DAG: <<LoadClass:l\d+>> LoadClass gen_clinit_check:false
/// CHECK-DAG: <<ClinitCheck:l\d+>> ClinitCheck [<<LoadClass>>]
- /// CHECK-DAG: InvokeStaticOrDirect [{{[ij]\d+}},<<ClinitCheck>>]
+ /// CHECK-DAG: InvokeStaticOrDirect [<<ClinitCheck>>]
/// CHECK-START: void Main.invokeStaticInlined() inliner (after)
/// CHECK-DAG: <<LoadClass:l\d+>> LoadClass gen_clinit_check:false
@@ -69,12 +69,12 @@ public class Main {
/// CHECK-START: void Main.invokeStaticNotInlined() builder (after)
/// CHECK-DAG: <<LoadClass:l\d+>> LoadClass gen_clinit_check:false
/// CHECK-DAG: <<ClinitCheck:l\d+>> ClinitCheck [<<LoadClass>>]
- /// CHECK-DAG: InvokeStaticOrDirect [{{[ij]\d+}},<<ClinitCheck>>]
+ /// CHECK-DAG: InvokeStaticOrDirect [<<ClinitCheck>>]
/// CHECK-START: void Main.invokeStaticNotInlined() inliner (after)
/// CHECK-DAG: <<LoadClass:l\d+>> LoadClass gen_clinit_check:false
/// CHECK-DAG: <<ClinitCheck:l\d+>> ClinitCheck [<<LoadClass>>]
- /// CHECK-DAG: InvokeStaticOrDirect [{{[ij]\d+}},<<ClinitCheck>>]
+ /// CHECK-DAG: InvokeStaticOrDirect [<<ClinitCheck>>]
// The following checks ensure the clinit check and load class
// instructions added by the builder are pruned by the
diff --git a/test/Android.libarttest.mk b/test/Android.libarttest.mk
index 57d06c49cd..847ad0d2f7 100644
--- a/test/Android.libarttest.mk
+++ b/test/Android.libarttest.mk
@@ -74,6 +74,7 @@ define build-libarttest
else # host
LOCAL_CLANG := $(ART_HOST_CLANG)
LOCAL_CFLAGS := $(ART_HOST_CFLAGS) $(ART_HOST_DEBUG_CFLAGS)
+ LOCAL_ASFLAGS := $(ART_HOST_ASFLAGS)
LOCAL_LDLIBS := $(ART_HOST_LDLIBS) -ldl -lpthread
LOCAL_IS_HOST_MODULE := true
LOCAL_MULTILIB := both
diff --git a/test/Android.libnativebridgetest.mk b/test/Android.libnativebridgetest.mk
index 5a5f72584f..e8cc7e45ce 100644
--- a/test/Android.libnativebridgetest.mk
+++ b/test/Android.libnativebridgetest.mk
@@ -60,6 +60,7 @@ define build-libnativebridgetest
else # host
LOCAL_CLANG := $(ART_HOST_CLANG)
LOCAL_CFLAGS := $(ART_HOST_CFLAGS) $(ART_HOST_DEBUG_CFLAGS)
+ LOCAL_ASFLAGS := $(ART_HOST_ASFLAGS)
LOCAL_SHARED_LIBRARIES := libcutils
LOCAL_LDLIBS := $(ART_HOST_LDLIBS) -ldl -lpthread
ifeq ($(HOST_OS),linux)
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index fa13fe5c64..3ca1c065e5 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -259,6 +259,11 @@ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),
$(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),131-structural-change,$(ALL_ADDRESS_SIZES))
+# 138-duplicate-classes-check. Turned off temporarily, b/21333911.
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+ $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
+ $(PICTEST_TYPES),$(DEBUGGABLE_TYPES),138-duplicate-classes-check,$(ALL_ADDRESS_SIZES))
+
# All these tests check that we have sane behavior if we don't have a patchoat or dex2oat.
# Therefore we shouldn't run them in situations where we actually don't have these since they
# explicitly test for them. These all also assume we have an image.