author Andreas Gampe <agampe@google.com> 2016-07-19 22:34:53 -0700
committer Vladimir Marko <vmarko@google.com> 2016-07-20 11:44:25 +0100
commit 6740997e6934bbca27d5830a32352d82aabbd38b (patch)
tree 684ab2e46ddeaaf251fb6919bf64295810e46afa
parent dc4f4d42aa1712a7ac2e4c24c0aebe58b71ae2c0 (diff)
ART: Change return types of field access entrypoints
Use size_t and ssize_t so that the return types guarantee full-width data,
as the compiled code and mterp expect. This fixes breakage from Clang no
longer sign-/zero-extending small return types.

Bug: 30232671
Test: m ART_TEST_RUN_TEST_NDEBUG=true ART_TEST_INTERPRETER=true test-art-host-run-test
Change-Id: Ic505befc6c94e2dccbc8abf2b13d4c2d662e68d1
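The effect of the change can be sketched outside ART with a standalone example
(hypothetical names, not ART code): an entrypoint declared with a narrow return
type only promises the low 8 or 16 bits of the return register, and once Clang
stopped extending such values the remaining bits may hold stale data. Declaring
the return type as size_t/ssize_t makes the widening part of the C++ return
itself, so the whole register is well defined for callers such as compiled code
and the mterp assembly.

#include <cstddef>      // size_t
#include <cstdint>      // int8_t, uint16_t
#include <cstdio>       // printf
#include <sys/types.h>  // ssize_t (POSIX)

// Stand-ins for an object's byte/char fields (hypothetical, not ART code).
static int8_t g_byte_field = -1;
static uint16_t g_char_field = 0x8000;  // 32768, does not fit in int16_t

// Old style: the ABI only defines the low bits of the return register;
// whether the callee extends the rest is a compiler choice Clang no longer makes.
int8_t GetByteNarrow() { return g_byte_field; }
uint16_t GetCharNarrow() { return g_char_field; }

// New style, as in this change: the conversion to ssize_t/size_t happens in
// C++, so the full register is sign-/zero-extended before returning.
ssize_t GetByteWide() { return g_byte_field; }  // sign-extends to -1
size_t GetCharWide() { return g_char_field; }   // zero-extends to 32768

int main() {
  printf("%zd %zu\n", GetByteWide(), GetCharWide());  // prints: -1 32768
  return 0;
}

The sketch prints -1 32768 regardless of compiler; the point is only that the
widening is now explicit in C++ rather than left to the callee's code generation.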
-rw-r--r--  compiler/optimizing/code_generator_arm.cc  2
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc  2
-rw-r--r--  compiler/optimizing/code_generator_mips.cc  2
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc  2
-rw-r--r--  compiler/optimizing/code_generator_x86.cc  2
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc  2
-rw-r--r--  runtime/arch/arm/entrypoints_init_arm.cc  4
-rw-r--r--  runtime/arch/arm64/entrypoints_init_arm64.cc  4
-rw-r--r--  runtime/arch/mips/entrypoints_init_mips.cc  4
-rw-r--r--  runtime/arch/x86/entrypoints_init_x86.cc  4
-rw-r--r--  runtime/arch/x86_64/entrypoints_init_x86_64.cc  4
-rw-r--r--  runtime/entrypoints/quick/quick_cast_entrypoints.cc  2
-rw-r--r--  runtime/entrypoints/quick/quick_default_externs.h  20
-rw-r--r--  runtime/entrypoints/quick/quick_entrypoints_list.h  22
-rw-r--r--  runtime/entrypoints/quick/quick_field_entrypoints.cc  54
-rw-r--r--  runtime/interpreter/interpreter.cc  2
-rw-r--r--  runtime/interpreter/mterp/mterp.cc  191
-rw-r--r--  runtime/interpreter/mterp/mterp.h  7
-rw-r--r--  test/529-checker-unresolved/src/Main.java  10
19 files changed, 198 insertions, 142 deletions
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 1aa7b5404c..3d12aedf7f 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -316,7 +316,7 @@ class TypeCheckSlowPathARM : public SlowPathCode {
instruction_->GetDexPc(),
this);
CheckEntrypointTypes<
- kQuickInstanceofNonTrivial, uint32_t, const mirror::Class*, const mirror::Class*>();
+ kQuickInstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*>();
arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
} else {
DCHECK(instruction_->IsCheckCast());
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 07b7823571..bf2c598210 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -462,7 +462,7 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
if (instruction_->IsInstanceOf()) {
arm64_codegen->InvokeRuntime(
QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc, this);
- CheckEntrypointTypes<kQuickInstanceofNonTrivial, uint32_t,
+ CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t,
const mirror::Class*, const mirror::Class*>();
Primitive::Type ret_type = instruction_->GetType();
Location ret_loc = calling_convention.GetReturnLocation(ret_type);
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 2b71da0d1c..cf8928ff25 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -415,7 +415,7 @@ class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
this,
IsDirectEntrypoint(kQuickInstanceofNonTrivial));
CheckEntrypointTypes<
- kQuickInstanceofNonTrivial, uint32_t, const mirror::Class*, const mirror::Class*>();
+ kQuickInstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*>();
Primitive::Type ret_type = instruction_->GetType();
Location ret_loc = calling_convention.GetReturnLocation(ret_type);
mips_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index aa1ba84178..cf3c42e7a9 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -362,7 +362,7 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
dex_pc,
this);
CheckEntrypointTypes<
- kQuickInstanceofNonTrivial, uint32_t, const mirror::Class*, const mirror::Class*>();
+ kQuickInstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*>();
Primitive::Type ret_type = instruction_->GetType();
Location ret_loc = calling_convention.GetReturnLocation(ret_type);
mips64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 1cc6060f68..425f31c3b2 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -349,7 +349,7 @@ class TypeCheckSlowPathX86 : public SlowPathCode {
instruction_->GetDexPc(),
this);
CheckEntrypointTypes<
- kQuickInstanceofNonTrivial, uint32_t, const mirror::Class*, const mirror::Class*>();
+ kQuickInstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*>();
} else {
DCHECK(instruction_->IsCheckCast());
x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index a0158938b5..bd4ded1790 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -369,7 +369,7 @@ class TypeCheckSlowPathX86_64 : public SlowPathCode {
dex_pc,
this);
CheckEntrypointTypes<
- kQuickInstanceofNonTrivial, uint32_t, const mirror::Class*, const mirror::Class*>();
+ kQuickInstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*>();
} else {
DCHECK(instruction_->IsCheckCast());
x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index 8f18ff3204..e48a164667 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -27,8 +27,8 @@
namespace art {
// Cast entrypoints.
-extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass,
- const mirror::Class* ref_class);
+extern "C" size_t artIsAssignableFromCode(const mirror::Class* klass,
+ const mirror::Class* ref_class);
// Read barrier entrypoints.
// art_quick_read_barrier_mark_regX uses an non-standard calling
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index c3188b6aad..5385a2f46e 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -27,8 +27,8 @@
namespace art {
// Cast entrypoints.
-extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass,
- const mirror::Class* ref_class);
+extern "C" size_t artIsAssignableFromCode(const mirror::Class* klass,
+ const mirror::Class* ref_class);
// Read barrier entrypoints.
// art_quick_read_barrier_mark_regX uses an non-standard calling
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index e3cc0e0b67..22efd199cf 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -28,8 +28,8 @@
namespace art {
// Cast entrypoints.
-extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass,
- const mirror::Class* ref_class);
+extern "C" size_t artIsAssignableFromCode(const mirror::Class* klass,
+ const mirror::Class* ref_class);
// Math entrypoints.
extern int32_t CmpgDouble(double a, double b);
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index 69c939e4cb..8f13d58de2 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -25,8 +25,8 @@
namespace art {
// Cast entrypoints.
-extern "C" uint32_t art_quick_is_assignable(const mirror::Class* klass,
- const mirror::Class* ref_class);
+extern "C" size_t art_quick_is_assignable(const mirror::Class* klass,
+ const mirror::Class* ref_class);
// Read barrier entrypoints.
// art_quick_read_barrier_mark_regX uses an non-standard calling
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index 2bea3dbf61..b566fb1ced 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -28,8 +28,8 @@
namespace art {
// Cast entrypoints.
-extern "C" uint32_t art_quick_assignable_from_code(const mirror::Class* klass,
- const mirror::Class* ref_class);
+extern "C" size_t art_quick_assignable_from_code(const mirror::Class* klass,
+ const mirror::Class* ref_class);
// Read barrier entrypoints.
// art_quick_read_barrier_mark_regX uses an non-standard calling
diff --git a/runtime/entrypoints/quick/quick_cast_entrypoints.cc b/runtime/entrypoints/quick/quick_cast_entrypoints.cc
index 968ac534b3..8db69a376f 100644
--- a/runtime/entrypoints/quick/quick_cast_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_cast_entrypoints.cc
@@ -20,7 +20,7 @@
namespace art {
// Assignable test for code, won't throw. Null and equality tests already performed
-extern "C" uint32_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class)
+extern "C" size_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class)
SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(klass != nullptr);
DCHECK(ref_class != nullptr);
diff --git a/runtime/entrypoints/quick/quick_default_externs.h b/runtime/entrypoints/quick/quick_default_externs.h
index d0dad34789..86fb8818ec 100644
--- a/runtime/entrypoints/quick/quick_default_externs.h
+++ b/runtime/entrypoints/quick/quick_default_externs.h
@@ -50,16 +50,16 @@ extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t);
extern "C" int art_quick_set64_static(uint32_t, int64_t);
extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*);
extern "C" int art_quick_set_obj_static(uint32_t, void*);
-extern "C" int8_t art_quick_get_byte_instance(uint32_t, void*);
-extern "C" uint8_t art_quick_get_boolean_instance(uint32_t, void*);
-extern "C" int8_t art_quick_get_byte_static(uint32_t);
-extern "C" uint8_t art_quick_get_boolean_static(uint32_t);
-extern "C" int16_t art_quick_get_short_instance(uint32_t, void*);
-extern "C" uint16_t art_quick_get_char_instance(uint32_t, void*);
-extern "C" int16_t art_quick_get_short_static(uint32_t);
-extern "C" uint16_t art_quick_get_char_static(uint32_t);
-extern "C" int32_t art_quick_get32_instance(uint32_t, void*);
-extern "C" int32_t art_quick_get32_static(uint32_t);
+extern "C" ssize_t art_quick_get_byte_instance(uint32_t, void*);
+extern "C" size_t art_quick_get_boolean_instance(uint32_t, void*);
+extern "C" ssize_t art_quick_get_byte_static(uint32_t);
+extern "C" size_t art_quick_get_boolean_static(uint32_t);
+extern "C" ssize_t art_quick_get_short_instance(uint32_t, void*);
+extern "C" size_t art_quick_get_char_instance(uint32_t, void*);
+extern "C" ssize_t art_quick_get_short_static(uint32_t);
+extern "C" size_t art_quick_get_char_static(uint32_t);
+extern "C" ssize_t art_quick_get32_instance(uint32_t, void*);
+extern "C" ssize_t art_quick_get32_static(uint32_t);
extern "C" int64_t art_quick_get64_instance(uint32_t, void*);
extern "C" int64_t art_quick_get64_static(uint32_t);
extern "C" void* art_quick_get_obj_instance(uint32_t, void*);
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index d6b7d9ef2e..e0ec68ee87 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -33,7 +33,7 @@
V(AllocStringFromChars, void*, int32_t, int32_t, void*) \
V(AllocStringFromString, void*, void*) \
\
- V(InstanceofNonTrivial, uint32_t, const mirror::Class*, const mirror::Class*) \
+ V(InstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*) \
V(CheckCast, void, const mirror::Class*, const mirror::Class*) \
\
V(InitializeStaticStorage, void*, uint32_t) \
@@ -51,16 +51,16 @@
V(Set64Static, int, uint32_t, int64_t) \
V(SetObjInstance, int, uint32_t, void*, void*) \
V(SetObjStatic, int, uint32_t, void*) \
- V(GetByteInstance, int8_t, uint32_t, void*) \
- V(GetBooleanInstance, uint8_t, uint32_t, void*) \
- V(GetByteStatic, int8_t, uint32_t) \
- V(GetBooleanStatic, uint8_t, uint32_t) \
- V(GetShortInstance, int16_t, uint32_t, void*) \
- V(GetCharInstance, uint16_t, uint32_t, void*) \
- V(GetShortStatic, int16_t, uint32_t) \
- V(GetCharStatic, uint16_t, uint32_t) \
- V(Get32Instance, int32_t, uint32_t, void*) \
- V(Get32Static, int32_t, uint32_t) \
+ V(GetByteInstance, ssize_t, uint32_t, void*) \
+ V(GetBooleanInstance, size_t, uint32_t, void*) \
+ V(GetByteStatic, ssize_t, uint32_t) \
+ V(GetBooleanStatic, size_t, uint32_t) \
+ V(GetShortInstance, ssize_t, uint32_t, void*) \
+ V(GetCharInstance, size_t, uint32_t, void*) \
+ V(GetShortStatic, ssize_t, uint32_t) \
+ V(GetCharStatic, size_t, uint32_t) \
+ V(Get32Instance, ssize_t, uint32_t, void*) \
+ V(Get32Static, ssize_t, uint32_t) \
V(Get64Instance, int64_t, uint32_t, void*) \
V(Get64Static, int64_t, uint32_t) \
V(GetObjInstance, void*, uint32_t, void*) \
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index a245f18d3f..1a12bd45de 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -55,9 +55,7 @@ ALWAYS_INLINE static inline ArtField* FindInstanceField(uint32_t field_idx,
return field;
}
-extern "C" int8_t artGetByteStaticFromCode(uint32_t field_idx,
- ArtMethod* referrer,
- Thread* self)
+extern "C" ssize_t artGetByteStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int8_t));
@@ -71,9 +69,7 @@ extern "C" int8_t artGetByteStaticFromCode(uint32_t field_idx,
return 0; // Will throw exception by checking with Thread::Current.
}
-extern "C" uint8_t artGetBooleanStaticFromCode(uint32_t field_idx,
- ArtMethod* referrer,
- Thread* self)
+extern "C" size_t artGetBooleanStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int8_t));
@@ -87,9 +83,7 @@ extern "C" uint8_t artGetBooleanStaticFromCode(uint32_t field_idx,
return 0; // Will throw exception by checking with Thread::Current.
}
-extern "C" int16_t artGetShortStaticFromCode(uint32_t field_idx,
- ArtMethod* referrer,
- Thread* self)
+extern "C" ssize_t artGetShortStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int16_t));
@@ -103,9 +97,7 @@ extern "C" int16_t artGetShortStaticFromCode(uint32_t field_idx,
return 0; // Will throw exception by checking with Thread::Current.
}
-extern "C" uint16_t artGetCharStaticFromCode(uint32_t field_idx,
- ArtMethod* referrer,
- Thread* self)
+extern "C" size_t artGetCharStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int16_t));
@@ -119,9 +111,7 @@ extern "C" uint16_t artGetCharStaticFromCode(uint32_t field_idx,
return 0; // Will throw exception by checking with Thread::Current.
}
-extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx,
- ArtMethod* referrer,
- Thread* self)
+extern "C" size_t artGet32StaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int32_t));
@@ -173,10 +163,10 @@ extern "C" mirror::Object* artGetObjStaticFromCode(uint32_t field_idx,
return nullptr; // Will throw exception by checking with Thread::Current.
}
-extern "C" int8_t artGetByteInstanceFromCode(uint32_t field_idx,
- mirror::Object* obj,
- ArtMethod* referrer,
- Thread* self)
+extern "C" ssize_t artGetByteInstanceFromCode(uint32_t field_idx,
+ mirror::Object* obj,
+ ArtMethod* referrer,
+ Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int8_t));
@@ -194,10 +184,10 @@ extern "C" int8_t artGetByteInstanceFromCode(uint32_t field_idx,
return 0; // Will throw exception by checking with Thread::Current.
}
-extern "C" uint8_t artGetBooleanInstanceFromCode(uint32_t field_idx,
- mirror::Object* obj,
- ArtMethod* referrer,
- Thread* self)
+extern "C" size_t artGetBooleanInstanceFromCode(uint32_t field_idx,
+ mirror::Object* obj,
+ ArtMethod* referrer,
+ Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int8_t));
@@ -214,7 +204,7 @@ extern "C" uint8_t artGetBooleanInstanceFromCode(uint32_t field_idx,
}
return 0; // Will throw exception by checking with Thread::Current.
}
-extern "C" int16_t artGetShortInstanceFromCode(uint32_t field_idx,
+extern "C" ssize_t artGetShortInstanceFromCode(uint32_t field_idx,
mirror::Object* obj,
ArtMethod* referrer,
Thread* self)
@@ -235,10 +225,10 @@ extern "C" int16_t artGetShortInstanceFromCode(uint32_t field_idx,
return 0; // Will throw exception by checking with Thread::Current.
}
-extern "C" uint16_t artGetCharInstanceFromCode(uint32_t field_idx,
- mirror::Object* obj,
- ArtMethod* referrer,
- Thread* self)
+extern "C" size_t artGetCharInstanceFromCode(uint32_t field_idx,
+ mirror::Object* obj,
+ ArtMethod* referrer,
+ Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int16_t));
@@ -256,10 +246,10 @@ extern "C" uint16_t artGetCharInstanceFromCode(uint32_t field_idx,
return 0; // Will throw exception by checking with Thread::Current.
}
-extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx,
- mirror::Object* obj,
- ArtMethod* referrer,
- Thread* self)
+extern "C" size_t artGet32InstanceFromCode(uint32_t field_idx,
+ mirror::Object* obj,
+ ArtMethod* referrer,
+ Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int32_t));
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 8c42b3abce..f1f7f42117 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -324,7 +324,7 @@ static inline JValue Execute(
} else {
while (true) {
// Mterp does not support all instrumentation/debugging.
- if (MterpShouldSwitchInterpreters()) {
+ if (MterpShouldSwitchInterpreters() != 0) {
return ExecuteSwitchImpl<false, false>(self, code_item, shadow_frame, result_register,
false);
}
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index 8aa87b1a8c..c25cd78309 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -57,7 +57,7 @@ void InitMterpTls(Thread* self) {
* Returns 3 if we don't find a match (it's the size of the sparse-switch
* instruction).
*/
-extern "C" int32_t MterpDoSparseSwitch(const uint16_t* switchData, int32_t testVal) {
+extern "C" ssize_t MterpDoSparseSwitch(const uint16_t* switchData, int32_t testVal) {
const int kInstrLen = 3;
uint16_t size;
const int32_t* keys;
@@ -109,7 +109,7 @@ extern "C" int32_t MterpDoSparseSwitch(const uint16_t* switchData, int32_t testV
return kInstrLen;
}
-extern "C" int32_t MterpDoPackedSwitch(const uint16_t* switchData, int32_t testVal) {
+extern "C" ssize_t MterpDoPackedSwitch(const uint16_t* switchData, int32_t testVal) {
const int kInstrLen = 3;
/*
@@ -142,7 +142,7 @@ extern "C" int32_t MterpDoPackedSwitch(const uint16_t* switchData, int32_t testV
return entries[index];
}
-extern "C" bool MterpShouldSwitchInterpreters()
+extern "C" size_t MterpShouldSwitchInterpreters()
SHARED_REQUIRES(Locks::mutator_lock_) {
const instrumentation::Instrumentation* const instrumentation =
Runtime::Current()->GetInstrumentation();
@@ -150,8 +150,10 @@ extern "C" bool MterpShouldSwitchInterpreters()
}
-extern "C" bool MterpInvokeVirtual(Thread* self, ShadowFrame* shadow_frame,
- uint16_t* dex_pc_ptr, uint16_t inst_data )
+extern "C" size_t MterpInvokeVirtual(Thread* self,
+ ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr,
+ uint16_t inst_data)
SHARED_REQUIRES(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
@@ -159,8 +161,10 @@ extern "C" bool MterpInvokeVirtual(Thread* self, ShadowFrame* shadow_frame,
self, *shadow_frame, inst, inst_data, result_register);
}
-extern "C" bool MterpInvokeSuper(Thread* self, ShadowFrame* shadow_frame,
- uint16_t* dex_pc_ptr, uint16_t inst_data )
+extern "C" size_t MterpInvokeSuper(Thread* self,
+ ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr,
+ uint16_t inst_data)
SHARED_REQUIRES(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
@@ -168,8 +172,10 @@ extern "C" bool MterpInvokeSuper(Thread* self, ShadowFrame* shadow_frame,
self, *shadow_frame, inst, inst_data, result_register);
}
-extern "C" bool MterpInvokeInterface(Thread* self, ShadowFrame* shadow_frame,
- uint16_t* dex_pc_ptr, uint16_t inst_data )
+extern "C" size_t MterpInvokeInterface(Thread* self,
+ ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr,
+ uint16_t inst_data)
SHARED_REQUIRES(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
@@ -177,8 +183,10 @@ extern "C" bool MterpInvokeInterface(Thread* self, ShadowFrame* shadow_frame,
self, *shadow_frame, inst, inst_data, result_register);
}
-extern "C" bool MterpInvokeDirect(Thread* self, ShadowFrame* shadow_frame,
- uint16_t* dex_pc_ptr, uint16_t inst_data )
+extern "C" size_t MterpInvokeDirect(Thread* self,
+ ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr,
+ uint16_t inst_data)
SHARED_REQUIRES(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
@@ -186,8 +194,10 @@ extern "C" bool MterpInvokeDirect(Thread* self, ShadowFrame* shadow_frame,
self, *shadow_frame, inst, inst_data, result_register);
}
-extern "C" bool MterpInvokeStatic(Thread* self, ShadowFrame* shadow_frame,
- uint16_t* dex_pc_ptr, uint16_t inst_data )
+extern "C" size_t MterpInvokeStatic(Thread* self,
+ ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr,
+ uint16_t inst_data)
SHARED_REQUIRES(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
@@ -195,8 +205,10 @@ extern "C" bool MterpInvokeStatic(Thread* self, ShadowFrame* shadow_frame,
self, *shadow_frame, inst, inst_data, result_register);
}
-extern "C" bool MterpInvokeVirtualRange(Thread* self, ShadowFrame* shadow_frame,
- uint16_t* dex_pc_ptr, uint16_t inst_data )
+extern "C" size_t MterpInvokeVirtualRange(Thread* self,
+ ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr,
+ uint16_t inst_data)
SHARED_REQUIRES(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
@@ -204,8 +216,10 @@ extern "C" bool MterpInvokeVirtualRange(Thread* self, ShadowFrame* shadow_frame,
self, *shadow_frame, inst, inst_data, result_register);
}
-extern "C" bool MterpInvokeSuperRange(Thread* self, ShadowFrame* shadow_frame,
- uint16_t* dex_pc_ptr, uint16_t inst_data )
+extern "C" size_t MterpInvokeSuperRange(Thread* self,
+ ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr,
+ uint16_t inst_data)
SHARED_REQUIRES(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
@@ -213,8 +227,10 @@ extern "C" bool MterpInvokeSuperRange(Thread* self, ShadowFrame* shadow_frame,
self, *shadow_frame, inst, inst_data, result_register);
}
-extern "C" bool MterpInvokeInterfaceRange(Thread* self, ShadowFrame* shadow_frame,
- uint16_t* dex_pc_ptr, uint16_t inst_data )
+extern "C" size_t MterpInvokeInterfaceRange(Thread* self,
+ ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr,
+ uint16_t inst_data)
SHARED_REQUIRES(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
@@ -222,8 +238,10 @@ extern "C" bool MterpInvokeInterfaceRange(Thread* self, ShadowFrame* shadow_fram
self, *shadow_frame, inst, inst_data, result_register);
}
-extern "C" bool MterpInvokeDirectRange(Thread* self, ShadowFrame* shadow_frame,
- uint16_t* dex_pc_ptr, uint16_t inst_data )
+extern "C" size_t MterpInvokeDirectRange(Thread* self,
+ ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr,
+ uint16_t inst_data)
SHARED_REQUIRES(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
@@ -231,8 +249,10 @@ extern "C" bool MterpInvokeDirectRange(Thread* self, ShadowFrame* shadow_frame,
self, *shadow_frame, inst, inst_data, result_register);
}
-extern "C" bool MterpInvokeStaticRange(Thread* self, ShadowFrame* shadow_frame,
- uint16_t* dex_pc_ptr, uint16_t inst_data )
+extern "C" size_t MterpInvokeStaticRange(Thread* self,
+ ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr,
+ uint16_t inst_data)
SHARED_REQUIRES(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
@@ -240,8 +260,10 @@ extern "C" bool MterpInvokeStaticRange(Thread* self, ShadowFrame* shadow_frame,
self, *shadow_frame, inst, inst_data, result_register);
}
-extern "C" bool MterpInvokeVirtualQuick(Thread* self, ShadowFrame* shadow_frame,
- uint16_t* dex_pc_ptr, uint16_t inst_data )
+extern "C" size_t MterpInvokeVirtualQuick(Thread* self,
+ ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr,
+ uint16_t inst_data)
SHARED_REQUIRES(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
@@ -249,8 +271,10 @@ extern "C" bool MterpInvokeVirtualQuick(Thread* self, ShadowFrame* shadow_frame,
self, *shadow_frame, inst, inst_data, result_register);
}
-extern "C" bool MterpInvokeVirtualQuickRange(Thread* self, ShadowFrame* shadow_frame,
- uint16_t* dex_pc_ptr, uint16_t inst_data )
+extern "C" size_t MterpInvokeVirtualQuickRange(Thread* self,
+ ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr,
+ uint16_t inst_data)
SHARED_REQUIRES(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
@@ -262,8 +286,10 @@ extern "C" void MterpThreadFenceForConstructor() {
QuasiAtomic::ThreadFenceForConstructor();
}
-extern "C" bool MterpConstString(uint32_t index, uint32_t tgt_vreg, ShadowFrame* shadow_frame,
- Thread* self)
+extern "C" size_t MterpConstString(uint32_t index,
+ uint32_t tgt_vreg,
+ ShadowFrame* shadow_frame,
+ Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_) {
String* s = ResolveString(self, *shadow_frame, index);
if (UNLIKELY(s == nullptr)) {
@@ -273,8 +299,10 @@ extern "C" bool MterpConstString(uint32_t index, uint32_t tgt_vreg, ShadowFrame*
return false;
}
-extern "C" bool MterpConstClass(uint32_t index, uint32_t tgt_vreg, ShadowFrame* shadow_frame,
- Thread* self)
+extern "C" size_t MterpConstClass(uint32_t index,
+ uint32_t tgt_vreg,
+ ShadowFrame* shadow_frame,
+ Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_) {
Class* c = ResolveVerifyAndClinit(index, shadow_frame->GetMethod(), self, false, false);
if (UNLIKELY(c == nullptr)) {
@@ -284,8 +312,10 @@ extern "C" bool MterpConstClass(uint32_t index, uint32_t tgt_vreg, ShadowFrame*
return false;
}
-extern "C" bool MterpCheckCast(uint32_t index, StackReference<mirror::Object>* vreg_addr,
- art::ArtMethod* method, Thread* self)
+extern "C" size_t MterpCheckCast(uint32_t index,
+ StackReference<mirror::Object>* vreg_addr,
+ art::ArtMethod* method,
+ Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_) {
Class* c = ResolveVerifyAndClinit(index, method, self, false, false);
if (UNLIKELY(c == nullptr)) {
@@ -300,8 +330,10 @@ extern "C" bool MterpCheckCast(uint32_t index, StackReference<mirror::Object>* v
return false;
}
-extern "C" bool MterpInstanceOf(uint32_t index, StackReference<mirror::Object>* vreg_addr,
- art::ArtMethod* method, Thread* self)
+extern "C" size_t MterpInstanceOf(uint32_t index,
+ StackReference<mirror::Object>* vreg_addr,
+ art::ArtMethod* method,
+ Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_) {
Class* c = ResolveVerifyAndClinit(index, method, self, false, false);
if (UNLIKELY(c == nullptr)) {
@@ -312,12 +344,12 @@ extern "C" bool MterpInstanceOf(uint32_t index, StackReference<mirror::Object>*
return (obj != nullptr) && obj->InstanceOf(c);
}
-extern "C" bool MterpFillArrayData(Object* obj, const Instruction::ArrayDataPayload* payload)
+extern "C" size_t MterpFillArrayData(Object* obj, const Instruction::ArrayDataPayload* payload)
SHARED_REQUIRES(Locks::mutator_lock_) {
return FillArrayData(obj, payload);
}
-extern "C" bool MterpNewInstance(ShadowFrame* shadow_frame, Thread* self, uint32_t inst_data)
+extern "C" size_t MterpNewInstance(ShadowFrame* shadow_frame, Thread* self, uint32_t inst_data)
SHARED_REQUIRES(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
Object* obj = nullptr;
@@ -342,7 +374,7 @@ extern "C" bool MterpNewInstance(ShadowFrame* shadow_frame, Thread* self, uint32
return true;
}
-extern "C" bool MterpSputObject(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
+extern "C" size_t MterpSputObject(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
uint32_t inst_data, Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
@@ -350,23 +382,27 @@ extern "C" bool MterpSputObject(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
(self, *shadow_frame, inst, inst_data);
}
-extern "C" bool MterpIputObject(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
- uint32_t inst_data, Thread* self)
+extern "C" size_t MterpIputObject(ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr,
+ uint32_t inst_data,
+ Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, false, false>
(self, *shadow_frame, inst, inst_data);
}
-extern "C" bool MterpIputObjectQuick(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
- uint32_t inst_data)
+extern "C" size_t MterpIputObjectQuick(ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr,
+ uint32_t inst_data)
SHARED_REQUIRES(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoIPutQuick<Primitive::kPrimNot, false>(*shadow_frame, inst, inst_data);
}
-extern "C" bool MterpAputObject(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
- uint32_t inst_data)
+extern "C" size_t MterpAputObject(ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr,
+ uint32_t inst_data)
SHARED_REQUIRES(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
Object* a = shadow_frame->GetVRegReference(inst->VRegB_23x());
@@ -383,24 +419,27 @@ extern "C" bool MterpAputObject(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
return false;
}
-extern "C" bool MterpFilledNewArray(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
- Thread* self)
+extern "C" size_t MterpFilledNewArray(ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr,
+ Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoFilledNewArray<false, false, false>(inst, *shadow_frame, self,
shadow_frame->GetResultRegister());
}
-extern "C" bool MterpFilledNewArrayRange(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
- Thread* self)
+extern "C" size_t MterpFilledNewArrayRange(ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr,
+ Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoFilledNewArray<true, false, false>(inst, *shadow_frame, self,
shadow_frame->GetResultRegister());
}
-extern "C" bool MterpNewArray(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
- uint32_t inst_data, Thread* self)
+extern "C" size_t MterpNewArray(ShadowFrame* shadow_frame,
+ uint16_t* dex_pc_ptr,
+ uint32_t inst_data, Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
int32_t length = shadow_frame->GetVReg(inst->VRegB_22c(inst_data));
@@ -414,7 +453,7 @@ extern "C" bool MterpNewArray(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
return true;
}
-extern "C" bool MterpHandleException(Thread* self, ShadowFrame* shadow_frame)
+extern "C" size_t MterpHandleException(Thread* self, ShadowFrame* shadow_frame)
SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(self->IsExceptionPending());
const instrumentation::Instrumentation* const instrumentation =
@@ -526,14 +565,16 @@ extern "C" void MterpLogSuspendFallback(Thread* self, ShadowFrame* shadow_frame,
}
}
-extern "C" bool MterpSuspendCheck(Thread* self)
+extern "C" size_t MterpSuspendCheck(Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_) {
self->AllowThreadSuspension();
return MterpShouldSwitchInterpreters();
}
-extern "C" int artSet64IndirectStaticFromMterp(uint32_t field_idx, ArtMethod* referrer,
- uint64_t* new_value, Thread* self)
+extern "C" ssize_t artSet64IndirectStaticFromMterp(uint32_t field_idx,
+ ArtMethod* referrer,
+ uint64_t* new_value,
+ Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int64_t));
@@ -551,8 +592,10 @@ extern "C" int artSet64IndirectStaticFromMterp(uint32_t field_idx, ArtMethod* re
return -1; // failure
}
-extern "C" int artSet8InstanceFromMterp(uint32_t field_idx, mirror::Object* obj, uint8_t new_value,
- ArtMethod* referrer)
+extern "C" ssize_t artSet8InstanceFromMterp(uint32_t field_idx,
+ mirror::Object* obj,
+ uint8_t new_value,
+ ArtMethod* referrer)
SHARED_REQUIRES(Locks::mutator_lock_) {
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int8_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -568,8 +611,10 @@ extern "C" int artSet8InstanceFromMterp(uint32_t field_idx, mirror::Object* obj,
return -1; // failure
}
-extern "C" int artSet16InstanceFromMterp(uint32_t field_idx, mirror::Object* obj, uint16_t new_value,
- ArtMethod* referrer)
+extern "C" ssize_t artSet16InstanceFromMterp(uint32_t field_idx,
+ mirror::Object* obj,
+ uint16_t new_value,
+ ArtMethod* referrer)
SHARED_REQUIRES(Locks::mutator_lock_) {
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
sizeof(int16_t));
@@ -586,8 +631,10 @@ extern "C" int artSet16InstanceFromMterp(uint32_t field_idx, mirror::Object* obj
return -1; // failure
}
-extern "C" int artSet32InstanceFromMterp(uint32_t field_idx, mirror::Object* obj,
- uint32_t new_value, ArtMethod* referrer)
+extern "C" ssize_t artSet32InstanceFromMterp(uint32_t field_idx,
+ mirror::Object* obj,
+ uint32_t new_value,
+ ArtMethod* referrer)
SHARED_REQUIRES(Locks::mutator_lock_) {
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
sizeof(int32_t));
@@ -598,8 +645,10 @@ extern "C" int artSet32InstanceFromMterp(uint32_t field_idx, mirror::Object* obj
return -1; // failure
}
-extern "C" int artSet64InstanceFromMterp(uint32_t field_idx, mirror::Object* obj,
- uint64_t* new_value, ArtMethod* referrer)
+extern "C" ssize_t artSet64InstanceFromMterp(uint32_t field_idx,
+ mirror::Object* obj,
+ uint64_t* new_value,
+ ArtMethod* referrer)
SHARED_REQUIRES(Locks::mutator_lock_) {
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
sizeof(int64_t));
@@ -610,8 +659,10 @@ extern "C" int artSet64InstanceFromMterp(uint32_t field_idx, mirror::Object* obj
return -1; // failure
}
-extern "C" int artSetObjInstanceFromMterp(uint32_t field_idx, mirror::Object* obj,
- mirror::Object* new_value, ArtMethod* referrer)
+extern "C" ssize_t artSetObjInstanceFromMterp(uint32_t field_idx,
+ mirror::Object* obj,
+ mirror::Object* new_value,
+ ArtMethod* referrer)
SHARED_REQUIRES(Locks::mutator_lock_) {
ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite,
sizeof(mirror::HeapReference<mirror::Object>));
@@ -651,7 +702,7 @@ extern "C" mirror::Object* artIGetObjectFromMterp(mirror::Object* obj, uint32_t
* to the full instrumentation via MterpAddHotnessBatch. Called once on entry to the method,
* and regenerated following batch updates.
*/
-extern "C" int MterpSetUpHotnessCountdown(ArtMethod* method, ShadowFrame* shadow_frame)
+extern "C" ssize_t MterpSetUpHotnessCountdown(ArtMethod* method, ShadowFrame* shadow_frame)
SHARED_REQUIRES(Locks::mutator_lock_) {
uint16_t hotness_count = method->GetCounter();
int32_t countdown_value = jit::kJitHotnessDisabled;
@@ -689,7 +740,7 @@ extern "C" int MterpSetUpHotnessCountdown(ArtMethod* method, ShadowFrame* shadow
* Report a batch of hotness events to the instrumentation and then return the new
* countdown value to the next time we should report.
*/
-extern "C" int16_t MterpAddHotnessBatch(ArtMethod* method,
+extern "C" ssize_t MterpAddHotnessBatch(ArtMethod* method,
ShadowFrame* shadow_frame,
Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_) {
@@ -702,7 +753,7 @@ extern "C" int16_t MterpAddHotnessBatch(ArtMethod* method,
}
// TUNING: Unused by arm/arm64/x86/x86_64. Remove when mips/mips64 mterps support batch updates.
-extern "C" bool MterpProfileBranch(Thread* self, ShadowFrame* shadow_frame, int32_t offset)
+extern "C" size_t MterpProfileBranch(Thread* self, ShadowFrame* shadow_frame, int32_t offset)
SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* method = shadow_frame->GetMethod();
JValue* result = shadow_frame->GetResultRegister();
@@ -719,9 +770,9 @@ extern "C" bool MterpProfileBranch(Thread* self, ShadowFrame* shadow_frame, int
}
}
-extern "C" bool MterpMaybeDoOnStackReplacement(Thread* self,
- ShadowFrame* shadow_frame,
- int32_t offset)
+extern "C" size_t MterpMaybeDoOnStackReplacement(Thread* self,
+ ShadowFrame* shadow_frame,
+ int32_t offset)
SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* method = shadow_frame->GetMethod();
JValue* result = shadow_frame->GetResultRegister();
diff --git a/runtime/interpreter/mterp/mterp.h b/runtime/interpreter/mterp/mterp.h
index 88e17bc33f..45ab98b9a6 100644
--- a/runtime/interpreter/mterp/mterp.h
+++ b/runtime/interpreter/mterp/mterp.h
@@ -30,7 +30,12 @@ namespace interpreter {
void InitMterpTls(Thread* self);
void CheckMterpAsmConstants();
-extern "C" bool MterpShouldSwitchInterpreters();
+
+// The return type should be 'bool' but our assembly stubs expect 'bool'
+// to be zero-extended to the whole register and that's broken on x86-64
+// as a 'bool' is returned in 'al' and the rest of 'rax' is garbage.
+// TODO: Fix mterp and stubs and revert this workaround. http://b/30232671
+extern "C" size_t MterpShouldSwitchInterpreters();
// Poison value for TestExportPC. If we segfault with this value, it means that a mterp
// handler for a recent opcode failed to export the Dalvik PC prior to a possible exit from
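A minimal sketch of the workaround described in the mterp.h comment above
(hypothetical names, not the real mterp interface): the assembly stubs test the
whole return register, so the C++ side returns a full-width 0/1 instead of bool,
whose upper bits are undefined on x86-64 where bool comes back in 'al' only.

#include <cstddef>  // size_t

// What the stubs effectively do: compare the entire register against zero.
static bool StubWouldSwitch(size_t raw_return_register) {
  return raw_return_register != 0;
}

// Returning bool would only define the low byte of the register on x86-64;
// returning size_t 0/1 defines all of it, which is what the stubs read.
size_t ShouldSwitchInterpreters(bool instrumentation_active) {
  return instrumentation_active ? 1u : 0u;
}

int main() {
  return StubWouldSwitch(ShouldSwitchInterpreters(false)) ? 1 : 0;
}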
diff --git a/test/529-checker-unresolved/src/Main.java b/test/529-checker-unresolved/src/Main.java
index 5a36ba5d9c..7b5cbc1504 100644
--- a/test/529-checker-unresolved/src/Main.java
+++ b/test/529-checker-unresolved/src/Main.java
@@ -77,6 +77,16 @@ public class Main extends UnresolvedSuperClass {
expectEquals(123456789123456789f, UnresolvedClass.staticFloat);
expectEquals(123456789123456789d, UnresolvedClass.staticDouble);
expectEquals(o, UnresolvedClass.staticObject);
+
+ // Check "large" values.
+
+ UnresolvedClass.staticByte = (byte)-1;
+ UnresolvedClass.staticChar = (char)32768;
+ UnresolvedClass.staticInt = -1;
+
+ expectEquals((byte)-1, UnresolvedClass.staticByte);
+ expectEquals((char)32768, UnresolvedClass.staticChar);
+ expectEquals(-1, UnresolvedClass.staticInt);
}
/// CHECK-START: void Main.callUnresolvedInstanceFieldAccess(UnresolvedClass) register (before)