Merge "Remove InvokeVirtualOrInterface event from instrumentation."
diff --git a/build/Android.bp b/build/Android.bp
index 78fd21a..19d54d5 100644
--- a/build/Android.bp
+++ b/build/Android.bp
@@ -106,6 +106,10 @@
"-Wunreachable-code-break",
"-Wunreachable-code-return",
+ // Disable warning for use of offsetof on non-standard layout type.
+ // We use it to implement OFFSETOF_MEMBER - see macros.h.
+ "-Wno-invalid-offsetof",
+
// Enable thread annotations for std::mutex, etc.
"-D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS",
],
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 62e8e02..09376dd 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -219,12 +219,6 @@
jni_asm->cfi().SetEnabled(compiler_options.GenerateAnyDebugInfo());
jni_asm->SetEmitRunTimeChecksInDebugMode(compiler_options.EmitRunTimeChecksInDebugMode());
- // Offsets into data structures
- // TODO: if cross compiling these offsets are for the host not the target
- const Offset functions(OFFSETOF_MEMBER(JNIEnvExt, functions));
- const Offset monitor_enter(OFFSETOF_MEMBER(JNINativeInterface, MonitorEnter));
- const Offset monitor_exit(OFFSETOF_MEMBER(JNINativeInterface, MonitorExit));
-
// 1. Build the frame saving all callee saves, Method*, and PC return address.
const size_t frame_size(main_jni_conv->FrameSize()); // Excludes outgoing args.
ArrayRef<const ManagedRegister> callee_save_regs = main_jni_conv->CalleeSaveRegisters();
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 38e4c89..02d2f55 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -446,7 +446,7 @@
void IntrinsicCodeGeneratorARMVIXL::VisitMathRint(HInvoke* invoke) {
DCHECK(codegen_->GetInstructionSetFeatures().HasARMv8AInstructions());
ArmVIXLAssembler* assembler = GetAssembler();
- __ Vrintn(F64, F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
+ __ Vrintn(F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathRoundFloat(HInvoke* invoke) {
@@ -481,7 +481,7 @@
// Handle input < 0 cases.
// If input is negative but not a tie, previous result (round to nearest) is valid.
// If input is a negative tie, change rounding direction to positive infinity, out_reg += 1.
- __ Vrinta(F32, F32, temp1, in_reg);
+ __ Vrinta(F32, temp1, in_reg);
__ Vmov(temp2, 0.5);
__ Vsub(F32, temp1, in_reg, temp1);
__ Vcmp(F32, temp1, temp2);
@@ -2945,7 +2945,7 @@
void IntrinsicCodeGeneratorARMVIXL::VisitMathCeil(HInvoke* invoke) {
ArmVIXLAssembler* assembler = GetAssembler();
DCHECK(codegen_->GetInstructionSetFeatures().HasARMv8AInstructions());
- __ Vrintp(F64, F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
+ __ Vrintp(F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathFloor(HInvoke* invoke) {
@@ -2957,7 +2957,7 @@
void IntrinsicCodeGeneratorARMVIXL::VisitMathFloor(HInvoke* invoke) {
ArmVIXLAssembler* assembler = GetAssembler();
DCHECK(codegen_->GetInstructionSetFeatures().HasARMv8AInstructions());
- __ Vrintm(F64, F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
+ __ Vrintm(F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
}
void IntrinsicLocationsBuilderARMVIXL::VisitIntegerValueOf(HInvoke* invoke) {
diff --git a/dexlayout/dexlayout_main.cc b/dexlayout/dexlayout_main.cc
index a851cfa..78a8dd6 100644
--- a/dexlayout/dexlayout_main.cc
+++ b/dexlayout/dexlayout_main.cc
@@ -46,7 +46,7 @@
LOG(ERROR) << "Copyright (C) 2016 The Android Open Source Project\n";
LOG(ERROR) << kProgramName
<< ": [-a] [-c] [-d] [-e] [-f] [-h] [-i] [-l layout] [-o outfile] [-p profile]"
- " [-s] [-t] [-v] [-w directory] dexfile...\n";
+ " [-s] [-t] [-u] [-v] [-w directory] dexfile...\n";
LOG(ERROR) << " -a : display annotations";
LOG(ERROR) << " -b : build dex_ir";
LOG(ERROR) << " -c : verify checksum and exit";
diff --git a/libartbase/base/macros.h b/libartbase/base/macros.h
index 315f4d2..323fa4e 100644
--- a/libartbase/base/macros.h
+++ b/libartbase/base/macros.h
@@ -42,8 +42,16 @@
private: \
void* operator new(size_t) = delete // NOLINT
-#define OFFSETOF_MEMBER(t, f) \
- (reinterpret_cast<uintptr_t>(&reinterpret_cast<t*>(16)->f) - static_cast<uintptr_t>(16u)) // NOLINT
+// offsetof is not defined by the spec on types with non-standard layout,
+// however it is implemented by compilers in practice.
+// (note that reinterpret_cast is not valid constexpr)
+//
+// Alternative approach would be something like:
+// #define OFFSETOF_HELPER(t, f) \
+// (reinterpret_cast<uintptr_t>(&reinterpret_cast<t*>(16)->f) - static_cast<uintptr_t>(16u))
+// #define OFFSETOF_MEMBER(t, f) \
+// (__builtin_constant_p(OFFSETOF_HELPER(t,f)) ? OFFSETOF_HELPER(t,f) : OFFSETOF_HELPER(t,f))
+#define OFFSETOF_MEMBER(t, f) offsetof(t, f)
#define OFFSETOF_MEMBERPTR(t, f) \
(reinterpret_cast<uintptr_t>(&(reinterpret_cast<t*>(16)->*f)) - static_cast<uintptr_t>(16)) // NOLINT
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 570b0e6..18ddcc0 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -103,7 +103,7 @@
bool CASDeclaringClass(ObjPtr<mirror::Class> expected_class, ObjPtr<mirror::Class> desired_class)
REQUIRES_SHARED(Locks::mutator_lock_);
- static MemberOffset DeclaringClassOffset() {
+ static constexpr MemberOffset DeclaringClassOffset() {
return MemberOffset(OFFSETOF_MEMBER(ArtMethod, declaring_class_));
}
@@ -118,7 +118,7 @@
access_flags_.store(new_access_flags, std::memory_order_relaxed);
}
- static MemberOffset AccessFlagsOffset() {
+ static constexpr MemberOffset AccessFlagsOffset() {
return MemberOffset(OFFSETOF_MEMBER(ArtMethod, access_flags_));
}
@@ -351,11 +351,11 @@
method_index_ = new_method_index;
}
- static MemberOffset DexMethodIndexOffset() {
+ static constexpr MemberOffset DexMethodIndexOffset() {
return MemberOffset(OFFSETOF_MEMBER(ArtMethod, dex_method_index_));
}
- static MemberOffset MethodIndexOffset() {
+ static constexpr MemberOffset MethodIndexOffset() {
return MemberOffset(OFFSETOF_MEMBER(ArtMethod, method_index_));
}
@@ -431,16 +431,16 @@
void UnregisterNative() REQUIRES_SHARED(Locks::mutator_lock_);
- static MemberOffset DataOffset(PointerSize pointer_size) {
+ static constexpr MemberOffset DataOffset(PointerSize pointer_size) {
return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER(
PtrSizedFields, data_) / sizeof(void*) * static_cast<size_t>(pointer_size));
}
- static MemberOffset EntryPointFromJniOffset(PointerSize pointer_size) {
+ static constexpr MemberOffset EntryPointFromJniOffset(PointerSize pointer_size) {
return DataOffset(pointer_size);
}
- static MemberOffset EntryPointFromQuickCompiledCodeOffset(PointerSize pointer_size) {
+ static constexpr MemberOffset EntryPointFromQuickCompiledCodeOffset(PointerSize pointer_size) {
return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER(
PtrSizedFields, entry_point_from_quick_compiled_code_) / sizeof(void*)
* static_cast<size_t>(pointer_size));
@@ -652,7 +652,7 @@
return hotness_count_;
}
- static MemberOffset HotnessCountOffset() {
+ static constexpr MemberOffset HotnessCountOffset() {
return MemberOffset(OFFSETOF_MEMBER(ArtMethod, hotness_count_));
}
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 1045d2a..4c52ed3 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -420,28 +420,17 @@
#undef EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL
#undef EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL
+// Follow virtual/interface indirections if applicable.
+// Will throw a null-pointer exception if the object is null.
template<InvokeType type, bool access_check>
-inline ArtMethod* FindMethodFromCode(uint32_t method_idx,
- ObjPtr<mirror::Object>* this_object,
- ArtMethod* referrer,
- Thread* self) {
+ALWAYS_INLINE ArtMethod* FindMethodToCall(uint32_t method_idx,
+ ArtMethod* resolved_method,
+ ObjPtr<mirror::Object>* this_object,
+ ArtMethod* referrer,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
- constexpr ClassLinker::ResolveMode resolve_mode =
- access_check ? ClassLinker::ResolveMode::kCheckICCEAndIAE
- : ClassLinker::ResolveMode::kNoChecks;
- ArtMethod* resolved_method;
- if (type == kStatic) {
- resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, referrer, type);
- } else {
- StackHandleScope<1> hs(self);
- HandleWrapperObjPtr<mirror::Object> h_this(hs.NewHandleWrapper(this_object));
- resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, referrer, type);
- }
- if (UNLIKELY(resolved_method == nullptr)) {
- DCHECK(self->IsExceptionPending()); // Throw exception and unwind.
- return nullptr; // Failure.
- }
- // Next, null pointer check.
+ // Null pointer check.
if (UNLIKELY(*this_object == nullptr && type != kStatic)) {
if (UNLIKELY(resolved_method->GetDeclaringClass()->IsStringClass() &&
resolved_method->IsConstructor())) {
@@ -570,6 +559,31 @@
}
}
+template<InvokeType type, bool access_check>
+inline ArtMethod* FindMethodFromCode(uint32_t method_idx,
+ ObjPtr<mirror::Object>* this_object,
+ ArtMethod* referrer,
+ Thread* self) {
+ ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+ constexpr ClassLinker::ResolveMode resolve_mode =
+ access_check ? ClassLinker::ResolveMode::kCheckICCEAndIAE
+ : ClassLinker::ResolveMode::kNoChecks;
+ ArtMethod* resolved_method;
+ if (type == kStatic) {
+ resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, referrer, type);
+ } else {
+ StackHandleScope<1> hs(self);
+ HandleWrapperObjPtr<mirror::Object> h_this(hs.NewHandleWrapper(this_object));
+ resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, referrer, type);
+ }
+ if (UNLIKELY(resolved_method == nullptr)) {
+ DCHECK(self->IsExceptionPending()); // Throw exception and unwind.
+ return nullptr; // Failure.
+ }
+ return FindMethodToCall<type, access_check>(
+ method_idx, resolved_method, this_object, referrer, self);
+}
+
// Explicit template declarations of FindMethodFromCode for all invoke types.
#define EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, _access_check) \
template REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE \
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 0562167..bf26aea 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -830,16 +830,16 @@
size_t page_release_size_threshold = kDefaultPageReleaseSizeThreshold);
~RosAlloc();
- static size_t RunFreeListOffset() {
+ static constexpr size_t RunFreeListOffset() {
return OFFSETOF_MEMBER(Run, free_list_);
}
- static size_t RunFreeListHeadOffset() {
+ static constexpr size_t RunFreeListHeadOffset() {
return OFFSETOF_MEMBER(SlotFreeList<false>, head_);
}
- static size_t RunFreeListSizeOffset() {
+ static constexpr size_t RunFreeListSizeOffset() {
return OFFSETOF_MEMBER(SlotFreeList<false>, size_);
}
- static size_t RunSlotNextOffset() {
+ static constexpr size_t RunSlotNextOffset() {
return OFFSETOF_MEMBER(Slot, next_);
}
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 056c8ec..1e4239e 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -136,11 +136,34 @@
}
const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
const uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
+ ArtMethod* sf_method = shadow_frame.GetMethod();
+
+ // Try to find the method in small thread-local cache first.
+ InterpreterCache* tls_cache = self->GetInterpreterCache();
+ size_t tls_value;
+ ArtMethod* resolved_method;
+ if (LIKELY(tls_cache->Get(inst, &tls_value))) {
+ resolved_method = reinterpret_cast<ArtMethod*>(tls_value);
+ } else {
+ ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+ constexpr ClassLinker::ResolveMode resolve_mode =
+ do_access_check ? ClassLinker::ResolveMode::kCheckICCEAndIAE
+ : ClassLinker::ResolveMode::kNoChecks;
+ resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, sf_method, type);
+ if (UNLIKELY(resolved_method == nullptr)) {
+ CHECK(self->IsExceptionPending());
+ result->SetJ(0);
+ return false;
+ }
+ tls_cache->Set(inst, reinterpret_cast<size_t>(resolved_method));
+ }
+
+ // Null pointer check and virtual method resolution.
ObjPtr<mirror::Object> receiver =
(type == kStatic) ? nullptr : shadow_frame.GetVRegReference(vregC);
- ArtMethod* sf_method = shadow_frame.GetMethod();
- ArtMethod* const called_method = FindMethodFromCode<type, do_access_check>(
- method_idx, &receiver, sf_method, self);
+ ArtMethod* const called_method = FindMethodToCall<type, do_access_check>(
+ method_idx, resolved_method, &receiver, sf_method, self);
+
// The shadow frame should already be pushed, so we don't need to update it.
if (UNLIKELY(called_method == nullptr)) {
CHECK(self->IsExceptionPending());
diff --git a/runtime/interpreter/mterp/arm/main.S b/runtime/interpreter/mterp/arm/main.S
index 4da67b2..4951220 100644
--- a/runtime/interpreter/mterp/arm/main.S
+++ b/runtime/interpreter/mterp/arm/main.S
@@ -259,14 +259,6 @@
mov \reg, #0
str \reg, [rREFS, \vreg, lsl #2]
.endm
-.macro SET_VREG_WIDE regLo, regHi, vreg
- add ip, rFP, \vreg, lsl #2
- strd \regLo, \regHi, [ip]
- mov \regLo, #0
- mov \regHi, #0
- add ip, rREFS, \vreg, lsl #2
- strd \regLo, \regHi, [ip]
-.endm
.macro SET_VREG_OBJECT reg, vreg, tmpreg
str \reg, [rFP, \vreg, lsl #2]
str \reg, [rREFS, \vreg, lsl #2]
diff --git a/runtime/interpreter/mterp/arm/object.S b/runtime/interpreter/mterp/arm/object.S
index ccdc8f5..af1ece2 100644
--- a/runtime/interpreter/mterp/arm/object.S
+++ b/runtime/interpreter/mterp/arm/object.S
@@ -34,65 +34,29 @@
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction
-%def op_iget(is_object=False, is_wide=False, load="ldr", helper="MterpIGetU32"):
- @ Fast-path which gets the field offset from thread-local cache.
- add r0, rSELF, #THREAD_INTERPRETER_CACHE_OFFSET @ cache address
- ubfx r1, rPC, #2, #THREAD_INTERPRETER_CACHE_SIZE_LOG2 @ entry index
- add r0, r0, r1, lsl #3 @ entry address within the cache
- ldrd r0, r1, [r0] @ entry key (pc) and value (offset)
- mov r2, rINST, lsr #12 @ B
- GET_VREG r2, r2 @ object we're operating on
- cmp r0, rPC
- bne .L_${opcode}_slow_path @ cache miss
- cmp r2, #0
- beq common_errNullObject @ null object
-% if is_wide:
- ldrd r0, r1, [r2, r1] @ r0,r1 <- obj.field
-% else:
- ${load} r0, [r2, r1] @ r0 <- obj.field
-% #endif
-% if is_object:
- ldr ip, [rSELF, #THREAD_IS_GC_MARKING_OFFSET]
- cmp ip, #0
- beq .L_${opcode}_gc_not_marking @ GC is not active
- // TODO: This is UNLIKELY. Move this block at the end.
- bl artReadBarrierMark @ r0 <- artReadBarrierMark(r0)
-.L_${opcode}_gc_not_marking:
-% #endif
- ubfx r2, rINST, #8, #4 @ A
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-% if is_object:
- SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
-% elif is_wide:
- SET_VREG_WIDE r0, r1, r2 @ fp[A]<- r0, r1
-% else:
- SET_VREG r0, r2 @ fp[A]<- r0
-% #endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-.L_${opcode}_slow_path:
+%def op_iget(is_object="0", helper="MterpIGetU32"):
% field(helper=helper)
%def op_iget_boolean():
-% op_iget(load="ldrb", helper="MterpIGetU8")
+% op_iget(helper="MterpIGetU8")
%def op_iget_boolean_quick():
% op_iget_quick(load="ldrb")
%def op_iget_byte():
-% op_iget(load="ldrsb", helper="MterpIGetI8")
+% op_iget(helper="MterpIGetI8")
%def op_iget_byte_quick():
% op_iget_quick(load="ldrsb")
%def op_iget_char():
-% op_iget(load="ldrh", helper="MterpIGetU16")
+% op_iget(helper="MterpIGetU16")
%def op_iget_char_quick():
% op_iget_quick(load="ldrh")
%def op_iget_object():
-% op_iget(is_object=True, helper="MterpIGetObj")
+% op_iget(is_object="1", helper="MterpIGetObj")
%def op_iget_object_quick():
/* For: iget-object-quick */
@@ -128,13 +92,13 @@
GOTO_OPCODE ip @ jump to next instruction
%def op_iget_short():
-% op_iget(load="ldrsh", helper="MterpIGetI16")
+% op_iget(helper="MterpIGetI16")
%def op_iget_short_quick():
% op_iget_quick(load="ldrsh")
%def op_iget_wide():
-% op_iget(is_wide=True, helper="MterpIGetU64")
+% op_iget(helper="MterpIGetU64")
%def op_iget_wide_quick():
/* iget-wide-quick vA, vB, offset@CCCC */
@@ -177,7 +141,7 @@
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction
-%def op_iput(helper="MterpIPutU32"):
+%def op_iput(is_object="0", helper="MterpIPutU32"):
% field(helper=helper)
%def op_iput_boolean():
@@ -199,7 +163,7 @@
% op_iput_quick(store="strh")
%def op_iput_object():
-% op_iput(helper="MterpIPutObj")
+% op_iput(is_object="1", helper="MterpIPutObj")
%def op_iput_object_quick():
EXPORT_PC
@@ -268,7 +232,7 @@
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction
-%def op_sget(helper="MterpSGetU32"):
+%def op_sget(is_object="0", helper="MterpSGetU32"):
% field(helper=helper)
%def op_sget_boolean():
@@ -281,7 +245,7 @@
% op_sget(helper="MterpSGetU16")
%def op_sget_object():
-% op_sget(helper="MterpSGetObj")
+% op_sget(is_object="1", helper="MterpSGetObj")
%def op_sget_short():
% op_sget(helper="MterpSGetI16")
@@ -289,7 +253,7 @@
%def op_sget_wide():
% op_sget(helper="MterpSGetU64")
-%def op_sput(helper="MterpSPutU32"):
+%def op_sput(is_object="0", helper="MterpSPutU32"):
% field(helper=helper)
%def op_sput_boolean():
@@ -302,7 +266,7 @@
% op_sput(helper="MterpSPutU16")
%def op_sput_object():
-% op_sput(helper="MterpSPutObj")
+% op_sput(is_object="1", helper="MterpSPutObj")
%def op_sput_short():
% op_sput(helper="MterpSPutI16")
diff --git a/runtime/interpreter/mterp/arm64/object.S b/runtime/interpreter/mterp/arm64/object.S
index afba895..388aba5 100644
--- a/runtime/interpreter/mterp/arm64/object.S
+++ b/runtime/interpreter/mterp/arm64/object.S
@@ -32,66 +32,29 @@
GET_INST_OPCODE ip // extract opcode from rINST
GOTO_OPCODE ip // jump to next instruction
-%def op_iget(is_object=False, is_wide=False, load="ldr", helper="MterpIGetU32"):
- // Fast-path which gets the field offset from thread-local cache.
- add x0, xSELF, #THREAD_INTERPRETER_CACHE_OFFSET // cache address
- ubfx x1, xPC, #2, #THREAD_INTERPRETER_CACHE_SIZE_LOG2 // entry index
- add x0, x0, x1, lsl #4 // entry address within the cache
- ldp x0, x1, [x0] // entry key (pc) and value (offset)
- lsr w2, wINST, #12 // B
- GET_VREG w2, w2 // object we're operating on
- cmp x0, xPC
- b.ne .L_${opcode}_slow_path // cache miss
- cbz w2, common_errNullObject // null object
-% if is_wide:
- ldr x0, [x2, x1] // x0<- obj.field
-% else:
- ${load} w0, [x2, x1] // w0<- obj.field
-% #endif
-% if is_object:
- ldr w1, [xSELF, #THREAD_IS_GC_MARKING_OFFSET]
- cbnz w1, .L_${opcode}_mark // GC is active.
-.L_${opcode}_marked:
-% #endif
- ubfx w2, wINST, #8, #4 // w2<- A
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-% if is_object:
- SET_VREG_OBJECT w0, w2 // fp[A]<- w0
-% elif is_wide:
- SET_VREG_WIDE x0, w2 // fp[A]<- x0
-% else:
- SET_VREG w0, w2 // fp[A]<- w0
-% #endif
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-% if is_object:
-.L_${opcode}_mark:
- bl artReadBarrierMark // x0 <- artReadBarrierMark(x0)
- b .L_${opcode}_marked
-% #endif
-.L_${opcode}_slow_path:
+%def op_iget(is_object="0", helper="MterpIGetU32"):
% field(helper=helper)
%def op_iget_boolean():
-% op_iget(load="ldrb", helper="MterpIGetU8")
+% op_iget(helper="MterpIGetU8")
%def op_iget_boolean_quick():
% op_iget_quick(load="ldrb")
%def op_iget_byte():
-% op_iget(load="ldrsb", helper="MterpIGetI8")
+% op_iget(helper="MterpIGetI8")
%def op_iget_byte_quick():
% op_iget_quick(load="ldrsb")
%def op_iget_char():
-% op_iget(load="ldrh", helper="MterpIGetU16")
+% op_iget(helper="MterpIGetU16")
%def op_iget_char_quick():
% op_iget_quick(load="ldrh")
%def op_iget_object():
-% op_iget(is_object=True, helper="MterpIGetObj")
+% op_iget(is_object="1", helper="MterpIGetObj")
%def op_iget_object_quick():
/* For: iget-object-quick */
@@ -126,13 +89,13 @@
GOTO_OPCODE ip // jump to next instruction
%def op_iget_short():
-% op_iget(load="ldrsh", helper="MterpIGetI16")
+% op_iget(helper="MterpIGetI16")
%def op_iget_short_quick():
% op_iget_quick(load="ldrsh")
%def op_iget_wide():
-% op_iget(is_wide=True, helper="MterpIGetU64")
+% op_iget(helper="MterpIGetU64")
%def op_iget_wide_quick():
/* iget-wide-quick vA, vB, offset//CCCC */
@@ -171,7 +134,7 @@
GET_INST_OPCODE ip // extract opcode from rINST
GOTO_OPCODE ip // jump to next instruction
-%def op_iput(helper="MterpIPutU32"):
+%def op_iput(is_object="0", helper="MterpIPutU32"):
% field(helper=helper)
%def op_iput_boolean():
@@ -193,7 +156,7 @@
% op_iput_quick(store="strh")
%def op_iput_object():
-% op_iput(helper="MterpIPutObj")
+% op_iput(is_object="1", helper="MterpIPutObj")
%def op_iput_object_quick():
EXPORT_PC
@@ -257,7 +220,7 @@
GET_INST_OPCODE ip // extract opcode from rINST
GOTO_OPCODE ip // jump to next instruction
-%def op_sget(helper="MterpSGetU32"):
+%def op_sget(is_object="0", helper="MterpSGetU32"):
% field(helper=helper)
%def op_sget_boolean():
@@ -270,7 +233,7 @@
% op_sget(helper="MterpSGetU16")
%def op_sget_object():
-% op_sget(helper="MterpSGetObj")
+% op_sget(is_object="1", helper="MterpSGetObj")
%def op_sget_short():
% op_sget(helper="MterpSGetI16")
@@ -278,7 +241,7 @@
%def op_sget_wide():
% op_sget(helper="MterpSGetU64")
-%def op_sput(helper="MterpSPutU32"):
+%def op_sput(is_object="0", helper="MterpSPutU32"):
% field(helper=helper)
%def op_sput_boolean():
@@ -291,7 +254,7 @@
% op_sput(helper="MterpSPutU16")
%def op_sput_object():
-% op_sput(helper="MterpSPutObj")
+% op_sput(is_object="1", helper="MterpSPutObj")
%def op_sput_short():
% op_sput(helper="MterpSPutI16")
diff --git a/runtime/interpreter/shadow_frame.h b/runtime/interpreter/shadow_frame.h
index 0e4cf27..91371d1 100644
--- a/runtime/interpreter/shadow_frame.h
+++ b/runtime/interpreter/shadow_frame.h
@@ -279,47 +279,47 @@
return lock_count_data_;
}
- static size_t LockCountDataOffset() {
+ static constexpr size_t LockCountDataOffset() {
return OFFSETOF_MEMBER(ShadowFrame, lock_count_data_);
}
- static size_t LinkOffset() {
+ static constexpr size_t LinkOffset() {
return OFFSETOF_MEMBER(ShadowFrame, link_);
}
- static size_t MethodOffset() {
+ static constexpr size_t MethodOffset() {
return OFFSETOF_MEMBER(ShadowFrame, method_);
}
- static size_t DexPCOffset() {
+ static constexpr size_t DexPCOffset() {
return OFFSETOF_MEMBER(ShadowFrame, dex_pc_);
}
- static size_t NumberOfVRegsOffset() {
+ static constexpr size_t NumberOfVRegsOffset() {
return OFFSETOF_MEMBER(ShadowFrame, number_of_vregs_);
}
- static size_t VRegsOffset() {
+ static constexpr size_t VRegsOffset() {
return OFFSETOF_MEMBER(ShadowFrame, vregs_);
}
- static size_t ResultRegisterOffset() {
+ static constexpr size_t ResultRegisterOffset() {
return OFFSETOF_MEMBER(ShadowFrame, result_register_);
}
- static size_t DexPCPtrOffset() {
+ static constexpr size_t DexPCPtrOffset() {
return OFFSETOF_MEMBER(ShadowFrame, dex_pc_ptr_);
}
- static size_t DexInstructionsOffset() {
+ static constexpr size_t DexInstructionsOffset() {
return OFFSETOF_MEMBER(ShadowFrame, dex_instructions_);
}
- static size_t CachedHotnessCountdownOffset() {
+ static constexpr size_t CachedHotnessCountdownOffset() {
return OFFSETOF_MEMBER(ShadowFrame, cached_hotness_countdown_);
}
- static size_t HotnessCountdownOffset() {
+ static constexpr size_t HotnessCountdownOffset() {
return OFFSETOF_MEMBER(ShadowFrame, hotness_countdown_);
}
diff --git a/runtime/managed_stack.h b/runtime/managed_stack.h
index d1c230f..6a0f075 100644
--- a/runtime/managed_stack.h
+++ b/runtime/managed_stack.h
@@ -95,7 +95,7 @@
tagged_top_quick_frame_ = TaggedTopQuickFrame::CreateTagged(top);
}
- static size_t TaggedTopQuickFrameOffset() {
+ static constexpr size_t TaggedTopQuickFrameOffset() {
return OFFSETOF_MEMBER(ManagedStack, tagged_top_quick_frame_);
}
diff --git a/runtime/mirror/accessible_object.h b/runtime/mirror/accessible_object.h
index d489f14..9660bf0 100644
--- a/runtime/mirror/accessible_object.h
+++ b/runtime/mirror/accessible_object.h
@@ -36,7 +36,8 @@
}
private:
- uint8_t flag_;
+ // We only use the field indirectly using the FlagOffset() method.
+ uint8_t flag_ ATTRIBUTE_UNUSED;
// Padding required for correct alignment of subclasses like Executable, Field, etc.
uint8_t padding_[1] ATTRIBUTE_UNUSED;
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 2e39530..704fb11 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -50,14 +50,6 @@
return header_size + data_size;
}
-inline MemberOffset Array::DataOffset(size_t component_size) {
- DCHECK(IsPowerOfTwo(component_size)) << component_size;
- size_t data_offset = RoundUp(OFFSETOF_MEMBER(Array, first_element_), component_size);
- DCHECK_EQ(RoundUp(data_offset, component_size), data_offset)
- << "Array data offset isn't aligned with component size";
- return MemberOffset(data_offset);
-}
-
template<VerifyObjectFlags kVerifyFlags>
inline bool Array::CheckIsValidIndex(int32_t index) {
if (UNLIKELY(static_cast<uint32_t>(index) >=
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index a31a9144..7edc851 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_MIRROR_ARRAY_H_
#define ART_RUNTIME_MIRROR_ARRAY_H_
+#include "base/bit_utils.h"
#include "base/enums.h"
#include "gc/allocator_type.h"
#include "obj_ptr.h"
@@ -66,11 +67,17 @@
SetField32<false, false, kVerifyNone>(OFFSET_OF_OBJECT_MEMBER(Array, length_), length);
}
- static MemberOffset LengthOffset() {
+ static constexpr MemberOffset LengthOffset() {
return OFFSET_OF_OBJECT_MEMBER(Array, length_);
}
- static MemberOffset DataOffset(size_t component_size);
+ static constexpr MemberOffset DataOffset(size_t component_size) {
+ DCHECK(IsPowerOfTwo(component_size)) << component_size;
+ size_t data_offset = RoundUp(OFFSETOF_MEMBER(Array, first_element_), component_size);
+ DCHECK_EQ(RoundUp(data_offset, component_size), data_offset)
+ << "Array data offset isn't aligned with component size";
+ return MemberOffset(data_offset);
+ }
void* GetRawData(size_t component_size, int32_t index)
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -102,9 +109,11 @@
REQUIRES_SHARED(Locks::mutator_lock_);
// The number of array elements.
- int32_t length_;
+ // We only use the field indirectly using the LengthOffset() method.
+ int32_t length_ ATTRIBUTE_UNUSED;
// Marker for the data (used by generated code)
- uint32_t first_element_[0];
+ // We only use the field indirectly using the DataOffset() method.
+ uint32_t first_element_[0] ATTRIBUTE_UNUSED;
DISALLOW_IMPLICIT_CONSTRUCTORS(Array);
};
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index f640d3b..eddc84b 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -89,7 +89,7 @@
static void SetStatus(Handle<Class> h_this, ClassStatus new_status, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
- static MemberOffset StatusOffset() {
+ static constexpr MemberOffset StatusOffset() {
return MemberOffset(OFFSET_OF_OBJECT_MEMBER(Class, status_));
}
@@ -173,7 +173,7 @@
return GetField32<kVerifyFlags>(AccessFlagsOffset());
}
- static MemberOffset AccessFlagsOffset() {
+ static constexpr MemberOffset AccessFlagsOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, access_flags_);
}
@@ -352,7 +352,7 @@
return (access_flags & kAccClassIsProxy) != 0;
}
- static MemberOffset PrimitiveTypeOffset() {
+ static constexpr MemberOffset PrimitiveTypeOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, primitive_type_);
}
@@ -440,7 +440,7 @@
bool IsThrowableClass() REQUIRES_SHARED(Locks::mutator_lock_);
- static MemberOffset ComponentTypeOffset() {
+ static constexpr MemberOffset ComponentTypeOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, component_type_);
}
@@ -549,10 +549,10 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
uint32_t GetObjectSize() REQUIRES_SHARED(Locks::mutator_lock_);
- static MemberOffset ObjectSizeOffset() {
+ static constexpr MemberOffset ObjectSizeOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, object_size_);
}
- static MemberOffset ObjectSizeAllocFastPathOffset() {
+ static constexpr MemberOffset ObjectSizeAllocFastPathOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, object_size_alloc_fast_path_);
}
@@ -636,7 +636,7 @@
return GetSuperClass() != nullptr;
}
- static MemberOffset SuperClassOffset() {
+ static constexpr MemberOffset SuperClassOffset() {
return MemberOffset(OFFSETOF_MEMBER(Class, super_class_));
}
@@ -646,11 +646,11 @@
void SetClassLoader(ObjPtr<ClassLoader> new_cl) REQUIRES_SHARED(Locks::mutator_lock_);
- static MemberOffset DexCacheOffset() {
+ static constexpr MemberOffset DexCacheOffset() {
return MemberOffset(OFFSETOF_MEMBER(Class, dex_cache_));
}
- static MemberOffset IfTableOffset() {
+ static constexpr MemberOffset IfTableOffset() {
return MemberOffset(OFFSETOF_MEMBER(Class, iftable_));
}
@@ -675,7 +675,7 @@
ALWAYS_INLINE LengthPrefixedArray<ArtMethod>* GetMethodsPtr()
REQUIRES_SHARED(Locks::mutator_lock_);
- static MemberOffset MethodsOffset() {
+ static constexpr MemberOffset MethodsOffset() {
return MemberOffset(OFFSETOF_MEMBER(Class, methods_));
}
@@ -784,15 +784,15 @@
void SetVTable(ObjPtr<PointerArray> new_vtable) REQUIRES_SHARED(Locks::mutator_lock_);
- static MemberOffset VTableOffset() {
+ static constexpr MemberOffset VTableOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, vtable_);
}
- static MemberOffset EmbeddedVTableLengthOffset() {
+ static constexpr MemberOffset EmbeddedVTableLengthOffset() {
return MemberOffset(sizeof(Class));
}
- static MemberOffset ImtPtrOffset(PointerSize pointer_size) {
+ static constexpr MemberOffset ImtPtrOffset(PointerSize pointer_size) {
return MemberOffset(
RoundUp(EmbeddedVTableLengthOffset().Uint32Value() + sizeof(uint32_t),
static_cast<size_t>(pointer_size)));
@@ -1427,6 +1427,7 @@
// Tid used to check for recursive <clinit> invocation.
pid_t clinit_thread_id_;
+ static_assert(sizeof(pid_t) == sizeof(int32_t), "java.lang.Class.clinitThreadId size check");
// ClassDef index in dex file, -1 if no class definition such as an array.
// TODO: really 16bits
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 22ccd20..da1cd3f 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -226,51 +226,51 @@
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_));
}
- static MemberOffset StringsOffset() {
+ static constexpr MemberOffset StringsOffset() {
return OFFSET_OF_OBJECT_MEMBER(DexCache, strings_);
}
- static MemberOffset ResolvedTypesOffset() {
+ static constexpr MemberOffset ResolvedTypesOffset() {
return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_types_);
}
- static MemberOffset ResolvedFieldsOffset() {
+ static constexpr MemberOffset ResolvedFieldsOffset() {
return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_fields_);
}
- static MemberOffset ResolvedMethodsOffset() {
+ static constexpr MemberOffset ResolvedMethodsOffset() {
return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_methods_);
}
- static MemberOffset ResolvedMethodTypesOffset() {
+ static constexpr MemberOffset ResolvedMethodTypesOffset() {
return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_method_types_);
}
- static MemberOffset ResolvedCallSitesOffset() {
+ static constexpr MemberOffset ResolvedCallSitesOffset() {
return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_call_sites_);
}
- static MemberOffset NumStringsOffset() {
+ static constexpr MemberOffset NumStringsOffset() {
return OFFSET_OF_OBJECT_MEMBER(DexCache, num_strings_);
}
- static MemberOffset NumResolvedTypesOffset() {
+ static constexpr MemberOffset NumResolvedTypesOffset() {
return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_types_);
}
- static MemberOffset NumResolvedFieldsOffset() {
+ static constexpr MemberOffset NumResolvedFieldsOffset() {
return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_fields_);
}
- static MemberOffset NumResolvedMethodsOffset() {
+ static constexpr MemberOffset NumResolvedMethodsOffset() {
return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_methods_);
}
- static MemberOffset NumResolvedMethodTypesOffset() {
+ static constexpr MemberOffset NumResolvedMethodTypesOffset() {
return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_method_types_);
}
- static MemberOffset NumResolvedCallSitesOffset() {
+ static constexpr MemberOffset NumResolvedCallSitesOffset() {
return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_call_sites_);
}
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 8fe9923..11e8cca 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -86,7 +86,7 @@
return sizeof(Object);
}
- static MemberOffset ClassOffset() {
+ static constexpr MemberOffset ClassOffset() {
return OFFSET_OF_OBJECT_MEMBER(Object, klass_);
}
@@ -138,7 +138,7 @@
REQUIRES(!Locks::thread_list_lock_,
!Locks::thread_suspend_count_lock_);
- static MemberOffset MonitorOffset() {
+ static constexpr MemberOffset MonitorOffset() {
return OFFSET_OF_OBJECT_MEMBER(Object, monitor_);
}
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index d08717c..b32db08 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -50,11 +50,11 @@
return sizeof(String);
}
- static MemberOffset CountOffset() {
+ static constexpr MemberOffset CountOffset() {
return OFFSET_OF_OBJECT_MEMBER(String, count_);
}
- static MemberOffset ValueOffset() {
+ static constexpr MemberOffset ValueOffset() {
return OFFSET_OF_OBJECT_MEMBER(String, value_);
}
diff --git a/runtime/offsets.h b/runtime/offsets.h
index 4df9b27..372b821 100644
--- a/runtime/offsets.h
+++ b/runtime/offsets.h
@@ -27,14 +27,14 @@
// Allow the meaning of offsets to be strongly typed.
class Offset {
public:
- explicit Offset(size_t val) : val_(val) {}
- int32_t Int32Value() const {
+ constexpr explicit Offset(size_t val) : val_(val) {}
+ constexpr int32_t Int32Value() const {
return static_cast<int32_t>(val_);
}
- uint32_t Uint32Value() const {
+ constexpr uint32_t Uint32Value() const {
return static_cast<uint32_t>(val_);
}
- size_t SizeValue() const {
+ constexpr size_t SizeValue() const {
return val_;
}
@@ -46,7 +46,7 @@
// Offsets relative to the current frame.
class FrameOffset : public Offset {
public:
- explicit FrameOffset(size_t val) : Offset(val) {}
+ constexpr explicit FrameOffset(size_t val) : Offset(val) {}
bool operator>(FrameOffset other) const { return val_ > other.val_; }
bool operator<(FrameOffset other) const { return val_ < other.val_; }
};
@@ -55,7 +55,7 @@
template<PointerSize pointer_size>
class ThreadOffset : public Offset {
public:
- explicit ThreadOffset(size_t val) : Offset(val) {}
+ constexpr explicit ThreadOffset(size_t val) : Offset(val) {}
};
using ThreadOffset32 = ThreadOffset<PointerSize::k32>;
@@ -64,7 +64,7 @@
// Offsets relative to an object.
class MemberOffset : public Offset {
public:
- explicit MemberOffset(size_t val) : Offset(val) {}
+ constexpr explicit MemberOffset(size_t val) : Offset(val) {}
};
} // namespace art
diff --git a/runtime/runtime.h b/runtime/runtime.h
index ea2eb39..478ff50 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -399,7 +399,7 @@
QuickMethodFrameInfo GetRuntimeMethodFrameInfo(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_);
- static size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
+ static constexpr size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
return OFFSETOF_MEMBER(Runtime, callee_save_methods_[static_cast<size_t>(type)]);
}
diff --git a/runtime/thread.h b/runtime/thread.h
index a915cd8..3d13774 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -651,28 +651,28 @@
//
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> ThinLockIdOffset() {
+ static constexpr ThreadOffset<pointer_size> ThinLockIdOffset() {
return ThreadOffset<pointer_size>(
OFFSETOF_MEMBER(Thread, tls32_) +
OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> InterruptedOffset() {
+ static constexpr ThreadOffset<pointer_size> InterruptedOffset() {
return ThreadOffset<pointer_size>(
OFFSETOF_MEMBER(Thread, tls32_) +
OFFSETOF_MEMBER(tls_32bit_sized_values, interrupted));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> ThreadFlagsOffset() {
+ static constexpr ThreadOffset<pointer_size> ThreadFlagsOffset() {
return ThreadOffset<pointer_size>(
OFFSETOF_MEMBER(Thread, tls32_) +
OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> IsGcMarkingOffset() {
+ static constexpr ThreadOffset<pointer_size> IsGcMarkingOffset() {
return ThreadOffset<pointer_size>(
OFFSETOF_MEMBER(Thread, tls32_) +
OFFSETOF_MEMBER(tls_32bit_sized_values, is_gc_marking));
@@ -687,21 +687,12 @@
private:
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
+ static constexpr ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
- size_t scale;
- size_t shrink;
- if (pointer_size == kRuntimePointerSize) {
- scale = 1;
- shrink = 1;
- } else if (pointer_size > kRuntimePointerSize) {
- scale = static_cast<size_t>(pointer_size) / static_cast<size_t>(kRuntimePointerSize);
- shrink = 1;
- } else {
- DCHECK_GT(kRuntimePointerSize, pointer_size);
- scale = 1;
- shrink = static_cast<size_t>(kRuntimePointerSize) / static_cast<size_t>(pointer_size);
- }
+ size_t scale = (pointer_size > kRuntimePointerSize) ?
+ static_cast<size_t>(pointer_size) / static_cast<size_t>(kRuntimePointerSize) : 1;
+ size_t shrink = (kRuntimePointerSize > pointer_size) ?
+ static_cast<size_t>(kRuntimePointerSize) / static_cast<size_t>(pointer_size) : 1;
return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
}
@@ -741,82 +732,82 @@
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> SelfOffset() {
+ static constexpr ThreadOffset<pointer_size> SelfOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> MterpCurrentIBaseOffset() {
+ static constexpr ThreadOffset<pointer_size> MterpCurrentIBaseOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_current_ibase));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> MterpDefaultIBaseOffset() {
+ static constexpr ThreadOffset<pointer_size> MterpDefaultIBaseOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_default_ibase));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> MterpAltIBaseOffset() {
+ static constexpr ThreadOffset<pointer_size> MterpAltIBaseOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_alt_ibase));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> ExceptionOffset() {
+ static constexpr ThreadOffset<pointer_size> ExceptionOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> PeerOffset() {
+ static constexpr ThreadOffset<pointer_size> PeerOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> CardTableOffset() {
+ static constexpr ThreadOffset<pointer_size> CardTableOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
+ static constexpr ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> ThreadLocalPosOffset() {
+ static constexpr ThreadOffset<pointer_size> ThreadLocalPosOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
thread_local_pos));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> ThreadLocalEndOffset() {
+ static constexpr ThreadOffset<pointer_size> ThreadLocalEndOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
thread_local_end));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> ThreadLocalObjectsOffset() {
+ static constexpr ThreadOffset<pointer_size> ThreadLocalObjectsOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
thread_local_objects));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> RosAllocRunsOffset() {
+ static constexpr ThreadOffset<pointer_size> RosAllocRunsOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
rosalloc_runs));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> ThreadLocalAllocStackTopOffset() {
+ static constexpr ThreadOffset<pointer_size> ThreadLocalAllocStackTopOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
thread_local_alloc_stack_top));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> ThreadLocalAllocStackEndOffset() {
+ static constexpr ThreadOffset<pointer_size> ThreadLocalAllocStackEndOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
thread_local_alloc_stack_end));
}
@@ -859,19 +850,19 @@
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> StackEndOffset() {
+ static constexpr ThreadOffset<pointer_size> StackEndOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> JniEnvOffset() {
+ static constexpr ThreadOffset<pointer_size> JniEnvOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> TopOfManagedStackOffset() {
+ static constexpr ThreadOffset<pointer_size> TopOfManagedStackOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
ManagedStack::TaggedTopQuickFrameOffset());
@@ -893,7 +884,7 @@
ALWAYS_INLINE ShadowFrame* PopShadowFrame();
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> TopShadowFrameOffset() {
+ static constexpr ThreadOffset<pointer_size> TopShadowFrameOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
ManagedStack::TopShadowFrameOffset());
@@ -922,7 +913,7 @@
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> TopHandleScopeOffset() {
+ static constexpr ThreadOffset<pointer_size> TopHandleScopeOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
top_handle_scope));
}
@@ -1307,11 +1298,11 @@
static void ClearAllInterpreterCaches();
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> InterpreterCacheOffset() {
+ static constexpr ThreadOffset<pointer_size> InterpreterCacheOffset() {
return ThreadOffset<pointer_size>(OFFSETOF_MEMBER(Thread, interpreter_cache_));
}
- static int InterpreterCacheSizeLog2() {
+ static constexpr int InterpreterCacheSizeLog2() {
return WhichPowerOf2(InterpreterCache::kSize);
}
diff --git a/test/1935-get-set-current-frame-jit/src/Main.java b/test/1935-get-set-current-frame-jit/src/Main.java
index 97f0973..cc8a4c4 100644
--- a/test/1935-get-set-current-frame-jit/src/Main.java
+++ b/test/1935-get-set-current-frame-jit/src/Main.java
@@ -58,7 +58,8 @@
}
public void run() {
int TARGET = 42;
- if (hasJit() && expectOsr && !Main.isInterpreted()) {
+ boolean normalJit = hasJit() && getJitThreshold() != 0; // Excluding JIT-at-first-use.
+ if (normalJit && expectOsr && !Main.isInterpreted()) {
System.out.println("Unexpectedly in jit code prior to restarting the JIT!");
}
startJit();
@@ -72,10 +73,10 @@
do {
// Don't actually do anything here.
inBusyLoop = true;
- } while (hasJit() && !Main.isInOsrCode("run") && osrDeadline.compareTo(Instant.now()) > 0);
+ } while (normalJit && !Main.isInOsrCode("run") && osrDeadline.compareTo(Instant.now()) > 0);
// We shouldn't be doing OSR since we are using JVMTI and the set prevents OSR.
// Set local will also push us to interpreter but the get local may remain in compiled code.
- if (hasJit()) {
+ if (normalJit) {
boolean inOsr = Main.isInOsrCode("run");
if (expectOsr && !inOsr) {
throw new Error(
@@ -184,4 +185,5 @@
public static native boolean stopJit();
public static native boolean startJit();
public static native boolean hasJit();
+ public static native int getJitThreshold();
}
diff --git a/test/566-polymorphic-inlining/polymorphic_inline.cc b/test/566-polymorphic-inlining/polymorphic_inline.cc
index 7c1507f..17ccd9a 100644
--- a/test/566-polymorphic-inlining/polymorphic_inline.cc
+++ b/test/566-polymorphic-inlining/polymorphic_inline.cc
@@ -41,11 +41,12 @@
header = OatQuickMethodHeader::FromEntryPoint(pc);
break;
} else {
+ ScopedThreadSuspension sts(soa.Self(), kSuspended);
// Sleep to yield to the compiler thread.
usleep(1000);
- // Will either ensure it's compiled or do the compilation itself.
- jit->CompileMethod(method, soa.Self(), /* osr */ false);
}
+ // Will either ensure it's compiled or do the compilation itself.
+ jit->CompileMethod(method, soa.Self(), /* osr */ false);
}
CodeInfo info(header);
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index c9b789e..4967834 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -313,4 +313,9 @@
}
}
+extern "C" JNIEXPORT jint JNICALL Java_Main_getJitThreshold(JNIEnv*, jclass) {
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ return (jit != nullptr) ? jit->HotMethodThreshold() : 0;
+}
+
} // namespace art
diff --git a/tools/libcore_gcstress_debug_failures.txt b/tools/libcore_gcstress_debug_failures.txt
index 23533af..b8ad955 100644
--- a/tools/libcore_gcstress_debug_failures.txt
+++ b/tools/libcore_gcstress_debug_failures.txt
@@ -45,7 +45,8 @@
"libcore.libcore.icu.RelativeDateTimeFormatterTest#test_getRelativeTimeSpanStringAbbrev",
"libcore.libcore.icu.RelativeDateTimeFormatterTest#test_getRelativeTimeSpanStringCTS",
"libcore.libcore.icu.RelativeDateTimeFormatterTest#test_getRelativeTimeSpanStringFrench",
- "libcore.libcore.icu.RelativeDateTimeFormatterTest#test_getRelativeTimeSpanStringGerman"
+ "libcore.libcore.icu.RelativeDateTimeFormatterTest#test_getRelativeTimeSpanStringGerman",
+ "org.apache.harmony.tests.java.lang.ProcessManagerTest#testSleep"
]
}
]
diff --git a/tools/veridex/Android.bp b/tools/veridex/Android.bp
index 96d4a09..92ace03 100644
--- a/tools/veridex/Android.bp
+++ b/tools/veridex/Android.bp
@@ -14,6 +14,7 @@
cc_binary {
name: "veridex",
+ defaults: ["art_defaults"],
host_supported: true,
srcs: [
"flow_analysis.cc",
diff --git a/tools/veridex/hidden_api_finder.cc b/tools/veridex/hidden_api_finder.cc
index cb45c58..a8c53b3 100644
--- a/tools/veridex/hidden_api_finder.cc
+++ b/tools/veridex/hidden_api_finder.cc
@@ -232,7 +232,7 @@
counts[ref_string]++;
}
- for (const std::pair<const std::string, const size_t>& pair : counts) {
+ for (const std::pair<const std::string, size_t>& pair : counts) {
os << kPrefix << pair.first;
if (pair.second > 1) {
os << " (" << pair.second << " occurrences)";
diff --git a/tools/veridex/precise_hidden_api_finder.cc b/tools/veridex/precise_hidden_api_finder.cc
index 08ac6d7..9e02cbf 100644
--- a/tools/veridex/precise_hidden_api_finder.cc
+++ b/tools/veridex/precise_hidden_api_finder.cc
@@ -85,7 +85,7 @@
void PreciseHiddenApiFinder::Dump(std::ostream& os, HiddenApiStats* stats) {
static const char* kPrefix = " ";
std::map<std::string, std::vector<MethodReference>> named_uses;
- for (auto it : concrete_uses_) {
+ for (auto& it : concrete_uses_) {
MethodReference ref = it.first;
for (const ReflectAccessInfo& info : it.second) {
std::string cls(info.cls.ToString());
@@ -98,7 +98,7 @@
}
}
- for (auto it : named_uses) {
+ for (auto& it : named_uses) {
++stats->reflection_count;
const std::string& full_name = it.first;
HiddenApiAccessFlags::ApiList api_list = hidden_api_.GetApiList(full_name);
diff --git a/tools/veridex/veridex.h b/tools/veridex/veridex.h
index 31ddbf4..e0d8261 100644
--- a/tools/veridex/veridex.h
+++ b/tools/veridex/veridex.h
@@ -44,7 +44,6 @@
*/
class VeriClass {
public:
- VeriClass(const VeriClass& other) = default;
VeriClass() = default;
VeriClass(Primitive::Type k, uint8_t dims, const DexFile::ClassDef* cl)
: kind_(k), dimensions_(dims), class_def_(cl) {}