Merge "ART: Add comments to dex2oat return codes"
diff --git a/compiler/compiler.h b/compiler/compiler.h
index 908d366..cd4c591 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -25,11 +25,11 @@
namespace jit {
class JitCodeCache;
-}
+} // namespace jit
namespace mirror {
class ClassLoader;
class DexCache;
-}
+} // namespace mirror
class ArtMethod;
class CompilerDriver;
diff --git a/compiler/debug/elf_debug_loc_writer.h b/compiler/debug/elf_debug_loc_writer.h
index cbfdbdd..bf47e8f 100644
--- a/compiler/debug/elf_debug_loc_writer.h
+++ b/compiler/debug/elf_debug_loc_writer.h
@@ -85,7 +85,7 @@
// The result will cover all ranges where the variable is in scope.
// PCs corresponding to stackmap with dex register map are accurate,
// all other PCs are best-effort only.
-std::vector<VariableLocation> GetVariableLocations(
+static std::vector<VariableLocation> GetVariableLocations(
const MethodDebugInfo* method_info,
const std::vector<DexRegisterMap>& dex_register_maps,
uint16_t vreg,
diff --git a/compiler/debug/elf_debug_writer.h b/compiler/debug/elf_debug_writer.h
index 07f7229..5d68810 100644
--- a/compiler/debug/elf_debug_writer.h
+++ b/compiler/debug/elf_debug_writer.h
@@ -29,7 +29,7 @@
class OatHeader;
namespace mirror {
class Class;
-}
+} // namespace mirror
namespace debug {
struct MethodDebugInfo;
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 5a82021..89c2537 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -31,7 +31,7 @@
namespace verifier {
class VerifierDepsTest;
-}
+} // namespace verifier
class DexFile;
diff --git a/compiler/optimizing/intrinsics_arm64.h b/compiler/optimizing/intrinsics_arm64.h
index 3c53517..ff59ce9 100644
--- a/compiler/optimizing/intrinsics_arm64.h
+++ b/compiler/optimizing/intrinsics_arm64.h
@@ -24,7 +24,8 @@
class MacroAssembler;
-}} // namespace vixl::aarch64
+} // namespace aarch64
+} // namespace vixl
namespace art {
diff --git a/compiler/utils/label.h b/compiler/utils/label.h
index 0f82ad5..4c6ae8e 100644
--- a/compiler/utils/label.h
+++ b/compiler/utils/label.h
@@ -29,24 +29,24 @@
namespace arm {
class ArmAssembler;
class Thumb2Assembler;
-}
+} // namespace arm
namespace arm64 {
class Arm64Assembler;
-}
+} // namespace arm64
namespace mips {
class MipsAssembler;
-}
+} // namespace mips
namespace mips64 {
class Mips64Assembler;
-}
+} // namespace mips64
namespace x86 {
class X86Assembler;
class NearLabel;
-}
+} // namespace x86
namespace x86_64 {
class X86_64Assembler;
class NearLabel;
-}
+} // namespace x86_64
class ExternalLabel {
public:
diff --git a/compiler/utils/managed_register.h b/compiler/utils/managed_register.h
index 184cdf5..2b7b2aa 100644
--- a/compiler/utils/managed_register.h
+++ b/compiler/utils/managed_register.h
@@ -26,24 +26,24 @@
namespace arm {
class ArmManagedRegister;
-}
+} // namespace arm
namespace arm64 {
class Arm64ManagedRegister;
-}
+} // namespace arm64
namespace mips {
class MipsManagedRegister;
-}
+} // namespace mips
namespace mips64 {
class Mips64ManagedRegister;
-}
+} // namespace mips64
namespace x86 {
class X86ManagedRegister;
-}
+} // namespace x86
namespace x86_64 {
class X86_64ManagedRegister;
-}
+} // namespace x86_64
class ManagedRegister : public ValueObject {
public:
diff --git a/runtime/arch/instruction_set_features.cc b/runtime/arch/instruction_set_features.cc
index 00d22c4..43c1711 100644
--- a/runtime/arch/instruction_set_features.cc
+++ b/runtime/arch/instruction_set_features.cc
@@ -33,33 +33,26 @@
std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromVariant(
InstructionSet isa, const std::string& variant, std::string* error_msg) {
- std::unique_ptr<const InstructionSetFeatures> result;
switch (isa) {
case kArm:
case kThumb2:
- result.reset(ArmInstructionSetFeatures::FromVariant(variant, error_msg).release());
- break;
+ return ArmInstructionSetFeatures::FromVariant(variant, error_msg);
case kArm64:
- result.reset(Arm64InstructionSetFeatures::FromVariant(variant, error_msg).release());
- break;
+ return Arm64InstructionSetFeatures::FromVariant(variant, error_msg);
case kMips:
- result.reset(MipsInstructionSetFeatures::FromVariant(variant, error_msg).release());
- break;
+ return MipsInstructionSetFeatures::FromVariant(variant, error_msg);
case kMips64:
- result = Mips64InstructionSetFeatures::FromVariant(variant, error_msg);
- break;
+ return Mips64InstructionSetFeatures::FromVariant(variant, error_msg);
case kX86:
- result.reset(X86InstructionSetFeatures::FromVariant(variant, error_msg).release());
- break;
+ return X86InstructionSetFeatures::FromVariant(variant, error_msg);
case kX86_64:
- result.reset(X86_64InstructionSetFeatures::FromVariant(variant, error_msg).release());
+ return X86_64InstructionSetFeatures::FromVariant(variant, error_msg);
+
+ case kNone:
break;
- default:
- UNIMPLEMENTED(FATAL) << isa;
- UNREACHABLE();
}
- CHECK_EQ(result == nullptr, error_msg->size() != 0);
- return result;
+ UNIMPLEMENTED(FATAL) << isa;
+ UNREACHABLE();
}
std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromBitmap(InstructionSet isa,
@@ -68,23 +61,25 @@
switch (isa) {
case kArm:
case kThumb2:
- result.reset(ArmInstructionSetFeatures::FromBitmap(bitmap).release());
+ result = ArmInstructionSetFeatures::FromBitmap(bitmap);
break;
case kArm64:
- result.reset(Arm64InstructionSetFeatures::FromBitmap(bitmap).release());
+ result = Arm64InstructionSetFeatures::FromBitmap(bitmap);
break;
case kMips:
- result.reset(MipsInstructionSetFeatures::FromBitmap(bitmap).release());
+ result = MipsInstructionSetFeatures::FromBitmap(bitmap);
break;
case kMips64:
result = Mips64InstructionSetFeatures::FromBitmap(bitmap);
break;
case kX86:
- result.reset(X86InstructionSetFeatures::FromBitmap(bitmap).release());
+ result = X86InstructionSetFeatures::FromBitmap(bitmap);
break;
case kX86_64:
- result.reset(X86_64InstructionSetFeatures::FromBitmap(bitmap).release());
+ result = X86_64InstructionSetFeatures::FromBitmap(bitmap);
break;
+
+ case kNone:
default:
UNIMPLEMENTED(FATAL) << isa;
UNREACHABLE();
@@ -94,120 +89,96 @@
}
std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromCppDefines() {
- std::unique_ptr<const InstructionSetFeatures> result;
switch (kRuntimeISA) {
case kArm:
case kThumb2:
- result.reset(ArmInstructionSetFeatures::FromCppDefines().release());
- break;
+ return ArmInstructionSetFeatures::FromCppDefines();
case kArm64:
- result.reset(Arm64InstructionSetFeatures::FromCppDefines().release());
- break;
+ return Arm64InstructionSetFeatures::FromCppDefines();
case kMips:
- result.reset(MipsInstructionSetFeatures::FromCppDefines().release());
- break;
+ return MipsInstructionSetFeatures::FromCppDefines();
case kMips64:
- result = Mips64InstructionSetFeatures::FromCppDefines();
- break;
+ return Mips64InstructionSetFeatures::FromCppDefines();
case kX86:
- result.reset(X86InstructionSetFeatures::FromCppDefines().release());
- break;
+ return X86InstructionSetFeatures::FromCppDefines();
case kX86_64:
- result.reset(X86_64InstructionSetFeatures::FromCppDefines().release());
+ return X86_64InstructionSetFeatures::FromCppDefines();
+
+ case kNone:
break;
- default:
- UNIMPLEMENTED(FATAL) << kRuntimeISA;
- UNREACHABLE();
}
- return result;
+ UNIMPLEMENTED(FATAL) << kRuntimeISA;
+ UNREACHABLE();
}
std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromCpuInfo() {
- std::unique_ptr<const InstructionSetFeatures> result;
switch (kRuntimeISA) {
case kArm:
case kThumb2:
- result.reset(ArmInstructionSetFeatures::FromCpuInfo().release());
- break;
+ return ArmInstructionSetFeatures::FromCpuInfo();
case kArm64:
- result.reset(Arm64InstructionSetFeatures::FromCpuInfo().release());
- break;
+ return Arm64InstructionSetFeatures::FromCpuInfo();
case kMips:
- result.reset(MipsInstructionSetFeatures::FromCpuInfo().release());
- break;
+ return MipsInstructionSetFeatures::FromCpuInfo();
case kMips64:
- result = Mips64InstructionSetFeatures::FromCpuInfo();
- break;
+ return Mips64InstructionSetFeatures::FromCpuInfo();
case kX86:
- result.reset(X86InstructionSetFeatures::FromCpuInfo().release());
- break;
+ return X86InstructionSetFeatures::FromCpuInfo();
case kX86_64:
- result.reset(X86_64InstructionSetFeatures::FromCpuInfo().release());
+ return X86_64InstructionSetFeatures::FromCpuInfo();
+
+ case kNone:
break;
- default:
- UNIMPLEMENTED(FATAL) << kRuntimeISA;
- UNREACHABLE();
}
- return result;
+ UNIMPLEMENTED(FATAL) << kRuntimeISA;
+ UNREACHABLE();
}
std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromHwcap() {
- std::unique_ptr<const InstructionSetFeatures> result;
switch (kRuntimeISA) {
case kArm:
case kThumb2:
- result.reset(ArmInstructionSetFeatures::FromHwcap().release());
- break;
+ return ArmInstructionSetFeatures::FromHwcap();
case kArm64:
- result.reset(Arm64InstructionSetFeatures::FromHwcap().release());
- break;
+ return Arm64InstructionSetFeatures::FromHwcap();
case kMips:
- result.reset(MipsInstructionSetFeatures::FromHwcap().release());
- break;
+ return MipsInstructionSetFeatures::FromHwcap();
case kMips64:
- result = Mips64InstructionSetFeatures::FromHwcap();
- break;
+ return Mips64InstructionSetFeatures::FromHwcap();
case kX86:
- result.reset(X86InstructionSetFeatures::FromHwcap().release());
- break;
+ return X86InstructionSetFeatures::FromHwcap();
case kX86_64:
- result.reset(X86_64InstructionSetFeatures::FromHwcap().release());
+ return X86_64InstructionSetFeatures::FromHwcap();
+
+ case kNone:
break;
- default:
- UNIMPLEMENTED(FATAL) << kRuntimeISA;
- UNREACHABLE();
}
- return result;
+ UNIMPLEMENTED(FATAL) << kRuntimeISA;
+ UNREACHABLE();
}
std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromAssembly() {
- std::unique_ptr<const InstructionSetFeatures> result;
switch (kRuntimeISA) {
case kArm:
case kThumb2:
- result.reset(ArmInstructionSetFeatures::FromAssembly().release());
- break;
+ return ArmInstructionSetFeatures::FromAssembly();
case kArm64:
- result.reset(Arm64InstructionSetFeatures::FromAssembly().release());
- break;
+ return Arm64InstructionSetFeatures::FromAssembly();
case kMips:
- result.reset(MipsInstructionSetFeatures::FromAssembly().release());
- break;
+ return MipsInstructionSetFeatures::FromAssembly();
case kMips64:
- result = Mips64InstructionSetFeatures::FromAssembly();
- break;
+ return Mips64InstructionSetFeatures::FromAssembly();
case kX86:
- result.reset(X86InstructionSetFeatures::FromAssembly().release());
- break;
+ return X86InstructionSetFeatures::FromAssembly();
case kX86_64:
- result.reset(X86_64InstructionSetFeatures::FromAssembly().release());
+ return X86_64InstructionSetFeatures::FromAssembly();
+
+ case kNone:
break;
- default:
- UNIMPLEMENTED(FATAL) << kRuntimeISA;
- UNREACHABLE();
}
- return result;
+ UNIMPLEMENTED(FATAL) << kRuntimeISA;
+ UNREACHABLE();
}
std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::AddFeaturesFromString(
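The `result.reset(ArmInstructionSetFeatures::FromVariant(variant, error_msg).release())` pattern removed above existed only to turn the callee's `std::unique_ptr<Derived>` into a `std::unique_ptr<const Base>`. That conversion is already implicit for `std::unique_ptr`, so the value can be returned or move-assigned directly, which is what the new code does. A minimal standalone sketch of the idiom, using hypothetical Base/Derived stand-ins rather than the real InstructionSetFeatures hierarchy:

#include <memory>

// Hypothetical stand-ins for InstructionSetFeatures and an ISA-specific subclass.
struct Base { virtual ~Base() = default; };
struct Derived : Base {};

std::unique_ptr<Derived> MakeDerived() { return std::make_unique<Derived>(); }

std::unique_ptr<const Base> OldStyle() {
  std::unique_ptr<const Base> result;
  // Works, but needlessly round-trips through a raw pointer.
  result.reset(MakeDerived().release());
  return result;
}

std::unique_ptr<const Base> NewStyle() {
  // unique_ptr<Derived> converts implicitly to unique_ptr<const Base>,
  // so the temporary can be returned (or move-assigned) directly.
  return MakeDerived();
}

The same reasoning is behind the `dex_file->mem_map_ = std::move(map);` changes in runtime/dex_file.cc below.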
diff --git a/runtime/arch/memcmp16.cc b/runtime/arch/memcmp16.cc
index 813df2f..e714cfc 100644
--- a/runtime/arch/memcmp16.cc
+++ b/runtime/arch/memcmp16.cc
@@ -37,7 +37,7 @@
return MemCmp16(s0, s1, count);
}
-}
+} // namespace testing
} // namespace art
diff --git a/runtime/arch/memcmp16.h b/runtime/arch/memcmp16.h
index c449a14..b051a1c 100644
--- a/runtime/arch/memcmp16.h
+++ b/runtime/arch/memcmp16.h
@@ -59,7 +59,7 @@
// implementation.
int32_t MemCmp16Testing(const uint16_t* s0, const uint16_t* s1, size_t count);
-}
+} // namespace testing
} // namespace art
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 3d68af1..aace8eb 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -204,7 +204,7 @@
verify_checksum,
error_msg);
if (dex_file != nullptr) {
- dex_file->mem_map_.reset(map.release());
+ dex_file->mem_map_ = std::move(map);
}
return dex_file;
}
@@ -323,7 +323,7 @@
verify_checksum,
error_msg);
if (dex_file != nullptr) {
- dex_file->mem_map_.reset(map.release());
+ dex_file->mem_map_ = std::move(map);
}
return dex_file;
@@ -397,7 +397,7 @@
}
return nullptr;
}
- dex_file->mem_map_.reset(map.release());
+ dex_file->mem_map_ = std::move(map);
if (!dex_file->DisableWrite()) {
*error_msg = StringPrintf("Failed to make dex file '%s' read only", location.c_str());
*error_code = ZipOpenErrorCode::kMakeReadOnlyError;
diff --git a/runtime/entrypoints/quick/quick_default_init_entrypoints.h b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
index 6481b97..267f384 100644
--- a/runtime/entrypoints/quick/quick_default_init_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
@@ -26,7 +26,7 @@
namespace art {
-void DefaultInitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
+static void DefaultInitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// JNI
jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
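Like the `GetVariableLocations` change in compiler/debug/elf_debug_loc_writer.h above, this marks a function that is defined in a header as `static`, giving every including translation unit its own internal-linkage copy and avoiding duplicate-symbol link errors (at the cost of duplicating the code in each object file). A tiny sketch of the issue, with a hypothetical header and function name:

// util.h -- hypothetical header included from several .cc files.
#pragma once

// With external linkage (no 'static'/'inline'), this definition would be
// emitted by every translation unit that includes the header, and the
// linker would report duplicate symbols. 'static' keeps each copy private.
static int Twice(int x) {
  return 2 * x;
}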
diff --git a/runtime/gc/allocation_listener.h b/runtime/gc/allocation_listener.h
index f60bc0c..d694a68 100644
--- a/runtime/gc/allocation_listener.h
+++ b/runtime/gc/allocation_listener.h
@@ -30,7 +30,7 @@
namespace mirror {
class Object;
-}
+} // namespace mirror
class Thread;
diff --git a/runtime/gc/allocation_record.h b/runtime/gc/allocation_record.h
index 90cff6a..227c7ad 100644
--- a/runtime/gc/allocation_record.h
+++ b/runtime/gc/allocation_record.h
@@ -33,7 +33,7 @@
namespace mirror {
class Class;
class Object;
-}
+} // namespace mirror
namespace gc {
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index c43a482..f248a11 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -36,7 +36,7 @@
namespace mirror {
class Object;
-}
+} // namespace mirror
// Basic handle scope, tracked by a list. May be variable sized.
class PACKED(4) BaseHandleScope {
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index d6881aa..788fa1f 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -29,11 +29,11 @@
namespace jit {
class JitCodeCache;
-}
+} // namespace jit
namespace mirror {
class Class;
-}
+} // namespace mirror
// Structure to store the classes seen at runtime for a specific instruction.
// Once the classes_ array is full, we consider the INVOKE to be megamorphic.
diff --git a/runtime/method_handles.h b/runtime/method_handles.h
index 5bea0ab..e8a2dce 100644
--- a/runtime/method_handles.h
+++ b/runtime/method_handles.h
@@ -29,7 +29,7 @@
namespace mirror {
class MethodHandle;
class MethodType;
-} // mirror
+} // namespace mirror
class ShadowFrame;
diff --git a/runtime/openjdkjvmti/jvmti_weak_table.h b/runtime/openjdkjvmti/jvmti_weak_table.h
index a6fd247..be6edef 100644
--- a/runtime/openjdkjvmti/jvmti_weak_table.h
+++ b/runtime/openjdkjvmti/jvmti_weak_table.h
@@ -59,19 +59,19 @@
// Remove the mapping for the given object, returning whether such a mapping existed (and the old
// value).
- bool Remove(art::mirror::Object* obj, /* out */ T* tag)
+ ALWAYS_INLINE bool Remove(art::mirror::Object* obj, /* out */ T* tag)
REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_);
- bool RemoveLocked(art::mirror::Object* obj, /* out */ T* tag)
+ ALWAYS_INLINE bool RemoveLocked(art::mirror::Object* obj, /* out */ T* tag)
REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(allow_disallow_lock_);
// Set the mapping for the given object. Returns true if this overwrites an already existing
// mapping.
- virtual bool Set(art::mirror::Object* obj, T tag)
+ ALWAYS_INLINE virtual bool Set(art::mirror::Object* obj, T tag)
REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_);
- virtual bool SetLocked(art::mirror::Object* obj, T tag)
+ ALWAYS_INLINE virtual bool SetLocked(art::mirror::Object* obj, T tag)
REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(allow_disallow_lock_);
@@ -97,11 +97,12 @@
}
// Sweep the container. DO NOT CALL MANUALLY.
- void Sweep(art::IsMarkedVisitor* visitor)
+ ALWAYS_INLINE void Sweep(art::IsMarkedVisitor* visitor)
REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_);
// Return all objects that have a value mapping in tags.
+ ALWAYS_INLINE
jvmtiError GetTaggedObjects(jvmtiEnv* jvmti_env,
jint tag_count,
const T* tags,
@@ -112,11 +113,11 @@
REQUIRES(!allow_disallow_lock_);
// Locking functions, to allow coarse-grained locking and amortization.
- void Lock() ACQUIRE(allow_disallow_lock_);
- void Unlock() RELEASE(allow_disallow_lock_);
- void AssertLocked() ASSERT_CAPABILITY(allow_disallow_lock_);
+ ALWAYS_INLINE void Lock() ACQUIRE(allow_disallow_lock_);
+ ALWAYS_INLINE void Unlock() RELEASE(allow_disallow_lock_);
+ ALWAYS_INLINE void AssertLocked() ASSERT_CAPABILITY(allow_disallow_lock_);
- art::mirror::Object* Find(T tag)
+ ALWAYS_INLINE art::mirror::Object* Find(T tag)
REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_);
@@ -129,10 +130,12 @@
virtual void HandleNullSweep(T tag ATTRIBUTE_UNUSED) {}
private:
+ ALWAYS_INLINE
bool SetLocked(art::Thread* self, art::mirror::Object* obj, T tag)
REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(allow_disallow_lock_);
+ ALWAYS_INLINE
bool RemoveLocked(art::Thread* self, art::mirror::Object* obj, /* out */ T* tag)
REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(allow_disallow_lock_);
@@ -160,12 +163,14 @@
// Slow-path for GetTag. We didn't find the object, but we might be storing from-pointers and
// are asked to retrieve with a to-pointer.
+ ALWAYS_INLINE
bool GetTagSlowPath(art::Thread* self, art::mirror::Object* obj, /* out */ T* result)
REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(allow_disallow_lock_);
// Update the table by doing read barriers on each element, ensuring that to-space pointers
// are stored.
+ ALWAYS_INLINE
void UpdateTableWithReadBarrier()
REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(allow_disallow_lock_);
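The declarations above gain `ALWAYS_INLINE`, a hint that these small, hot tag-table accessors should be inlined at every call site rather than left to the inliner's heuristics. A minimal sketch of the idiom, assuming a GCC/Clang attribute-based macro (ART defines its own ALWAYS_INLINE in its base headers) and a hypothetical TagTable class:

#include <cstdint>

// GCC/Clang-style force-inline hint; a stand-in for ART's macro.
#define ALWAYS_INLINE __attribute__((always_inline))

class TagTable {
 public:
  // Hot accessor on the tagging fast path: ask the compiler to inline it
  // at every call site instead of relying on its size heuristics.
  ALWAYS_INLINE bool Matches(uint64_t tag) const {
    return tag == tag_;
  }

 private:
  uint64_t tag_ = 0;
};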
diff --git a/runtime/openjdkjvmti/ti_thread.h b/runtime/openjdkjvmti/ti_thread.h
index c7f75d8..939aea7 100644
--- a/runtime/openjdkjvmti/ti_thread.h
+++ b/runtime/openjdkjvmti/ti_thread.h
@@ -37,7 +37,7 @@
namespace art {
class ArtField;
-}
+} // namespace art
namespace openjdkjvmti {
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index 3375746..0ce1d78 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -34,7 +34,7 @@
namespace gc {
namespace collector {
class GarbageCollector;
- } // namespac collector
+ } // namespace collector
class GcPauseListener;
} // namespace gc
class Closure;
diff --git a/runtime/transaction.h b/runtime/transaction.h
index 0333fe8..921de03 100644
--- a/runtime/transaction.h
+++ b/runtime/transaction.h
@@ -36,7 +36,7 @@
class DexCache;
class Object;
class String;
-}
+} // namespace mirror
class InternTable;
class Transaction FINAL {
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 81bf293..7490611 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -1258,7 +1258,7 @@
}
inline bool MethodVerifier::CheckRegisterIndex(uint32_t idx) {
- if (idx >= code_item_->registers_size_) {
+ if (UNLIKELY(idx >= code_item_->registers_size_)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "register index out of range (" << idx << " >= "
<< code_item_->registers_size_ << ")";
return false;
@@ -1267,7 +1267,7 @@
}
inline bool MethodVerifier::CheckWideRegisterIndex(uint32_t idx) {
- if (idx + 1 >= code_item_->registers_size_) {
+ if (UNLIKELY(idx + 1 >= code_item_->registers_size_)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "wide register index out of range (" << idx
<< "+1 >= " << code_item_->registers_size_ << ")";
return false;
@@ -1276,7 +1276,7 @@
}
inline bool MethodVerifier::CheckFieldIndex(uint32_t idx) {
- if (idx >= dex_file_->GetHeader().field_ids_size_) {
+ if (UNLIKELY(idx >= dex_file_->GetHeader().field_ids_size_)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad field index " << idx << " (max "
<< dex_file_->GetHeader().field_ids_size_ << ")";
return false;
@@ -1285,7 +1285,7 @@
}
inline bool MethodVerifier::CheckMethodIndex(uint32_t idx) {
- if (idx >= dex_file_->GetHeader().method_ids_size_) {
+ if (UNLIKELY(idx >= dex_file_->GetHeader().method_ids_size_)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad method index " << idx << " (max "
<< dex_file_->GetHeader().method_ids_size_ << ")";
return false;
@@ -1294,17 +1294,17 @@
}
inline bool MethodVerifier::CheckNewInstance(dex::TypeIndex idx) {
- if (idx.index_ >= dex_file_->GetHeader().type_ids_size_) {
+ if (UNLIKELY(idx.index_ >= dex_file_->GetHeader().type_ids_size_)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad type index " << idx.index_ << " (max "
<< dex_file_->GetHeader().type_ids_size_ << ")";
return false;
}
// We don't need the actual class, just a pointer to the class name.
const char* descriptor = dex_file_->StringByTypeIdx(idx);
- if (descriptor[0] != 'L') {
+ if (UNLIKELY(descriptor[0] != 'L')) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "can't call new-instance on type '" << descriptor << "'";
return false;
- } else if (strcmp(descriptor, "Ljava/lang/Class;") == 0) {
+ } else if (UNLIKELY(strcmp(descriptor, "Ljava/lang/Class;") == 0)) {
// An unlikely new instance on Class is not allowed. Fall back to interpreter to ensure an
// exception is thrown when this statement is executed (compiled code would not do that).
Fail(VERIFY_ERROR_INSTANTIATION);
@@ -1313,7 +1313,7 @@
}
inline bool MethodVerifier::CheckPrototypeIndex(uint32_t idx) {
- if (idx >= dex_file_->GetHeader().proto_ids_size_) {
+ if (UNLIKELY(idx >= dex_file_->GetHeader().proto_ids_size_)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad prototype index " << idx << " (max "
<< dex_file_->GetHeader().proto_ids_size_ << ")";
return false;
@@ -1322,7 +1322,7 @@
}
inline bool MethodVerifier::CheckStringIndex(uint32_t idx) {
- if (idx >= dex_file_->GetHeader().string_ids_size_) {
+ if (UNLIKELY(idx >= dex_file_->GetHeader().string_ids_size_)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad string index " << idx << " (max "
<< dex_file_->GetHeader().string_ids_size_ << ")";
return false;
@@ -1331,7 +1331,7 @@
}
inline bool MethodVerifier::CheckTypeIndex(dex::TypeIndex idx) {
- if (idx.index_ >= dex_file_->GetHeader().type_ids_size_) {
+ if (UNLIKELY(idx.index_ >= dex_file_->GetHeader().type_ids_size_)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad type index " << idx.index_ << " (max "
<< dex_file_->GetHeader().type_ids_size_ << ")";
return false;
@@ -1340,7 +1340,7 @@
}
bool MethodVerifier::CheckNewArray(dex::TypeIndex idx) {
- if (idx.index_ >= dex_file_->GetHeader().type_ids_size_) {
+ if (UNLIKELY(idx.index_ >= dex_file_->GetHeader().type_ids_size_)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad type index " << idx.index_ << " (max "
<< dex_file_->GetHeader().type_ids_size_ << ")";
return false;
@@ -1351,12 +1351,12 @@
while (*cp++ == '[') {
bracket_count++;
}
- if (bracket_count == 0) {
+ if (UNLIKELY(bracket_count == 0)) {
/* The given class must be an array type. */
Fail(VERIFY_ERROR_BAD_CLASS_HARD)
<< "can't new-array class '" << descriptor << "' (not an array)";
return false;
- } else if (bracket_count > 255) {
+ } else if (UNLIKELY(bracket_count > 255)) {
/* It is illegal to create an array of more than 255 dimensions. */
Fail(VERIFY_ERROR_BAD_CLASS_HARD)
<< "can't new-array class '" << descriptor << "' (exceeds limit)";
@@ -1374,8 +1374,8 @@
DCHECK_LT(cur_offset, insn_count);
/* make sure the start of the array data table is in range */
array_data_offset = insns[1] | (static_cast<int32_t>(insns[2]) << 16);
- if (static_cast<int32_t>(cur_offset) + array_data_offset < 0 ||
- cur_offset + array_data_offset + 2 >= insn_count) {
+ if (UNLIKELY(static_cast<int32_t>(cur_offset) + array_data_offset < 0 ||
+ cur_offset + array_data_offset + 2 >= insn_count)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid array data start: at " << cur_offset
<< ", data offset " << array_data_offset
<< ", count " << insn_count;
@@ -1384,14 +1384,14 @@
/* offset to array data table is a relative branch-style offset */
array_data = insns + array_data_offset;
// Make sure the table is at an even dex pc, that is, 32-bit aligned.
- if (!IsAligned<4>(array_data)) {
+ if (UNLIKELY(!IsAligned<4>(array_data))) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unaligned array data table: at " << cur_offset
<< ", data offset " << array_data_offset;
return false;
}
// Make sure the array-data is marked as an opcode. This ensures that it was reached when
// traversing the code item linearly. It is an approximation for a by-spec padding value.
- if (!GetInstructionFlags(cur_offset + array_data_offset).IsOpcode()) {
+ if (UNLIKELY(!GetInstructionFlags(cur_offset + array_data_offset).IsOpcode())) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array data table at " << cur_offset
<< ", data offset " << array_data_offset
<< " not correctly visited, probably bad padding.";
@@ -1402,7 +1402,7 @@
uint32_t value_count = *reinterpret_cast<const uint32_t*>(&array_data[2]);
uint32_t table_size = 4 + (value_width * value_count + 1) / 2;
/* make sure the end of the switch is in range */
- if (cur_offset + array_data_offset + table_size > insn_count) {
+ if (UNLIKELY(cur_offset + array_data_offset + table_size > insn_count)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid array data end: at " << cur_offset
<< ", data offset " << array_data_offset << ", end "
<< cur_offset + array_data_offset + table_size
@@ -1418,23 +1418,23 @@
if (!GetBranchOffset(cur_offset, &offset, &isConditional, &selfOkay)) {
return false;
}
- if (!selfOkay && offset == 0) {
+ if (UNLIKELY(!selfOkay && offset == 0)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "branch offset of zero not allowed at"
<< reinterpret_cast<void*>(cur_offset);
return false;
}
// Check for 32-bit overflow. This isn't strictly necessary if we can depend on the runtime
// to have identical "wrap-around" behavior, but it's unwise to depend on that.
- if (((int64_t) cur_offset + (int64_t) offset) != (int64_t) (cur_offset + offset)) {
+ if (UNLIKELY(((int64_t) cur_offset + (int64_t) offset) != (int64_t) (cur_offset + offset))) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "branch target overflow "
<< reinterpret_cast<void*>(cur_offset) << " +" << offset;
return false;
}
const uint32_t insn_count = code_item_->insns_size_in_code_units_;
int32_t abs_offset = cur_offset + offset;
- if (abs_offset < 0 ||
- (uint32_t) abs_offset >= insn_count ||
- !GetInstructionFlags(abs_offset).IsOpcode()) {
+ if (UNLIKELY(abs_offset < 0 ||
+ (uint32_t) abs_offset >= insn_count ||
+ !GetInstructionFlags(abs_offset).IsOpcode())) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid branch target " << offset << " (-> "
<< reinterpret_cast<void*>(abs_offset) << ") at "
<< reinterpret_cast<void*>(cur_offset);
@@ -1487,8 +1487,8 @@
const uint16_t* insns = code_item_->insns_ + cur_offset;
/* make sure the start of the switch is in range */
int32_t switch_offset = insns[1] | (static_cast<int32_t>(insns[2]) << 16);
- if (static_cast<int32_t>(cur_offset) + switch_offset < 0 ||
- cur_offset + switch_offset + 2 > insn_count) {
+ if (UNLIKELY(static_cast<int32_t>(cur_offset) + switch_offset < 0 ||
+ cur_offset + switch_offset + 2 > insn_count)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid switch start: at " << cur_offset
<< ", switch offset " << switch_offset
<< ", count " << insn_count;
@@ -1497,14 +1497,14 @@
/* offset to switch table is a relative branch-style offset */
const uint16_t* switch_insns = insns + switch_offset;
// Make sure the table is at an even dex pc, that is, 32-bit aligned.
- if (!IsAligned<4>(switch_insns)) {
+ if (UNLIKELY(!IsAligned<4>(switch_insns))) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unaligned switch table: at " << cur_offset
<< ", switch offset " << switch_offset;
return false;
}
// Make sure the switch data is marked as an opcode. This ensures that it was reached when
// traversing the code item linearly. It is an approximation for a by-spec padding value.
- if (!GetInstructionFlags(cur_offset + switch_offset).IsOpcode()) {
+ if (UNLIKELY(!GetInstructionFlags(cur_offset + switch_offset).IsOpcode())) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "switch table at " << cur_offset
<< ", switch offset " << switch_offset
<< " not correctly visited, probably bad padding.";
@@ -1526,14 +1526,14 @@
expected_signature = Instruction::kSparseSwitchSignature;
}
uint32_t table_size = targets_offset + switch_count * 2;
- if (switch_insns[0] != expected_signature) {
+ if (UNLIKELY(switch_insns[0] != expected_signature)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD)
<< StringPrintf("wrong signature for switch table (%x, wanted %x)",
switch_insns[0], expected_signature);
return false;
}
/* make sure the end of the switch is in range */
- if (cur_offset + switch_offset + table_size > (uint32_t) insn_count) {
+ if (UNLIKELY(cur_offset + switch_offset + table_size > (uint32_t) insn_count)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid switch end: at " << cur_offset
<< ", switch offset " << switch_offset
<< ", end " << (cur_offset + switch_offset + table_size)
@@ -1548,7 +1548,7 @@
int32_t first_key = switch_insns[keys_offset] | (switch_insns[keys_offset + 1] << 16);
int32_t max_first_key =
std::numeric_limits<int32_t>::max() - (static_cast<int32_t>(switch_count) - 1);
- if (first_key > max_first_key) {
+ if (UNLIKELY(first_key > max_first_key)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid packed switch: first_key=" << first_key
<< ", switch_count=" << switch_count;
return false;
@@ -1560,7 +1560,7 @@
int32_t key =
static_cast<int32_t>(switch_insns[keys_offset + targ * 2]) |
static_cast<int32_t>(switch_insns[keys_offset + targ * 2 + 1] << 16);
- if (key <= last_key) {
+ if (UNLIKELY(key <= last_key)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid sparse switch: last key=" << last_key
<< ", this=" << key;
return false;
@@ -1574,9 +1574,9 @@
int32_t offset = static_cast<int32_t>(switch_insns[targets_offset + targ * 2]) |
static_cast<int32_t>(switch_insns[targets_offset + targ * 2 + 1] << 16);
int32_t abs_offset = cur_offset + offset;
- if (abs_offset < 0 ||
- abs_offset >= static_cast<int32_t>(insn_count) ||
- !GetInstructionFlags(abs_offset).IsOpcode()) {
+ if (UNLIKELY(abs_offset < 0 ||
+ abs_offset >= static_cast<int32_t>(insn_count) ||
+ !GetInstructionFlags(abs_offset).IsOpcode())) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid switch target " << offset
<< " (-> " << reinterpret_cast<void*>(abs_offset) << ") at "
<< reinterpret_cast<void*>(cur_offset)
@@ -1591,7 +1591,7 @@
bool MethodVerifier::CheckVarArgRegs(uint32_t vA, uint32_t arg[]) {
uint16_t registers_size = code_item_->registers_size_;
for (uint32_t idx = 0; idx < vA; idx++) {
- if (arg[idx] >= registers_size) {
+ if (UNLIKELY(arg[idx] >= registers_size)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid reg index (" << arg[idx]
<< ") in non-range invoke (>= " << registers_size << ")";
return false;
@@ -1605,7 +1605,7 @@
uint16_t registers_size = code_item_->registers_size_;
// vA/vC are unsigned 8-bit/16-bit quantities for /range instructions, so there's no risk of
// integer overflow when adding them here.
- if (vA + vC > registers_size) {
+ if (UNLIKELY(vA + vC > registers_size)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid reg index " << vA << "+" << vC
<< " in range invoke (> " << registers_size << ")";
return false;
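Each verifier check above now wraps its failure condition in `UNLIKELY(...)`. This is a branch-prediction hint: the failing branch raises a hard verification error and is essentially never taken for valid dex code, so marking it unlikely lets the compiler keep the success path straight-line. A small self-contained sketch, assuming a GCC/Clang `__builtin_expect`-based macro rather than ART's actual definition:

#include <cstdint>
#include <cstdio>

// GCC/Clang-style hint macros; stand-ins for ART's versions.
#define LIKELY(x)   __builtin_expect(!!(x), true)
#define UNLIKELY(x) __builtin_expect(!!(x), false)

// In the spirit of MethodVerifier::CheckRegisterIndex: the out-of-range case
// is an error path, so predict it not-taken.
bool CheckRegisterIndex(uint32_t idx, uint16_t registers_size) {
  if (UNLIKELY(idx >= registers_size)) {
    std::fprintf(stderr, "register index out of range (%u >= %u)\n",
                 static_cast<unsigned>(idx), static_cast<unsigned>(registers_size));
    return false;
  }
  return true;
}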
diff --git a/runtime/verifier/verifier_deps.h b/runtime/verifier/verifier_deps.h
index d69e4dc..70ce0c4 100644
--- a/runtime/verifier/verifier_deps.h
+++ b/runtime/verifier/verifier_deps.h
@@ -39,7 +39,7 @@
namespace mirror {
class Class;
class ClassLoader;
-}
+} // namespace mirror
namespace verifier {