Merge "Testrunner: Temporarily fix long Dex File name issue"
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index 34ad1c5..a0c0a2a 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -27,7 +27,6 @@
small_method_threshold_(kDefaultSmallMethodThreshold),
tiny_method_threshold_(kDefaultTinyMethodThreshold),
num_dex_methods_threshold_(kDefaultNumDexMethodsThreshold),
- inline_depth_limit_(kUnsetInlineDepthLimit),
inline_max_code_units_(kUnsetInlineMaxCodeUnits),
no_inline_from_(nullptr),
boot_image_(false),
@@ -62,7 +61,6 @@
size_t small_method_threshold,
size_t tiny_method_threshold,
size_t num_dex_methods_threshold,
- size_t inline_depth_limit,
size_t inline_max_code_units,
const std::vector<const DexFile*>* no_inline_from,
double top_k_profile_threshold,
@@ -86,7 +84,6 @@
small_method_threshold_(small_method_threshold),
tiny_method_threshold_(tiny_method_threshold),
num_dex_methods_threshold_(num_dex_methods_threshold),
- inline_depth_limit_(inline_depth_limit),
inline_max_code_units_(inline_max_code_units),
no_inline_from_(no_inline_from),
boot_image_(false),
@@ -130,10 +127,6 @@
ParseUintOption(option, "--num-dex-methods", &num_dex_methods_threshold_, Usage);
}
-void CompilerOptions::ParseInlineDepthLimit(const StringPiece& option, UsageFn Usage) {
- ParseUintOption(option, "--inline-depth-limit", &inline_depth_limit_, Usage);
-}
-
void CompilerOptions::ParseInlineMaxCodeUnits(const StringPiece& option, UsageFn Usage) {
ParseUintOption(option, "--inline-max-code-units", &inline_max_code_units_, Usage);
}
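// A minimal, self-contained sketch of the "--flag=<uint>" parsing that these helpers delegate
// to. The real ParseUintOption lives elsewhere in the driver, so the name, signature and error
// handling below are illustrative assumptions rather than the actual implementation (requires
// <cstdlib> and <string>).
static bool ParseUintOptionSketch(const std::string& option,  // e.g. "--inline-max-code-units=32"
                                  const std::string& prefix,  // e.g. "--inline-max-code-units="
                                  size_t* out) {
  if (option.compare(0, prefix.size(), prefix) != 0) {
    return false;  // Not this option at all.
  }
  const char* value = option.c_str() + prefix.size();
  char* end = nullptr;
  const unsigned long long parsed = std::strtoull(value, &end, 10);
  if (end == value || *end != '\0') {
    return false;  // Empty value or trailing garbage.
  }
  *out = static_cast<size_t>(parsed);
  return true;
}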
@@ -183,8 +176,6 @@
ParseTinyMethodMax(option, Usage);
} else if (option.starts_with("--num-dex-methods=")) {
ParseNumDexMethods(option, Usage);
- } else if (option.starts_with("--inline-depth-limit=")) {
- ParseInlineDepthLimit(option, Usage);
} else if (option.starts_with("--inline-max-code-units=")) {
ParseInlineMaxCodeUnits(option, Usage);
} else if (option == "--generate-debug-info" || option == "-g") {
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 2e3e55f..2376fbf 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -46,15 +46,9 @@
static constexpr double kDefaultTopKProfileThreshold = 90.0;
static const bool kDefaultGenerateDebugInfo = false;
static const bool kDefaultGenerateMiniDebugInfo = false;
- static const size_t kDefaultInlineDepthLimit = 3;
static const size_t kDefaultInlineMaxCodeUnits = 32;
- static constexpr size_t kUnsetInlineDepthLimit = -1;
static constexpr size_t kUnsetInlineMaxCodeUnits = -1;
- // Default inlining settings when the space filter is used.
- static constexpr size_t kSpaceFilterInlineDepthLimit = 3;
- static constexpr size_t kSpaceFilterInlineMaxCodeUnits = 10;
-
CompilerOptions();
~CompilerOptions();
@@ -64,7 +58,6 @@
size_t small_method_threshold,
size_t tiny_method_threshold,
size_t num_dex_methods_threshold,
- size_t inline_depth_limit,
size_t inline_max_code_units,
const std::vector<const DexFile*>* no_inline_from,
double top_k_profile_threshold,
@@ -155,13 +148,6 @@
return num_dex_methods_threshold_;
}
- size_t GetInlineDepthLimit() const {
- return inline_depth_limit_;
- }
- void SetInlineDepthLimit(size_t limit) {
- inline_depth_limit_ = limit;
- }
-
size_t GetInlineMaxCodeUnits() const {
return inline_max_code_units_;
}
@@ -275,7 +261,6 @@
void ParseDumpInitFailures(const StringPiece& option, UsageFn Usage);
void ParseDumpCfgPasses(const StringPiece& option, UsageFn Usage);
void ParseInlineMaxCodeUnits(const StringPiece& option, UsageFn Usage);
- void ParseInlineDepthLimit(const StringPiece& option, UsageFn Usage);
void ParseNumDexMethods(const StringPiece& option, UsageFn Usage);
void ParseTinyMethodMax(const StringPiece& option, UsageFn Usage);
void ParseSmallMethodMax(const StringPiece& option, UsageFn Usage);
@@ -289,7 +274,6 @@
size_t small_method_threshold_;
size_t tiny_method_threshold_;
size_t num_dex_methods_threshold_;
- size_t inline_depth_limit_;
size_t inline_max_code_units_;
// Dex files from which we should not inline code.
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 7ee494a..897d819 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -363,7 +363,6 @@
}
CreateCompilerDriver(Compiler::kOptimizing, kRuntimeISA, kIsTargetBuild ? 2U : 16U);
// Set inline filter values.
- compiler_options_->SetInlineDepthLimit(CompilerOptions::kDefaultInlineDepthLimit);
compiler_options_->SetInlineMaxCodeUnits(CompilerOptions::kDefaultInlineMaxCodeUnits);
image_classes_.clear();
if (!extra_dex.empty()) {
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index aefdb54..d156644 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -714,7 +714,8 @@
class_linker->VisitClassesWithoutClassesLock(&visitor);
}
-static bool IsBootClassLoaderClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_) {
+static bool IsBootClassLoaderClass(ObjPtr<mirror::Class> klass)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return klass->GetClassLoader() == nullptr;
}
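// Note on the ObjPtr migration in this file: ObjPtr<T> is ART's checked wrapper for mirror
// pointers (debug builds can verify the reference is still valid when it is used), and it only
// decays to a raw pointer through an explicit .Ptr() call. That is why the raw-pointer-keyed
// containers below, such as prune_class_memo_ and `visited`, are fed klass.Ptr() explicitly.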
@@ -722,33 +723,33 @@
return IsBootClassLoaderClass(klass) && !IsInBootImage(klass);
}
-bool ImageWriter::PruneAppImageClass(mirror::Class* klass) {
+bool ImageWriter::PruneAppImageClass(ObjPtr<mirror::Class> klass) {
bool early_exit = false;
std::unordered_set<mirror::Class*> visited;
return PruneAppImageClassInternal(klass, &early_exit, &visited);
}
bool ImageWriter::PruneAppImageClassInternal(
- mirror::Class* klass,
+ ObjPtr<mirror::Class> klass,
bool* early_exit,
std::unordered_set<mirror::Class*>* visited) {
DCHECK(early_exit != nullptr);
DCHECK(visited != nullptr);
DCHECK(compile_app_image_);
- if (klass == nullptr || IsInBootImage(klass)) {
+ if (klass == nullptr || IsInBootImage(klass.Ptr())) {
return false;
}
- auto found = prune_class_memo_.find(klass);
+ auto found = prune_class_memo_.find(klass.Ptr());
if (found != prune_class_memo_.end()) {
// Already computed, return the found value.
return found->second;
}
// Circular dependencies, return false but do not store the result in the memoization table.
- if (visited->find(klass) != visited->end()) {
+ if (visited->find(klass.Ptr()) != visited->end()) {
*early_exit = true;
return false;
}
- visited->emplace(klass);
+ visited->emplace(klass.Ptr());
bool result = IsBootClassLoaderClass(klass);
std::string temp;
// Prune if not an image class; this handles any broken sets of image classes such as having a
@@ -812,20 +813,20 @@
dex_file_oat_index_map_.find(dex_cache->GetDexFile()) == dex_file_oat_index_map_.end();
}
// Erase the element we stored earlier since we are exiting the function.
- auto it = visited->find(klass);
+ auto it = visited->find(klass.Ptr());
DCHECK(it != visited->end());
visited->erase(it);
// Only store result if it is true or none of the calls early exited due to circular
// dependencies. If visited is empty then we are the root caller; in this case the cycle was in
// a child call and we can remember the result.
if (result == true || !my_early_exit || visited->empty()) {
- prune_class_memo_[klass] = result;
+ prune_class_memo_[klass.Ptr()] = result;
}
*early_exit |= my_early_exit;
return result;
}
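// The memoization discipline above generalizes to any DFS over a possibly cyclic graph: a
// node's result may only be cached once no unfinished ancestor could have influenced it. A
// sketch of the same pattern on plain ints (illustrative only; needs <unordered_map>,
// <unordered_set> and <vector>):
static bool PruneDfsSketch(int node,
                           const std::unordered_map<int, std::vector<int>>& deps,
                           std::unordered_map<int, bool>* memo,
                           std::unordered_set<int>* visited,
                           bool* early_exit) {
  auto found = memo->find(node);
  if (found != memo->end()) {
    return found->second;  // Already computed.
  }
  if (visited->count(node) != 0) {
    *early_exit = true;  // Cycle: report upwards, but do not memoize.
    return false;
  }
  visited->insert(node);
  bool result = (node < 0);  // Stand-in for the node-local prune checks.
  bool my_early_exit = false;
  auto it = deps.find(node);
  if (it != deps.end()) {
    for (int dep : it->second) {
      result = result || PruneDfsSketch(dep, deps, memo, visited, &my_early_exit);
    }
  }
  visited->erase(node);
  // Cache only when the result cannot have been tainted by a cycle: it is true outright, no
  // early exit happened, or we are back at the root caller.
  if (result || !my_early_exit || visited->empty()) {
    (*memo)[node] = result;
  }
  *early_exit |= my_early_exit;
  return result;
}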
-bool ImageWriter::KeepClass(Class* klass) {
+bool ImageWriter::KeepClass(ObjPtr<mirror::Class> klass) {
if (klass == nullptr) {
return false;
}
@@ -896,15 +897,27 @@
Runtime::Current()->GetClassLinker()->ClassTableForClassLoader(class_loader);
class_table->Visit(classes_visitor);
removed_class_count_ += classes_visitor.Prune();
+
+ // Record app image class loader. The fake boot class loader should not get registered
+ // and we should end up with only one class loader for an app and none for boot image.
+ if (class_loader != nullptr && class_table != nullptr) {
+ DCHECK(class_loader_ == nullptr);
+ class_loader_ = class_loader;
+ }
}
size_t GetRemovedClassCount() const {
return removed_class_count_;
}
+ ObjPtr<mirror::ClassLoader> GetClassLoader() const REQUIRES_SHARED(Locks::mutator_lock_) {
+ return class_loader_;
+ }
+
private:
ImageWriter* const image_writer_;
size_t removed_class_count_;
+ ObjPtr<mirror::ClassLoader> class_loader_;
};
void ImageWriter::VisitClassLoaders(ClassLoaderVisitor* visitor) {
@@ -913,69 +926,149 @@
Runtime::Current()->GetClassLinker()->VisitClassLoaders(visitor);
}
+void ImageWriter::PruneAndPreloadDexCache(ObjPtr<mirror::DexCache> dex_cache,
+ ObjPtr<mirror::ClassLoader> class_loader) {
+ // To ensure deterministic contents of the hash-based arrays, each slot shall contain
+ // the candidate with the lowest index. As we're processing entries in increasing index
+ // order, this means trying to look up the entry for the current index if the slot is
+ // empty or if it contains a higher index.
+
+ Runtime* runtime = Runtime::Current();
+ ClassLinker* class_linker = runtime->GetClassLinker();
+ ArtMethod* resolution_method = runtime->GetResolutionMethod();
+ const DexFile& dex_file = *dex_cache->GetDexFile();
+ // Prune methods.
+ ArtMethod** resolved_methods = dex_cache->GetResolvedMethods();
+ for (size_t i = 0, num = dex_cache->NumResolvedMethods(); i != num; ++i) {
+ ArtMethod* method =
+ mirror::DexCache::GetElementPtrSize(resolved_methods, i, target_ptr_size_);
+ DCHECK(method != nullptr) << "Expected resolution method instead of null method";
+ mirror::Class* declaring_class = method->GetDeclaringClass();
+ // Copied methods may be held live by a class which was not an image class but have a
+ // declaring class which is an image class. Set it to the resolution method to be safe and
+ // prevent dangling pointers.
+ if (method->IsCopied() || !KeepClass(declaring_class)) {
+ mirror::DexCache::SetElementPtrSize(resolved_methods,
+ i,
+ resolution_method,
+ target_ptr_size_);
+ } else if (kIsDebugBuild) {
+ // Check that the class is still in the classes table.
+ ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ CHECK(class_linker->ClassInClassTable(declaring_class)) << "Class "
+ << Class::PrettyClass(declaring_class) << " not in class linker table";
+ }
+ }
+ // Prune fields and make the contents of the field array deterministic.
+ mirror::FieldDexCacheType* resolved_fields = dex_cache->GetResolvedFields();
+ dex::TypeIndex last_class_idx; // Initialized to invalid index.
+ ObjPtr<mirror::Class> last_class = nullptr;
+ for (size_t i = 0, end = dex_file.NumFieldIds(); i < end; ++i) {
+ uint32_t slot_idx = dex_cache->FieldSlotIndex(i);
+ auto pair = mirror::DexCache::GetNativePairPtrSize(resolved_fields, slot_idx, target_ptr_size_);
+ uint32_t stored_index = pair.index;
+ ArtField* field = pair.object;
+ if (field != nullptr && i > stored_index) {
+ continue; // Already checked.
+ }
+ // Check if the referenced class is in the image. Note that we want to check the referenced
+ // class rather than the declaring class to preserve the semantics, i.e. using a FieldId
+ // results in resolving the referenced class and that can for example throw OOME.
+ const DexFile::FieldId& field_id = dex_file.GetFieldId(i);
+ if (field_id.class_idx_ != last_class_idx) {
+ last_class_idx = field_id.class_idx_;
+ last_class = class_linker->LookupResolvedType(
+ dex_file, last_class_idx, dex_cache, class_loader);
+ if (last_class != nullptr && !KeepClass(last_class)) {
+ last_class = nullptr;
+ }
+ }
+ if (field == nullptr || i < stored_index) {
+ if (last_class != nullptr) {
+ const char* name = dex_file.StringDataByIdx(field_id.name_idx_);
+ const char* type = dex_file.StringByTypeIdx(field_id.type_idx_);
+ field = mirror::Class::FindField(Thread::Current(), last_class, name, type);
+ if (field != nullptr) {
+ // If the referenced class is in the image, the defining class must also be there.
+ DCHECK(KeepClass(field->GetDeclaringClass()));
+ dex_cache->SetResolvedField(i, field, target_ptr_size_);
+ }
+ }
+ } else {
+ DCHECK_EQ(i, stored_index);
+ if (last_class == nullptr) {
+ dex_cache->ClearResolvedField(stored_index, target_ptr_size_);
+ }
+ }
+ }
+ // Prune types and make the contents of the type array deterministic.
+ // This is done after fields and methods as their lookup can touch the types array.
+ for (size_t i = 0, end = dex_cache->GetDexFile()->NumTypeIds(); i < end; ++i) {
+ dex::TypeIndex type_idx(i);
+ uint32_t slot_idx = dex_cache->TypeSlotIndex(type_idx);
+ mirror::TypeDexCachePair pair =
+ dex_cache->GetResolvedTypes()[slot_idx].load(std::memory_order_relaxed);
+ uint32_t stored_index = pair.index;
+ ObjPtr<mirror::Class> klass = pair.object.Read();
+ if (klass == nullptr || i < stored_index) {
+ klass = class_linker->LookupResolvedType(dex_file, type_idx, dex_cache, class_loader);
+ if (klass != nullptr) {
+ DCHECK_EQ(dex_cache->GetResolvedType(type_idx), klass);
+ stored_index = i; // For correct clearing below if not keeping the `klass`.
+ }
+ } else if (i == stored_index && !KeepClass(klass)) {
+ dex_cache->ClearResolvedType(dex::TypeIndex(stored_index));
+ }
+ }
+ // Strings do not need pruning, but the contents of the string array must be deterministic.
+ for (size_t i = 0, end = dex_cache->GetDexFile()->NumStringIds(); i < end; ++i) {
+ dex::StringIndex string_idx(i);
+ uint32_t slot_idx = dex_cache->StringSlotIndex(string_idx);
+ mirror::StringDexCachePair pair =
+ dex_cache->GetStrings()[slot_idx].load(std::memory_order_relaxed);
+ uint32_t stored_index = pair.index;
+ ObjPtr<mirror::String> string = pair.object.Read();
+ if (string == nullptr || i < stored_index) {
+ string = class_linker->LookupString(dex_file, string_idx, dex_cache);
+ DCHECK(string == nullptr || dex_cache->GetResolvedString(string_idx) == string);
+ }
+ }
+}
+
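// The determinism rule used throughout PruneAndPreloadDexCache, shown in isolation: the dex
// cache arrays are hash-based (fewer slots than dex file IDs), and on collision a slot must end
// up holding the entry with the lowest index. A sketch assuming a simple modulo slot function
// (the real ones are the DexCache Type/String/FieldSlotIndex helpers); -1 marks an empty slot:
static void FillSlotsDeterministically(const std::vector<uint32_t>& resolvable_ids,  // Sorted.
                                       size_t num_slots,
                                       std::vector<int64_t>* slots) {
  slots->assign(num_slots, -1);
  // Walking IDs in increasing order, as the loops above do, means the first claimant of a slot
  // is the lowest colliding index; later entries are dropped, so the final contents do not
  // depend on the order in which entries happened to be resolved at compile time.
  for (uint32_t id : resolvable_ids) {
    int64_t& slot = (*slots)[id % num_slots];
    if (slot == -1) {
      slot = id;
    }
  }
}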
void ImageWriter::PruneNonImageClasses() {
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
Thread* self = Thread::Current();
+ ScopedAssertNoThreadSuspension sa(__FUNCTION__);
// Clear class table strong roots so that dex caches can get pruned. We require pruning the class
// path dex caches.
class_linker->ClearClassTableStrongRoots();
// Remove the undesired classes from the class roots.
+ ObjPtr<mirror::ClassLoader> class_loader;
{
PruneClassLoaderClassesVisitor class_loader_visitor(this);
VisitClassLoaders(&class_loader_visitor);
VLOG(compiler) << "Pruned " << class_loader_visitor.GetRemovedClassCount() << " classes";
+ class_loader = class_loader_visitor.GetClassLoader();
+ DCHECK_EQ(class_loader != nullptr, compile_app_image_);
}
// Clear references to removed classes from the DexCaches.
- ArtMethod* resolution_method = runtime->GetResolutionMethod();
-
- ScopedAssertNoThreadSuspension sa(__FUNCTION__);
- ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_); // For ClassInClassTable
- ReaderMutexLock mu2(self, *Locks::dex_lock_);
- for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
- if (self->IsJWeakCleared(data.weak_root)) {
- continue;
- }
- ObjPtr<mirror::DexCache> dex_cache = self->DecodeJObject(data.weak_root)->AsDexCache();
- for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
- mirror::TypeDexCachePair pair =
- dex_cache->GetResolvedTypes()[i].load(std::memory_order_relaxed);
- mirror::Class* klass = pair.object.Read();
- if (klass != nullptr && !KeepClass(klass)) {
- dex_cache->ClearResolvedType(dex::TypeIndex(pair.index));
+ std::vector<ObjPtr<mirror::DexCache>> dex_caches;
+ {
+ ReaderMutexLock mu2(self, *Locks::dex_lock_);
+ dex_caches.reserve(class_linker->GetDexCachesData().size());
+ for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
+ if (self->IsJWeakCleared(data.weak_root)) {
+ continue;
}
+ dex_caches.push_back(self->DecodeJObject(data.weak_root)->AsDexCache());
}
- ArtMethod** resolved_methods = dex_cache->GetResolvedMethods();
- for (size_t i = 0, num = dex_cache->NumResolvedMethods(); i != num; ++i) {
- ArtMethod* method =
- mirror::DexCache::GetElementPtrSize(resolved_methods, i, target_ptr_size_);
- DCHECK(method != nullptr) << "Expected resolution method instead of null method";
- mirror::Class* declaring_class = method->GetDeclaringClass();
- // Copied methods may be held live by a class which was not an image class but have a
- // declaring class which is an image class. Set it to the resolution method to be safe and
- // prevent dangling pointers.
- if (method->IsCopied() || !KeepClass(declaring_class)) {
- mirror::DexCache::SetElementPtrSize(resolved_methods,
- i,
- resolution_method,
- target_ptr_size_);
- } else {
- // Check that the class is still in the classes table.
- DCHECK(class_linker->ClassInClassTable(declaring_class)) << "Class "
- << Class::PrettyClass(declaring_class) << " not in class linker table";
- }
- }
- mirror::FieldDexCacheType* resolved_fields = dex_cache->GetResolvedFields();
- for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) {
- auto pair = mirror::DexCache::GetNativePairPtrSize(resolved_fields, i, target_ptr_size_);
- ArtField* field = pair.object;
- if (field != nullptr && !KeepClass(field->GetDeclaringClass().Ptr())) {
- dex_cache->ClearResolvedField(pair.index, target_ptr_size_);
- }
- }
+ }
+ for (ObjPtr<mirror::DexCache> dex_cache : dex_caches) {
+ PruneAndPreloadDexCache(dex_cache, class_loader);
}
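// The restructuring above is a deliberate lock-scoping pattern: snapshot the dex caches while
// holding dex_lock_, then run PruneAndPreloadDexCache on the private copy with the lock
// released (the callee asserts !classlinker_classes_lock_ and takes that lock itself for the
// debug-build class-table check). In generic form, assuming a std::mutex-guarded list:
//
//   std::vector<T> snapshot;
//   {
//     std::lock_guard<std::mutex> guard(lock);  // Hold the lock only long enough to copy.
//     snapshot.assign(shared_list.begin(), shared_list.end());
//   }
//   for (const T& item : snapshot) {
//     ProcessWithoutLock(item);  // Free to take other locks per element.
//   }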
// Drop the array class cache in the ClassLinker, as these are roots holding those classes live.
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index bdc7146..16aff61 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -376,7 +376,7 @@
}
// Returns true if the class was in the original requested image classes list.
- bool KeepClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ bool KeepClass(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
// Debug aid that lists the requested image classes.
void DumpImageClasses();
@@ -391,6 +391,12 @@
// Remove unwanted classes from various roots.
void PruneNonImageClasses() REQUIRES_SHARED(Locks::mutator_lock_);
+ // Remove unwanted classes from the DexCache roots and preload deterministic DexCache contents.
+ void PruneAndPreloadDexCache(ObjPtr<mirror::DexCache> dex_cache,
+ ObjPtr<mirror::ClassLoader> class_loader)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::classlinker_classes_lock_);
+
// Verify unwanted classes removed.
void CheckNonImageClassesRemoved() REQUIRES_SHARED(Locks::mutator_lock_);
static void CheckNonImageClassesRemovedCallback(mirror::Object* obj, void* arg)
@@ -473,11 +479,11 @@
// we also cannot have any classes which refer to these boot class loader non-image classes.
// PruneAppImageClass also prunes if klass depends on a non-image class according to the compiler
// driver.
- bool PruneAppImageClass(mirror::Class* klass)
+ bool PruneAppImageClass(ObjPtr<mirror::Class> klass)
REQUIRES_SHARED(Locks::mutator_lock_);
// early_exit is true if we had a cyclic dependency anywhere down the chain.
- bool PruneAppImageClassInternal(mirror::Class* klass,
+ bool PruneAppImageClassInternal(ObjPtr<mirror::Class> klass,
bool* early_exit,
std::unordered_set<mirror::Class*>* visited)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 3ae7974..ad951bc 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -97,7 +97,6 @@
CompilerOptions::kDefaultSmallMethodThreshold,
CompilerOptions::kDefaultTinyMethodThreshold,
CompilerOptions::kDefaultNumDexMethodsThreshold,
- CompilerOptions::kDefaultInlineDepthLimit,
CompilerOptions::kDefaultInlineMaxCodeUnits,
/* no_inline_from */ nullptr,
CompilerOptions::kDefaultTopKProfileThreshold,
@@ -177,10 +176,6 @@
jit_logger_.reset(new JitLogger());
jit_logger_->OpenLog();
}
-
- size_t inline_depth_limit = compiler_driver_->GetCompilerOptions().GetInlineDepthLimit();
- DCHECK_LT(thread_count * inline_depth_limit, std::numeric_limits<uint16_t>::max())
- << "ProfilingInfo's inline counter can potentially overflow";
}
JitCompiler::~JitCompiler() {
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 8e25aa3..105db1d 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -1440,12 +1440,10 @@
mirror::String* GetTargetString(const LinkerPatch& patch) REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedObjectAccessUnchecked soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
ClassLinker* linker = Runtime::Current()->GetClassLinker();
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(GetDexCache(patch.TargetStringDexFile())));
mirror::String* string = linker->LookupString(*patch.TargetStringDexFile(),
patch.TargetStringIndex(),
- dex_cache);
+ GetDexCache(patch.TargetStringDexFile()));
DCHECK(string != nullptr);
DCHECK(writer_->HasBootImage() ||
Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(string));
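// The StackHandleScope could be dropped here presumably because LookupString only searches for
// an already-interned string and performs no allocation, so there is no suspend point for a
// Handle to protect against and the DexCache can travel as a bare ObjPtr.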
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index caea250..d735b27 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1602,14 +1602,20 @@
}
}
-static Condition GenerateLongTestConstant(HCondition* condition,
- bool invert,
- CodeGeneratorARM* codegen) {
+static std::pair<Condition, Condition> GenerateLongTestConstant(HCondition* condition,
+ bool invert,
+ CodeGeneratorARM* codegen) {
DCHECK_EQ(condition->GetLeft()->GetType(), Primitive::kPrimLong);
const LocationSummary* const locations = condition->GetLocations();
- IfCondition cond = invert ? condition->GetOppositeCondition() : condition->GetCondition();
- Condition ret = EQ;
+ IfCondition cond = condition->GetCondition();
+ IfCondition opposite = condition->GetOppositeCondition();
+
+ if (invert) {
+ std::swap(cond, opposite);
+ }
+
+ std::pair<Condition, Condition> ret;
const Location left = locations->InAt(0);
const Location right = locations->InAt(1);
@@ -1629,22 +1635,26 @@
__ CmpConstant(left_high, High32Bits(value));
__ it(EQ);
__ cmp(left_low, ShifterOperand(Low32Bits(value)), EQ);
- ret = ARMUnsignedCondition(cond);
+ ret = std::make_pair(ARMUnsignedCondition(cond), ARMUnsignedCondition(opposite));
break;
case kCondLE:
case kCondGT:
// Trivially true or false.
if (value == std::numeric_limits<int64_t>::max()) {
__ cmp(left_low, ShifterOperand(left_low));
- ret = cond == kCondLE ? EQ : NE;
+ ret = cond == kCondLE ? std::make_pair(EQ, NE) : std::make_pair(NE, EQ);
break;
}
if (cond == kCondLE) {
+ DCHECK_EQ(opposite, kCondGT);
cond = kCondLT;
+ opposite = kCondGE;
} else {
DCHECK_EQ(cond, kCondGT);
+ DCHECK_EQ(opposite, kCondLE);
cond = kCondGE;
+ opposite = kCondLT;
}
value++;
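// (The rewrite above relies on the integer identities `a <= c  <=>  a < c + 1` and
// `a > c  <=>  a >= c + 1`; the `value == max()` case is split off first precisely because
// `c + 1` would overflow there.)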
@@ -1653,7 +1663,7 @@
case kCondLT:
__ CmpConstant(left_low, Low32Bits(value));
__ sbcs(IP, left_high, ShifterOperand(High32Bits(value)));
- ret = ARMCondition(cond);
+ ret = std::make_pair(ARMCondition(cond), ARMCondition(opposite));
break;
default:
LOG(FATAL) << "Unreachable";
@@ -1663,14 +1673,20 @@
return ret;
}
-static Condition GenerateLongTest(HCondition* condition,
- bool invert,
- CodeGeneratorARM* codegen) {
+static std::pair<Condition, Condition> GenerateLongTest(HCondition* condition,
+ bool invert,
+ CodeGeneratorARM* codegen) {
DCHECK_EQ(condition->GetLeft()->GetType(), Primitive::kPrimLong);
const LocationSummary* const locations = condition->GetLocations();
- IfCondition cond = invert ? condition->GetOppositeCondition() : condition->GetCondition();
- Condition ret = EQ;
+ IfCondition cond = condition->GetCondition();
+ IfCondition opposite = condition->GetOppositeCondition();
+
+ if (invert) {
+ std::swap(cond, opposite);
+ }
+
+ std::pair<Condition, Condition> ret;
Location left = locations->InAt(0);
Location right = locations->InAt(1);
@@ -1689,15 +1705,19 @@
__ cmp(left.AsRegisterPairLow<Register>(),
ShifterOperand(right.AsRegisterPairLow<Register>()),
EQ);
- ret = ARMUnsignedCondition(cond);
+ ret = std::make_pair(ARMUnsignedCondition(cond), ARMUnsignedCondition(opposite));
break;
case kCondLE:
case kCondGT:
if (cond == kCondLE) {
+ DCHECK_EQ(opposite, kCondGT);
cond = kCondGE;
+ opposite = kCondLT;
} else {
DCHECK_EQ(cond, kCondGT);
+ DCHECK_EQ(opposite, kCondLE);
cond = kCondLT;
+ opposite = kCondGE;
}
std::swap(left, right);
@@ -1709,7 +1729,7 @@
__ sbcs(IP,
left.AsRegisterPairHigh<Register>(),
ShifterOperand(right.AsRegisterPairHigh<Register>()));
- ret = ARMCondition(cond);
+ ret = std::make_pair(ARMCondition(cond), ARMCondition(opposite));
break;
default:
LOG(FATAL) << "Unreachable";
@@ -1719,90 +1739,83 @@
return ret;
}
-static Condition GenerateTest(HInstruction* instruction,
- Location loc,
- bool invert,
- CodeGeneratorARM* codegen) {
- DCHECK(!instruction->IsConstant());
+static std::pair<Condition, Condition> GenerateTest(HCondition* condition,
+ bool invert,
+ CodeGeneratorARM* codegen) {
+ const LocationSummary* const locations = condition->GetLocations();
+ const Primitive::Type type = condition->GetLeft()->GetType();
+ IfCondition cond = condition->GetCondition();
+ IfCondition opposite = condition->GetOppositeCondition();
+ std::pair<Condition, Condition> ret;
+ const Location right = locations->InAt(1);
- Condition ret = invert ? EQ : NE;
+ if (invert) {
+ std::swap(cond, opposite);
+ }
- if (IsBooleanValueOrMaterializedCondition(instruction)) {
- __ CmpConstant(loc.AsRegister<Register>(), 0);
+ if (type == Primitive::kPrimLong) {
+ ret = locations->InAt(1).IsConstant()
+ ? GenerateLongTestConstant(condition, invert, codegen)
+ : GenerateLongTest(condition, invert, codegen);
+ } else if (Primitive::IsFloatingPointType(type)) {
+ GenerateVcmp(condition, codegen);
+ __ vmstat();
+ ret = std::make_pair(ARMFPCondition(cond, condition->IsGtBias()),
+ ARMFPCondition(opposite, condition->IsGtBias()));
} else {
- HCondition* const condition = instruction->AsCondition();
- const LocationSummary* const locations = condition->GetLocations();
- const Primitive::Type type = condition->GetLeft()->GetType();
- const IfCondition cond = invert ? condition->GetOppositeCondition() : condition->GetCondition();
- const Location right = locations->InAt(1);
+ DCHECK(Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) << type;
- if (type == Primitive::kPrimLong) {
- ret = condition->GetLocations()->InAt(1).IsConstant()
- ? GenerateLongTestConstant(condition, invert, codegen)
- : GenerateLongTest(condition, invert, codegen);
- } else if (Primitive::IsFloatingPointType(type)) {
- GenerateVcmp(condition, codegen);
- __ vmstat();
- ret = ARMFPCondition(cond, condition->IsGtBias());
+ const Register left = locations->InAt(0).AsRegister<Register>();
+
+ if (right.IsRegister()) {
+ __ cmp(left, ShifterOperand(right.AsRegister<Register>()));
} else {
- DCHECK(Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) << type;
-
- const Register left = locations->InAt(0).AsRegister<Register>();
-
- if (right.IsRegister()) {
- __ cmp(left, ShifterOperand(right.AsRegister<Register>()));
- } else {
- DCHECK(right.IsConstant());
- __ CmpConstant(left, CodeGenerator::GetInt32ValueOf(right.GetConstant()));
- }
-
- ret = ARMCondition(cond);
+ DCHECK(right.IsConstant());
+ __ CmpConstant(left, CodeGenerator::GetInt32ValueOf(right.GetConstant()));
}
+
+ ret = std::make_pair(ARMCondition(cond), ARMCondition(opposite));
}
return ret;
}
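// GenerateTest now returns both the flag condition and its logical opposite, so a caller can
// emit either branch sense, or an IT/ITE pair, without re-evaluating the operands.
// Illustrative use (labels assumed):
//
//   const std::pair<Condition, Condition> cond = GenerateTest(condition, /* invert */ false,
//                                                             codegen);
//   __ b(true_target, cond.first);    // Taken when the condition holds.
//   __ b(false_target, cond.second);  // Taken when it does not.
//
// Carrying the opposite explicitly matters in particular for floating-point comparisons,
// where, because of NaN ("gt bias") handling, the correct opposite is not always the plain
// inversion of the first flag encoding.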
-static bool CanGenerateTest(HInstruction* condition, ArmAssembler* assembler) {
- if (!IsBooleanValueOrMaterializedCondition(condition)) {
- const HCondition* const cond = condition->AsCondition();
+static bool CanGenerateTest(HCondition* condition, ArmAssembler* assembler) {
+ if (condition->GetLeft()->GetType() == Primitive::kPrimLong) {
+ const LocationSummary* const locations = condition->GetLocations();
+ const IfCondition c = condition->GetCondition();
- if (cond->GetLeft()->GetType() == Primitive::kPrimLong) {
- const LocationSummary* const locations = cond->GetLocations();
- const IfCondition c = cond->GetCondition();
+ if (locations->InAt(1).IsConstant()) {
+ const int64_t value = locations->InAt(1).GetConstant()->AsLongConstant()->GetValue();
+ ShifterOperand so;
- if (locations->InAt(1).IsConstant()) {
- const int64_t value = locations->InAt(1).GetConstant()->AsLongConstant()->GetValue();
- ShifterOperand so;
-
- if (c < kCondLT || c > kCondGE) {
- // Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8,
- // we check that the least significant half of the first input to be compared
- // is in a low register (the other half is read outside an IT block), and
- // the constant fits in an 8-bit unsigned integer, so that a 16-bit CMP
- // encoding can be used.
- if (!ArmAssembler::IsLowRegister(locations->InAt(0).AsRegisterPairLow<Register>()) ||
- !IsUint<8>(Low32Bits(value))) {
- return false;
- }
- } else if (c == kCondLE || c == kCondGT) {
- if (value < std::numeric_limits<int64_t>::max() &&
- !assembler->ShifterOperandCanHold(kNoRegister,
- kNoRegister,
- SBC,
- High32Bits(value + 1),
- kCcSet,
- &so)) {
- return false;
- }
- } else if (!assembler->ShifterOperandCanHold(kNoRegister,
- kNoRegister,
- SBC,
- High32Bits(value),
- kCcSet,
- &so)) {
+ if (c < kCondLT || c > kCondGE) {
+ // Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8,
+ // we check that the least significant half of the first input to be compared
+ // is in a low register (the other half is read outside an IT block), and
+ // the constant fits in an 8-bit unsigned integer, so that a 16-bit CMP
+ // encoding can be used.
+ if (!ArmAssembler::IsLowRegister(locations->InAt(0).AsRegisterPairLow<Register>()) ||
+ !IsUint<8>(Low32Bits(value))) {
return false;
}
+ } else if (c == kCondLE || c == kCondGT) {
+ if (value < std::numeric_limits<int64_t>::max() &&
+ !assembler->ShifterOperandCanHold(kNoRegister,
+ kNoRegister,
+ SBC,
+ High32Bits(value + 1),
+ kCcSet,
+ &so)) {
+ return false;
+ }
+ } else if (!assembler->ShifterOperandCanHold(kNoRegister,
+ kNoRegister,
+ SBC,
+ High32Bits(value),
+ kCcSet,
+ &so)) {
+ return false;
}
}
}
@@ -2415,13 +2428,6 @@
void InstructionCodeGeneratorARM::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}
-void InstructionCodeGeneratorARM::GenerateFPJumps(HCondition* cond,
- Label* true_label,
- Label* false_label ATTRIBUTE_UNUSED) {
- __ vmstat(); // transfer FP status register to ARM APSR.
- __ b(true_label, ARMFPCondition(cond->GetCondition(), cond->IsGtBias()));
-}
-
void InstructionCodeGeneratorARM::GenerateLongComparesAndJumps(HCondition* cond,
Label* true_label,
Label* false_label) {
@@ -2438,7 +2444,6 @@
// Set the conditions for the test, remembering that == needs to be
// decided using the low words.
- // TODO: consider avoiding jumps with temporary and CMP low+SBC high
switch (if_cond) {
case kCondEQ:
case kCondNE:
@@ -2509,25 +2514,38 @@
void InstructionCodeGeneratorARM::GenerateCompareTestAndBranch(HCondition* condition,
Label* true_target_in,
Label* false_target_in) {
+ if (CanGenerateTest(condition, codegen_->GetAssembler())) {
+ Label* non_fallthrough_target;
+ bool invert;
+
+ if (true_target_in == nullptr) {
+ DCHECK(false_target_in != nullptr);
+ non_fallthrough_target = false_target_in;
+ invert = true;
+ } else {
+ non_fallthrough_target = true_target_in;
+ invert = false;
+ }
+
+ const auto cond = GenerateTest(condition, invert, codegen_);
+
+ __ b(non_fallthrough_target, cond.first);
+
+ if (false_target_in != nullptr && false_target_in != non_fallthrough_target) {
+ __ b(false_target_in);
+ }
+
+ return;
+ }
+
// Generated branching requires both targets to be explicit. If either of the
// targets is nullptr (fallthrough), use and bind `fallthrough_target` instead.
Label fallthrough_target;
Label* true_target = true_target_in == nullptr ? &fallthrough_target : true_target_in;
Label* false_target = false_target_in == nullptr ? &fallthrough_target : false_target_in;
- Primitive::Type type = condition->InputAt(0)->GetType();
- switch (type) {
- case Primitive::kPrimLong:
- GenerateLongComparesAndJumps(condition, true_target, false_target);
- break;
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- GenerateVcmp(condition, codegen_);
- GenerateFPJumps(condition, true_target, false_target);
- break;
- default:
- LOG(FATAL) << "Unexpected compare type " << type;
- }
+ DCHECK_EQ(condition->InputAt(0)->GetType(), Primitive::kPrimLong);
+ GenerateLongComparesAndJumps(condition, true_target, false_target);
if (false_target != &fallthrough_target) {
__ b(false_target);
@@ -2729,7 +2747,8 @@
}
if (!Primitive::IsFloatingPointType(type) &&
- CanGenerateTest(condition, codegen_->GetAssembler())) {
+ (IsBooleanValueOrMaterializedCondition(condition) ||
+ CanGenerateTest(condition->AsCondition(), codegen_->GetAssembler()))) {
bool invert = false;
if (out.Equals(second)) {
@@ -2753,7 +2772,14 @@
codegen_->MoveLocation(out, src.Equals(first) ? second : first, type);
}
- const Condition cond = GenerateTest(condition, locations->InAt(2), invert, codegen_);
+ std::pair<Condition, Condition> cond;
+
+ if (IsBooleanValueOrMaterializedCondition(condition)) {
+ __ CmpConstant(locations->InAt(2).AsRegister<Register>(), 0);
+ cond = invert ? std::make_pair(EQ, NE) : std::make_pair(NE, EQ);
+ } else {
+ cond = GenerateTest(condition->AsCondition(), invert, codegen_);
+ }
if (out.IsRegister()) {
ShifterOperand operand;
@@ -2765,8 +2791,8 @@
operand = ShifterOperand(src.AsRegister<Register>());
}
- __ it(cond);
- __ mov(out.AsRegister<Register>(), operand, cond);
+ __ it(cond.first);
+ __ mov(out.AsRegister<Register>(), operand, cond.first);
} else {
DCHECK(out.IsRegisterPair());
@@ -2784,10 +2810,10 @@
operand_low = ShifterOperand(src.AsRegisterPairLow<Register>());
}
- __ it(cond);
- __ mov(out.AsRegisterPairLow<Register>(), operand_low, cond);
- __ it(cond);
- __ mov(out.AsRegisterPairHigh<Register>(), operand_high, cond);
+ __ it(cond.first);
+ __ mov(out.AsRegisterPairLow<Register>(), operand_low, cond.first);
+ __ it(cond.first);
+ __ mov(out.AsRegisterPairHigh<Register>(), operand_high, cond.first);
}
return;
@@ -2840,7 +2866,7 @@
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
if (!cond->IsEmittedAtUseSite()) {
- locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
break;
@@ -2867,51 +2893,44 @@
return;
}
- LocationSummary* locations = cond->GetLocations();
- Location left = locations->InAt(0);
- Location right = locations->InAt(1);
- Register out = locations->Out().AsRegister<Register>();
- Label true_label, false_label;
+ const Register out = cond->GetLocations()->Out().AsRegister<Register>();
- switch (cond->InputAt(0)->GetType()) {
- default: {
- // Integer case.
- if (right.IsRegister()) {
- __ cmp(left.AsRegister<Register>(), ShifterOperand(right.AsRegister<Register>()));
- } else {
- DCHECK(right.IsConstant());
- __ CmpConstant(left.AsRegister<Register>(),
- CodeGenerator::GetInt32ValueOf(right.GetConstant()));
- }
- __ it(ARMCondition(cond->GetCondition()), kItElse);
- __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(1),
- ARMCondition(cond->GetCondition()));
- __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(0),
- ARMCondition(cond->GetOppositeCondition()));
- return;
- }
- case Primitive::kPrimLong:
- GenerateLongComparesAndJumps(cond, &true_label, &false_label);
- break;
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- GenerateVcmp(cond, codegen_);
- GenerateFPJumps(cond, &true_label, &false_label);
- break;
+ if (ArmAssembler::IsLowRegister(out) && CanGenerateTest(cond, codegen_->GetAssembler())) {
+ const auto condition = GenerateTest(cond, false, codegen_);
+
+ __ it(condition.first);
+ __ mov(out, ShifterOperand(1), condition.first);
+ __ it(condition.second);
+ __ mov(out, ShifterOperand(0), condition.second);
+ return;
}
// Convert the jumps into the result.
Label done_label;
- Label* final_label = codegen_->GetFinalLabel(cond, &done_label);
+ Label* const final_label = codegen_->GetFinalLabel(cond, &done_label);
- // False case: result = 0.
- __ Bind(&false_label);
- __ LoadImmediate(out, 0);
- __ b(final_label);
+ if (cond->InputAt(0)->GetType() == Primitive::kPrimLong) {
+ Label true_label, false_label;
- // True case: result = 1.
- __ Bind(&true_label);
- __ LoadImmediate(out, 1);
+ GenerateLongComparesAndJumps(cond, &true_label, &false_label);
+
+ // False case: result = 0.
+ __ Bind(&false_label);
+ __ LoadImmediate(out, 0);
+ __ b(final_label);
+
+ // True case: result = 1.
+ __ Bind(&true_label);
+ __ LoadImmediate(out, 1);
+ } else {
+ DCHECK(CanGenerateTest(cond, codegen_->GetAssembler()));
+
+ const auto condition = GenerateTest(cond, false, codegen_);
+
+ __ mov(out, ShifterOperand(0), AL, kCcKeep);
+ __ b(final_label, condition.second);
+ __ LoadImmediate(out, 1);
+ }
if (done_label.IsLinked()) {
__ Bind(&done_label);
@@ -7039,14 +7058,16 @@
uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
- Label done, zero;
- Label* final_label = codegen_->GetFinalLabel(instruction, &done);
+ Label done;
+ Label* const final_label = codegen_->GetFinalLabel(instruction, &done);
SlowPathCodeARM* slow_path = nullptr;
// Return 0 if `obj` is null.
// Avoid the null check if we know obj is not null.
if (instruction->MustDoNullCheck()) {
- __ CompareAndBranchIfZero(obj, &zero);
+ DCHECK_NE(out, obj);
+ __ LoadImmediate(out, 0);
+ __ CompareAndBranchIfZero(obj, final_label);
}
switch (type_check_kind) {
@@ -7058,11 +7079,23 @@
class_offset,
maybe_temp_loc,
kCompilerReadBarrierOption);
- __ cmp(out, ShifterOperand(cls));
// Classes must be equal for the instanceof to succeed.
- __ b(&zero, NE);
- __ LoadImmediate(out, 1);
- __ b(final_label);
+ __ cmp(out, ShifterOperand(cls));
+ // We speculatively set the result to false without changing the condition
+ // flags, which allows us to avoid some branching later.
+ __ mov(out, ShifterOperand(0), AL, kCcKeep);
+
+ // Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8,
+ // we check that the output is in a low register, so that a 16-bit MOV
+ // encoding can be used.
+ if (ArmAssembler::IsLowRegister(out)) {
+ __ it(EQ);
+ __ mov(out, ShifterOperand(1), EQ);
+ } else {
+ __ b(final_label, NE);
+ __ LoadImmediate(out, 1);
+ }
+
break;
}
@@ -7084,14 +7117,11 @@
super_offset,
maybe_temp_loc,
kCompilerReadBarrierOption);
- // If `out` is null, we use it for the result, and jump to `done`.
+ // If `out` is null, we use it for the result, and jump to the final label.
__ CompareAndBranchIfZero(out, final_label);
__ cmp(out, ShifterOperand(cls));
__ b(&loop, NE);
__ LoadImmediate(out, 1);
- if (zero.IsLinked()) {
- __ b(final_label);
- }
break;
}
@@ -7114,14 +7144,32 @@
super_offset,
maybe_temp_loc,
kCompilerReadBarrierOption);
- __ CompareAndBranchIfNonZero(out, &loop);
- // If `out` is null, we use it for the result, and jump to `done`.
- __ b(final_label);
- __ Bind(&success);
- __ LoadImmediate(out, 1);
- if (zero.IsLinked()) {
+ // This is essentially a null check, but it sets the condition flags to the
+ // proper value for the code that follows the loop, i.e. not `EQ`.
+ __ cmp(out, ShifterOperand(1));
+ __ b(&loop, HS);
+
+ // Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8,
+ // we check that the output is in a low register, so that a 16-bit MOV
+ // encoding can be used.
+ if (ArmAssembler::IsLowRegister(out)) {
+ // If `out` is null, we use it for the result, and the condition flags
+ // have already been set to `NE`, so the IT block that comes afterwards
+ // (and which handles the successful case) turns into a NOP (instead of
+ // overwriting `out`).
+ __ Bind(&success);
+ // There is only one branch to the `success` label (which is bound to this
+ // IT block), and it has the same condition, `EQ`, so in that case the MOV
+ // is executed.
+ __ it(EQ);
+ __ mov(out, ShifterOperand(1), EQ);
+ } else {
+ // If `out` is null, we use it for the result, and jump to the final label.
__ b(final_label);
+ __ Bind(&success);
+ __ LoadImmediate(out, 1);
}
+
break;
}
@@ -7144,14 +7192,28 @@
component_offset,
maybe_temp_loc,
kCompilerReadBarrierOption);
- // If `out` is null, we use it for the result, and jump to `done`.
+ // If `out` is null, we use it for the result, and jump to the final label.
__ CompareAndBranchIfZero(out, final_label);
__ LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
- __ CompareAndBranchIfNonZero(out, &zero);
- __ Bind(&exact_check);
- __ LoadImmediate(out, 1);
- __ b(final_label);
+ __ cmp(out, ShifterOperand(0));
+ // We speculatively set the result to false without changing the condition
+ // flags, which allows us to avoid some branching later.
+ __ mov(out, ShifterOperand(0), AL, kCcKeep);
+
+ // Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8,
+ // we check that the output is in a low register, so that a 16-bit MOV
+ // encoding can be used.
+ if (ArmAssembler::IsLowRegister(out)) {
+ __ Bind(&exact_check);
+ __ it(EQ);
+ __ mov(out, ShifterOperand(1), EQ);
+ } else {
+ __ b(final_label, NE);
+ __ Bind(&exact_check);
+ __ LoadImmediate(out, 1);
+ }
+
break;
}
@@ -7171,9 +7233,6 @@
codegen_->AddSlowPath(slow_path);
__ b(slow_path->GetEntryLabel(), NE);
__ LoadImmediate(out, 1);
- if (zero.IsLinked()) {
- __ b(final_label);
- }
break;
}
@@ -7202,18 +7261,10 @@
/* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ b(slow_path->GetEntryLabel());
- if (zero.IsLinked()) {
- __ b(final_label);
- }
break;
}
}
- if (zero.IsLinked()) {
- __ Bind(&zero);
- __ LoadImmediate(out, 0);
- }
-
if (done.IsLinked()) {
__ Bind(&done);
}
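// The recurring shape in the instanceof cases above: speculatively set `out` to 0 without
// touching the flags (kCcKeep), then conditionally flip it to 1, preferring a 16-bit IT+MOV
// when `out` is a low register and falling back to a branch otherwise. Distilled:
//
//   __ mov(out, ShifterOperand(0), AL, kCcKeep);  // Speculative false, flags preserved.
//   if (ArmAssembler::IsLowRegister(out)) {
//     __ it(EQ);
//     __ mov(out, ShifterOperand(1), EQ);  // True case handled without a branch.
//   } else {
//     __ b(final_label, NE);  // False case: skip the store of 1.
//     __ LoadImmediate(out, 1);
//   }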
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 59a7f7c..86f2f21 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -299,7 +299,6 @@
void GenerateCompareTestAndBranch(HCondition* condition,
Label* true_target,
Label* false_target);
- void GenerateFPJumps(HCondition* cond, Label* true_label, Label* false_label);
void GenerateLongComparesAndJumps(HCondition* cond, Label* true_label, Label* false_label);
void DivRemOneOrMinusOne(HBinaryOperation* instruction);
void DivRemByPowerOfTwo(HBinaryOperation* instruction);
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 2d2d810..a1c3da9 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -1687,14 +1687,21 @@
}
}
-static vixl32::Condition GenerateLongTestConstant(HCondition* condition,
- bool invert,
- CodeGeneratorARMVIXL* codegen) {
+static std::pair<vixl32::Condition, vixl32::Condition> GenerateLongTestConstant(
+ HCondition* condition,
+ bool invert,
+ CodeGeneratorARMVIXL* codegen) {
DCHECK_EQ(condition->GetLeft()->GetType(), Primitive::kPrimLong);
const LocationSummary* const locations = condition->GetLocations();
- IfCondition cond = invert ? condition->GetOppositeCondition() : condition->GetCondition();
- vixl32::Condition ret = eq;
+ IfCondition cond = condition->GetCondition();
+ IfCondition opposite = condition->GetOppositeCondition();
+
+ if (invert) {
+ std::swap(cond, opposite);
+ }
+
+ std::pair<vixl32::Condition, vixl32::Condition> ret(eq, ne);
const Location left = locations->InAt(0);
const Location right = locations->InAt(1);
@@ -1713,13 +1720,14 @@
case kCondAE: {
__ Cmp(left_high, High32Bits(value));
+ // We use the scope because of the IT block that follows.
ExactAssemblyScope guard(codegen->GetVIXLAssembler(),
2 * vixl32::k16BitT32InstructionSizeInBytes,
CodeBufferCheckScope::kExactSize);
__ it(eq);
__ cmp(eq, left_low, Low32Bits(value));
- ret = ARMUnsignedCondition(cond);
+ ret = std::make_pair(ARMUnsignedCondition(cond), ARMUnsignedCondition(opposite));
break;
}
case kCondLE:
@@ -1727,15 +1735,19 @@
// Trivially true or false.
if (value == std::numeric_limits<int64_t>::max()) {
__ Cmp(left_low, left_low);
- ret = cond == kCondLE ? eq : ne;
+ ret = cond == kCondLE ? std::make_pair(eq, ne) : std::make_pair(ne, eq);
break;
}
if (cond == kCondLE) {
+ DCHECK_EQ(opposite, kCondGT);
cond = kCondLT;
+ opposite = kCondGE;
} else {
DCHECK_EQ(cond, kCondGT);
+ DCHECK_EQ(opposite, kCondLE);
cond = kCondGE;
+ opposite = kCondLT;
}
value++;
@@ -1746,7 +1758,7 @@
__ Cmp(left_low, Low32Bits(value));
__ Sbcs(temps.Acquire(), left_high, High32Bits(value));
- ret = ARMCondition(cond);
+ ret = std::make_pair(ARMCondition(cond), ARMCondition(opposite));
break;
}
default:
@@ -1757,14 +1769,21 @@
return ret;
}
-static vixl32::Condition GenerateLongTest(HCondition* condition,
- bool invert,
- CodeGeneratorARMVIXL* codegen) {
+static std::pair<vixl32::Condition, vixl32::Condition> GenerateLongTest(
+ HCondition* condition,
+ bool invert,
+ CodeGeneratorARMVIXL* codegen) {
DCHECK_EQ(condition->GetLeft()->GetType(), Primitive::kPrimLong);
const LocationSummary* const locations = condition->GetLocations();
- IfCondition cond = invert ? condition->GetOppositeCondition() : condition->GetCondition();
- vixl32::Condition ret = eq;
+ IfCondition cond = condition->GetCondition();
+ IfCondition opposite = condition->GetOppositeCondition();
+
+ if (invert) {
+ std::swap(cond, opposite);
+ }
+
+ std::pair<vixl32::Condition, vixl32::Condition> ret(eq, ne);
Location left = locations->InAt(0);
Location right = locations->InAt(1);
@@ -1779,22 +1798,27 @@
case kCondAE: {
__ Cmp(HighRegisterFrom(left), HighRegisterFrom(right));
+ // We use the scope because of the IT block that follows.
ExactAssemblyScope guard(codegen->GetVIXLAssembler(),
2 * vixl32::k16BitT32InstructionSizeInBytes,
CodeBufferCheckScope::kExactSize);
__ it(eq);
__ cmp(eq, LowRegisterFrom(left), LowRegisterFrom(right));
- ret = ARMUnsignedCondition(cond);
+ ret = std::make_pair(ARMUnsignedCondition(cond), ARMUnsignedCondition(opposite));
break;
}
case kCondLE:
case kCondGT:
if (cond == kCondLE) {
+ DCHECK_EQ(opposite, kCondGT);
cond = kCondGE;
+ opposite = kCondLT;
} else {
DCHECK_EQ(cond, kCondGT);
+ DCHECK_EQ(opposite, kCondLE);
cond = kCondLT;
+ opposite = kCondGE;
}
std::swap(left, right);
@@ -1805,7 +1829,7 @@
__ Cmp(LowRegisterFrom(left), LowRegisterFrom(right));
__ Sbcs(temps.Acquire(), HighRegisterFrom(left), HighRegisterFrom(right));
- ret = ARMCondition(cond);
+ ret = std::make_pair(ARMCondition(cond), ARMCondition(opposite));
break;
}
default:
@@ -1816,69 +1840,62 @@
return ret;
}
-static vixl32::Condition GenerateTest(HInstruction* instruction,
- Location loc,
- bool invert,
- CodeGeneratorARMVIXL* codegen) {
- DCHECK(!instruction->IsConstant());
+static std::pair<vixl32::Condition, vixl32::Condition> GenerateTest(HCondition* condition,
+ bool invert,
+ CodeGeneratorARMVIXL* codegen) {
+ const Primitive::Type type = condition->GetLeft()->GetType();
+ IfCondition cond = condition->GetCondition();
+ IfCondition opposite = condition->GetOppositeCondition();
+ std::pair<vixl32::Condition, vixl32::Condition> ret(eq, ne);
- vixl32::Condition ret = invert ? eq : ne;
+ if (invert) {
+ std::swap(cond, opposite);
+ }
- if (IsBooleanValueOrMaterializedCondition(instruction)) {
- __ Cmp(RegisterFrom(loc), 0);
+ if (type == Primitive::kPrimLong) {
+ ret = condition->GetLocations()->InAt(1).IsConstant()
+ ? GenerateLongTestConstant(condition, invert, codegen)
+ : GenerateLongTest(condition, invert, codegen);
+ } else if (Primitive::IsFloatingPointType(type)) {
+ GenerateVcmp(condition, codegen);
+ __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
+ ret = std::make_pair(ARMFPCondition(cond, condition->IsGtBias()),
+ ARMFPCondition(opposite, condition->IsGtBias()));
} else {
- HCondition* const condition = instruction->AsCondition();
- const Primitive::Type type = condition->GetLeft()->GetType();
- const IfCondition cond = invert ? condition->GetOppositeCondition() : condition->GetCondition();
-
- if (type == Primitive::kPrimLong) {
- ret = condition->GetLocations()->InAt(1).IsConstant()
- ? GenerateLongTestConstant(condition, invert, codegen)
- : GenerateLongTest(condition, invert, codegen);
- } else if (Primitive::IsFloatingPointType(type)) {
- GenerateVcmp(condition, codegen);
- __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
- ret = ARMFPCondition(cond, condition->IsGtBias());
- } else {
- DCHECK(Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) << type;
- __ Cmp(InputRegisterAt(condition, 0), InputOperandAt(condition, 1));
- ret = ARMCondition(cond);
- }
+ DCHECK(Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) << type;
+ __ Cmp(InputRegisterAt(condition, 0), InputOperandAt(condition, 1));
+ ret = std::make_pair(ARMCondition(cond), ARMCondition(opposite));
}
return ret;
}
-static bool CanGenerateTest(HInstruction* condition, ArmVIXLAssembler* assembler) {
- if (!IsBooleanValueOrMaterializedCondition(condition)) {
- const HCondition* const cond = condition->AsCondition();
+static bool CanGenerateTest(HCondition* condition, ArmVIXLAssembler* assembler) {
+ if (condition->GetLeft()->GetType() == Primitive::kPrimLong) {
+ const LocationSummary* const locations = condition->GetLocations();
+ const IfCondition c = condition->GetCondition();
- if (cond->GetLeft()->GetType() == Primitive::kPrimLong) {
- const LocationSummary* const locations = cond->GetLocations();
- const IfCondition c = cond->GetCondition();
+ if (locations->InAt(1).IsConstant()) {
+ const int64_t value = Int64ConstantFrom(locations->InAt(1));
- if (locations->InAt(1).IsConstant()) {
- const int64_t value = Int64ConstantFrom(locations->InAt(1));
-
- if (c < kCondLT || c > kCondGE) {
- // Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8,
- // we check that the least significant half of the first input to be compared
- // is in a low register (the other half is read outside an IT block), and
- // the constant fits in an 8-bit unsigned integer, so that a 16-bit CMP
- // encoding can be used.
- if (!LowRegisterFrom(locations->InAt(0)).IsLow() || !IsUint<8>(Low32Bits(value))) {
- return false;
- }
- // TODO(VIXL): The rest of the checks are there to keep the backend in sync with
- // the previous one, but are not strictly necessary.
- } else if (c == kCondLE || c == kCondGT) {
- if (value < std::numeric_limits<int64_t>::max() &&
- !assembler->ShifterOperandCanHold(SBC, High32Bits(value + 1), kCcSet)) {
- return false;
- }
- } else if (!assembler->ShifterOperandCanHold(SBC, High32Bits(value), kCcSet)) {
+ if (c < kCondLT || c > kCondGE) {
+ // Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8,
+ // we check that the least significant half of the first input to be compared
+ // is in a low register (the other half is read outside an IT block), and
+ // the constant fits in an 8-bit unsigned integer, so that a 16-bit CMP
+ // encoding can be used.
+ if (!LowRegisterFrom(locations->InAt(0)).IsLow() || !IsUint<8>(Low32Bits(value))) {
return false;
}
+ // TODO(VIXL): The rest of the checks are there to keep the backend in sync with
+ // the previous one, but are not strictly necessary.
+ } else if (c == kCondLE || c == kCondGT) {
+ if (value < std::numeric_limits<int64_t>::max() &&
+ !assembler->ShifterOperandCanHold(SBC, High32Bits(value + 1), kCcSet)) {
+ return false;
+ }
+ } else if (!assembler->ShifterOperandCanHold(SBC, High32Bits(value), kCcSet)) {
+ return false;
}
}
}
@@ -2445,14 +2462,6 @@
void InstructionCodeGeneratorARMVIXL::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}
-void InstructionCodeGeneratorARMVIXL::GenerateFPJumps(HCondition* cond,
- vixl32::Label* true_label,
- vixl32::Label* false_label ATTRIBUTE_UNUSED) {
- // To branch on the result of the FP compare we transfer FPSCR to APSR (encoded as PC in VMRS).
- __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
- __ B(ARMFPCondition(cond->GetCondition(), cond->IsGtBias()), true_label);
-}
-
void InstructionCodeGeneratorARMVIXL::GenerateLongComparesAndJumps(HCondition* cond,
vixl32::Label* true_label,
vixl32::Label* false_label) {
@@ -2469,7 +2478,6 @@
// Set the conditions for the test, remembering that == needs to be
// decided using the low words.
- // TODO: consider avoiding jumps with temporary and CMP low+SBC high
switch (if_cond) {
case kCondEQ:
case kCondNE:
@@ -2540,31 +2548,44 @@
void InstructionCodeGeneratorARMVIXL::GenerateCompareTestAndBranch(HCondition* condition,
vixl32::Label* true_target_in,
vixl32::Label* false_target_in) {
+ if (CanGenerateTest(condition, codegen_->GetAssembler())) {
+ vixl32::Label* non_fallthrough_target;
+ bool invert;
+
+ if (true_target_in == nullptr) {
+ DCHECK(false_target_in != nullptr);
+ non_fallthrough_target = false_target_in;
+ invert = true;
+ } else {
+ non_fallthrough_target = true_target_in;
+ invert = false;
+ }
+
+ const auto cond = GenerateTest(condition, invert, codegen_);
+
+ __ B(cond.first, non_fallthrough_target);
+
+ if (false_target_in != nullptr && false_target_in != non_fallthrough_target) {
+ __ B(false_target_in);
+ }
+
+ return;
+ }
+
// Generated branching requires both targets to be explicit. If either of the
// targets is nullptr (fallthrough), use and bind `fallthrough` instead.
vixl32::Label fallthrough;
vixl32::Label* true_target = (true_target_in == nullptr) ? &fallthrough : true_target_in;
vixl32::Label* false_target = (false_target_in == nullptr) ? &fallthrough : false_target_in;
- Primitive::Type type = condition->InputAt(0)->GetType();
- switch (type) {
- case Primitive::kPrimLong:
- GenerateLongComparesAndJumps(condition, true_target, false_target);
- break;
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- GenerateVcmp(condition, codegen_);
- GenerateFPJumps(condition, true_target, false_target);
- break;
- default:
- LOG(FATAL) << "Unexpected compare type " << type;
- }
+ DCHECK_EQ(condition->InputAt(0)->GetType(), Primitive::kPrimLong);
+ GenerateLongComparesAndJumps(condition, true_target, false_target);
if (false_target != &fallthrough) {
__ B(false_target);
}
- if (true_target_in == nullptr || false_target_in == nullptr) {
+ if (fallthrough.IsReferenced()) {
__ Bind(&fallthrough);
}
}
@@ -2759,7 +2780,8 @@
}
if (!Primitive::IsFloatingPointType(type) &&
- CanGenerateTest(condition, codegen_->GetAssembler())) {
+ (IsBooleanValueOrMaterializedCondition(condition) ||
+ CanGenerateTest(condition->AsCondition(), codegen_->GetAssembler()))) {
bool invert = false;
if (out.Equals(second)) {
@@ -2783,15 +2805,24 @@
codegen_->MoveLocation(out, src.Equals(first) ? second : first, type);
}
- const vixl32::Condition cond = GenerateTest(condition, locations->InAt(2), invert, codegen_);
+ std::pair<vixl32::Condition, vixl32::Condition> cond(eq, ne);
+
+ if (IsBooleanValueOrMaterializedCondition(condition)) {
+ __ Cmp(InputRegisterAt(select, 2), 0);
+ cond = invert ? std::make_pair(eq, ne) : std::make_pair(ne, eq);
+ } else {
+ cond = GenerateTest(condition->AsCondition(), invert, codegen_);
+ }
+
const size_t instr_count = out.IsRegisterPair() ? 4 : 2;
+ // We use the scope because of the IT block that follows.
ExactAssemblyScope guard(GetVIXLAssembler(),
instr_count * vixl32::k16BitT32InstructionSizeInBytes,
CodeBufferCheckScope::kExactSize);
if (out.IsRegister()) {
- __ it(cond);
- __ mov(cond, RegisterFrom(out), OperandFrom(src, type));
+ __ it(cond.first);
+ __ mov(cond.first, RegisterFrom(out), OperandFrom(src, type));
} else {
DCHECK(out.IsRegisterPair());
@@ -2809,10 +2840,10 @@
operand_low = LowRegisterFrom(src);
}
- __ it(cond);
- __ mov(cond, LowRegisterFrom(out), operand_low);
- __ it(cond);
- __ mov(cond, HighRegisterFrom(out), operand_high);
+ __ it(cond.first);
+ __ mov(cond.first, LowRegisterFrom(out), operand_low);
+ __ it(cond.first);
+ __ mov(cond.first, HighRegisterFrom(out), operand_high);
}
return;
@@ -2865,7 +2896,7 @@
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
if (!cond->IsEmittedAtUseSite()) {
- locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
break;
@@ -2892,50 +2923,48 @@
return;
}
- Location right = cond->GetLocations()->InAt(1);
- vixl32::Register out = OutputRegister(cond);
- vixl32::Label true_label, false_label;
+ const vixl32::Register out = OutputRegister(cond);
- switch (cond->InputAt(0)->GetType()) {
- default: {
- // Integer case.
- if (right.IsRegister()) {
- __ Cmp(InputRegisterAt(cond, 0), InputOperandAt(cond, 1));
- } else {
- DCHECK(right.IsConstant());
- __ Cmp(InputRegisterAt(cond, 0),
- CodeGenerator::GetInt32ValueOf(right.GetConstant()));
- }
- ExactAssemblyScope aas(GetVIXLAssembler(),
- 3 * vixl32::kMaxInstructionSizeInBytes,
- CodeBufferCheckScope::kMaximumSize);
- __ ite(ARMCondition(cond->GetCondition()));
- __ mov(ARMCondition(cond->GetCondition()), OutputRegister(cond), 1);
- __ mov(ARMCondition(cond->GetOppositeCondition()), OutputRegister(cond), 0);
- return;
- }
- case Primitive::kPrimLong:
- GenerateLongComparesAndJumps(cond, &true_label, &false_label);
- break;
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- GenerateVcmp(cond, codegen_);
- GenerateFPJumps(cond, &true_label, &false_label);
- break;
+ if (out.IsLow() && CanGenerateTest(cond, codegen_->GetAssembler())) {
+ const auto condition = GenerateTest(cond, false, codegen_);
+ // We use the scope because of the IT block that follows.
+ ExactAssemblyScope guard(GetVIXLAssembler(),
+ 4 * vixl32::k16BitT32InstructionSizeInBytes,
+ CodeBufferCheckScope::kExactSize);
+
+ __ it(condition.first);
+ __ mov(condition.first, out, 1);
+ __ it(condition.second);
+ __ mov(condition.second, out, 0);
+ return;
}
// Convert the jumps into the result.
vixl32::Label done_label;
- vixl32::Label* final_label = codegen_->GetFinalLabel(cond, &done_label);
+ vixl32::Label* const final_label = codegen_->GetFinalLabel(cond, &done_label);
- // False case: result = 0.
- __ Bind(&false_label);
- __ Mov(out, 0);
- __ B(final_label);
+ if (cond->InputAt(0)->GetType() == Primitive::kPrimLong) {
+ vixl32::Label true_label, false_label;
- // True case: result = 1.
- __ Bind(&true_label);
- __ Mov(out, 1);
+ GenerateLongComparesAndJumps(cond, &true_label, &false_label);
+
+ // False case: result = 0.
+ __ Bind(&false_label);
+ __ Mov(out, 0);
+ __ B(final_label);
+
+ // True case: result = 1.
+ __ Bind(&true_label);
+ __ Mov(out, 1);
+ } else {
+ DCHECK(CanGenerateTest(cond, codegen_->GetAssembler()));
+
+ const auto condition = GenerateTest(cond, false, codegen_);
+
+ __ Mov(LeaveFlags, out, 0);
+ __ B(condition.second, final_label, /* far_target */ false);
+ __ Mov(out, 1);
+ }
if (done_label.IsReferenced()) {
__ Bind(&done_label);
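
Both materialization shapes above must leave `out` equal to 1 exactly when the condition holds: the low-register path uses two IT/MOV pairs, while the fallback sets 0 flag-preservingly and conditionally skips the `mov out, #1`. A behavioral model in plain C++, not generated code:

    #include <cassert>
    #include <cstdint>

    // Shape A: it/mov pairs (branch-free on Thumb-2).
    uint32_t MaterializeWithIt(bool cc) {
      uint32_t out = cc ? 1u : 0u;  // it cc; mov cc, out, 1; it !cc; mov !cc, out, 0
      return out;
    }

    // Shape B: speculative zero plus a conditional skip.
    uint32_t MaterializeWithBranch(bool cc) {
      uint32_t out = 0;  // Mov(LeaveFlags, out, 0): the flags stay intact.
      if (cc) {
        out = 1;         // Skipped via B(condition.second, final_label).
      }
      return out;
    }

    int main() {
      for (bool cc : {false, true}) {
        assert(MaterializeWithIt(cc) == MaterializeWithBranch(cc));
      }
      return 0;
    }
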
@@ -7079,14 +7108,16 @@
uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
- vixl32::Label done, zero;
- vixl32::Label* final_label = codegen_->GetFinalLabel(instruction, &done);
+ vixl32::Label done;
+ vixl32::Label* const final_label = codegen_->GetFinalLabel(instruction, &done);
SlowPathCodeARMVIXL* slow_path = nullptr;
// Return 0 if `obj` is null.
// Avoid null check if we know obj is not null.
if (instruction->MustDoNullCheck()) {
- __ CompareAndBranchIfZero(obj, &zero, /* far_target */ false);
+ DCHECK(!out.Is(obj));
+ __ Mov(out, 0);
+ __ CompareAndBranchIfZero(obj, final_label, /* far_target */ false);
}
switch (type_check_kind) {
@@ -7098,11 +7129,28 @@
class_offset,
maybe_temp_loc,
kCompilerReadBarrierOption);
- __ Cmp(out, cls);
// Classes must be equal for the instanceof to succeed.
- __ B(ne, &zero, /* far_target */ false);
- __ Mov(out, 1);
- __ B(final_label);
+ __ Cmp(out, cls);
+ // We speculatively set the result to false without changing the condition
+ // flags, which allows us to avoid some branching later.
+ __ Mov(LeaveFlags, out, 0);
+
+ // Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8,
+ // we check that the output is in a low register, so that a 16-bit MOV
+ // encoding can be used.
+ if (out.IsLow()) {
+ // We use the scope because of the IT block that follows.
+ ExactAssemblyScope guard(GetVIXLAssembler(),
+ 2 * vixl32::k16BitT32InstructionSizeInBytes,
+ CodeBufferCheckScope::kExactSize);
+
+ __ it(eq);
+ __ mov(eq, out, 1);
+ } else {
+ __ B(ne, final_label, /* far_target */ false);
+ __ Mov(out, 1);
+ }
+
break;
}
@@ -7124,14 +7172,11 @@
super_offset,
maybe_temp_loc,
kCompilerReadBarrierOption);
- // If `out` is null, we use it for the result, and jump to `done`.
+ // If `out` is null, we use it for the result, and jump to the final label.
__ CompareAndBranchIfZero(out, final_label, /* far_target */ false);
__ Cmp(out, cls);
__ B(ne, &loop, /* far_target */ false);
__ Mov(out, 1);
- if (zero.IsReferenced()) {
- __ B(final_label);
- }
break;
}
@@ -7154,14 +7199,38 @@
super_offset,
maybe_temp_loc,
kCompilerReadBarrierOption);
- __ CompareAndBranchIfNonZero(out, &loop);
- // If `out` is null, we use it for the result, and jump to `done`.
- __ B(final_label);
- __ Bind(&success);
- __ Mov(out, 1);
- if (zero.IsReferenced()) {
+ // This is essentially a null check, but it sets the condition flags to the
+ // proper value for the code that follows the loop, i.e. not `eq`.
+ __ Cmp(out, 1);
+ __ B(hs, &loop, /* far_target */ false);
+
+ // Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8,
+ // we check that the output is in a low register, so that a 16-bit MOV
+ // encoding can be used.
+ if (out.IsLow()) {
+ // If `out` is null, we use it for the result, and the condition flags
+ // have already been set to `ne`, so the IT block that comes afterwards
+ // (and which handles the successful case) turns into a NOP (instead of
+ // overwriting `out`).
+ __ Bind(&success);
+
+ // We use the scope because of the IT block that follows.
+ ExactAssemblyScope guard(GetVIXLAssembler(),
+ 2 * vixl32::k16BitT32InstructionSizeInBytes,
+ CodeBufferCheckScope::kExactSize);
+
+ // There is only one branch to the `success` label (which is bound to this
+ // IT block), and it has the same condition, `eq`, so in that case the MOV
+ // is executed.
+ __ it(eq);
+ __ mov(eq, out, 1);
+ } else {
+ // If `out` is null, we use it for the result, and jump to the final label.
__ B(final_label);
+ __ Bind(&success);
+ __ Mov(out, 1);
}
+
break;
}
@@ -7184,14 +7253,34 @@
component_offset,
maybe_temp_loc,
kCompilerReadBarrierOption);
- // If `out` is null, we use it for the result, and jump to `done`.
+ // If `out` is null, we use it for the result, and jump to the final label.
__ CompareAndBranchIfZero(out, final_label, /* far_target */ false);
GetAssembler()->LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
- __ CompareAndBranchIfNonZero(out, &zero, /* far_target */ false);
- __ Bind(&exact_check);
- __ Mov(out, 1);
- __ B(final_label);
+ __ Cmp(out, 0);
+ // We speculatively set the result to false without changing the condition
+ // flags, which allows us to avoid some branching later.
+ __ Mov(LeaveFlags, out, 0);
+
+ // Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8,
+ // we check that the output is in a low register, so that a 16-bit MOV
+ // encoding can be used.
+ if (out.IsLow()) {
+ __ Bind(&exact_check);
+
+ // We use the scope because of the IT block that follows.
+ ExactAssemblyScope guard(GetVIXLAssembler(),
+ 2 * vixl32::k16BitT32InstructionSizeInBytes,
+ CodeBufferCheckScope::kExactSize);
+
+ __ it(eq);
+ __ mov(eq, out, 1);
+ } else {
+ __ B(ne, final_label, /* far_target */ false);
+ __ Bind(&exact_check);
+ __ Mov(out, 1);
+ }
+
break;
}
@@ -7211,9 +7300,6 @@
codegen_->AddSlowPath(slow_path);
__ B(ne, slow_path->GetEntryLabel());
__ Mov(out, 1);
- if (zero.IsReferenced()) {
- __ B(final_label);
- }
break;
}
@@ -7242,18 +7328,10 @@
/* is_fatal */ false);
codegen_->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
- if (zero.IsReferenced()) {
- __ B(final_label);
- }
break;
}
}
- if (zero.IsReferenced()) {
- __ Bind(&zero);
- __ Mov(out, 0);
- }
-
if (done.IsReferenced()) {
__ Bind(&done);
}
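
The recurring pattern in this hunk is: compare, speculatively zero the result without touching the flags, then fix it up with a 16-bit IT/MOV when the output register allows it. A behavioral model of the exact-check case, with stand-in types rather than ART's:

    // After `cmp out, cls`, `Mov(LeaveFlags, out, 0)` preserves the eq flag,
    // so the IT block can conditionally write 1 with no branch at all.
    bool InstanceOfExactCheck(const void* klass, const void* cls) {
      const bool eq_flag = (klass == cls);  // __ Cmp(out, cls)
      bool out = false;                     // __ Mov(LeaveFlags, out, 0)
      if (eq_flag) {
        out = true;                         // __ it(eq); __ mov(eq, out, 1)
      }
      return out;
    }
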
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 781027a..1e9669d 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -401,9 +401,6 @@
void GenerateCompareTestAndBranch(HCondition* condition,
vixl::aarch32::Label* true_target,
vixl::aarch32::Label* false_target);
- void GenerateFPJumps(HCondition* cond,
- vixl::aarch32::Label* true_label,
- vixl::aarch32::Label* false_label);
void GenerateLongComparesAndJumps(HCondition* cond,
vixl::aarch32::Label* true_label,
vixl::aarch32::Label* false_label);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 5246dbc..c82533b 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -558,26 +558,21 @@
return;
}
- // Make sure the frame size isn't unreasonably large. Per the various APIs
- // it looks like it should always be less than 2GB in size, which allows
- // us using 32-bit signed offsets from the stack pointer.
- if (GetFrameSize() > 0x7FFFFFFF)
- LOG(FATAL) << "Stack frame larger than 2GB";
+ // Make sure the frame size isn't unreasonably large.
+ if (GetFrameSize() > GetStackOverflowReservedBytes(kMips64)) {
+ LOG(FATAL) << "Stack frame larger than " << GetStackOverflowReservedBytes(kMips64) << " bytes";
+ }
// Spill callee-saved registers.
- // Note that their cumulative size is small and they can be indexed using
- // 16-bit offsets.
- // TODO: increment/decrement SP in one step instead of two or remove this comment.
-
- uint32_t ofs = FrameEntrySpillSize();
+ uint32_t ofs = GetFrameSize();
__ IncreaseFrameSize(ofs);
for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) {
GpuRegister reg = kCoreCalleeSaves[i];
if (allocated_registers_.ContainsCoreRegister(reg)) {
ofs -= kMips64DoublewordSize;
- __ Sd(reg, SP, ofs);
+ __ StoreToOffset(kStoreDoubleword, reg, SP, ofs);
__ cfi().RelOffset(DWARFReg(reg), ofs);
}
}
@@ -586,23 +581,16 @@
FpuRegister reg = kFpuCalleeSaves[i];
if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
ofs -= kMips64DoublewordSize;
- __ Sdc1(reg, SP, ofs);
+ __ StoreFpuToOffset(kStoreDoubleword, reg, SP, ofs);
__ cfi().RelOffset(DWARFReg(reg), ofs);
}
}
- // Allocate the rest of the frame and store the current method pointer
- // at its end.
-
- __ IncreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());
-
// Save the current method if we need it. Note that we do not
// do this in HCurrentMethod, as the instruction might have been removed
// in the SSA graph.
if (RequiresCurrentMethod()) {
- static_assert(IsInt<16>(kCurrentMethodStackOffset),
- "kCurrentMethodStackOffset must fit into int16_t");
- __ Sd(kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
+ __ StoreToOffset(kStoreDoubleword, kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
}
if (GetGraph()->HasShouldDeoptimizeFlag()) {
@@ -615,42 +603,32 @@
__ cfi().RememberState();
if (!HasEmptyFrame()) {
- // Deallocate the rest of the frame.
-
- __ DecreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());
-
// Restore callee-saved registers.
- // Note that their cumulative size is small and they can be indexed using
- // 16-bit offsets.
- // TODO: increment/decrement SP in one step instead of two or remove this comment.
-
- uint32_t ofs = 0;
-
- for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
- FpuRegister reg = kFpuCalleeSaves[i];
- if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
- __ Ldc1(reg, SP, ofs);
- ofs += kMips64DoublewordSize;
- __ cfi().Restore(DWARFReg(reg));
- }
- }
-
- for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
+ // For better instruction scheduling, restore RA before other registers.
+ uint32_t ofs = GetFrameSize();
+ for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) {
GpuRegister reg = kCoreCalleeSaves[i];
if (allocated_registers_.ContainsCoreRegister(reg)) {
- __ Ld(reg, SP, ofs);
- ofs += kMips64DoublewordSize;
+ ofs -= kMips64DoublewordSize;
+ __ LoadFromOffset(kLoadDoubleword, reg, SP, ofs);
__ cfi().Restore(DWARFReg(reg));
}
}
- DCHECK_EQ(ofs, FrameEntrySpillSize());
- __ DecreaseFrameSize(ofs);
+ for (int i = arraysize(kFpuCalleeSaves) - 1; i >= 0; --i) {
+ FpuRegister reg = kFpuCalleeSaves[i];
+ if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
+ ofs -= kMips64DoublewordSize;
+ __ LoadFpuFromOffset(kLoadDoubleword, reg, SP, ofs);
+ __ cfi().Restore(DWARFReg(reg));
+ }
+ }
+
+ __ DecreaseFrameSize(GetFrameSize());
}
- __ Jr(RA);
- __ Nop();
+ __ Jic(RA, 0);
__ cfi().RestoreState();
__ cfi().DefCFAOffset(GetFrameSize());
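
The prologue now allocates the whole frame with a single `daddiu` and stores the callee-saves at descending offsets from the full frame size, which is what produces the +56/+48/+40/+32/+24 slots in the updated CFI expectations further down. A quick sketch of the offset arithmetic (frame size and save order taken from the CFI test, so illustrative only):

    #include <cstdint>
    #include <iostream>

    int main() {
      constexpr uint32_t kFrameSize = 64;       // frame used by the CFI test
      constexpr uint32_t kDoublewordSize = 8;
      const char* saves[] = {"ra", "s1", "s0", "f25", "f24"};
      uint32_t ofs = kFrameSize;
      for (const char* reg : saves) {
        ofs -= kDoublewordSize;
        std::cout << reg << " -> +" << ofs << "(sp)\n";  // 56, 48, 40, 32, 24
      }
      return 0;
    }
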
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 958c1a6..4db4796 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -967,7 +967,7 @@
size_t CodeGeneratorX86::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
if (GetGraph()->HasSIMD()) {
- __ movupd(Address(ESP, stack_index), XmmRegister(reg_id));
+ __ movups(Address(ESP, stack_index), XmmRegister(reg_id));
} else {
__ movsd(Address(ESP, stack_index), XmmRegister(reg_id));
}
@@ -976,7 +976,7 @@
size_t CodeGeneratorX86::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
if (GetGraph()->HasSIMD()) {
- __ movupd(XmmRegister(reg_id), Address(ESP, stack_index));
+ __ movups(XmmRegister(reg_id), Address(ESP, stack_index));
} else {
__ movsd(XmmRegister(reg_id), Address(ESP, stack_index));
}
@@ -5713,9 +5713,8 @@
// In suspend check slow path, usually there are no caller-save registers at all.
// If SIMD instructions are present, however, we force spilling all live SIMD
// registers in full width (since the runtime only saves/restores lower part).
- locations->SetCustomSlowPathCallerSaves(GetGraph()->HasSIMD()
- ? RegisterSet::AllFpu()
- : RegisterSet::Empty());
+ locations->SetCustomSlowPathCallerSaves(
+ GetGraph()->HasSIMD() ? RegisterSet::AllFpu() : RegisterSet::Empty());
}
void InstructionCodeGeneratorX86::VisitSuspendCheck(HSuspendCheck* instruction) {
@@ -5818,9 +5817,11 @@
__ movd(destination.AsRegisterPairHigh<Register>(), src_reg);
} else if (destination.IsStackSlot()) {
__ movss(Address(ESP, destination.GetStackIndex()), source.AsFpuRegister<XmmRegister>());
- } else {
- DCHECK(destination.IsDoubleStackSlot());
+ } else if (destination.IsDoubleStackSlot()) {
__ movsd(Address(ESP, destination.GetStackIndex()), source.AsFpuRegister<XmmRegister>());
+ } else {
+ DCHECK(destination.IsSIMDStackSlot());
+ __ movups(Address(ESP, destination.GetStackIndex()), source.AsFpuRegister<XmmRegister>());
}
} else if (source.IsStackSlot()) {
if (destination.IsRegister()) {
@@ -5842,6 +5843,9 @@
DCHECK(destination.IsDoubleStackSlot()) << destination;
MoveMemoryToMemory64(destination.GetStackIndex(), source.GetStackIndex());
}
+ } else if (source.IsSIMDStackSlot()) {
+ DCHECK(destination.IsFpuRegister());
+ __ movups(destination.AsFpuRegister<XmmRegister>(), Address(ESP, source.GetStackIndex()));
} else if (source.IsConstant()) {
HConstant* constant = source.GetConstant();
if (constant->IsIntConstant() || constant->IsNullConstant()) {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index c106d9b..2ffc398 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1162,7 +1162,7 @@
size_t CodeGeneratorX86_64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
if (GetGraph()->HasSIMD()) {
- __ movupd(Address(CpuRegister(RSP), stack_index), XmmRegister(reg_id));
+ __ movups(Address(CpuRegister(RSP), stack_index), XmmRegister(reg_id));
} else {
__ movsd(Address(CpuRegister(RSP), stack_index), XmmRegister(reg_id));
}
@@ -1171,7 +1171,7 @@
size_t CodeGeneratorX86_64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
if (GetGraph()->HasSIMD()) {
- __ movupd(XmmRegister(reg_id), Address(CpuRegister(RSP), stack_index));
+ __ movups(XmmRegister(reg_id), Address(CpuRegister(RSP), stack_index));
} else {
__ movsd(XmmRegister(reg_id), Address(CpuRegister(RSP), stack_index));
}
@@ -5166,9 +5166,8 @@
// In suspend check slow path, usually there are no caller-save registers at all.
// If SIMD instructions are present, however, we force spilling all live SIMD
// registers in full width (since the runtime only saves/restores lower part).
- locations->SetCustomSlowPathCallerSaves(GetGraph()->HasSIMD()
- ? RegisterSet::AllFpu()
- : RegisterSet::Empty());
+ locations->SetCustomSlowPathCallerSaves(
+ GetGraph()->HasSIMD() ? RegisterSet::AllFpu() : RegisterSet::Empty());
}
void InstructionCodeGeneratorX86_64::VisitSuspendCheck(HSuspendCheck* instruction) {
@@ -5257,6 +5256,10 @@
__ movq(CpuRegister(TMP), Address(CpuRegister(RSP), source.GetStackIndex()));
__ movq(Address(CpuRegister(RSP), destination.GetStackIndex()), CpuRegister(TMP));
}
+ } else if (source.IsSIMDStackSlot()) {
+ DCHECK(destination.IsFpuRegister());
+ __ movups(destination.AsFpuRegister<XmmRegister>(),
+ Address(CpuRegister(RSP), source.GetStackIndex()));
} else if (source.IsConstant()) {
HConstant* constant = source.GetConstant();
if (constant->IsIntConstant() || constant->IsNullConstant()) {
@@ -5307,10 +5310,13 @@
} else if (destination.IsStackSlot()) {
__ movss(Address(CpuRegister(RSP), destination.GetStackIndex()),
source.AsFpuRegister<XmmRegister>());
- } else {
- DCHECK(destination.IsDoubleStackSlot()) << destination;
+ } else if (destination.IsDoubleStackSlot()) {
__ movsd(Address(CpuRegister(RSP), destination.GetStackIndex()),
source.AsFpuRegister<XmmRegister>());
+ } else {
+ DCHECK(destination.IsSIMDStackSlot());
+ __ movups(Address(CpuRegister(RSP), destination.GetStackIndex()),
+ source.AsFpuRegister<XmmRegister>());
}
}
}
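
`movups` and `movupd` both perform an unaligned 128-bit move; dropping the `66` operand-size prefix makes `movups` one byte shorter per spill with no change to the data. A standalone round-trip check with SSE intrinsics (the stack slot is just a byte buffer here):

    #include <cassert>
    #include <cstring>
    #include <immintrin.h>

    int main() {
      const double src[2] = {1.5, -2.25};
      unsigned char spill[16];         // stand-in for a SIMD stack slot
      __m128 v;
      std::memcpy(&v, src, sizeof v);  // pretend the register holds doubles
      _mm_storeu_ps(reinterpret_cast<float*>(spill), v);         // movups store
      __m128 r = _mm_loadu_ps(reinterpret_cast<float*>(spill));  // movups load
      double dst[2];
      std::memcpy(dst, &r, sizeof dst);
      assert(dst[0] == 1.5 && dst[1] == -2.25);  // bits survive exactly
      return 0;
    }
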
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 2bf5c53..0dfae11 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -322,9 +322,11 @@
codegen_.DumpCoreRegister(stream, location.high());
} else if (location.IsUnallocated()) {
stream << "unallocated";
- } else {
- DCHECK(location.IsDoubleStackSlot());
+ } else if (location.IsDoubleStackSlot()) {
stream << "2x" << location.GetStackIndex() << "(sp)";
+ } else {
+ DCHECK(location.IsSIMDStackSlot());
+ stream << "4x" << location.GetStackIndex() << "(sp)";
}
}
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 9550a53..f733145 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -46,32 +46,100 @@
namespace art {
-static constexpr size_t kMaximumNumberOfHInstructions = 32;
+// Instruction limit to control memory usage.
+static constexpr size_t kMaximumNumberOfTotalInstructions = 1024;
+
+// Maximum number of instructions for considering a method small,
+// which we will always try to inline if the other non-instruction limits
+// are not reached.
+static constexpr size_t kMaximumNumberOfInstructionsForSmallMethod = 3;
// Limit the number of dex registers that we accumulate while inlining
// to avoid creating large amount of nested environments.
static constexpr size_t kMaximumNumberOfCumulatedDexRegisters = 64;
-// Avoid inlining within a huge method due to memory pressure.
-static constexpr size_t kMaximumCodeUnitSize = 4096;
+// Limit recursive call inlining, which does not benefit from too
+// much inlining compared to code locality.
+static constexpr size_t kMaximumNumberOfRecursiveCalls = 4;
// Controls the use of inline caches in AOT mode.
static constexpr bool kUseAOTInlineCaches = false;
+// We check for line numbers to make sure the DepthString implementation
+// aligns the output nicely.
+#define LOG_INTERNAL(msg) \
+ static_assert(__LINE__ > 10, "Unhandled line number"); \
+ static_assert(__LINE__ < 10000, "Unhandled line number"); \
+ VLOG(compiler) << DepthString(__LINE__) << msg
+
+#define LOG_TRY() LOG_INTERNAL("Try inlining call: ")
+#define LOG_NOTE() LOG_INTERNAL("Note: ")
+#define LOG_SUCCESS() LOG_INTERNAL("Success: ")
+#define LOG_FAIL(stat) MaybeRecordStat(stat); LOG_INTERNAL("Fail: ")
+#define LOG_FAIL_NO_STAT() LOG_INTERNAL("Fail: ")
+
+std::string HInliner::DepthString(int line) const {
+ std::string value;
+ // Indent according to the inlining depth.
+ size_t count = depth_;
+ // Line numbers get printed in the log, so pad with one space if the line
+ // number is below 1000, and with two if it is below 100. Numbers below 10
+ // cannot occur, as those lines hold the copyright header.
+ if (!kIsTargetBuild) {
+ if (line < 100) {
+ value += " ";
+ }
+ if (line < 1000) {
+ value += " ";
+ }
+ // Safeguard if this file reaches more than 10000 lines.
+ DCHECK_LT(line, 10000);
+ }
+ for (size_t i = 0; i < count; ++i) {
+ value += " ";
+ }
+ return value;
+}
+
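
The padding in `DepthString` compensates for the `__LINE__` value that the log prints in its prefix, so messages from different source lines still line up, and the per-depth spaces then indent nested inlining attempts. A host-side demo of the alignment (log prefix format simplified):

    #include <iostream>
    #include <string>

    // Simplified copy of the padding logic: one space below 1000, two below
    // 100, then one space per inlining depth.
    std::string DepthString(int line, size_t depth) {
      std::string value;
      if (line < 100) value += " ";
      if (line < 1000) value += " ";
      value += std::string(depth, ' ');
      return value;
    }

    int main() {
      std::cout << "inliner.cc:" << 98 << "] " << DepthString(98, 0)
                << "Try inlining call: void A.m()\n";
      std::cout << "inliner.cc:" << 1203 << "] " << DepthString(1203, 1)
                << "Success: void B.n()\n";  // one level deeper, one space in
      return 0;
    }
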
+static size_t CountNumberOfInstructions(HGraph* graph) {
+ size_t number_of_instructions = 0;
+ for (HBasicBlock* block : graph->GetReversePostOrderSkipEntryBlock()) {
+ for (HInstructionIterator instr_it(block->GetInstructions());
+ !instr_it.Done();
+ instr_it.Advance()) {
+ ++number_of_instructions;
+ }
+ }
+ return number_of_instructions;
+}
+
+void HInliner::UpdateInliningBudget() {
+ if (total_number_of_instructions_ >= kMaximumNumberOfTotalInstructions) {
+ // Always try to inline small methods.
+ inlining_budget_ = kMaximumNumberOfInstructionsForSmallMethod;
+ } else {
+ inlining_budget_ = std::max(
+ kMaximumNumberOfInstructionsForSmallMethod,
+ kMaximumNumberOfTotalInstructions - total_number_of_instructions_);
+ }
+}
+
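
The budget shrinks as the outer graph grows, but never below the small-method allowance, so trivial getters remain inlinable even in huge methods. A compilable sketch of the arithmetic with the constants from above:

    #include <algorithm>
    #include <cassert>
    #include <cstddef>

    constexpr size_t kMaximumNumberOfTotalInstructions = 1024;
    constexpr size_t kMaximumNumberOfInstructionsForSmallMethod = 3;

    size_t InliningBudget(size_t total_number_of_instructions) {
      if (total_number_of_instructions >= kMaximumNumberOfTotalInstructions) {
        return kMaximumNumberOfInstructionsForSmallMethod;
      }
      return std::max(kMaximumNumberOfInstructionsForSmallMethod,
                      kMaximumNumberOfTotalInstructions - total_number_of_instructions);
    }

    int main() {
      assert(InliningBudget(100) == 924);   // plenty of room left
      assert(InliningBudget(1022) == 3);    // clamped up to the small allowance
      assert(InliningBudget(5000) == 3);    // over the cap: small methods only
      return 0;
    }
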
void HInliner::Run() {
- const CompilerOptions& compiler_options = compiler_driver_->GetCompilerOptions();
- if ((compiler_options.GetInlineDepthLimit() == 0)
- || (compiler_options.GetInlineMaxCodeUnits() == 0)) {
- return;
- }
- if (caller_compilation_unit_.GetCodeItem()->insns_size_in_code_units_ > kMaximumCodeUnitSize) {
- return;
- }
if (graph_->IsDebuggable()) {
// For simplicity, we currently never inline when the graph is debuggable. This avoids
// doing some logic in the runtime to discover if a method could have been inlined.
return;
}
+
+ // Initialize the number of instructions for the method being compiled. Recursive calls
+ // to HInliner::Run have already updated the instruction count.
+ if (outermost_graph_ == graph_) {
+ total_number_of_instructions_ = CountNumberOfInstructions(graph_);
+ }
+
+ UpdateInliningBudget();
+ DCHECK_NE(total_number_of_instructions_, 0u);
+ DCHECK_NE(inlining_budget_, 0u);
+
// Keep a copy of all blocks when starting the visit.
ArenaVector<HBasicBlock*> blocks = graph_->GetReversePostOrder();
DCHECK(!blocks.empty());
@@ -316,17 +384,18 @@
ScopedObjectAccess soa(Thread::Current());
uint32_t method_index = invoke_instruction->GetDexMethodIndex();
const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
- VLOG(compiler) << "Try inlining " << caller_dex_file.PrettyMethod(method_index);
+ LOG_TRY() << caller_dex_file.PrettyMethod(method_index);
- // We can query the dex cache directly. The verifier has populated it already.
ArtMethod* resolved_method = invoke_instruction->GetResolvedMethod();
- ArtMethod* actual_method = nullptr;
if (resolved_method == nullptr) {
DCHECK(invoke_instruction->IsInvokeStaticOrDirect());
DCHECK(invoke_instruction->AsInvokeStaticOrDirect()->IsStringInit());
- VLOG(compiler) << "Not inlining a String.<init> method";
+ LOG_FAIL_NO_STAT() << "Not inlining a String.<init> method";
return false;
- } else if (invoke_instruction->IsInvokeStaticOrDirect()) {
+ }
+ ArtMethod* actual_method = nullptr;
+
+ if (invoke_instruction->IsInvokeStaticOrDirect()) {
actual_method = resolved_method;
} else {
// Check if we can statically find the method.
@@ -339,6 +408,7 @@
if (method != nullptr) {
cha_devirtualize = true;
actual_method = method;
+ LOG_NOTE() << "Try CHA-based inlining of " << actual_method->PrettyMethod();
}
}
@@ -401,16 +471,23 @@
: GetInlineCacheJIT(invoke_instruction, &hs, &inline_cache);
switch (inline_cache_type) {
- case kInlineCacheNoData:
- break;
-
- case kInlineCacheUninitialized:
- VLOG(compiler) << "Interface or virtual call to "
- << caller_dex_file.PrettyMethod(invoke_instruction->GetDexMethodIndex())
- << " is not hit and not inlined";
+ case kInlineCacheNoData: {
+ LOG_FAIL_NO_STAT()
+ << "Interface or virtual call to "
+ << caller_dex_file.PrettyMethod(invoke_instruction->GetDexMethodIndex())
+ << " could not be statically determined";
return false;
+ }
- case kInlineCacheMonomorphic:
+ case kInlineCacheUninitialized: {
+ LOG_FAIL_NO_STAT()
+ << "Interface or virtual call to "
+ << caller_dex_file.PrettyMethod(invoke_instruction->GetDexMethodIndex())
+ << " is not hit and not inlined";
+ return false;
+ }
+
+ case kInlineCacheMonomorphic: {
MaybeRecordStat(kMonomorphicCall);
if (outermost_graph_->IsCompilingOsr()) {
// If we are compiling OSR, we pretend this call is polymorphic, as we may come from the
@@ -419,23 +496,29 @@
} else {
return TryInlineMonomorphicCall(invoke_instruction, resolved_method, inline_cache);
}
+ }
- case kInlineCachePolymorphic:
+ case kInlineCachePolymorphic: {
MaybeRecordStat(kPolymorphicCall);
return TryInlinePolymorphicCall(invoke_instruction, resolved_method, inline_cache);
+ }
- case kInlineCacheMegamorphic:
- VLOG(compiler) << "Interface or virtual call to "
- << caller_dex_file.PrettyMethod(invoke_instruction->GetDexMethodIndex())
- << " is megamorphic and not inlined";
+ case kInlineCacheMegamorphic: {
+ LOG_FAIL_NO_STAT()
+ << "Interface or virtual call to "
+ << caller_dex_file.PrettyMethod(invoke_instruction->GetDexMethodIndex())
+ << " is megamorphic and not inlined";
MaybeRecordStat(kMegamorphicCall);
return false;
+ }
- case kInlineCacheMissingTypes:
- VLOG(compiler) << "Interface or virtual call to "
- << caller_dex_file.PrettyMethod(invoke_instruction->GetDexMethodIndex())
- << " is missing types and not inlined";
+ case kInlineCacheMissingTypes: {
+ LOG_FAIL_NO_STAT()
+ << "Interface or virtual call to "
+ << caller_dex_file.PrettyMethod(invoke_instruction->GetDexMethodIndex())
+ << " is missing types and not inlined";
return false;
+ }
}
UNREACHABLE();
}
@@ -598,9 +681,10 @@
dex::TypeIndex class_index = FindClassIndexIn(
GetMonomorphicType(classes), caller_compilation_unit_);
if (!class_index.IsValid()) {
- VLOG(compiler) << "Call to " << ArtMethod::PrettyMethod(resolved_method)
- << " from inline cache is not inlined because its class is not"
- << " accessible to the caller";
+ LOG_FAIL(kNotInlinedDexCache)
+ << "Call to " << ArtMethod::PrettyMethod(resolved_method)
+ << " from inline cache is not inlined because its class is not"
+ << " accessible to the caller";
return false;
}
@@ -614,6 +698,7 @@
resolved_method = GetMonomorphicType(classes)->FindVirtualMethodForVirtual(
resolved_method, pointer_size);
}
+ LOG_NOTE() << "Try inline monomorphic call to " << resolved_method->PrettyMethod();
DCHECK(resolved_method != nullptr);
HInstruction* receiver = invoke_instruction->InputAt(0);
HInstruction* cursor = invoke_instruction->GetPrevious();
@@ -763,6 +848,7 @@
dex::TypeIndex class_index = FindClassIndexIn(handle.Get(), caller_compilation_unit_);
HInstruction* return_replacement = nullptr;
+ LOG_NOTE() << "Try inline polymorphic call to " << method->PrettyMethod();
if (!class_index.IsValid() ||
!TryBuildAndInline(invoke_instruction,
method,
@@ -772,8 +858,8 @@
} else {
one_target_inlined = true;
- VLOG(compiler) << "Polymorphic call to " << ArtMethod::PrettyMethod(resolved_method)
- << " has inlined " << ArtMethod::PrettyMethod(method);
+ LOG_SUCCESS() << "Polymorphic call to " << ArtMethod::PrettyMethod(resolved_method)
+ << " has inlined " << ArtMethod::PrettyMethod(method);
// If we have inlined all targets before, and this receiver is the last seen,
// we deoptimize instead of keeping the original invoke instruction.
@@ -807,9 +893,10 @@
}
if (!one_target_inlined) {
- VLOG(compiler) << "Call to " << ArtMethod::PrettyMethod(resolved_method)
- << " from inline cache is not inlined because none"
- << " of its targets could be inlined";
+ LOG_FAIL_NO_STAT()
+ << "Call to " << ArtMethod::PrettyMethod(resolved_method)
+ << " from inline cache is not inlined because none"
+ << " of its targets could be inlined";
return false;
}
@@ -943,9 +1030,6 @@
actual_method = new_method;
} else if (actual_method != new_method) {
// Different methods, bailout.
- VLOG(compiler) << "Call to " << ArtMethod::PrettyMethod(resolved_method)
- << " from inline cache is not inlined because it resolves"
- << " to different methods";
return false;
}
}
@@ -1018,6 +1102,7 @@
MaybeRecordStat(kInlinedPolymorphicCall);
+ LOG_SUCCESS() << "Inlined same polymorphic target " << actual_method->PrettyMethod();
return true;
}
@@ -1099,13 +1184,34 @@
return true;
}
+size_t HInliner::CountRecursiveCallsOf(ArtMethod* method) const {
+ const HInliner* current = this;
+ size_t count = 0;
+ do {
+ if (current->graph_->GetArtMethod() == method) {
+ ++count;
+ }
+ current = current->parent_;
+ } while (current != nullptr);
+ return count;
+}
+
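
`CountRecursiveCallsOf` walks the new `parent_` chain, so the recursion budget counts how many times `method` already appears on the current inlining stack. A condensed model with stand-in types:

    #include <cstddef>

    struct InlinerFrame {
      const void* method;          // stand-in for HGraph::GetArtMethod()
      const InlinerFrame* parent;  // nullptr at the outermost inliner
    };

    size_t CountRecursiveCallsOf(const InlinerFrame* self, const void* method) {
      size_t count = 0;
      for (const InlinerFrame* cur = self; cur != nullptr; cur = cur->parent) {
        if (cur->method == method) {
          ++count;
        }
      }
      return count;
    }
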
bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
ArtMethod* method,
ReferenceTypeInfo receiver_type,
HInstruction** return_replacement) {
if (method->IsProxyMethod()) {
- VLOG(compiler) << "Method " << method->PrettyMethod()
- << " is not inlined because of unimplemented inline support for proxy methods.";
+ LOG_FAIL(kNotInlinedProxy)
+ << "Method " << method->PrettyMethod()
+ << " is not inlined because of unimplemented inline support for proxy methods.";
+ return false;
+ }
+
+ if (CountRecursiveCallsOf(method) > kMaximumNumberOfRecursiveCalls) {
+ LOG_FAIL(kNotInlinedRecursiveBudget)
+ << "Method "
+ << method->PrettyMethod()
+ << " is not inlined because it has reached its recursive call budget.";
return false;
}
@@ -1114,15 +1220,16 @@
if (!compiler_driver_->MayInline(method->GetDexFile(),
outer_compilation_unit_.GetDexFile())) {
if (TryPatternSubstitution(invoke_instruction, method, return_replacement)) {
- VLOG(compiler) << "Successfully replaced pattern of invoke "
- << method->PrettyMethod();
+ LOG_SUCCESS() << "Successfully replaced pattern of invoke "
+ << method->PrettyMethod();
MaybeRecordStat(kReplacedInvokeWithSimplePattern);
return true;
}
- VLOG(compiler) << "Won't inline " << method->PrettyMethod() << " in "
- << outer_compilation_unit_.GetDexFile()->GetLocation() << " ("
- << caller_compilation_unit_.GetDexFile()->GetLocation() << ") from "
- << method->GetDexFile()->GetLocation();
+ LOG_FAIL(kNotInlinedWont)
+ << "Won't inline " << method->PrettyMethod() << " in "
+ << outer_compilation_unit_.GetDexFile()->GetLocation() << " ("
+ << caller_compilation_unit_.GetDexFile()->GetLocation() << ") from "
+ << method->GetDexFile()->GetLocation();
return false;
}
@@ -1131,30 +1238,32 @@
const DexFile::CodeItem* code_item = method->GetCodeItem();
if (code_item == nullptr) {
- VLOG(compiler) << "Method " << method->PrettyMethod()
- << " is not inlined because it is native";
+ LOG_FAIL_NO_STAT()
+ << "Method " << method->PrettyMethod() << " is not inlined because it is native";
return false;
}
size_t inline_max_code_units = compiler_driver_->GetCompilerOptions().GetInlineMaxCodeUnits();
if (code_item->insns_size_in_code_units_ > inline_max_code_units) {
- VLOG(compiler) << "Method " << method->PrettyMethod()
- << " is too big to inline: "
- << code_item->insns_size_in_code_units_
- << " > "
- << inline_max_code_units;
+ LOG_FAIL(kNotInlinedCodeItem)
+ << "Method " << method->PrettyMethod()
+ << " is not inlined because its code item is too big: "
+ << code_item->insns_size_in_code_units_
+ << " > "
+ << inline_max_code_units;
return false;
}
if (code_item->tries_size_ != 0) {
- VLOG(compiler) << "Method " << method->PrettyMethod()
- << " is not inlined because of try block";
+ LOG_FAIL(kNotInlinedTryCatch)
+ << "Method " << method->PrettyMethod() << " is not inlined because of try block";
return false;
}
if (!method->IsCompilable()) {
- VLOG(compiler) << "Method " << method->PrettyMethod()
- << " has soft failures un-handled by the compiler, so it cannot be inlined";
+ LOG_FAIL(kNotInlinedNotVerified)
+ << "Method " << method->PrettyMethod()
+ << " has soft failures un-handled by the compiler, so it cannot be inlined";
}
if (!method->GetDeclaringClass()->IsVerified()) {
@@ -1162,8 +1271,9 @@
if (Runtime::Current()->UseJitCompilation() ||
!compiler_driver_->IsMethodVerifiedWithoutFailures(
method->GetDexMethodIndex(), class_def_idx, *method->GetDexFile())) {
- VLOG(compiler) << "Method " << method->PrettyMethod()
- << " couldn't be verified, so it cannot be inlined";
+ LOG_FAIL(kNotInlinedNotVerified)
+ << "Method " << method->PrettyMethod()
+ << " couldn't be verified, so it cannot be inlined";
return false;
}
}
@@ -1172,9 +1282,9 @@
invoke_instruction->AsInvokeStaticOrDirect()->IsStaticWithImplicitClinitCheck()) {
// Case of a static method that cannot be inlined because it implicitly
// requires an initialization check of its declaring class.
- VLOG(compiler) << "Method " << method->PrettyMethod()
- << " is not inlined because it is static and requires a clinit"
- << " check that cannot be emitted due to Dex cache limitations";
+ LOG_FAIL(kNotInlinedDexCache) << "Method " << method->PrettyMethod()
+ << " is not inlined because it is static and requires a clinit"
+ << " check that cannot be emitted due to Dex cache limitations";
return false;
}
@@ -1183,7 +1293,7 @@
return false;
}
- VLOG(compiler) << "Successfully inlined " << method->PrettyMethod();
+ LOG_SUCCESS() << method->PrettyMethod();
MaybeRecordStat(kInlinedInvoke);
return true;
}
@@ -1471,15 +1581,17 @@
handles_);
if (builder.BuildGraph() != kAnalysisSuccess) {
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " could not be built, so cannot be inlined";
+ LOG_FAIL(kNotInlinedCannotBuild)
+ << "Method " << callee_dex_file.PrettyMethod(method_index)
+ << " could not be built, so cannot be inlined";
return false;
}
if (!RegisterAllocator::CanAllocateRegistersFor(*callee_graph,
compiler_driver_->GetInstructionSet())) {
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " cannot be inlined because of the register allocator";
+ LOG_FAIL(kNotInlinedRegisterAllocator)
+ << "Method " << callee_dex_file.PrettyMethod(method_index)
+ << " cannot be inlined because of the register allocator";
return false;
}
@@ -1526,15 +1638,13 @@
/* is_first_run */ false).Run();
}
- size_t number_of_instructions_budget = kMaximumNumberOfHInstructions;
- size_t number_of_inlined_instructions =
- RunOptimizations(callee_graph, code_item, dex_compilation_unit);
- number_of_instructions_budget += number_of_inlined_instructions;
+ RunOptimizations(callee_graph, code_item, dex_compilation_unit);
HBasicBlock* exit_block = callee_graph->GetExitBlock();
if (exit_block == nullptr) {
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " could not be inlined because it has an infinite loop";
+ LOG_FAIL(kNotInlinedInfiniteLoop)
+ << "Method " << callee_dex_file.PrettyMethod(method_index)
+ << " could not be inlined because it has an infinite loop";
return false;
}
@@ -1543,15 +1653,17 @@
if (predecessor->GetLastInstruction()->IsThrow()) {
if (invoke_instruction->GetBlock()->IsTryBlock()) {
// TODO(ngeoffray): Support adding HTryBoundary in Hgraph::InlineInto.
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " could not be inlined because one branch always throws and"
- << " caller is in a try/catch block";
+ LOG_FAIL(kNotInlinedTryCatch)
+ << "Method " << callee_dex_file.PrettyMethod(method_index)
+ << " could not be inlined because one branch always throws and"
+ << " caller is in a try/catch block";
return false;
} else if (graph_->GetExitBlock() == nullptr) {
// TODO(ngeoffray): Support adding HExit in the caller graph.
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " could not be inlined because one branch always throws and"
- << " caller does not have an exit block";
+ LOG_FAIL(kNotInlinedInfiniteLoop)
+ << "Method " << callee_dex_file.PrettyMethod(method_index)
+ << " could not be inlined because one branch always throws and"
+ << " caller does not have an exit block";
return false;
} else if (graph_->HasIrreducibleLoops()) {
// TODO(ngeoffray): Support re-computing loop information to graphs with
@@ -1567,32 +1679,31 @@
}
if (!has_one_return) {
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " could not be inlined because it always throws";
+ LOG_FAIL(kNotInlinedAlwaysThrows)
+ << "Method " << callee_dex_file.PrettyMethod(method_index)
+ << " could not be inlined because it always throws";
return false;
}
size_t number_of_instructions = 0;
-
- bool can_inline_environment =
- total_number_of_dex_registers_ < kMaximumNumberOfCumulatedDexRegisters;
-
// Skip the entry block, it does not contain instructions that prevent inlining.
for (HBasicBlock* block : callee_graph->GetReversePostOrderSkipEntryBlock()) {
if (block->IsLoopHeader()) {
if (block->GetLoopInformation()->IsIrreducible()) {
// Don't inline methods with irreducible loops, they could prevent some
// optimizations to run.
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " could not be inlined because it contains an irreducible loop";
+ LOG_FAIL(kNotInlinedIrreducibleLoop)
+ << "Method " << callee_dex_file.PrettyMethod(method_index)
+ << " could not be inlined because it contains an irreducible loop";
return false;
}
if (!block->GetLoopInformation()->HasExitEdge()) {
// Don't inline methods with loops without exit, since they cause the
// loop information to be computed incorrectly when updating after
// inlining.
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " could not be inlined because it contains a loop with no exit";
+ LOG_FAIL(kNotInlinedLoopWithoutExit)
+ << "Method " << callee_dex_file.PrettyMethod(method_index)
+ << " could not be inlined because it contains a loop with no exit";
return false;
}
}
@@ -1600,34 +1711,39 @@
for (HInstructionIterator instr_it(block->GetInstructions());
!instr_it.Done();
instr_it.Advance()) {
- if (number_of_instructions++ == number_of_instructions_budget) {
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " is not inlined because its caller has reached"
- << " its instruction budget limit.";
+ if (++number_of_instructions >= inlining_budget_) {
+ LOG_FAIL(kNotInlinedInstructionBudget)
+ << "Method " << callee_dex_file.PrettyMethod(method_index)
+ << " is not inlined because the outer method has reached"
+ << " its instruction budget limit.";
return false;
}
HInstruction* current = instr_it.Current();
- if (!can_inline_environment && current->NeedsEnvironment()) {
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " is not inlined because its caller has reached"
- << " its environment budget limit.";
+ if (current->NeedsEnvironment() &&
+ (total_number_of_dex_registers_ >= kMaximumNumberOfCumulatedDexRegisters)) {
+ LOG_FAIL(kNotInlinedEnvironmentBudget)
+ << "Method " << callee_dex_file.PrettyMethod(method_index)
+ << " is not inlined because its caller has reached"
+ << " its environment budget limit.";
return false;
}
if (current->NeedsEnvironment() &&
!CanEncodeInlinedMethodInStackMap(*caller_compilation_unit_.GetDexFile(),
resolved_method)) {
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " could not be inlined because " << current->DebugName()
- << " needs an environment, is in a different dex file"
- << ", and cannot be encoded in the stack maps.";
+ LOG_FAIL(kNotInlinedStackMaps)
+ << "Method " << callee_dex_file.PrettyMethod(method_index)
+ << " could not be inlined because " << current->DebugName()
+ << " needs an environment, is in a different dex file"
+ << ", and cannot be encoded in the stack maps.";
return false;
}
if (!same_dex_file && current->NeedsDexCacheOfDeclaringClass()) {
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " could not be inlined because " << current->DebugName()
- << " it is in a different dex file and requires access to the dex cache";
+ LOG_FAIL(kNotInlinedDexCache)
+ << "Method " << callee_dex_file.PrettyMethod(method_index)
+ << " could not be inlined because " << current->DebugName()
+ << " it is in a different dex file and requires access to the dex cache";
return false;
}
@@ -1636,21 +1752,24 @@
current->IsUnresolvedStaticFieldSet() ||
current->IsUnresolvedInstanceFieldSet()) {
// Entrypoint for unresolved fields does not handle inlined frames.
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " could not be inlined because it is using an unresolved"
- << " entrypoint";
+ LOG_FAIL(kNotInlinedUnresolvedEntrypoint)
+ << "Method " << callee_dex_file.PrettyMethod(method_index)
+ << " could not be inlined because it is using an unresolved"
+ << " entrypoint";
return false;
}
}
}
- number_of_inlined_instructions_ += number_of_instructions;
-
DCHECK_EQ(caller_instruction_counter, graph_->GetCurrentInstructionId())
<< "No instructions can be added to the outer graph while inner graph is being built";
+ // Inline the callee graph inside the caller graph.
const int32_t callee_instruction_counter = callee_graph->GetCurrentInstructionId();
graph_->SetCurrentInstructionId(callee_instruction_counter);
*return_replacement = callee_graph->InlineInto(graph_, invoke_instruction);
+ // Update our budget for other inlining attempts in `caller_graph`.
+ total_number_of_instructions_ += number_of_instructions;
+ UpdateInliningBudget();
DCHECK_EQ(callee_instruction_counter, callee_graph->GetCurrentInstructionId())
<< "No instructions can be added to the inner graph during inlining into the outer graph";
@@ -1663,9 +1782,9 @@
return true;
}
-size_t HInliner::RunOptimizations(HGraph* callee_graph,
- const DexFile::CodeItem* code_item,
- const DexCompilationUnit& dex_compilation_unit) {
+void HInliner::RunOptimizations(HGraph* callee_graph,
+ const DexFile::CodeItem* code_item,
+ const DexCompilationUnit& dex_compilation_unit) {
// Note: if the outermost_graph_ is being compiled OSR, we should not run any
// optimization that could lead to a HDeoptimize. The following optimizations do not.
HDeadCodeElimination dce(callee_graph, inline_stats_, "dead_code_elimination$inliner");
@@ -1687,23 +1806,37 @@
optimization->Run();
}
- size_t number_of_inlined_instructions = 0u;
- if (depth_ + 1 < compiler_driver_->GetCompilerOptions().GetInlineDepthLimit()) {
- HInliner inliner(callee_graph,
- outermost_graph_,
- codegen_,
- outer_compilation_unit_,
- dex_compilation_unit,
- compiler_driver_,
- handles_,
- inline_stats_,
- total_number_of_dex_registers_ + code_item->registers_size_,
- depth_ + 1);
- inliner.Run();
- number_of_inlined_instructions += inliner.number_of_inlined_instructions_;
+ // Bail out early in pathological cases (for example recursive calls, or a
+ // too-large accumulated environment).
+ if (total_number_of_dex_registers_ >= kMaximumNumberOfCumulatedDexRegisters) {
+ LOG_NOTE() << "Calls in " << callee_graph->GetArtMethod()->PrettyMethod()
+ << " will not be inlined because the outer method has reached"
+ << " its environment budget limit.";
+ return;
}
- return number_of_inlined_instructions;
+ // Bail early if we know we already are over the limit.
+ size_t number_of_instructions = CountNumberOfInstructions(callee_graph);
+ if (number_of_instructions > inlining_budget_) {
+ LOG_NOTE() << "Calls in " << callee_graph->GetArtMethod()->PrettyMethod()
+ << " will not be inlined because the outer method has reached"
+ << " its instruction budget limit. " << number_of_instructions;
+ return;
+ }
+
+ HInliner inliner(callee_graph,
+ outermost_graph_,
+ codegen_,
+ outer_compilation_unit_,
+ dex_compilation_unit,
+ compiler_driver_,
+ handles_,
+ inline_stats_,
+ total_number_of_dex_registers_ + code_item->registers_size_,
+ total_number_of_instructions_ + number_of_instructions,
+ this,
+ depth_ + 1);
+ inliner.Run();
}
static bool IsReferenceTypeRefinement(ReferenceTypeInfo declared_rti,
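
Both bail-outs above run before the nested `HInliner` is even constructed, avoiding the cost of building an inliner whose budget is already spent. Condensed as a predicate (constants inlined, names illustrative):

    #include <cstddef>

    // Mirrors the two early checks in RunOptimizations: the environment
    // budget (kMaximumNumberOfCumulatedDexRegisters == 64) and the
    // remaining instruction budget.
    bool ShouldRecurseIntoCallee(size_t total_dex_registers,
                                 size_t callee_instructions,
                                 size_t inlining_budget) {
      if (total_dex_registers >= 64) {
        return false;  // environment budget exhausted
      }
      if (callee_instructions > inlining_budget) {
        return false;  // instruction budget exhausted
      }
      return true;
    }
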
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index a032042..9e4685c 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -42,7 +42,9 @@
VariableSizedHandleScope* handles,
OptimizingCompilerStats* stats,
size_t total_number_of_dex_registers,
- size_t depth)
+ size_t total_number_of_instructions,
+ HInliner* parent,
+ size_t depth = 0)
: HOptimization(outer_graph, kInlinerPassName, stats),
outermost_graph_(outermost_graph),
outer_compilation_unit_(outer_compilation_unit),
@@ -50,8 +52,10 @@
codegen_(codegen),
compiler_driver_(compiler_driver),
total_number_of_dex_registers_(total_number_of_dex_registers),
+ total_number_of_instructions_(total_number_of_instructions),
+ parent_(parent),
depth_(depth),
- number_of_inlined_instructions_(0),
+ inlining_budget_(0),
handles_(handles),
inline_stats_(nullptr) {}
@@ -95,10 +99,10 @@
HInstruction** return_replacement);
// Run simple optimizations on `callee_graph`.
- // Returns the number of inlined instructions.
- size_t RunOptimizations(HGraph* callee_graph,
- const DexFile::CodeItem* code_item,
- const DexCompilationUnit& dex_compilation_unit);
+ void RunOptimizations(HGraph* callee_graph,
+ const DexFile::CodeItem* code_item,
+ const DexCompilationUnit& dex_compilation_unit)
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Try to recognize known simple patterns and replace invoke call with appropriate instructions.
bool TryPatternSubstitution(HInvoke* invoke_instruction,
@@ -259,14 +263,30 @@
HInstruction* return_replacement,
HInstruction* invoke_instruction);
+ // Update the inlining budget based on `total_number_of_instructions_`.
+ void UpdateInliningBudget();
+
+ // Count the number of calls of `method` being inlined recursively.
+ size_t CountRecursiveCallsOf(ArtMethod* method) const;
+
+ // Pretty-print for spaces during logging.
+ std::string DepthString(int line) const;
+
HGraph* const outermost_graph_;
const DexCompilationUnit& outer_compilation_unit_;
const DexCompilationUnit& caller_compilation_unit_;
CodeGenerator* const codegen_;
CompilerDriver* const compiler_driver_;
const size_t total_number_of_dex_registers_;
+ size_t total_number_of_instructions_;
+
+ // The 'parent' inliner, i.e. the inlining optimization that requested
+ // `graph_` to be inlined.
+ const HInliner* const parent_;
const size_t depth_;
- size_t number_of_inlined_instructions_;
+
+ // The budget left for inlining, in number of instructions.
+ size_t inlining_budget_;
VariableSizedHandleScope* const handles_;
// Used to record stats about optimizations on the inlined graph.
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index ba006ed..bf85b19 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -2559,7 +2559,7 @@
// void java.lang.String.getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin)
void IntrinsicLocationsBuilderMIPS::VisitStringGetCharsNoCheck(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kNoCall,
kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
@@ -2567,17 +2567,9 @@
locations->SetInAt(3, Location::RequiresRegister());
locations->SetInAt(4, Location::RequiresRegister());
- // We will call memcpy() to do the actual work. Allocate the temporary
- // registers to use the correct input registers, and output register.
- // memcpy() uses the normal MIPS calling convention.
- InvokeRuntimeCallingConvention calling_convention;
-
- locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
- locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
-
- Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
- locations->AddTemp(Location::RegisterLocation(outLocation.AsRegister<Register>()));
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
}
void IntrinsicCodeGeneratorMIPS::VisitStringGetCharsNoCheck(HInvoke* invoke) {
@@ -2596,16 +2588,11 @@
Register dstBegin = locations->InAt(4).AsRegister<Register>();
Register dstPtr = locations->GetTemp(0).AsRegister<Register>();
- DCHECK_EQ(dstPtr, A0);
Register srcPtr = locations->GetTemp(1).AsRegister<Register>();
- DCHECK_EQ(srcPtr, A1);
Register numChrs = locations->GetTemp(2).AsRegister<Register>();
- DCHECK_EQ(numChrs, A2);
-
- Register dstReturn = locations->GetTemp(3).AsRegister<Register>();
- DCHECK_EQ(dstReturn, V0);
MipsLabel done;
+ MipsLabel loop;
// Location of data in char array buffer.
const uint32_t data_offset = mirror::Array::DataOffset(char_size).Uint32Value();
@@ -2634,7 +2621,7 @@
__ LoadFromOffset(kLoadWord, TMP, srcObj, count_offset);
__ Sll(TMP, TMP, 31);
- // If string is uncompressed, use memcpy() path.
+ // If string is uncompressed, use uncompressed path.
__ Bnez(TMP, &uncompressed_copy);
// Copy loop for compressed src, copying 1 character (8-bit) to (16-bit) at a time.
@@ -2660,10 +2647,13 @@
__ Addu(srcPtr, srcPtr, AT);
}
- // Calculate number of bytes to copy from number of characters.
- __ Sll(numChrs, numChrs, char_shift);
-
- codegen_->InvokeRuntime(kQuickMemcpy, invoke, invoke->GetDexPc(), nullptr);
+ __ Bind(&loop);
+ __ Lh(AT, srcPtr, 0);
+ __ Addiu(numChrs, numChrs, -1);
+ __ Addiu(srcPtr, srcPtr, char_size);
+ __ Sh(AT, dstPtr, 0);
+ __ Addiu(dstPtr, dstPtr, char_size);
+ __ Bnez(numChrs, &loop);
__ Bind(&done);
}
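
Replacing the `kQuickMemcpy` runtime call with an inline `lh`/`sh` loop removes the calling-convention temps and the runtime transition; the loop copies one 16-bit code unit per iteration. A C equivalent of the emitted loop (like the assembly, it assumes control only reaches it with a non-zero count):

    #include <cstddef>
    #include <cstdint>

    void CopyChars(const uint16_t* src, uint16_t* dst, size_t num_chrs) {
      do {
        *dst++ = *src++;          // lh AT, 0(srcPtr); sh AT, 0(dstPtr)
      } while (--num_chrs != 0);  // addiu numChrs, numChrs, -1; bnez numChrs, loop
    }
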
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 21c5074..1ee89cf 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1895,7 +1895,7 @@
// void java.lang.String.getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin)
void IntrinsicLocationsBuilderMIPS64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
- LocationSummary::kCallOnMainOnly,
+ LocationSummary::kNoCall,
kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
@@ -1903,17 +1903,9 @@
locations->SetInAt(3, Location::RequiresRegister());
locations->SetInAt(4, Location::RequiresRegister());
- // We will call memcpy() to do the actual work. Allocate the temporary
- // registers to use the correct input registers, and output register.
- // memcpy() uses the normal MIPS calling conventions.
- InvokeRuntimeCallingConvention calling_convention;
-
- locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
- locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
-
- Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimLong);
- locations->AddTemp(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
}
void IntrinsicCodeGeneratorMIPS64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
@@ -1932,16 +1924,11 @@
GpuRegister dstBegin = locations->InAt(4).AsRegister<GpuRegister>();
GpuRegister dstPtr = locations->GetTemp(0).AsRegister<GpuRegister>();
- DCHECK_EQ(dstPtr, A0);
GpuRegister srcPtr = locations->GetTemp(1).AsRegister<GpuRegister>();
- DCHECK_EQ(srcPtr, A1);
GpuRegister numChrs = locations->GetTemp(2).AsRegister<GpuRegister>();
- DCHECK_EQ(numChrs, A2);
-
- GpuRegister dstReturn = locations->GetTemp(3).AsRegister<GpuRegister>();
- DCHECK_EQ(dstReturn, V0);
Mips64Label done;
+ Mips64Label loop;
// Location of data in char array buffer.
const uint32_t data_offset = mirror::Array::DataOffset(char_size).Uint32Value();
@@ -1965,7 +1952,7 @@
__ LoadFromOffset(kLoadWord, TMP, srcObj, count_offset);
__ Dext(TMP, TMP, 0, 1);
- // If string is uncompressed, use memcpy() path.
+ // If string is uncompressed, use uncompressed path.
__ Bnezc(TMP, &uncompressed_copy);
// Copy loop for compressed src, copying 1 character (8-bit) to (16-bit) at a time.
@@ -1986,10 +1973,13 @@
__ Daddiu(srcPtr, srcObj, value_offset);
__ Dlsa(srcPtr, srcBegin, srcPtr, char_shift);
- // Calculate number of bytes to copy from number of characters.
- __ Dsll(numChrs, numChrs, char_shift);
-
- codegen_->InvokeRuntime(kQuickMemcpy, invoke, invoke->GetDexPc(), nullptr);
+ __ Bind(&loop);
+ __ Lh(AT, srcPtr, 0);
+ __ Daddiu(numChrs, numChrs, -1);
+ __ Daddiu(srcPtr, srcPtr, char_size);
+ __ Sh(AT, dstPtr, 0);
+ __ Daddiu(dstPtr, dstPtr, char_size);
+ __ Bnezc(numChrs, &loop);
__ Bind(&done);
}
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index d391f69..6f0dbce 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -69,11 +69,13 @@
// We do not use the value 9 because it conflicts with kLocationConstantMask.
kDoNotUse9 = 9,
kSIMDStackSlot = 10, // 128-bit stack slot. TODO: generalize with encoded #bytes?
+
// Unallocated location represents a location that is not fixed and can be
// allocated by a register allocator. Each unallocated location has
// a policy that specifies what kind of location is suitable. Payload
// contains register allocation policy.
- kUnallocated = 10,
+ kUnallocated = 11,
};
Location() : ValueObject(), value_(kInvalid) {
@@ -82,6 +84,7 @@
static_assert((kUnallocated & kLocationConstantMask) != kConstant, "TagError");
static_assert((kStackSlot & kLocationConstantMask) != kConstant, "TagError");
static_assert((kDoubleStackSlot & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kSIMDStackSlot & kLocationConstantMask) != kConstant, "TagError");
static_assert((kRegister & kLocationConstantMask) != kConstant, "TagError");
static_assert((kFpuRegister & kLocationConstantMask) != kConstant, "TagError");
static_assert((kRegisterPair & kLocationConstantMask) != kConstant, "TagError");
@@ -266,8 +269,20 @@
return GetKind() == kDoubleStackSlot;
}
+ static Location SIMDStackSlot(intptr_t stack_index) {
+ uintptr_t payload = EncodeStackIndex(stack_index);
+ Location loc(kSIMDStackSlot, payload);
+ // Ensure that sign is preserved.
+ DCHECK_EQ(loc.GetStackIndex(), stack_index);
+ return loc;
+ }
+
+ bool IsSIMDStackSlot() const {
+ return GetKind() == kSIMDStackSlot;
+ }
+
intptr_t GetStackIndex() const {
- DCHECK(IsStackSlot() || IsDoubleStackSlot());
+ DCHECK(IsStackSlot() || IsDoubleStackSlot() || IsSIMDStackSlot());
// Decode stack index manually to preserve sign.
return GetPayload() - kStackIndexBias;
}
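
`SIMDStackSlot` reuses the biased stack-index payload, and the `DCHECK_EQ` in the factory guards the sign round-trip that `GetStackIndex` relies on. A minimal model of that encoding (the bias value here is hypothetical, not ART's actual constant):

    #include <cassert>
    #include <cstdint>

    constexpr intptr_t kStackIndexBias = intptr_t{1} << 28;  // hypothetical

    uintptr_t EncodeStackIndex(intptr_t stack_index) {
      return static_cast<uintptr_t>(kStackIndexBias + stack_index);
    }

    intptr_t DecodeStackIndex(uintptr_t payload) {
      // Decode manually so that negative indices keep their sign.
      return static_cast<intptr_t>(payload) - kStackIndexBias;
    }

    int main() {
      for (intptr_t index : {intptr_t{-16}, intptr_t{0}, intptr_t{128}}) {
        assert(DecodeStackIndex(EncodeStackIndex(index)) == index);
      }
      return 0;
    }
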
@@ -315,6 +330,7 @@
case kRegister: return "R";
case kStackSlot: return "S";
case kDoubleStackSlot: return "DS";
+ case kSIMDStackSlot: return "SIMD";
case kUnallocated: return "U";
case kConstant: return "C";
case kFpuRegister: return "F";
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 6881d8f..fb0c889 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -341,6 +341,7 @@
cached_long_constants_(std::less<int64_t>(), arena->Adapter(kArenaAllocConstantsMap)),
cached_double_constants_(std::less<int64_t>(), arena->Adapter(kArenaAllocConstantsMap)),
cached_current_method_(nullptr),
+ art_method_(nullptr),
inexact_object_rti_(ReferenceTypeInfo::CreateInvalid()),
osr_(osr),
cha_single_implementation_list_(arena->Adapter(kArenaAllocCHA)) {
diff --git a/compiler/optimizing/optimizing_cfi_test_expected.inc b/compiler/optimizing/optimizing_cfi_test_expected.inc
index d84fe6c..60af2b4 100644
--- a/compiler/optimizing/optimizing_cfi_test_expected.inc
+++ b/compiler/optimizing/optimizing_cfi_test_expected.inc
@@ -174,53 +174,45 @@
// 0x00000034: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kMips64[] = {
- 0xD8, 0xFF, 0xBD, 0x67, 0x20, 0x00, 0xBF, 0xFF, 0x18, 0x00, 0xB1, 0xFF,
- 0x10, 0x00, 0xB0, 0xFF, 0x08, 0x00, 0xB9, 0xF7, 0x00, 0x00, 0xB8, 0xF7,
- 0xE8, 0xFF, 0xBD, 0x67, 0x18, 0x00, 0xBD, 0x67,
- 0x00, 0x00, 0xB8, 0xD7, 0x08, 0x00, 0xB9, 0xD7, 0x10, 0x00, 0xB0, 0xDF,
- 0x18, 0x00, 0xB1, 0xDF, 0x20, 0x00, 0xBF, 0xDF, 0x28, 0x00, 0xBD, 0x67,
- 0x09, 0x00, 0xE0, 0x03, 0x00, 0x00, 0x00, 0x00,
+ 0xC0, 0xFF, 0xBD, 0x67, 0x38, 0x00, 0xBF, 0xFF, 0x30, 0x00, 0xB1, 0xFF,
+ 0x28, 0x00, 0xB0, 0xFF, 0x20, 0x00, 0xB9, 0xF7, 0x18, 0x00, 0xB8, 0xF7,
+ 0x38, 0x00, 0xBF, 0xDF, 0x30, 0x00, 0xB1, 0xDF, 0x28, 0x00, 0xB0, 0xDF,
+ 0x20, 0x00, 0xB9, 0xD7, 0x18, 0x00, 0xB8, 0xD7, 0x40, 0x00, 0xBD, 0x67,
+ 0x00, 0x00, 0x1F, 0xD8,
};
-
static constexpr uint8_t expected_cfi_kMips64[] = {
- 0x44, 0x0E, 0x28, 0x44, 0x9F, 0x02, 0x44, 0x91, 0x04, 0x44, 0x90, 0x06,
- 0x44, 0xB9, 0x08, 0x44, 0xB8, 0x0A, 0x44, 0x0E, 0x40, 0x0A, 0x44,
- 0x0E, 0x28, 0x44, 0xF8, 0x44, 0xF9, 0x44, 0xD0, 0x44, 0xD1, 0x44, 0xDF,
- 0x44, 0x0E, 0x00, 0x48, 0x0B, 0x0E, 0x40,
+ 0x44, 0x0E, 0x40, 0x44, 0x9F, 0x02, 0x44, 0x91, 0x04, 0x44, 0x90, 0x06,
+ 0x44, 0xB9, 0x08, 0x44, 0xB8, 0x0A, 0x0A, 0x44, 0xDF, 0x44, 0xD1, 0x44,
+ 0xD0, 0x44, 0xF9, 0x44, 0xF8, 0x44, 0x0E, 0x00, 0x44, 0x0B, 0x0E, 0x40,
};
-// 0x00000000: daddiu r29, r29, -40
-// 0x00000004: .cfi_def_cfa_offset: 40
-// 0x00000004: sd r31, +32(r29)
+// 0x00000000: daddiu r29, r29, -64
+// 0x00000004: .cfi_def_cfa_offset: 64
+// 0x00000004: sd r31, +56(r29)
// 0x00000008: .cfi_offset: r31 at cfa-8
-// 0x00000008: sd r17, +24(r29)
+// 0x00000008: sd r17, +48(r29)
// 0x0000000c: .cfi_offset: r17 at cfa-16
-// 0x0000000c: sd r16, +16(r29)
+// 0x0000000c: sd r16, +40(r29)
// 0x00000010: .cfi_offset: r16 at cfa-24
-// 0x00000010: sdc1 f25, +8(r29)
+// 0x00000010: sdc1 f25, +32(r29)
// 0x00000014: .cfi_offset: r57 at cfa-32
-// 0x00000014: sdc1 f24, +0(r29)
+// 0x00000014: sdc1 f24, +24(r29)
// 0x00000018: .cfi_offset: r56 at cfa-40
-// 0x00000018: daddiu r29, r29, -24
-// 0x0000001c: .cfi_def_cfa_offset: 64
-// 0x0000001c: .cfi_remember_state
-// 0x0000001c: daddiu r29, r29, 24
-// 0x00000020: .cfi_def_cfa_offset: 40
-// 0x00000020: ldc1 f24, +0(r29)
-// 0x00000024: .cfi_restore: r56
-// 0x00000024: ldc1 f25, +8(r29)
+// 0x00000018: .cfi_remember_state
+// 0x00000018: ld r31, +56(r29)
+// 0x0000001c: .cfi_restore: r31
+// 0x0000001c: ld r17, +48(r29)
+// 0x00000020: .cfi_restore: r17
+// 0x00000020: ld r16, +40(r29)
+// 0x00000024: .cfi_restore: r16
+// 0x00000024: ldc1 f25, +32(r29)
// 0x00000028: .cfi_restore: r57
-// 0x00000028: ld r16, +16(r29)
-// 0x0000002c: .cfi_restore: r16
-// 0x0000002c: ld r17, +24(r29)
-// 0x00000030: .cfi_restore: r17
-// 0x00000030: ld r31, +32(r29)
-// 0x00000034: .cfi_restore: r31
-// 0x00000034: daddiu r29, r29, 40
-// 0x00000038: .cfi_def_cfa_offset: 0
-// 0x00000038: jr r31
-// 0x0000003c: nop
-// 0x00000040: .cfi_restore_state
-// 0x00000040: .cfi_def_cfa_offset: 64
+// 0x00000028: ldc1 f24, +24(r29)
+// 0x0000002c: .cfi_restore: r56
+// 0x0000002c: daddiu r29, r29, 64
+// 0x00000030: .cfi_def_cfa_offset: 0
+// 0x00000030: jic r31, 0
+// 0x00000034: .cfi_restore_state
+// 0x00000034: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kThumb2_adjust[] = {
#ifdef ART_USE_OLD_ARM_BACKEND
@@ -403,58 +395,52 @@
// 0x00020060: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kMips64_adjust_head[] = {
- 0xD8, 0xFF, 0xBD, 0x67, 0x20, 0x00, 0xBF, 0xFF, 0x18, 0x00, 0xB1, 0xFF,
- 0x10, 0x00, 0xB0, 0xFF, 0x08, 0x00, 0xB9, 0xF7, 0x00, 0x00, 0xB8, 0xF7,
- 0xE8, 0xFF, 0xBD, 0x67, 0x02, 0x00, 0xA6, 0x60,
- 0x02, 0x00, 0x3E, 0xEC, 0x0C, 0x00, 0x01, 0xD8,
+ 0xC0, 0xFF, 0xBD, 0x67, 0x38, 0x00, 0xBF, 0xFF, 0x30, 0x00, 0xB1, 0xFF,
+ 0x28, 0x00, 0xB0, 0xFF, 0x20, 0x00, 0xB9, 0xF7, 0x18, 0x00, 0xB8, 0xF7,
+ 0x02, 0x00, 0xA6, 0x60, 0x02, 0x00, 0x3E, 0xEC, 0x0C, 0x00, 0x01, 0xD8,
};
static constexpr uint8_t expected_asm_kMips64_adjust_tail[] = {
- 0x18, 0x00, 0xBD, 0x67, 0x00, 0x00, 0xB8, 0xD7, 0x08, 0x00, 0xB9, 0xD7,
- 0x10, 0x00, 0xB0, 0xDF, 0x18, 0x00, 0xB1, 0xDF, 0x20, 0x00, 0xBF, 0xDF,
- 0x28, 0x00, 0xBD, 0x67, 0x09, 0x00, 0xE0, 0x03, 0x00, 0x00, 0x00, 0x00,
+ 0x38, 0x00, 0xBF, 0xDF, 0x30, 0x00, 0xB1, 0xDF, 0x28, 0x00, 0xB0, 0xDF,
+ 0x20, 0x00, 0xB9, 0xD7, 0x18, 0x00, 0xB8, 0xD7, 0x40, 0x00, 0xBD, 0x67,
+ 0x00, 0x00, 0x1F, 0xD8,
};
static constexpr uint8_t expected_cfi_kMips64_adjust[] = {
- 0x44, 0x0E, 0x28, 0x44, 0x9F, 0x02, 0x44, 0x91, 0x04, 0x44, 0x90, 0x06,
- 0x44, 0xB9, 0x08, 0x44, 0xB8, 0x0A, 0x44, 0x0E, 0x40, 0x04, 0x10, 0x00,
- 0x02, 0x00, 0x0A, 0x44, 0x0E, 0x28, 0x44, 0xF8, 0x44, 0xF9, 0x44, 0xD0,
- 0x44, 0xD1, 0x44, 0xDF, 0x44, 0x0E, 0x00, 0x48, 0x0B, 0x0E, 0x40,
+ 0x44, 0x0E, 0x40, 0x44, 0x9F, 0x02, 0x44, 0x91, 0x04, 0x44, 0x90, 0x06,
+ 0x44, 0xB9, 0x08, 0x44, 0xB8, 0x0A, 0x04, 0x10, 0x00, 0x02, 0x00, 0x0A,
+ 0x44, 0xDF, 0x44, 0xD1, 0x44, 0xD0, 0x44, 0xF9, 0x44, 0xF8, 0x44, 0x0E,
+ 0x00, 0x44, 0x0B, 0x0E, 0x40,
};
-// 0x00000000: daddiu r29, r29, -40
-// 0x00000004: .cfi_def_cfa_offset: 40
-// 0x00000004: sd r31, +32(r29)
+// 0x00000000: daddiu r29, r29, -64
+// 0x00000004: .cfi_def_cfa_offset: 64
+// 0x00000004: sd r31, +56(r29)
// 0x00000008: .cfi_offset: r31 at cfa-8
-// 0x00000008: sd r17, +24(r29)
+// 0x00000008: sd r17, +48(r29)
// 0x0000000c: .cfi_offset: r17 at cfa-16
-// 0x0000000c: sd r16, +16(r29)
+// 0x0000000c: sd r16, +40(r29)
// 0x00000010: .cfi_offset: r16 at cfa-24
-// 0x00000010: sdc1 f25, +8(r29)
+// 0x00000010: sdc1 f25, +32(r29)
// 0x00000014: .cfi_offset: r57 at cfa-32
-// 0x00000014: sdc1 f24, +0(r29)
+// 0x00000014: sdc1 f24, +24(r29)
// 0x00000018: .cfi_offset: r56 at cfa-40
-// 0x00000018: daddiu r29, r29, -24
-// 0x0000001c: .cfi_def_cfa_offset: 64
-// 0x0000001c: bnec r5, r6, 0x0000002c ; +12
-// 0x00000020: auipc r1, 2
-// 0x00000024: jic r1, 12 ; b 0x00020030 ; +131080
-// 0x00000028: nop
+// 0x00000018: bnec r5, r6, 0x00000024 ; +12
+// 0x0000001c: auipc r1, 2
+// 0x00000020: jic r1, 12 ; bc 0x00020028 ; +131080
+// 0x00000024: nop
// ...
-// 0x00020028: nop
-// 0x0002002c: .cfi_remember_state
-// 0x0002002c: daddiu r29, r29, 24
-// 0x00020030: .cfi_def_cfa_offset: 40
-// 0x00020030: ldc1 f24, +0(r29)
-// 0x00020034: .cfi_restore: r56
-// 0x00020034: ldc1 f25, +8(r29)
+// 0x00020024: nop
+// 0x00020028: .cfi_remember_state
+// 0x00020028: ld r31, +56(r29)
+// 0x0002002c: .cfi_restore: r31
+// 0x0002002c: ld r17, +48(r29)
+// 0x00020030: .cfi_restore: r17
+// 0x00020030: ld r16, +40(r29)
+// 0x00020034: .cfi_restore: r16
+// 0x00020034: ldc1 f25, +32(r29)
// 0x00020038: .cfi_restore: r57
-// 0x00020038: ld r16, +16(r29)
-// 0x0002003c: .cfi_restore: r16
-// 0x0002003c: ld r17, +24(r29)
-// 0x00020040: .cfi_restore: r17
-// 0x00020040: ld r31, +32(r29)
-// 0x00020044: .cfi_restore: r31
-// 0x00020044: daddiu r29, r29, 40
-// 0x00020047: .cfi_def_cfa_offset: 0
-// 0x00020048: jr r31
-// 0x0002004c: nop
-// 0x00020050: .cfi_restore_state
-// 0x00020050: .cfi_def_cfa_offset: 64
+// 0x00020038: ldc1 f24, +24(r29)
+// 0x0002003c: .cfi_restore: r56
+// 0x0002003c: daddiu r29, r29, 64
+// 0x00020040: .cfi_def_cfa_offset: 0
+// 0x00020040: jic r31, 0
+// 0x00020044: .cfi_restore_state
+// 0x00020044: .cfi_def_cfa_offset: 64
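
All of the rewritten expectations follow from the single 64-byte frame: a register recorded at cfa-K is stored K bytes below the canonical frame address, i.e. at frame_size - K from the adjusted stack pointer. A standalone check of the offsets listed above:

#include <cstddef>

// Saved-register offset from the stack pointer, given the frame size and
// the .cfi_offset distance below the CFA.
constexpr size_t SaveOffset(size_t frame_size, size_t cfa_distance) {
  return frame_size - cfa_distance;
}

int main() {
  constexpr size_t kFrameSize = 64;  // daddiu r29, r29, -64
  static_assert(SaveOffset(kFrameSize, 8)  == 56, "sd r31, +56(r29)");
  static_assert(SaveOffset(kFrameSize, 16) == 48, "sd r17, +48(r29)");
  static_assert(SaveOffset(kFrameSize, 24) == 40, "sd r16, +40(r29)");
  static_assert(SaveOffset(kFrameSize, 32) == 32, "sdc1 f25, +32(r29)");
  static_assert(SaveOffset(kFrameSize, 40) == 24, "sdc1 f24, +24(r29)");
}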
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 23ccd9e..3c6d2d6 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -499,7 +499,8 @@
handles,
stats,
number_of_dex_registers,
- /* depth */ 0);
+ /* total_number_of_instructions */ 0,
+ /* parent */ nullptr);
} else if (opt_name == HSharpening::kSharpeningPassName) {
return new (arena) HSharpening(graph, codegen, dex_compilation_unit, driver, handles);
} else if (opt_name == HSelectGenerator::kSelectGeneratorPassName) {
@@ -607,8 +608,7 @@
VariableSizedHandleScope* handles) const {
OptimizingCompilerStats* stats = compilation_stats_.get();
const CompilerOptions& compiler_options = driver->GetCompilerOptions();
- bool should_inline = (compiler_options.GetInlineDepthLimit() > 0)
- && (compiler_options.GetInlineMaxCodeUnits() > 0);
+ bool should_inline = (compiler_options.GetInlineMaxCodeUnits() > 0);
if (!should_inline) {
return;
}
@@ -623,7 +623,8 @@
handles,
stats,
number_of_dex_registers,
- /* depth */ 0);
+ /* total_number_of_instructions */ 0,
+ /* parent */ nullptr);
HOptimization* optimizations[] = { inliner };
RunOptimizations(optimizations, arraysize(optimizations), pass_observer);
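
The two call sites above show the shape of the new inlining budget: instead of a per-call depth counter, each nested HInliner receives the instruction total accumulated so far plus a pointer to its parent inliner. A hedged standalone sketch of that idea (the struct and function names are illustrative, not HInliner's actual fields):

#include <cstddef>

// Illustrative model: budget by accumulated instructions, and derive depth
// from the parent chain only when it is actually needed.
struct InlinerFrame {
  const InlinerFrame* parent;            // nullptr at the outermost inliner
  size_t total_number_of_instructions;   // accumulated across the chain
};

size_t Depth(const InlinerFrame& frame) {
  size_t depth = 0;
  for (const InlinerFrame* p = frame.parent; p != nullptr; p = p->parent) {
    ++depth;
  }
  return depth;
}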
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index ae9a811..a211c54 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -69,6 +69,23 @@
kExplicitNullCheckGenerated,
kSimplifyIf,
kInstructionSunk,
+ kNotInlinedUnresolvedEntrypoint,
+ kNotInlinedDexCache,
+ kNotInlinedStackMaps,
+ kNotInlinedEnvironmentBudget,
+ kNotInlinedInstructionBudget,
+ kNotInlinedLoopWithoutExit,
+ kNotInlinedIrreducibleLoop,
+ kNotInlinedAlwaysThrows,
+ kNotInlinedInfiniteLoop,
+ kNotInlinedTryCatch,
+ kNotInlinedRegisterAllocator,
+ kNotInlinedCannotBuild,
+ kNotInlinedNotVerified,
+ kNotInlinedCodeItem,
+ kNotInlinedWont,
+ kNotInlinedRecursiveBudget,
+ kNotInlinedProxy,
kLastStat
};
@@ -168,6 +185,23 @@
case kExplicitNullCheckGenerated: name = "ExplicitNullCheckGenerated"; break;
case kSimplifyIf: name = "SimplifyIf"; break;
case kInstructionSunk: name = "InstructionSunk"; break;
+ case kNotInlinedUnresolvedEntrypoint: name = "NotInlinedUnresolvedEntrypoint"; break;
+ case kNotInlinedDexCache: name = "NotInlinedDexCache"; break;
+ case kNotInlinedStackMaps: name = "NotInlinedStackMaps"; break;
+ case kNotInlinedEnvironmentBudget: name = "NotInlinedEnvironmentBudget"; break;
+ case kNotInlinedInstructionBudget: name = "NotInlinedInstructionBudget"; break;
+ case kNotInlinedLoopWithoutExit: name = "NotInlinedLoopWithoutExit"; break;
+ case kNotInlinedIrreducibleLoop: name = "NotInlinedIrreducibleLoop"; break;
+ case kNotInlinedAlwaysThrows: name = "NotInlinedAlwaysThrows"; break;
+ case kNotInlinedInfiniteLoop: name = "NotInlinedInfiniteLoop"; break;
+ case kNotInlinedTryCatch: name = "NotInlinedTryCatch"; break;
+ case kNotInlinedRegisterAllocator: name = "NotInlinedRegisterAllocator"; break;
+ case kNotInlinedCannotBuild: name = "NotInlinedCannotBuild"; break;
+ case kNotInlinedNotVerified: name = "NotInlinedNotVerified"; break;
+ case kNotInlinedCodeItem: name = "NotInlinedCodeItem"; break;
+ case kNotInlinedWont: name = "NotInlinedWont"; break;
+ case kNotInlinedRecursiveBudget: name = "NotInlinedRecursiveBudget"; break;
+ case kNotInlinedProxy: name = "NotInlinedProxy"; break;
case kLastStat:
LOG(FATAL) << "invalid stat "
diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc
index 0d33b49..c6a0b6a 100644
--- a/compiler/optimizing/register_allocation_resolver.cc
+++ b/compiler/optimizing/register_allocation_resolver.cc
@@ -303,6 +303,7 @@
switch (interval->NumberOfSpillSlotsNeeded()) {
case 1: loc = Location::StackSlot(interval->GetParent()->GetSpillSlot()); break;
case 2: loc = Location::DoubleStackSlot(interval->GetParent()->GetSpillSlot()); break;
+ case 4: loc = Location::SIMDStackSlot(interval->GetParent()->GetSpillSlot()); break;
default: LOG(FATAL) << "Unexpected number of spill slots"; UNREACHABLE();
}
InsertMoveAfter(interval->GetDefinedBy(), interval->ToLocation(), loc);
@@ -464,6 +465,7 @@
switch (parent->NumberOfSpillSlotsNeeded()) {
case 1: location_source = Location::StackSlot(parent->GetSpillSlot()); break;
case 2: location_source = Location::DoubleStackSlot(parent->GetSpillSlot()); break;
+ case 4: location_source = Location::SIMDStackSlot(parent->GetSpillSlot()); break;
default: LOG(FATAL) << "Unexpected number of spill slots"; UNREACHABLE();
}
}
@@ -496,7 +498,8 @@
|| destination.IsFpuRegister()
|| destination.IsFpuRegisterPair()
|| destination.IsStackSlot()
- || destination.IsDoubleStackSlot();
+ || destination.IsDoubleStackSlot()
+ || destination.IsSIMDStackSlot();
}
void RegisterAllocationResolver::AddMove(HParallelMove* move,
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 7bd38c7..eedaf6e 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -259,7 +259,7 @@
} else if (runtime->UseJitCompilation()) {
// TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
// DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
- string = class_linker->LookupString(dex_file, string_index, dex_cache);
+ string = class_linker->LookupString(dex_file, string_index, dex_cache.Get());
if (string != nullptr) {
if (runtime->GetHeap()->ObjectIsInBootImageSpace(string)) {
desired_load_kind = HLoadString::LoadKind::kBootImageAddress;
@@ -271,7 +271,7 @@
}
} else {
// AOT app compilation. Try to lookup the string without allocating if not found.
- string = class_linker->LookupString(dex_file, string_index, dex_cache);
+ string = class_linker->LookupString(dex_file, string_index, dex_cache.Get());
if (string != nullptr &&
runtime->GetHeap()->ObjectIsInBootImageSpace(string) &&
!codegen_->GetCompilerOptions().GetCompilePic()) {
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index c0a045c..36ee5a9 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -470,6 +470,8 @@
}
size_t LiveInterval::NumberOfSpillSlotsNeeded() const {
+ // TODO: Detect vector operations, which need four spill slots.
+ // Return the number of spill slots needed based on the type.
return (type_ == Primitive::kPrimLong || type_ == Primitive::kPrimDouble) ? 2 : 1;
}
@@ -497,6 +499,7 @@
switch (NumberOfSpillSlotsNeeded()) {
case 1: return Location::StackSlot(GetParent()->GetSpillSlot());
case 2: return Location::DoubleStackSlot(GetParent()->GetSpillSlot());
+ case 4: return Location::SIMDStackSlot(GetParent()->GetSpillSlot());
default: LOG(FATAL) << "Unexpected number of spill slots"; UNREACHABLE();
}
} else {
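
The same three-way switch now appears twice in RegisterAllocationResolver and once in LiveInterval::ToLocation(). A standalone model of the mapping (the enum is illustrative, not ART's Location::Kind); until the TODO above lands, NumberOfSpillSlotsNeeded() still returns only 1 or 2, so the 4-slot arm stays dormant:

#include <cstddef>
#include <cstdlib>

enum class SlotKind { kStackSlot, kDoubleStackSlot, kSIMDStackSlot };

SlotKind KindForSpillSlots(std::size_t slots) {
  switch (slots) {
    case 1: return SlotKind::kStackSlot;        // 32-bit values
    case 2: return SlotKind::kDoubleStackSlot;  // long and double
    case 4: return SlotKind::kSIMDStackSlot;    // 128-bit vector values
    default: std::abort();                      // unexpected count
  }
}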
diff --git a/compiler/utils/jni_macro_assembler.cc b/compiler/utils/jni_macro_assembler.cc
index 2f154fb..3ac6c3c 100644
--- a/compiler/utils/jni_macro_assembler.cc
+++ b/compiler/utils/jni_macro_assembler.cc
@@ -84,7 +84,11 @@
MacroAsm64UniquePtr JNIMacroAssembler<PointerSize::k64>::Create(
ArenaAllocator* arena,
InstructionSet instruction_set,
- const InstructionSetFeatures* instruction_set_features ATTRIBUTE_UNUSED) {
+ const InstructionSetFeatures* instruction_set_features) {
+#ifndef ART_ENABLE_CODEGEN_mips64
+ UNUSED(instruction_set_features);
+#endif
+
switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64:
@@ -92,7 +96,11 @@
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64:
- return MacroAsm64UniquePtr(new (arena) mips64::Mips64Assembler(arena));
+ return MacroAsm64UniquePtr(new (arena) mips64::Mips64Assembler(
+ arena,
+ instruction_set_features != nullptr
+ ? instruction_set_features->AsMips64InstructionSetFeatures()
+ : nullptr));
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64:
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 4e7f635..8a5ae75 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -1180,373 +1180,456 @@
Nor(rd, rs, ZERO);
}
-// TODO: Check for MSA presence in Mips64InstructionSetFeatures for each MSA instruction.
-
void Mips64Assembler::AndV(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x0, 0x0, wt, ws, wd, 0x1e);
}
void Mips64Assembler::OrV(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x0, 0x1, wt, ws, wd, 0x1e);
}
void Mips64Assembler::NorV(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x0, 0x2, wt, ws, wd, 0x1e);
}
void Mips64Assembler::XorV(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x0, 0x3, wt, ws, wd, 0x1e);
}
void Mips64Assembler::AddvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x0, 0x0, wt, ws, wd, 0xe);
}
void Mips64Assembler::AddvH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x0, 0x1, wt, ws, wd, 0xe);
}
void Mips64Assembler::AddvW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x0, 0x2, wt, ws, wd, 0xe);
}
void Mips64Assembler::AddvD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x0, 0x3, wt, ws, wd, 0xe);
}
void Mips64Assembler::SubvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x1, 0x0, wt, ws, wd, 0xe);
}
void Mips64Assembler::SubvH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x1, 0x1, wt, ws, wd, 0xe);
}
void Mips64Assembler::SubvW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x1, 0x2, wt, ws, wd, 0xe);
}
void Mips64Assembler::SubvD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x1, 0x3, wt, ws, wd, 0xe);
}
void Mips64Assembler::MulvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x0, 0x0, wt, ws, wd, 0x12);
}
void Mips64Assembler::MulvH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x0, 0x1, wt, ws, wd, 0x12);
}
void Mips64Assembler::MulvW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x0, 0x2, wt, ws, wd, 0x12);
}
void Mips64Assembler::MulvD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x0, 0x3, wt, ws, wd, 0x12);
}
void Mips64Assembler::Div_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x4, 0x0, wt, ws, wd, 0x12);
}
void Mips64Assembler::Div_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x4, 0x1, wt, ws, wd, 0x12);
}
void Mips64Assembler::Div_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x4, 0x2, wt, ws, wd, 0x12);
}
void Mips64Assembler::Div_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x4, 0x3, wt, ws, wd, 0x12);
}
void Mips64Assembler::Div_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x5, 0x0, wt, ws, wd, 0x12);
}
void Mips64Assembler::Div_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x5, 0x1, wt, ws, wd, 0x12);
}
void Mips64Assembler::Div_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x5, 0x2, wt, ws, wd, 0x12);
}
void Mips64Assembler::Div_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x5, 0x3, wt, ws, wd, 0x12);
}
void Mips64Assembler::Mod_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x6, 0x0, wt, ws, wd, 0x12);
}
void Mips64Assembler::Mod_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x6, 0x1, wt, ws, wd, 0x12);
}
void Mips64Assembler::Mod_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x6, 0x2, wt, ws, wd, 0x12);
}
void Mips64Assembler::Mod_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x6, 0x3, wt, ws, wd, 0x12);
}
void Mips64Assembler::Mod_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x7, 0x0, wt, ws, wd, 0x12);
}
void Mips64Assembler::Mod_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x7, 0x1, wt, ws, wd, 0x12);
}
void Mips64Assembler::Mod_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x7, 0x2, wt, ws, wd, 0x12);
}
void Mips64Assembler::Mod_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x7, 0x3, wt, ws, wd, 0x12);
}
void Mips64Assembler::FaddW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x0, 0x0, wt, ws, wd, 0x1b);
}
void Mips64Assembler::FaddD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x0, 0x1, wt, ws, wd, 0x1b);
}
void Mips64Assembler::FsubW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x0, 0x2, wt, ws, wd, 0x1b);
}
void Mips64Assembler::FsubD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x0, 0x3, wt, ws, wd, 0x1b);
}
void Mips64Assembler::FmulW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x1, 0x0, wt, ws, wd, 0x1b);
}
void Mips64Assembler::FmulD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x1, 0x1, wt, ws, wd, 0x1b);
}
void Mips64Assembler::FdivW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x1, 0x2, wt, ws, wd, 0x1b);
}
void Mips64Assembler::FdivD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x1, 0x3, wt, ws, wd, 0x1b);
}
void Mips64Assembler::Ffint_sW(VectorRegister wd, VectorRegister ws) {
+ CHECK(HasMsa());
EmitMsa2RF(0x19e, 0x0, ws, wd, 0x1e);
}
void Mips64Assembler::Ffint_sD(VectorRegister wd, VectorRegister ws) {
+ CHECK(HasMsa());
EmitMsa2RF(0x19e, 0x1, ws, wd, 0x1e);
}
void Mips64Assembler::Ftint_sW(VectorRegister wd, VectorRegister ws) {
+ CHECK(HasMsa());
EmitMsa2RF(0x19c, 0x0, ws, wd, 0x1e);
}
void Mips64Assembler::Ftint_sD(VectorRegister wd, VectorRegister ws) {
+ CHECK(HasMsa());
EmitMsa2RF(0x19c, 0x1, ws, wd, 0x1e);
}
void Mips64Assembler::SllB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x0, 0x0, wt, ws, wd, 0xd);
}
void Mips64Assembler::SllH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x0, 0x1, wt, ws, wd, 0xd);
}
void Mips64Assembler::SllW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x0, 0x2, wt, ws, wd, 0xd);
}
void Mips64Assembler::SllD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x0, 0x3, wt, ws, wd, 0xd);
}
void Mips64Assembler::SraB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x1, 0x0, wt, ws, wd, 0xd);
}
void Mips64Assembler::SraH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x1, 0x1, wt, ws, wd, 0xd);
}
void Mips64Assembler::SraW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x1, 0x2, wt, ws, wd, 0xd);
}
void Mips64Assembler::SraD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x1, 0x3, wt, ws, wd, 0xd);
}
void Mips64Assembler::SrlB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x2, 0x0, wt, ws, wd, 0xd);
}
void Mips64Assembler::SrlH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x2, 0x1, wt, ws, wd, 0xd);
}
void Mips64Assembler::SrlW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x2, 0x2, wt, ws, wd, 0xd);
}
void Mips64Assembler::SrlD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
+ CHECK(HasMsa());
EmitMsa3R(0x2, 0x3, wt, ws, wd, 0xd);
}
void Mips64Assembler::SlliB(VectorRegister wd, VectorRegister ws, int shamt3) {
+ CHECK(HasMsa());
CHECK(IsUint<3>(shamt3)) << shamt3;
EmitMsaBIT(0x0, shamt3 | kMsaDfMByteMask, ws, wd, 0x9);
}
void Mips64Assembler::SlliH(VectorRegister wd, VectorRegister ws, int shamt4) {
+ CHECK(HasMsa());
CHECK(IsUint<4>(shamt4)) << shamt4;
EmitMsaBIT(0x0, shamt4 | kMsaDfMHalfwordMask, ws, wd, 0x9);
}
void Mips64Assembler::SlliW(VectorRegister wd, VectorRegister ws, int shamt5) {
+ CHECK(HasMsa());
CHECK(IsUint<5>(shamt5)) << shamt5;
EmitMsaBIT(0x0, shamt5 | kMsaDfMWordMask, ws, wd, 0x9);
}
void Mips64Assembler::SlliD(VectorRegister wd, VectorRegister ws, int shamt6) {
+ CHECK(HasMsa());
CHECK(IsUint<6>(shamt6)) << shamt6;
EmitMsaBIT(0x0, shamt6 | kMsaDfMDoublewordMask, ws, wd, 0x9);
}
void Mips64Assembler::SraiB(VectorRegister wd, VectorRegister ws, int shamt3) {
+ CHECK(HasMsa());
CHECK(IsUint<3>(shamt3)) << shamt3;
EmitMsaBIT(0x1, shamt3 | kMsaDfMByteMask, ws, wd, 0x9);
}
void Mips64Assembler::SraiH(VectorRegister wd, VectorRegister ws, int shamt4) {
+ CHECK(HasMsa());
CHECK(IsUint<4>(shamt4)) << shamt4;
EmitMsaBIT(0x1, shamt4 | kMsaDfMHalfwordMask, ws, wd, 0x9);
}
void Mips64Assembler::SraiW(VectorRegister wd, VectorRegister ws, int shamt5) {
+ CHECK(HasMsa());
CHECK(IsUint<5>(shamt5)) << shamt5;
EmitMsaBIT(0x1, shamt5 | kMsaDfMWordMask, ws, wd, 0x9);
}
void Mips64Assembler::SraiD(VectorRegister wd, VectorRegister ws, int shamt6) {
+ CHECK(HasMsa());
CHECK(IsUint<6>(shamt6)) << shamt6;
EmitMsaBIT(0x1, shamt6 | kMsaDfMDoublewordMask, ws, wd, 0x9);
}
void Mips64Assembler::SrliB(VectorRegister wd, VectorRegister ws, int shamt3) {
+ CHECK(HasMsa());
CHECK(IsUint<3>(shamt3)) << shamt3;
EmitMsaBIT(0x2, shamt3 | kMsaDfMByteMask, ws, wd, 0x9);
}
void Mips64Assembler::SrliH(VectorRegister wd, VectorRegister ws, int shamt4) {
+ CHECK(HasMsa());
CHECK(IsUint<4>(shamt4)) << shamt4;
EmitMsaBIT(0x2, shamt4 | kMsaDfMHalfwordMask, ws, wd, 0x9);
}
void Mips64Assembler::SrliW(VectorRegister wd, VectorRegister ws, int shamt5) {
+ CHECK(HasMsa());
CHECK(IsUint<5>(shamt5)) << shamt5;
EmitMsaBIT(0x2, shamt5 | kMsaDfMWordMask, ws, wd, 0x9);
}
void Mips64Assembler::SrliD(VectorRegister wd, VectorRegister ws, int shamt6) {
+ CHECK(HasMsa());
CHECK(IsUint<6>(shamt6)) << shamt6;
EmitMsaBIT(0x2, shamt6 | kMsaDfMDoublewordMask, ws, wd, 0x9);
}
void Mips64Assembler::MoveV(VectorRegister wd, VectorRegister ws) {
+ CHECK(HasMsa());
EmitMsaBIT(0x1, 0x3e, ws, wd, 0x19);
}
void Mips64Assembler::SplatiB(VectorRegister wd, VectorRegister ws, int n4) {
+ CHECK(HasMsa());
CHECK(IsUint<4>(n4)) << n4;
EmitMsaELM(0x1, n4 | kMsaDfNByteMask, ws, wd, 0x19);
}
void Mips64Assembler::SplatiH(VectorRegister wd, VectorRegister ws, int n3) {
+ CHECK(HasMsa());
CHECK(IsUint<3>(n3)) << n3;
EmitMsaELM(0x1, n3 | kMsaDfNHalfwordMask, ws, wd, 0x19);
}
void Mips64Assembler::SplatiW(VectorRegister wd, VectorRegister ws, int n2) {
+ CHECK(HasMsa());
CHECK(IsUint<2>(n2)) << n2;
EmitMsaELM(0x1, n2 | kMsaDfNWordMask, ws, wd, 0x19);
}
void Mips64Assembler::SplatiD(VectorRegister wd, VectorRegister ws, int n1) {
+ CHECK(HasMsa());
CHECK(IsUint<1>(n1)) << n1;
EmitMsaELM(0x1, n1 | kMsaDfNDoublewordMask, ws, wd, 0x19);
}
void Mips64Assembler::FillB(VectorRegister wd, GpuRegister rs) {
+ CHECK(HasMsa());
EmitMsa2R(0xc0, 0x0, static_cast<VectorRegister>(rs), wd, 0x1e);
}
void Mips64Assembler::FillH(VectorRegister wd, GpuRegister rs) {
+ CHECK(HasMsa());
EmitMsa2R(0xc0, 0x1, static_cast<VectorRegister>(rs), wd, 0x1e);
}
void Mips64Assembler::FillW(VectorRegister wd, GpuRegister rs) {
+ CHECK(HasMsa());
EmitMsa2R(0xc0, 0x2, static_cast<VectorRegister>(rs), wd, 0x1e);
}
void Mips64Assembler::FillD(VectorRegister wd, GpuRegister rs) {
+ CHECK(HasMsa());
EmitMsa2R(0xc0, 0x3, static_cast<VectorRegister>(rs), wd, 0x1e);
}
void Mips64Assembler::LdB(VectorRegister wd, GpuRegister rs, int offset) {
+ CHECK(HasMsa());
CHECK(IsInt<10>(offset)) << offset;
EmitMsaMI10(offset & kMsaS10Mask, rs, wd, 0x8, 0x0);
}
void Mips64Assembler::LdH(VectorRegister wd, GpuRegister rs, int offset) {
+ CHECK(HasMsa());
CHECK(IsInt<11>(offset)) << offset;
CHECK_ALIGNED(offset, kMips64HalfwordSize);
EmitMsaMI10((offset >> TIMES_2) & kMsaS10Mask, rs, wd, 0x8, 0x1);
}
void Mips64Assembler::LdW(VectorRegister wd, GpuRegister rs, int offset) {
+ CHECK(HasMsa());
CHECK(IsInt<12>(offset)) << offset;
CHECK_ALIGNED(offset, kMips64WordSize);
EmitMsaMI10((offset >> TIMES_4) & kMsaS10Mask, rs, wd, 0x8, 0x2);
}
void Mips64Assembler::LdD(VectorRegister wd, GpuRegister rs, int offset) {
+ CHECK(HasMsa());
CHECK(IsInt<13>(offset)) << offset;
CHECK_ALIGNED(offset, kMips64DoublewordSize);
EmitMsaMI10((offset >> TIMES_8) & kMsaS10Mask, rs, wd, 0x8, 0x3);
}
void Mips64Assembler::StB(VectorRegister wd, GpuRegister rs, int offset) {
+ CHECK(HasMsa());
CHECK(IsInt<10>(offset)) << offset;
EmitMsaMI10(offset & kMsaS10Mask, rs, wd, 0x9, 0x0);
}
void Mips64Assembler::StH(VectorRegister wd, GpuRegister rs, int offset) {
+ CHECK(HasMsa());
CHECK(IsInt<11>(offset)) << offset;
CHECK_ALIGNED(offset, kMips64HalfwordSize);
EmitMsaMI10((offset >> TIMES_2) & kMsaS10Mask, rs, wd, 0x9, 0x1);
}
void Mips64Assembler::StW(VectorRegister wd, GpuRegister rs, int offset) {
+ CHECK(HasMsa());
CHECK(IsInt<12>(offset)) << offset;
CHECK_ALIGNED(offset, kMips64WordSize);
EmitMsaMI10((offset >> TIMES_4) & kMsaS10Mask, rs, wd, 0x9, 0x2);
}
void Mips64Assembler::StD(VectorRegister wd, GpuRegister rs, int offset) {
+ CHECK(HasMsa());
CHECK(IsInt<13>(offset)) << offset;
CHECK_ALIGNED(offset, kMips64DoublewordSize);
EmitMsaMI10((offset >> TIMES_8) & kMsaS10Mask, rs, wd, 0x9, 0x3);
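
Each Ld*/St* divides the byte offset by the element size before masking it into the 10-bit signed immediate of the MI10 format, which is why LdD demands a 13-bit, 8-byte-aligned offset. A standalone sketch of the doubleword case, assuming a 0x3ff field mask in place of ART's kMsaS10Mask:

#include <cassert>
#include <cstdint>

constexpr uint32_t kS10Mask = 0x3ff;  // assumed value of the 10-bit mask

uint32_t EncodeMi10DoublewordOffset(int offset) {
  assert(offset % 8 == 0);                               // CHECK_ALIGNED(offset, 8)
  assert(offset >= -(1 << 12) && offset < (1 << 12));    // IsInt<13>(offset)
  return static_cast<uint32_t>(offset >> 3) & kS10Mask;  // >> TIMES_8 scaling
}

int main() {
  assert(EncodeMi10DoublewordOffset(16) == 2);
  assert(EncodeMi10DoublewordOffset(-8) == 0x3ff);  // sign folds into 10 bits
}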
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index f42c162..a8035b6 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -21,6 +21,7 @@
#include <utility>
#include <vector>
+#include "arch/mips64/instruction_set_features_mips64.h"
#include "base/arena_containers.h"
#include "base/enums.h"
#include "base/macros.h"
@@ -413,7 +414,8 @@
public:
using JNIBase = JNIMacroAssembler<PointerSize::k64>;
- explicit Mips64Assembler(ArenaAllocator* arena)
+ explicit Mips64Assembler(ArenaAllocator* arena,
+ const Mips64InstructionSetFeatures* instruction_set_features = nullptr)
: Assembler(arena),
overwriting_(false),
overwrite_location_(0),
@@ -422,7 +424,8 @@
jump_tables_(arena->Adapter(kArenaAllocAssembler)),
last_position_adjustment_(0),
last_old_position_(0),
- last_branch_id_(0) {
+ last_branch_id_(0),
+ has_msa_(instruction_set_features != nullptr ? instruction_set_features->HasMsa() : false) {
cfi().DelayEmittingAdvancePCs();
}
@@ -1479,6 +1482,10 @@
// Emits exception block.
void EmitExceptionPoll(Mips64ExceptionSlowPath* exception);
+ bool HasMsa() const {
+ return has_msa_;
+ }
+
// List of exception blocks to generate at the end of the code cache.
std::vector<Mips64ExceptionSlowPath> exception_blocks_;
@@ -1502,6 +1509,8 @@
uint32_t last_old_position_;
uint32_t last_branch_id_;
+ const bool has_msa_;
+
DISALLOW_COPY_AND_ASSIGN(Mips64Assembler);
};
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index 12660ce..cadbe27 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -46,6 +46,9 @@
uint32_t,
mips64::VectorRegister> Base;
+ AssemblerMIPS64Test()
+ : instruction_set_features_(Mips64InstructionSetFeatures::FromVariant("default", nullptr)) {}
+
protected:
// Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
std::string GetArchitectureString() OVERRIDE {
@@ -78,6 +81,10 @@
return " -D -bbinary -mmips:isa64r6";
}
+ mips64::Mips64Assembler* CreateAssembler(ArenaAllocator* arena) OVERRIDE {
+ return new (arena) mips64::Mips64Assembler(arena, instruction_set_features_.get());
+ }
+
void SetUpHelpers() OVERRIDE {
if (registers_.size() == 0) {
registers_.push_back(new mips64::GpuRegister(mips64::ZERO));
@@ -313,8 +320,9 @@
std::vector<mips64::FpuRegister*> fp_registers_;
std::vector<mips64::VectorRegister*> vec_registers_;
-};
+ std::unique_ptr<const Mips64InstructionSetFeatures> instruction_set_features_;
+};
TEST_F(AssemblerMIPS64Test, Toolchain) {
EXPECT_TRUE(CheckTools());
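
With the fixture above in place, every MSA test emits through an assembler that knows its features. A sketch of the same wiring outside the test harness (whether a given variant reports MSA is an assumption about the build, and EmitVectorAdd is a hypothetical helper):

#include <memory>

#include "arch/mips64/instruction_set_features_mips64.h"
#include "utils/mips64/assembler_mips64.h"

void EmitVectorAdd(ArenaAllocator* arena) {
  std::unique_ptr<const Mips64InstructionSetFeatures> features =
      Mips64InstructionSetFeatures::FromVariant("default", /* error_msg */ nullptr);
  mips64::Mips64Assembler assembler(arena, features.get());
  // AddvW() now CHECKs HasMsa() first, so this aborts at run time instead
  // of silently emitting MSA code for a core that lacks it.
  assembler.AddvW(mips64::W0, mips64::W1, mips64::W2);
}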
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index e80be81..b4ea20b 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -312,13 +312,6 @@
UsageError(" Example: --num-dex-method=%d", CompilerOptions::kDefaultNumDexMethodsThreshold);
UsageError(" Default: %d", CompilerOptions::kDefaultNumDexMethodsThreshold);
UsageError("");
- UsageError(" --inline-depth-limit=<depth-limit>: the depth limit of inlining for fine tuning");
- UsageError(" the compiler. A zero value will disable inlining. Honored only by Optimizing.");
- UsageError(" Has priority over the --compiler-filter option. Intended for ");
- UsageError(" development/experimental use.");
- UsageError(" Example: --inline-depth-limit=%d", CompilerOptions::kDefaultInlineDepthLimit);
- UsageError(" Default: %d", CompilerOptions::kDefaultInlineDepthLimit);
- UsageError("");
UsageError(" --inline-max-code-units=<code-units-count>: the maximum code units that a method");
UsageError(" can have to be considered for inlining. A zero value will disable inlining.");
UsageError(" Honored only by Optimizing. Has priority over the --compiler-filter option.");
@@ -870,22 +863,8 @@
}
}
- // If they are not set, use default values for inlining settings.
- // TODO: We should rethink the compiler filter. We mostly save
- // time here, which is orthogonal to space.
- if (compiler_options_->inline_depth_limit_ == CompilerOptions::kUnsetInlineDepthLimit) {
- compiler_options_->inline_depth_limit_ =
- (compiler_options_->compiler_filter_ == CompilerFilter::kSpace)
- // Implementation of the space filter: limit inlining depth.
- ? CompilerOptions::kSpaceFilterInlineDepthLimit
- : CompilerOptions::kDefaultInlineDepthLimit;
- }
if (compiler_options_->inline_max_code_units_ == CompilerOptions::kUnsetInlineMaxCodeUnits) {
- compiler_options_->inline_max_code_units_ =
- (compiler_options_->compiler_filter_ == CompilerFilter::kSpace)
- // Implementation of the space filter: limit inlining max code units.
- ? CompilerOptions::kSpaceFilterInlineMaxCodeUnits
- : CompilerOptions::kDefaultInlineMaxCodeUnits;
+ compiler_options_->inline_max_code_units_ = CompilerOptions::kDefaultInlineMaxCodeUnits;
}
// Checks are all explicit until we know the architecture.
diff --git a/runtime/backtrace_helper.h b/runtime/backtrace_helper.h
new file mode 100644
index 0000000..ace118c
--- /dev/null
+++ b/runtime/backtrace_helper.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BACKTRACE_HELPER_H_
+#define ART_RUNTIME_BACKTRACE_HELPER_H_
+
+#include <unwind.h>
+
+namespace art {
+
+// Based on debug malloc logic from libc/bionic/debug_stacktrace.cpp.
+class BacktraceCollector {
+ public:
+ BacktraceCollector(uintptr_t* out_frames, size_t max_depth, size_t skip_count)
+ : out_frames_(out_frames), max_depth_(max_depth), skip_count_(skip_count) {}
+
+ size_t NumFrames() const {
+ return num_frames_;
+ }
+
+ // Collect the backtrace; do not call more than once.
+ void Collect() {
+ _Unwind_Backtrace(&Callback, this);
+ }
+
+ private:
+ static _Unwind_Reason_Code Callback(_Unwind_Context* context, void* arg) {
+ auto* const state = reinterpret_cast<BacktraceCollector*>(arg);
+ const uintptr_t ip = _Unwind_GetIP(context);
+ // The first frames belong to the collector itself; skip_count_ skips past them.
+ if (ip != 0 && state->skip_count_ > 0) {
+ --state->skip_count_;
+ return _URC_NO_REASON;
+ }
+ // ip may be off for ARM but it shouldn't matter since we only use it for hashing.
+ state->out_frames_[state->num_frames_] = ip;
+ state->num_frames_++;
+ return state->num_frames_ >= state->max_depth_ ? _URC_END_OF_STACK : _URC_NO_REASON;
+ }
+
+ uintptr_t* const out_frames_ = nullptr;
+ size_t num_frames_ = 0u;
+ const size_t max_depth_ = 0u;
+ size_t skip_count_ = 0u;
+};
+
+// A bounded-size backtrace.
+template <size_t kMaxFrames>
+class FixedSizeBacktrace {
+ public:
+ void Collect(size_t skip_count) {
+ BacktraceCollector collector(frames_, kMaxFrames, skip_count);
+ collector.Collect();
+ num_frames_ = collector.NumFrames();
+ }
+
+ uint64_t Hash() const {
+ uint64_t hash = 9314237;
+ for (size_t i = 0; i < num_frames_; ++i) {
+ hash = hash * 2654435761 + frames_[i];
+ hash += (hash >> 13) ^ (hash << 6);
+ }
+ return hash;
+ }
+
+ private:
+ uintptr_t frames_[kMaxFrames];
+ size_t num_frames_;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_BACKTRACE_HELPER_H_
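
A minimal usage sketch of the new header, mirroring the GC-stress call site in heap.cc below; the std::unordered_set stands in for the Heap's own seen_backtraces_ set and its lock:

#include <cstddef>
#include <cstdint>
#include <unordered_set>

#include "backtrace_helper.h"

// Collect a bounded backtrace, hash it, and deduplicate on the hash.
bool IsNewBacktrace(std::unordered_set<uint64_t>* seen) {
  static constexpr size_t kMaxFrames = 16u;
  art::FixedSizeBacktrace<kMaxFrames> backtrace;
  backtrace.Collect(/* skip_count */ 2);  // skip the collector frames
  return seen->insert(backtrace.Hash()).second;
}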
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index fa87c8c..8162a82 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -7821,7 +7821,7 @@
mirror::String* ClassLinker::LookupString(const DexFile& dex_file,
dex::StringIndex string_idx,
- Handle<mirror::DexCache> dex_cache) {
+ ObjPtr<mirror::DexCache> dex_cache) {
DCHECK(dex_cache != nullptr);
ObjPtr<mirror::String> resolved = dex_cache->GetResolvedString(string_idx);
if (resolved != nullptr) {
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 6254acb..ef51d82 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -247,7 +247,7 @@
// result in the DexCache if found. Return null if not found.
mirror::String* LookupString(const DexFile& dex_file,
dex::StringIndex string_idx,
- Handle<mirror::DexCache> dex_cache)
+ ObjPtr<mirror::DexCache> dex_cache)
REQUIRES_SHARED(Locks::mutator_lock_);
// Resolve a Type with the given index from the DexFile, storing the
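
Relaxing the parameter from Handle<mirror::DexCache> to ObjPtr<mirror::DexCache> lets lookup-only callers avoid creating a handle, since LookupString only reads the cache and never allocates. A hedged sketch of the resulting call shape (LookupCached is a hypothetical wrapper):

// Callers holding a Handle pass the underlying pointer via Get(), as the
// sharpening.cc hunk above now does.
mirror::String* LookupCached(ClassLinker* class_linker,
                             const DexFile& dex_file,
                             dex::StringIndex string_idx,
                             Handle<mirror::DexCache> dex_cache)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return class_linker->LookupString(dex_file, string_idx, dex_cache.Get());
}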
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index a769748..f04bc89 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -18,13 +18,13 @@
#include <limits>
#include <memory>
-#include <unwind.h> // For GC verification.
#include <vector>
#include "android-base/stringprintf.h"
#include "allocation_listener.h"
#include "art_field-inl.h"
+#include "backtrace_helper.h"
#include "base/allocator.h"
#include "base/arena_allocator.h"
#include "base/dumpable.h"
@@ -4065,42 +4065,6 @@
}
}
-// Based on debug malloc logic from libc/bionic/debug_stacktrace.cpp.
-class StackCrawlState {
- public:
- StackCrawlState(uintptr_t* frames, size_t max_depth, size_t skip_count)
- : frames_(frames), frame_count_(0), max_depth_(max_depth), skip_count_(skip_count) {
- }
- size_t GetFrameCount() const {
- return frame_count_;
- }
- static _Unwind_Reason_Code Callback(_Unwind_Context* context, void* arg) {
- auto* const state = reinterpret_cast<StackCrawlState*>(arg);
- const uintptr_t ip = _Unwind_GetIP(context);
- // The first stack frame is get_backtrace itself. Skip it.
- if (ip != 0 && state->skip_count_ > 0) {
- --state->skip_count_;
- return _URC_NO_REASON;
- }
- // ip may be off for ARM but it shouldn't matter since we only use it for hashing.
- state->frames_[state->frame_count_] = ip;
- state->frame_count_++;
- return state->frame_count_ >= state->max_depth_ ? _URC_END_OF_STACK : _URC_NO_REASON;
- }
-
- private:
- uintptr_t* const frames_;
- size_t frame_count_;
- const size_t max_depth_;
- size_t skip_count_;
-};
-
-static size_t get_backtrace(uintptr_t* frames, size_t max_depth) {
- StackCrawlState state(frames, max_depth, 0u);
- _Unwind_Backtrace(&StackCrawlState::Callback, &state);
- return state.GetFrameCount();
-}
-
void Heap::CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj) {
auto* const runtime = Runtime::Current();
if (gc_stress_mode_ && runtime->GetClassLinker()->IsInitialized() &&
@@ -4109,13 +4073,9 @@
bool new_backtrace = false;
{
static constexpr size_t kMaxFrames = 16u;
- uintptr_t backtrace[kMaxFrames];
- const size_t frames = get_backtrace(backtrace, kMaxFrames);
- uint64_t hash = 0;
- for (size_t i = 0; i < frames; ++i) {
- hash = hash * 2654435761 + backtrace[i];
- hash += (hash >> 13) ^ (hash << 6);
- }
+ FixedSizeBacktrace<kMaxFrames> backtrace;
+ backtrace.Collect(/* skip_frames */ 2);
+ uint64_t hash = backtrace.Hash();
MutexLock mu(self, *backtrace_lock_);
new_backtrace = seen_backtraces_.find(hash) == seen_backtraces_.end();
if (new_backtrace) {
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index fc41f94..e9a5ae5 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -1366,7 +1366,10 @@
MutexLock mu(self, lock_);
ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
if (info != nullptr) {
- info->IncrementInlineUse();
+ if (!info->IncrementInlineUse()) {
+ // The inline-use counter overflowed; just bail.
+ return nullptr;
+ }
}
return info;
}
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index f42a8da..d6881aa 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -108,9 +108,15 @@
}
}
- void IncrementInlineUse() {
- DCHECK_NE(current_inline_uses_, std::numeric_limits<uint16_t>::max());
+ // Increments the number of times this method is currently being inlined.
+ // Returns whether it was successful, i.e. whether the counter could be
+ // incremented without overflowing.
+ bool IncrementInlineUse() {
+ if (current_inline_uses_ == std::numeric_limits<uint16_t>::max()) {
+ return false;
+ }
current_inline_uses_++;
+ return true;
}
void DecrementInlineUse() {
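
Returning bool moves overflow handling to the caller, as the jit_code_cache.cc hunk above shows. A sketch of the intended acquire/release pairing (both helper names are hypothetical):

ProfilingInfo* AcquireForInlining(ProfilingInfo* info) {
  if (info != nullptr && !info->IncrementInlineUse()) {
    return nullptr;  // Counter would overflow; the caller must not inline.
  }
  return info;
}

void ReleaseAfterInlining(ProfilingInfo* info) {
  if (info != nullptr) {
    info->DecrementInlineUse();  // Balance every successful increment.
  }
}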
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 48a9ecd..78b2e15 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -426,6 +426,11 @@
NativeDexCachePair<T> pair,
PointerSize ptr_size);
+ uint32_t StringSlotIndex(dex::StringIndex string_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+ uint32_t TypeSlotIndex(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+ uint32_t FieldSlotIndex(uint32_t field_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+ uint32_t MethodTypeSlotIndex(uint32_t proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+
private:
void Init(const DexFile* dex_file,
ObjPtr<String> location,
@@ -457,11 +462,6 @@
using ConversionPair32 = ConversionPair<uint32_t>;
using ConversionPair64 = ConversionPair<uint64_t>;
- uint32_t StringSlotIndex(dex::StringIndex string_idx) REQUIRES_SHARED(Locks::mutator_lock_);
- uint32_t TypeSlotIndex(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
- uint32_t FieldSlotIndex(uint32_t field_idx) REQUIRES_SHARED(Locks::mutator_lock_);
- uint32_t MethodTypeSlotIndex(uint32_t proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);
-
// Visit instance fields of the dex cache as well as its associated arrays.
template <bool kVisitNativeRoots,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
diff --git a/runtime/openjdkjvmti/OpenjdkJvmTi.cc b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
index 5401e5c..448e1ed 100644
--- a/runtime/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
@@ -79,20 +79,26 @@
class JvmtiFunctions {
private:
- static bool IsValidEnv(jvmtiEnv* env) {
- return env != nullptr;
+ static jvmtiError getEnvironmentError(jvmtiEnv* env) {
+ if (env == nullptr) {
+ return ERR(INVALID_ENVIRONMENT);
+ } else if (art::Thread::Current() == nullptr) {
+ return ERR(UNATTACHED_THREAD);
+ } else {
+ return OK;
+ }
}
-#define ENSURE_VALID_ENV(env) \
- do { \
- if (!IsValidEnv(env)) { \
- return ERR(INVALID_ENVIRONMENT); \
- } \
+#define ENSURE_VALID_ENV(env) \
+ do { \
+ jvmtiError ensure_valid_env_ ## __LINE__ = getEnvironmentError(env); \
+ if (ensure_valid_env_ ## __LINE__ != OK) { \
+ return ensure_valid_env_ ## __LINE__ ; \
+ } \
} while (false)
#define ENSURE_HAS_CAP(env, cap) \
do { \
- ENSURE_VALID_ENV(env); \
if (ArtJvmTiEnv::AsArtJvmTiEnv(env)->capabilities.cap != 1) { \
return ERR(MUST_POSSESS_CAPABILITY); \
} \
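
Because ENSURE_HAS_CAP no longer performs the environment check itself, every entry point below now states ENSURE_VALID_ENV explicitly before any capability check. Expanded by hand, a suspend-style entry point behaves roughly like this (SuspendThreadExpanded is a hypothetical name):

static jvmtiError SuspendThreadExpanded(jvmtiEnv* env, jthread thread ATTRIBUTE_UNUSED) {
  // ENSURE_VALID_ENV(env): a null environment or an unattached thread bails out.
  jvmtiError env_error = getEnvironmentError(env);
  if (env_error != OK) {
    return env_error;  // INVALID_ENVIRONMENT or UNATTACHED_THREAD
  }
  // ENSURE_HAS_CAP(env, can_suspend): capability check only.
  if (ArtJvmTiEnv::AsArtJvmTiEnv(env)->capabilities.can_suspend != 1) {
    return ERR(MUST_POSSESS_CAPABILITY);
  }
  return ERR(NOT_IMPLEMENTED);
}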
@@ -121,18 +127,22 @@
}
static jvmtiError GetThreadState(jvmtiEnv* env, jthread thread, jint* thread_state_ptr) {
+ ENSURE_VALID_ENV(env);
return ThreadUtil::GetThreadState(env, thread, thread_state_ptr);
}
static jvmtiError GetCurrentThread(jvmtiEnv* env, jthread* thread_ptr) {
+ ENSURE_VALID_ENV(env);
return ThreadUtil::GetCurrentThread(env, thread_ptr);
}
static jvmtiError GetAllThreads(jvmtiEnv* env, jint* threads_count_ptr, jthread** threads_ptr) {
+ ENSURE_VALID_ENV(env);
return ThreadUtil::GetAllThreads(env, threads_count_ptr, threads_ptr);
}
static jvmtiError SuspendThread(jvmtiEnv* env, jthread thread ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_suspend);
return ERR(NOT_IMPLEMENTED);
}
@@ -141,11 +151,13 @@
jint request_count ATTRIBUTE_UNUSED,
const jthread* request_list ATTRIBUTE_UNUSED,
jvmtiError* results ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_suspend);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError ResumeThread(jvmtiEnv* env, jthread thread ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_suspend);
return ERR(NOT_IMPLEMENTED);
}
@@ -154,6 +166,7 @@
jint request_count ATTRIBUTE_UNUSED,
const jthread* request_list ATTRIBUTE_UNUSED,
jvmtiError* results ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_suspend);
return ERR(NOT_IMPLEMENTED);
}
@@ -161,16 +174,19 @@
static jvmtiError StopThread(jvmtiEnv* env,
jthread thread ATTRIBUTE_UNUSED,
jobject exception ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_signal_thread);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError InterruptThread(jvmtiEnv* env, jthread thread ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_signal_thread);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetThreadInfo(jvmtiEnv* env, jthread thread, jvmtiThreadInfo* info_ptr) {
+ ENSURE_VALID_ENV(env);
return ThreadUtil::GetThreadInfo(env, thread, info_ptr);
}
@@ -178,6 +194,7 @@
jthread thread ATTRIBUTE_UNUSED,
jint* owned_monitor_count_ptr ATTRIBUTE_UNUSED,
jobject** owned_monitors_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_owned_monitor_info);
return ERR(NOT_IMPLEMENTED);
}
@@ -187,6 +204,7 @@
jthread thread ATTRIBUTE_UNUSED,
jint* monitor_info_count_ptr ATTRIBUTE_UNUSED,
jvmtiMonitorStackDepthInfo** monitor_info_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_owned_monitor_stack_depth_info);
return ERR(NOT_IMPLEMENTED);
}
@@ -194,6 +212,7 @@
static jvmtiError GetCurrentContendedMonitor(jvmtiEnv* env,
jthread thread ATTRIBUTE_UNUSED,
jobject* monitor_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_current_contended_monitor);
return ERR(NOT_IMPLEMENTED);
}
@@ -203,26 +222,31 @@
jvmtiStartFunction proc,
const void* arg,
jint priority) {
+ ENSURE_VALID_ENV(env);
return ThreadUtil::RunAgentThread(env, thread, proc, arg, priority);
}
static jvmtiError SetThreadLocalStorage(jvmtiEnv* env, jthread thread, const void* data) {
+ ENSURE_VALID_ENV(env);
return ThreadUtil::SetThreadLocalStorage(env, thread, data);
}
static jvmtiError GetThreadLocalStorage(jvmtiEnv* env, jthread thread, void** data_ptr) {
+ ENSURE_VALID_ENV(env);
return ThreadUtil::GetThreadLocalStorage(env, thread, data_ptr);
}
static jvmtiError GetTopThreadGroups(jvmtiEnv* env,
jint* group_count_ptr,
jthreadGroup** groups_ptr) {
+ ENSURE_VALID_ENV(env);
return ThreadGroupUtil::GetTopThreadGroups(env, group_count_ptr, groups_ptr);
}
static jvmtiError GetThreadGroupInfo(jvmtiEnv* env,
jthreadGroup group,
jvmtiThreadGroupInfo* info_ptr) {
+ ENSURE_VALID_ENV(env);
return ThreadGroupUtil::GetThreadGroupInfo(env, group, info_ptr);
}
@@ -232,6 +256,7 @@
jthread** threads_ptr,
jint* group_count_ptr,
jthreadGroup** groups_ptr) {
+ ENSURE_VALID_ENV(env);
return ThreadGroupUtil::GetThreadGroupChildren(env,
group,
thread_count_ptr,
@@ -246,6 +271,7 @@
jint max_frame_count,
jvmtiFrameInfo* frame_buffer,
jint* count_ptr) {
+ ENSURE_VALID_ENV(env);
return StackUtil::GetStackTrace(env,
thread,
start_depth,
@@ -258,6 +284,7 @@
jint max_frame_count,
jvmtiStackInfo** stack_info_ptr,
jint* thread_count_ptr) {
+ ENSURE_VALID_ENV(env);
return StackUtil::GetAllStackTraces(env, max_frame_count, stack_info_ptr, thread_count_ptr);
}
@@ -266,6 +293,7 @@
const jthread* thread_list,
jint max_frame_count,
jvmtiStackInfo** stack_info_ptr) {
+ ENSURE_VALID_ENV(env);
return StackUtil::GetThreadListStackTraces(env,
thread_count,
thread_list,
@@ -274,10 +302,12 @@
}
static jvmtiError GetFrameCount(jvmtiEnv* env, jthread thread, jint* count_ptr) {
+ ENSURE_VALID_ENV(env);
return StackUtil::GetFrameCount(env, thread, count_ptr);
}
static jvmtiError PopFrame(jvmtiEnv* env, jthread thread ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_pop_frame);
return ERR(NOT_IMPLEMENTED);
}
@@ -287,12 +317,14 @@
jint depth,
jmethodID* method_ptr,
jlocation* location_ptr) {
+ ENSURE_VALID_ENV(env);
return StackUtil::GetFrameLocation(env, thread, depth, method_ptr, location_ptr);
}
static jvmtiError NotifyFramePop(jvmtiEnv* env,
jthread thread ATTRIBUTE_UNUSED,
jint depth ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_generate_frame_pop_events);
return ERR(NOT_IMPLEMENTED);
}
@@ -300,6 +332,7 @@
static jvmtiError ForceEarlyReturnObject(jvmtiEnv* env,
jthread thread ATTRIBUTE_UNUSED,
jobject value ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
@@ -307,6 +340,7 @@
static jvmtiError ForceEarlyReturnInt(jvmtiEnv* env,
jthread thread ATTRIBUTE_UNUSED,
jint value ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
@@ -314,6 +348,7 @@
static jvmtiError ForceEarlyReturnLong(jvmtiEnv* env,
jthread thread ATTRIBUTE_UNUSED,
jlong value ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
@@ -321,6 +356,7 @@
static jvmtiError ForceEarlyReturnFloat(jvmtiEnv* env,
jthread thread ATTRIBUTE_UNUSED,
jfloat value ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
@@ -328,11 +364,13 @@
static jvmtiError ForceEarlyReturnDouble(jvmtiEnv* env,
jthread thread ATTRIBUTE_UNUSED,
jdouble value ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError ForceEarlyReturnVoid(jvmtiEnv* env, jthread thread ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
@@ -343,6 +381,7 @@
jobject initial_object,
const jvmtiHeapCallbacks* callbacks,
const void* user_data) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_tag_objects);
HeapUtil heap_util(ArtJvmTiEnv::AsArtJvmTiEnv(env)->object_tag_table.get());
return heap_util.FollowReferences(env,
@@ -358,12 +397,14 @@
jclass klass,
const jvmtiHeapCallbacks* callbacks,
const void* user_data) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_tag_objects);
HeapUtil heap_util(ArtJvmTiEnv::AsArtJvmTiEnv(env)->object_tag_table.get());
return heap_util.IterateThroughHeap(env, heap_filter, klass, callbacks, user_data);
}
static jvmtiError GetTag(jvmtiEnv* env, jobject object, jlong* tag_ptr) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_tag_objects);
JNIEnv* jni_env = GetJniEnv(env);
@@ -381,6 +422,7 @@
}
static jvmtiError SetTag(jvmtiEnv* env, jobject object, jlong tag) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_tag_objects);
if (object == nullptr) {
@@ -405,6 +447,7 @@
jint* count_ptr,
jobject** object_result_ptr,
jlong** tag_result_ptr) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_tag_objects);
JNIEnv* jni_env = GetJniEnv(env);
@@ -422,6 +465,7 @@
}
static jvmtiError ForceGarbageCollection(jvmtiEnv* env) {
+ ENSURE_VALID_ENV(env);
return HeapUtil::ForceGarbageCollection(env);
}
@@ -430,6 +474,7 @@
jobject object ATTRIBUTE_UNUSED,
jvmtiObjectReferenceCallback object_reference_callback ATTRIBUTE_UNUSED,
const void* user_data ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_tag_objects);
return ERR(NOT_IMPLEMENTED);
}
@@ -440,6 +485,7 @@
jvmtiStackReferenceCallback stack_ref_callback ATTRIBUTE_UNUSED,
jvmtiObjectReferenceCallback object_ref_callback ATTRIBUTE_UNUSED,
const void* user_data ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_tag_objects);
return ERR(NOT_IMPLEMENTED);
}
@@ -448,6 +494,7 @@
jvmtiHeapObjectFilter object_filter ATTRIBUTE_UNUSED,
jvmtiHeapObjectCallback heap_object_callback ATTRIBUTE_UNUSED,
const void* user_data ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_tag_objects);
return ERR(NOT_IMPLEMENTED);
}
@@ -458,6 +505,7 @@
jvmtiHeapObjectFilter object_filter ATTRIBUTE_UNUSED,
jvmtiHeapObjectCallback heap_object_callback ATTRIBUTE_UNUSED,
const void* user_data ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_tag_objects);
return ERR(NOT_IMPLEMENTED);
}
@@ -467,6 +515,7 @@
jint depth ATTRIBUTE_UNUSED,
jint slot ATTRIBUTE_UNUSED,
jobject* value_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -475,6 +524,7 @@
jthread thread ATTRIBUTE_UNUSED,
jint depth ATTRIBUTE_UNUSED,
jobject* value_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -484,6 +534,7 @@
jint depth ATTRIBUTE_UNUSED,
jint slot ATTRIBUTE_UNUSED,
jint* value_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -493,6 +544,7 @@
jint depth ATTRIBUTE_UNUSED,
jint slot ATTRIBUTE_UNUSED,
jlong* value_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -502,6 +554,7 @@
jint depth ATTRIBUTE_UNUSED,
jint slot ATTRIBUTE_UNUSED,
jfloat* value_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -511,6 +564,7 @@
jint depth ATTRIBUTE_UNUSED,
jint slot ATTRIBUTE_UNUSED,
jdouble* value_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -520,6 +574,7 @@
jint depth ATTRIBUTE_UNUSED,
jint slot ATTRIBUTE_UNUSED,
jobject value ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -529,6 +584,7 @@
jint depth ATTRIBUTE_UNUSED,
jint slot ATTRIBUTE_UNUSED,
jint value ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -538,6 +594,7 @@
jint depth ATTRIBUTE_UNUSED,
jint slot ATTRIBUTE_UNUSED,
jlong value ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -547,6 +604,7 @@
jint depth ATTRIBUTE_UNUSED,
jint slot ATTRIBUTE_UNUSED,
jfloat value ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -556,6 +614,7 @@
jint depth ATTRIBUTE_UNUSED,
jint slot ATTRIBUTE_UNUSED,
jdouble value ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -563,6 +622,7 @@
static jvmtiError SetBreakpoint(jvmtiEnv* env,
jmethodID method ATTRIBUTE_UNUSED,
jlocation location ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_generate_breakpoint_events);
return ERR(NOT_IMPLEMENTED);
}
@@ -570,6 +630,7 @@
static jvmtiError ClearBreakpoint(jvmtiEnv* env,
jmethodID method ATTRIBUTE_UNUSED,
jlocation location ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_generate_breakpoint_events);
return ERR(NOT_IMPLEMENTED);
}
@@ -577,6 +638,7 @@
static jvmtiError SetFieldAccessWatch(jvmtiEnv* env,
jclass klass ATTRIBUTE_UNUSED,
jfieldID field ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_generate_field_access_events);
return ERR(NOT_IMPLEMENTED);
}
@@ -584,6 +646,7 @@
static jvmtiError ClearFieldAccessWatch(jvmtiEnv* env,
jclass klass ATTRIBUTE_UNUSED,
jfieldID field ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_generate_field_access_events);
return ERR(NOT_IMPLEMENTED);
}
@@ -591,6 +654,7 @@
static jvmtiError SetFieldModificationWatch(jvmtiEnv* env,
jclass klass ATTRIBUTE_UNUSED,
jfieldID field ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_generate_field_modification_events);
return ERR(NOT_IMPLEMENTED);
}
@@ -598,11 +662,13 @@
static jvmtiError ClearFieldModificationWatch(jvmtiEnv* env,
jclass klass ATTRIBUTE_UNUSED,
jfieldID field ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_generate_field_modification_events);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetLoadedClasses(jvmtiEnv* env, jint* class_count_ptr, jclass** classes_ptr) {
+ ENSURE_VALID_ENV(env);
HeapUtil heap_util(ArtJvmTiEnv::AsArtJvmTiEnv(env)->object_tag_table.get());
return heap_util.GetLoadedClasses(env, class_count_ptr, classes_ptr);
}
@@ -611,6 +677,7 @@
jobject initiating_loader,
jint* class_count_ptr,
jclass** classes_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::GetClassLoaderClasses(env, initiating_loader, class_count_ptr, classes_ptr);
}
@@ -618,21 +685,25 @@
jclass klass,
char** signature_ptr,
char** generic_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::GetClassSignature(env, klass, signature_ptr, generic_ptr);
}
static jvmtiError GetClassStatus(jvmtiEnv* env, jclass klass, jint* status_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::GetClassStatus(env, klass, status_ptr);
}
static jvmtiError GetSourceFileName(jvmtiEnv* env,
jclass klass ATTRIBUTE_UNUSED,
char** source_name_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_source_file_name);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetClassModifiers(jvmtiEnv* env, jclass klass, jint* modifiers_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::GetClassModifiers(env, klass, modifiers_ptr);
}
@@ -640,6 +711,7 @@
jclass klass,
jint* method_count_ptr,
jmethodID** methods_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::GetClassMethods(env, klass, method_count_ptr, methods_ptr);
}
@@ -647,6 +719,7 @@
jclass klass,
jint* field_count_ptr,
jfieldID** fields_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::GetClassFields(env, klass, field_count_ptr, fields_ptr);
}
@@ -654,6 +727,7 @@
jclass klass,
jint* interface_count_ptr,
jclass** interfaces_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::GetImplementedInterfaces(env, klass, interface_count_ptr, interfaces_ptr);
}
@@ -661,6 +735,7 @@
jclass klass,
jint* minor_version_ptr,
jint* major_version_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::GetClassVersionNumbers(env, klass, minor_version_ptr, major_version_ptr);
}
@@ -669,38 +744,45 @@
jint* constant_pool_count_ptr ATTRIBUTE_UNUSED,
jint* constant_pool_byte_count_ptr ATTRIBUTE_UNUSED,
unsigned char** constant_pool_bytes_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_constant_pool);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError IsInterface(jvmtiEnv* env, jclass klass, jboolean* is_interface_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::IsInterface(env, klass, is_interface_ptr);
}
static jvmtiError IsArrayClass(jvmtiEnv* env,
jclass klass,
jboolean* is_array_class_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::IsArrayClass(env, klass, is_array_class_ptr);
}
static jvmtiError IsModifiableClass(jvmtiEnv* env,
jclass klass,
jboolean* is_modifiable_class_ptr) {
+ ENSURE_VALID_ENV(env);
return Redefiner::IsModifiableClass(env, klass, is_modifiable_class_ptr);
}
static jvmtiError GetClassLoader(jvmtiEnv* env, jclass klass, jobject* classloader_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::GetClassLoader(env, klass, classloader_ptr);
}
static jvmtiError GetSourceDebugExtension(jvmtiEnv* env,
jclass klass ATTRIBUTE_UNUSED,
char** source_debug_extension_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_source_debug_extension);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError RetransformClasses(jvmtiEnv* env, jint class_count, const jclass* classes) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_retransform_classes);
std::string error_msg;
jvmtiError res = Transformer::RetransformClasses(ArtJvmTiEnv::AsArtJvmTiEnv(env),
@@ -719,6 +801,7 @@
static jvmtiError RedefineClasses(jvmtiEnv* env,
jint class_count,
const jvmtiClassDefinition* class_definitions) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_redefine_classes);
std::string error_msg;
jvmtiError res = Redefiner::RedefineClasses(ArtJvmTiEnv::AsArtJvmTiEnv(env),
@@ -735,16 +818,19 @@
}
static jvmtiError GetObjectSize(jvmtiEnv* env, jobject object, jlong* size_ptr) {
+ ENSURE_VALID_ENV(env);
return ObjectUtil::GetObjectSize(env, object, size_ptr);
}
static jvmtiError GetObjectHashCode(jvmtiEnv* env, jobject object, jint* hash_code_ptr) {
+ ENSURE_VALID_ENV(env);
return ObjectUtil::GetObjectHashCode(env, object, hash_code_ptr);
}
static jvmtiError GetObjectMonitorUsage(jvmtiEnv* env,
jobject object ATTRIBUTE_UNUSED,
jvmtiMonitorUsage* info_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_monitor_info);
return ERR(NOT_IMPLEMENTED);
}
@@ -755,6 +841,7 @@
char** name_ptr,
char** signature_ptr,
char** generic_ptr) {
+ ENSURE_VALID_ENV(env);
return FieldUtil::GetFieldName(env, klass, field, name_ptr, signature_ptr, generic_ptr);
}
@@ -762,6 +849,7 @@
jclass klass,
jfieldID field,
jclass* declaring_class_ptr) {
+ ENSURE_VALID_ENV(env);
return FieldUtil::GetFieldDeclaringClass(env, klass, field, declaring_class_ptr);
}
@@ -769,6 +857,7 @@
jclass klass,
jfieldID field,
jint* modifiers_ptr) {
+ ENSURE_VALID_ENV(env);
return FieldUtil::GetFieldModifiers(env, klass, field, modifiers_ptr);
}
@@ -776,6 +865,7 @@
jclass klass,
jfieldID field,
jboolean* is_synthetic_ptr) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_synthetic_attribute);
return FieldUtil::IsFieldSynthetic(env, klass, field, is_synthetic_ptr);
}
@@ -785,30 +875,35 @@
char** name_ptr,
char** signature_ptr,
char** generic_ptr) {
+ ENSURE_VALID_ENV(env);
return MethodUtil::GetMethodName(env, method, name_ptr, signature_ptr, generic_ptr);
}
static jvmtiError GetMethodDeclaringClass(jvmtiEnv* env,
jmethodID method,
jclass* declaring_class_ptr) {
+ ENSURE_VALID_ENV(env);
return MethodUtil::GetMethodDeclaringClass(env, method, declaring_class_ptr);
}
static jvmtiError GetMethodModifiers(jvmtiEnv* env,
jmethodID method,
jint* modifiers_ptr) {
+ ENSURE_VALID_ENV(env);
return MethodUtil::GetMethodModifiers(env, method, modifiers_ptr);
}
static jvmtiError GetMaxLocals(jvmtiEnv* env,
jmethodID method,
jint* max_ptr) {
+ ENSURE_VALID_ENV(env);
return MethodUtil::GetMaxLocals(env, method, max_ptr);
}
static jvmtiError GetArgumentsSize(jvmtiEnv* env,
jmethodID method,
jint* size_ptr) {
+ ENSURE_VALID_ENV(env);
return MethodUtil::GetArgumentsSize(env, method, size_ptr);
}
@@ -816,6 +911,7 @@
jmethodID method,
jint* entry_count_ptr,
jvmtiLineNumberEntry** table_ptr) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_line_numbers);
return MethodUtil::GetLineNumberTable(env, method, entry_count_ptr, table_ptr);
}
@@ -824,6 +920,7 @@
jmethodID method,
jlocation* start_location_ptr,
jlocation* end_location_ptr) {
+ ENSURE_VALID_ENV(env);
return MethodUtil::GetMethodLocation(env, method, start_location_ptr, end_location_ptr);
}
@@ -831,6 +928,7 @@
jmethodID method ATTRIBUTE_UNUSED,
jint* entry_count_ptr ATTRIBUTE_UNUSED,
jvmtiLocalVariableEntry** table_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -839,24 +937,29 @@
jmethodID method ATTRIBUTE_UNUSED,
jint* bytecode_count_ptr ATTRIBUTE_UNUSED,
unsigned char** bytecodes_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_bytecodes);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError IsMethodNative(jvmtiEnv* env, jmethodID method, jboolean* is_native_ptr) {
+ ENSURE_VALID_ENV(env);
return MethodUtil::IsMethodNative(env, method, is_native_ptr);
}
static jvmtiError IsMethodSynthetic(jvmtiEnv* env, jmethodID method, jboolean* is_synthetic_ptr) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_synthetic_attribute);
return MethodUtil::IsMethodSynthetic(env, method, is_synthetic_ptr);
}
static jvmtiError IsMethodObsolete(jvmtiEnv* env, jmethodID method, jboolean* is_obsolete_ptr) {
+ ENSURE_VALID_ENV(env);
return MethodUtil::IsMethodObsolete(env, method, is_obsolete_ptr);
}
static jvmtiError SetNativeMethodPrefix(jvmtiEnv* env, const char* prefix ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_set_native_method_prefix);
return ERR(NOT_IMPLEMENTED);
}
@@ -864,43 +967,53 @@
static jvmtiError SetNativeMethodPrefixes(jvmtiEnv* env,
jint prefix_count ATTRIBUTE_UNUSED,
char** prefixes ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_set_native_method_prefix);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError CreateRawMonitor(jvmtiEnv* env, const char* name, jrawMonitorID* monitor_ptr) {
+ ENSURE_VALID_ENV(env);
return MonitorUtil::CreateRawMonitor(env, name, monitor_ptr);
}
static jvmtiError DestroyRawMonitor(jvmtiEnv* env, jrawMonitorID monitor) {
+ ENSURE_VALID_ENV(env);
return MonitorUtil::DestroyRawMonitor(env, monitor);
}
static jvmtiError RawMonitorEnter(jvmtiEnv* env, jrawMonitorID monitor) {
+ ENSURE_VALID_ENV(env);
return MonitorUtil::RawMonitorEnter(env, monitor);
}
static jvmtiError RawMonitorExit(jvmtiEnv* env, jrawMonitorID monitor) {
+ ENSURE_VALID_ENV(env);
return MonitorUtil::RawMonitorExit(env, monitor);
}
static jvmtiError RawMonitorWait(jvmtiEnv* env, jrawMonitorID monitor, jlong millis) {
+ ENSURE_VALID_ENV(env);
return MonitorUtil::RawMonitorWait(env, monitor, millis);
}
static jvmtiError RawMonitorNotify(jvmtiEnv* env, jrawMonitorID monitor) {
+ ENSURE_VALID_ENV(env);
return MonitorUtil::RawMonitorNotify(env, monitor);
}
static jvmtiError RawMonitorNotifyAll(jvmtiEnv* env, jrawMonitorID monitor) {
+ ENSURE_VALID_ENV(env);
return MonitorUtil::RawMonitorNotifyAll(env, monitor);
}
static jvmtiError SetJNIFunctionTable(jvmtiEnv* env, const jniNativeInterface* function_table) {
+ ENSURE_VALID_ENV(env);
return JNIUtil::SetJNIFunctionTable(env, function_table);
}
static jvmtiError GetJNIFunctionTable(jvmtiEnv* env, jniNativeInterface** function_table) {
+ ENSURE_VALID_ENV(env);
return JNIUtil::GetJNIFunctionTable(env, function_table);
}
@@ -955,14 +1068,16 @@
return gEventHandler.SetEvent(art_env, art_thread, GetArtJvmtiEvent(art_env, event_type), mode);
}
- static jvmtiError GenerateEvents(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ static jvmtiError GenerateEvents(jvmtiEnv* env,
jvmtiEvent event_type ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
return OK;
}
- static jvmtiError GetExtensionFunctions(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ static jvmtiError GetExtensionFunctions(jvmtiEnv* env,
jint* extension_count_ptr,
jvmtiExtensionFunctionInfo** extensions) {
+ ENSURE_VALID_ENV(env);
// We do not have any extension functions.
*extension_count_ptr = 0;
*extensions = nullptr;
@@ -970,9 +1085,10 @@
return ERR(NONE);
}
- static jvmtiError GetExtensionEvents(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ static jvmtiError GetExtensionEvents(jvmtiEnv* env,
jint* extension_count_ptr,
jvmtiExtensionEventInfo** extensions) {
+ ENSURE_VALID_ENV(env);
// We do not have any extension events.
*extension_count_ptr = 0;
*extensions = nullptr;
@@ -980,9 +1096,10 @@
return ERR(NONE);
}
- static jvmtiError SetExtensionEventCallback(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ static jvmtiError SetExtensionEventCallback(jvmtiEnv* env,
jint extension_event_index ATTRIBUTE_UNUSED,
jvmtiExtensionEvent callback ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
// We do not have any extension events, so any call is illegal.
return ERR(ILLEGAL_ARGUMENT);
}
@@ -999,8 +1116,8 @@
ENSURE_NON_NULL(capabilities_ptr);
ArtJvmTiEnv* art_env = static_cast<ArtJvmTiEnv*>(env);
jvmtiError ret = OK;
- jvmtiCapabilities changed;
- jvmtiCapabilities potential_capabilities;
+ jvmtiCapabilities changed = {};
+ jvmtiCapabilities potential_capabilities = {};
ret = env->GetPotentialCapabilities(&potential_capabilities);
if (ret != OK) {
return ret;
@@ -1072,7 +1189,7 @@
ENSURE_VALID_ENV(env);
ENSURE_NON_NULL(capabilities_ptr);
ArtJvmTiEnv* art_env = reinterpret_cast<ArtJvmTiEnv*>(env);
- jvmtiCapabilities changed;
+ jvmtiCapabilities changed = {};
#define DEL_CAPABILITY(e) \
do { \
if (capabilities_ptr->e == 1) { \
@@ -1141,17 +1258,20 @@
static jvmtiError GetCurrentThreadCpuTimerInfo(jvmtiEnv* env,
jvmtiTimerInfo* info_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_current_thread_cpu_time);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetCurrentThreadCpuTime(jvmtiEnv* env, jlong* nanos_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_current_thread_cpu_time);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetThreadCpuTimerInfo(jvmtiEnv* env,
jvmtiTimerInfo* info_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_thread_cpu_time);
return ERR(NOT_IMPLEMENTED);
}
@@ -1159,43 +1279,53 @@
static jvmtiError GetThreadCpuTime(jvmtiEnv* env,
jthread thread ATTRIBUTE_UNUSED,
jlong* nanos_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_thread_cpu_time);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetTimerInfo(jvmtiEnv* env, jvmtiTimerInfo* info_ptr) {
+ ENSURE_VALID_ENV(env);
return TimerUtil::GetTimerInfo(env, info_ptr);
}
static jvmtiError GetTime(jvmtiEnv* env, jlong* nanos_ptr) {
+ ENSURE_VALID_ENV(env);
return TimerUtil::GetTime(env, nanos_ptr);
}
static jvmtiError GetAvailableProcessors(jvmtiEnv* env, jint* processor_count_ptr) {
+ ENSURE_VALID_ENV(env);
return TimerUtil::GetAvailableProcessors(env, processor_count_ptr);
}
static jvmtiError AddToBootstrapClassLoaderSearch(jvmtiEnv* env, const char* segment) {
+ ENSURE_VALID_ENV(env);
return SearchUtil::AddToBootstrapClassLoaderSearch(env, segment);
}
static jvmtiError AddToSystemClassLoaderSearch(jvmtiEnv* env, const char* segment) {
+ ENSURE_VALID_ENV(env);
return SearchUtil::AddToSystemClassLoaderSearch(env, segment);
}
static jvmtiError GetSystemProperties(jvmtiEnv* env, jint* count_ptr, char*** property_ptr) {
+ ENSURE_VALID_ENV(env);
return PropertiesUtil::GetSystemProperties(env, count_ptr, property_ptr);
}
static jvmtiError GetSystemProperty(jvmtiEnv* env, const char* property, char** value_ptr) {
+ ENSURE_VALID_ENV(env);
return PropertiesUtil::GetSystemProperty(env, property, value_ptr);
}
static jvmtiError SetSystemProperty(jvmtiEnv* env, const char* property, const char* value) {
+ ENSURE_VALID_ENV(env);
return PropertiesUtil::SetSystemProperty(env, property, value);
}
static jvmtiError GetPhase(jvmtiEnv* env, jvmtiPhase* phase_ptr) {
+ ENSURE_VALID_ENV(env);
return PhaseUtil::GetPhase(env, phase_ptr);
}
@@ -1303,9 +1433,10 @@
}
}
- static jvmtiError SetVerboseFlag(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ static jvmtiError SetVerboseFlag(jvmtiEnv* env,
jvmtiVerboseFlag flag,
jboolean value) {
+ ENSURE_VALID_ENV(env);
if (flag == jvmtiVerboseFlag::JVMTI_VERBOSE_OTHER) {
// OTHER is special, as it's 0, so can't do a bit check.
bool val = (value == JNI_TRUE) ? true : false;
@@ -1359,8 +1490,8 @@
return ERR(NONE);
}
- static jvmtiError GetJLocationFormat(jvmtiEnv* env ATTRIBUTE_UNUSED,
- jvmtiJlocationFormat* format_ptr) {
+ static jvmtiError GetJLocationFormat(jvmtiEnv* env, jvmtiJlocationFormat* format_ptr) {
+ ENSURE_VALID_ENV(env);
// Report BCI as jlocation format. We report dex bytecode indices.
if (format_ptr == nullptr) {
return ERR(NULL_POINTER);
diff --git a/runtime/transaction_test.cc b/runtime/transaction_test.cc
index 97c1228..9206292 100644
--- a/runtime/transaction_test.cc
+++ b/runtime/transaction_test.cc
@@ -508,7 +508,7 @@
dex::StringIndex string_idx = dex_file->GetIndexForStringId(*string_id);
ASSERT_TRUE(string_idx.IsValid());
// String should only get resolved by the initializer.
- EXPECT_TRUE(class_linker_->LookupString(*dex_file, string_idx, h_dex_cache) == nullptr);
+ EXPECT_TRUE(class_linker_->LookupString(*dex_file, string_idx, h_dex_cache.Get()) == nullptr);
EXPECT_TRUE(h_dex_cache->GetResolvedString(string_idx) == nullptr);
// Do the transaction, then roll back.
Transaction transaction;
@@ -518,7 +518,7 @@
ASSERT_TRUE(h_klass->IsInitialized());
// Make sure the string got resolved by the transaction.
{
- mirror::String* s = class_linker_->LookupString(*dex_file, string_idx, h_dex_cache);
+ mirror::String* s = class_linker_->LookupString(*dex_file, string_idx, h_dex_cache.Get());
ASSERT_TRUE(s != nullptr);
EXPECT_STREQ(s->ToModifiedUtf8().c_str(), kResolvedString);
EXPECT_EQ(s, h_dex_cache->GetResolvedString(string_idx));
@@ -526,7 +526,7 @@
Runtime::Current()->ExitTransactionMode();
transaction.Rollback();
// Check that the string did not stay resolved.
- EXPECT_TRUE(class_linker_->LookupString(*dex_file, string_idx, h_dex_cache) == nullptr);
+ EXPECT_TRUE(class_linker_->LookupString(*dex_file, string_idx, h_dex_cache.Get()) == nullptr);
EXPECT_TRUE(h_dex_cache->GetResolvedString(string_idx) == nullptr);
ASSERT_FALSE(h_klass->IsInitialized());
ASSERT_FALSE(soa.Self()->IsExceptionPending());
diff --git a/test/051-thread/expected.txt b/test/051-thread/expected.txt
index c6cd4f8..3fc3492 100644
--- a/test/051-thread/expected.txt
+++ b/test/051-thread/expected.txt
@@ -1,6 +1,6 @@
JNI_OnLoad called
thread test starting
-testThreadCapacity thread count: 512
+testThreadCapacity thread count: 128
testThreadDaemons starting thread 'TestDaemonThread'
testThreadDaemons @ Thread running
testThreadDaemons @ Got expected setDaemon exception
diff --git a/test/051-thread/src/Main.java b/test/051-thread/src/Main.java
index 2e26b22..82fc0d4 100644
--- a/test/051-thread/src/Main.java
+++ b/test/051-thread/src/Main.java
@@ -35,8 +35,8 @@
* Simple thread capacity test.
*/
private static void testThreadCapacity() throws Exception {
- TestCapacityThread[] threads = new TestCapacityThread[512];
- for (int i = 0; i < 512; i++) {
+ TestCapacityThread[] threads = new TestCapacityThread[128];
+ for (int i = 0; i < threads.length; i++) {
threads[i] = new TestCapacityThread();
}
diff --git a/test/159-app-image-fields/expected.txt b/test/159-app-image-fields/expected.txt
new file mode 100644
index 0000000..f63e8e3
--- /dev/null
+++ b/test/159-app-image-fields/expected.txt
@@ -0,0 +1,3 @@
+Eating all memory.
+memoryWasAllocated = true
+match: true
diff --git a/test/159-app-image-fields/info.txt b/test/159-app-image-fields/info.txt
new file mode 100644
index 0000000..9b10078
--- /dev/null
+++ b/test/159-app-image-fields/info.txt
@@ -0,0 +1,3 @@
+Regression test for erroneously storing an ArtField* in the app image DexCache
+when the class referenced by the corresponding FieldId is not in the app image
+and only the field's declaring class is.
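For context, a hedged illustration of the pattern this regression covers: the field is declared in the package-private AAA.Base but referenced through the public AAA.Derived, so the dex FieldId names Derived while the resolved ArtField belongs to Base. A minimal standalone demo (hypothetical; FieldIdDemo is not part of this change, and it assumes the AAA.Base/AAA.Derived classes added below):

    import java.lang.reflect.Field;

    public class FieldIdDemo {
        public static void main(String[] args) throws Exception {
            // This access compiles to a field reference (FieldId) naming
            // Derived, yet the field it resolves to is declared in the
            // package-private AAA.Base.
            int v = AAA.Derived.value;
            Field f = AAA.Derived.class.getField("value");
            System.out.println(f.getDeclaringClass()); // prints: class AAA.Base
            System.out.println(v);                     // prints: 42
        }
    }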
diff --git a/test/159-app-image-fields/profile b/test/159-app-image-fields/profile
new file mode 100644
index 0000000..4184fa2
--- /dev/null
+++ b/test/159-app-image-fields/profile
@@ -0,0 +1,3 @@
+LAAA/Base;
+LMain;
+LFields;
diff --git a/test/159-app-image-fields/run b/test/159-app-image-fields/run
new file mode 100644
index 0000000..7cc107a
--- /dev/null
+++ b/test/159-app-image-fields/run
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Use a profile to put specific classes in the app image.
+# Also run the compiler with -j1 to ensure specific class verification order.
+exec ${RUN} "$@" --profile -Xcompiler-option --compiler-filter=speed-profile \
+ -Xcompiler-option -j1
diff --git a/test/159-app-image-fields/src/AAA/Base.java b/test/159-app-image-fields/src/AAA/Base.java
new file mode 100644
index 0000000..41ee83a
--- /dev/null
+++ b/test/159-app-image-fields/src/AAA/Base.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package AAA;
+
+class Base {
+ // The field is public but the class is package-private.
+ public static int value = 42;
+}
diff --git a/test/159-app-image-fields/src/AAA/Derived.java b/test/159-app-image-fields/src/AAA/Derived.java
new file mode 100644
index 0000000..f6045d5
--- /dev/null
+++ b/test/159-app-image-fields/src/AAA/Derived.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package AAA;
+
+public class Derived extends Base {
+ // Allows public access to Base.value (Base is package-private) referenced as Derived.value.
+}
diff --git a/test/159-app-image-fields/src/Main.java b/test/159-app-image-fields/src/Main.java
new file mode 100644
index 0000000..d06a502
--- /dev/null
+++ b/test/159-app-image-fields/src/Main.java
@@ -0,0 +1,2156 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import AAA.Derived;
+
+public class Main {
+ public static void main(String[] args) {
+ try {
+ // Make sure we resolve Fields before eating memory.
+ // (Making sure that the test passes in no-image configurations.)
+ Class.forName("Fields", false, Main.class.getClassLoader());
+ System.out.println("Eating all memory.");
+ Object memory = eatAllMemory();
+
+ // This test assumes that Derived is not yet resolved. In some configurations
+ // (notably interp-ac), Derived is already resolved by verifying Main at run
+ // time. Therefore we cannot assume that we get a certain `value` and need to
+ // simply check for consistency, i.e. `value == another_value`.
+ int value = 0;
+ try {
+ // If the ArtField* is erroneously left in the DexCache, this
+ // shall succeed despite the class Derived being unresolved so
+ // far. Otherwise, we shall throw OOME trying to resolve it.
+ value = Derived.value;
+ } catch (OutOfMemoryError e) {
+ value = -1;
+ }
+ Fields.clobberDexCache();
+ int another_value = 0;
+ try {
+ // Try again for comparison. Since the DexCache field array has been
+ // clobbered by Fields.clobberDexCache(), this shall throw OOME.
+ another_value = Derived.value;
+ } catch (OutOfMemoryError e) {
+ another_value = -1;
+ }
+ boolean memoryWasAllocated = (memory != null);
+ memory = null;
+ System.out.println("memoryWasAllocated = " + memoryWasAllocated);
+ System.out.println("match: " + (value == another_value));
+ if (value != another_value || (value != -1 && value != 42)) {
+ // Mismatch or unexpected value, print additional debugging information.
+ System.out.println("value: " + value);
+ System.out.println("another_value: " + another_value);
+ }
+ } catch (Throwable t) {
+ t.printStackTrace();
+ }
+ }
+
+ public static Object eatAllMemory() {
+ Object[] result = null;
+ int size = 1000000;
+ while (result == null && size != 0) {
+ try {
+ result = new Object[size];
+ } catch (OutOfMemoryError oome) {
+ size /= 2;
+ }
+ }
+ if (result != null) {
+ int index = 0;
+ while (index != result.length && size != 0) {
+ try {
+ result[index] = new byte[size];
+ ++index;
+ } catch (OutOfMemoryError oome) {
+ size /= 2;
+ }
+ }
+ }
+ return result;
+ }
+}
+
+// The naming is deliberate to take into account two different situations:
+// - eagerly preloading DexCache with the available candidate with the lowest index,
+// - not preloading DexCache and relying on the verification to populate it.
+// This corresponds to new and old behavior, respectively.
+//
+// Eager preloading: "LFields;" is after "LAAA/Base;" and "LAAA/Derived;" so that
+// Derived.value takes priority over Fields.testField*.
+//
+// Relying on verifier: "LFields;" is before "LMain;" so that the class definition
+// of Fields precedes the definition of Main. (Not strictly required, but the tools
+// use lexicographic ordering when there is no inheritance relationship.) Main is
+// thus verified last, and its verification fills the DexCache with Derived.value.
+//
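A quick aside on the block below: the 1024 testFieldNNNN fields and the long summation in clobberDexCache() are mechanical, sized so that resolving all of them overwrites every slot of the fixed-size DexCache field cache (1024 entries at the time of this change, to the best of my knowledge). A throwaway generator along these lines (hypothetical, not part of the change) reproduces the block:

    public class GenFields {
        public static void main(String[] args) {
            // Emit the summation body of clobberDexCache() ...
            System.out.println("        return 0");
            for (int i = 0; i < 1024; ++i) {
                System.out.printf("                + testField%04d%n", i);
            }
            System.out.println("                + 0;");
            // ... and the matching static field declarations.
            for (int i = 0; i < 1024; ++i) {
                System.out.printf("    private static int testField%04d = %d;%n", i, i);
            }
        }
    }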
+class Fields {
+ public static int clobberDexCache() {
+ return 0
+ + testField0000
+ + testField0001
+ + testField0002
+ + testField0003
+ + testField0004
+ + testField0005
+ + testField0006
+ + testField0007
+ + testField0008
+ + testField0009
+ + testField0010
+ + testField0011
+ + testField0012
+ + testField0013
+ + testField0014
+ + testField0015
+ + testField0016
+ + testField0017
+ + testField0018
+ + testField0019
+ + testField0020
+ + testField0021
+ + testField0022
+ + testField0023
+ + testField0024
+ + testField0025
+ + testField0026
+ + testField0027
+ + testField0028
+ + testField0029
+ + testField0030
+ + testField0031
+ + testField0032
+ + testField0033
+ + testField0034
+ + testField0035
+ + testField0036
+ + testField0037
+ + testField0038
+ + testField0039
+ + testField0040
+ + testField0041
+ + testField0042
+ + testField0043
+ + testField0044
+ + testField0045
+ + testField0046
+ + testField0047
+ + testField0048
+ + testField0049
+ + testField0050
+ + testField0051
+ + testField0052
+ + testField0053
+ + testField0054
+ + testField0055
+ + testField0056
+ + testField0057
+ + testField0058
+ + testField0059
+ + testField0060
+ + testField0061
+ + testField0062
+ + testField0063
+ + testField0064
+ + testField0065
+ + testField0066
+ + testField0067
+ + testField0068
+ + testField0069
+ + testField0070
+ + testField0071
+ + testField0072
+ + testField0073
+ + testField0074
+ + testField0075
+ + testField0076
+ + testField0077
+ + testField0078
+ + testField0079
+ + testField0080
+ + testField0081
+ + testField0082
+ + testField0083
+ + testField0084
+ + testField0085
+ + testField0086
+ + testField0087
+ + testField0088
+ + testField0089
+ + testField0090
+ + testField0091
+ + testField0092
+ + testField0093
+ + testField0094
+ + testField0095
+ + testField0096
+ + testField0097
+ + testField0098
+ + testField0099
+ + testField0100
+ + testField0101
+ + testField0102
+ + testField0103
+ + testField0104
+ + testField0105
+ + testField0106
+ + testField0107
+ + testField0108
+ + testField0109
+ + testField0110
+ + testField0111
+ + testField0112
+ + testField0113
+ + testField0114
+ + testField0115
+ + testField0116
+ + testField0117
+ + testField0118
+ + testField0119
+ + testField0120
+ + testField0121
+ + testField0122
+ + testField0123
+ + testField0124
+ + testField0125
+ + testField0126
+ + testField0127
+ + testField0128
+ + testField0129
+ + testField0130
+ + testField0131
+ + testField0132
+ + testField0133
+ + testField0134
+ + testField0135
+ + testField0136
+ + testField0137
+ + testField0138
+ + testField0139
+ + testField0140
+ + testField0141
+ + testField0142
+ + testField0143
+ + testField0144
+ + testField0145
+ + testField0146
+ + testField0147
+ + testField0148
+ + testField0149
+ + testField0150
+ + testField0151
+ + testField0152
+ + testField0153
+ + testField0154
+ + testField0155
+ + testField0156
+ + testField0157
+ + testField0158
+ + testField0159
+ + testField0160
+ + testField0161
+ + testField0162
+ + testField0163
+ + testField0164
+ + testField0165
+ + testField0166
+ + testField0167
+ + testField0168
+ + testField0169
+ + testField0170
+ + testField0171
+ + testField0172
+ + testField0173
+ + testField0174
+ + testField0175
+ + testField0176
+ + testField0177
+ + testField0178
+ + testField0179
+ + testField0180
+ + testField0181
+ + testField0182
+ + testField0183
+ + testField0184
+ + testField0185
+ + testField0186
+ + testField0187
+ + testField0188
+ + testField0189
+ + testField0190
+ + testField0191
+ + testField0192
+ + testField0193
+ + testField0194
+ + testField0195
+ + testField0196
+ + testField0197
+ + testField0198
+ + testField0199
+ + testField0200
+ + testField0201
+ + testField0202
+ + testField0203
+ + testField0204
+ + testField0205
+ + testField0206
+ + testField0207
+ + testField0208
+ + testField0209
+ + testField0210
+ + testField0211
+ + testField0212
+ + testField0213
+ + testField0214
+ + testField0215
+ + testField0216
+ + testField0217
+ + testField0218
+ + testField0219
+ + testField0220
+ + testField0221
+ + testField0222
+ + testField0223
+ + testField0224
+ + testField0225
+ + testField0226
+ + testField0227
+ + testField0228
+ + testField0229
+ + testField0230
+ + testField0231
+ + testField0232
+ + testField0233
+ + testField0234
+ + testField0235
+ + testField0236
+ + testField0237
+ + testField0238
+ + testField0239
+ + testField0240
+ + testField0241
+ + testField0242
+ + testField0243
+ + testField0244
+ + testField0245
+ + testField0246
+ + testField0247
+ + testField0248
+ + testField0249
+ + testField0250
+ + testField0251
+ + testField0252
+ + testField0253
+ + testField0254
+ + testField0255
+ + testField0256
+ + testField0257
+ + testField0258
+ + testField0259
+ + testField0260
+ + testField0261
+ + testField0262
+ + testField0263
+ + testField0264
+ + testField0265
+ + testField0266
+ + testField0267
+ + testField0268
+ + testField0269
+ + testField0270
+ + testField0271
+ + testField0272
+ + testField0273
+ + testField0274
+ + testField0275
+ + testField0276
+ + testField0277
+ + testField0278
+ + testField0279
+ + testField0280
+ + testField0281
+ + testField0282
+ + testField0283
+ + testField0284
+ + testField0285
+ + testField0286
+ + testField0287
+ + testField0288
+ + testField0289
+ + testField0290
+ + testField0291
+ + testField0292
+ + testField0293
+ + testField0294
+ + testField0295
+ + testField0296
+ + testField0297
+ + testField0298
+ + testField0299
+ + testField0300
+ + testField0301
+ + testField0302
+ + testField0303
+ + testField0304
+ + testField0305
+ + testField0306
+ + testField0307
+ + testField0308
+ + testField0309
+ + testField0310
+ + testField0311
+ + testField0312
+ + testField0313
+ + testField0314
+ + testField0315
+ + testField0316
+ + testField0317
+ + testField0318
+ + testField0319
+ + testField0320
+ + testField0321
+ + testField0322
+ + testField0323
+ + testField0324
+ + testField0325
+ + testField0326
+ + testField0327
+ + testField0328
+ + testField0329
+ + testField0330
+ + testField0331
+ + testField0332
+ + testField0333
+ + testField0334
+ + testField0335
+ + testField0336
+ + testField0337
+ + testField0338
+ + testField0339
+ + testField0340
+ + testField0341
+ + testField0342
+ + testField0343
+ + testField0344
+ + testField0345
+ + testField0346
+ + testField0347
+ + testField0348
+ + testField0349
+ + testField0350
+ + testField0351
+ + testField0352
+ + testField0353
+ + testField0354
+ + testField0355
+ + testField0356
+ + testField0357
+ + testField0358
+ + testField0359
+ + testField0360
+ + testField0361
+ + testField0362
+ + testField0363
+ + testField0364
+ + testField0365
+ + testField0366
+ + testField0367
+ + testField0368
+ + testField0369
+ + testField0370
+ + testField0371
+ + testField0372
+ + testField0373
+ + testField0374
+ + testField0375
+ + testField0376
+ + testField0377
+ + testField0378
+ + testField0379
+ + testField0380
+ + testField0381
+ + testField0382
+ + testField0383
+ + testField0384
+ + testField0385
+ + testField0386
+ + testField0387
+ + testField0388
+ + testField0389
+ + testField0390
+ + testField0391
+ + testField0392
+ + testField0393
+ + testField0394
+ + testField0395
+ + testField0396
+ + testField0397
+ + testField0398
+ + testField0399
+ + testField0400
+ + testField0401
+ + testField0402
+ + testField0403
+ + testField0404
+ + testField0405
+ + testField0406
+ + testField0407
+ + testField0408
+ + testField0409
+ + testField0410
+ + testField0411
+ + testField0412
+ + testField0413
+ + testField0414
+ + testField0415
+ + testField0416
+ + testField0417
+ + testField0418
+ + testField0419
+ + testField0420
+ + testField0421
+ + testField0422
+ + testField0423
+ + testField0424
+ + testField0425
+ + testField0426
+ + testField0427
+ + testField0428
+ + testField0429
+ + testField0430
+ + testField0431
+ + testField0432
+ + testField0433
+ + testField0434
+ + testField0435
+ + testField0436
+ + testField0437
+ + testField0438
+ + testField0439
+ + testField0440
+ + testField0441
+ + testField0442
+ + testField0443
+ + testField0444
+ + testField0445
+ + testField0446
+ + testField0447
+ + testField0448
+ + testField0449
+ + testField0450
+ + testField0451
+ + testField0452
+ + testField0453
+ + testField0454
+ + testField0455
+ + testField0456
+ + testField0457
+ + testField0458
+ + testField0459
+ + testField0460
+ + testField0461
+ + testField0462
+ + testField0463
+ + testField0464
+ + testField0465
+ + testField0466
+ + testField0467
+ + testField0468
+ + testField0469
+ + testField0470
+ + testField0471
+ + testField0472
+ + testField0473
+ + testField0474
+ + testField0475
+ + testField0476
+ + testField0477
+ + testField0478
+ + testField0479
+ + testField0480
+ + testField0481
+ + testField0482
+ + testField0483
+ + testField0484
+ + testField0485
+ + testField0486
+ + testField0487
+ + testField0488
+ + testField0489
+ + testField0490
+ + testField0491
+ + testField0492
+ + testField0493
+ + testField0494
+ + testField0495
+ + testField0496
+ + testField0497
+ + testField0498
+ + testField0499
+ + testField0500
+ + testField0501
+ + testField0502
+ + testField0503
+ + testField0504
+ + testField0505
+ + testField0506
+ + testField0507
+ + testField0508
+ + testField0509
+ + testField0510
+ + testField0511
+ + testField0512
+ + testField0513
+ + testField0514
+ + testField0515
+ + testField0516
+ + testField0517
+ + testField0518
+ + testField0519
+ + testField0520
+ + testField0521
+ + testField0522
+ + testField0523
+ + testField0524
+ + testField0525
+ + testField0526
+ + testField0527
+ + testField0528
+ + testField0529
+ + testField0530
+ + testField0531
+ + testField0532
+ + testField0533
+ + testField0534
+ + testField0535
+ + testField0536
+ + testField0537
+ + testField0538
+ + testField0539
+ + testField0540
+ + testField0541
+ + testField0542
+ + testField0543
+ + testField0544
+ + testField0545
+ + testField0546
+ + testField0547
+ + testField0548
+ + testField0549
+ + testField0550
+ + testField0551
+ + testField0552
+ + testField0553
+ + testField0554
+ + testField0555
+ + testField0556
+ + testField0557
+ + testField0558
+ + testField0559
+ + testField0560
+ + testField0561
+ + testField0562
+ + testField0563
+ + testField0564
+ + testField0565
+ + testField0566
+ + testField0567
+ + testField0568
+ + testField0569
+ + testField0570
+ + testField0571
+ + testField0572
+ + testField0573
+ + testField0574
+ + testField0575
+ + testField0576
+ + testField0577
+ + testField0578
+ + testField0579
+ + testField0580
+ + testField0581
+ + testField0582
+ + testField0583
+ + testField0584
+ + testField0585
+ + testField0586
+ + testField0587
+ + testField0588
+ + testField0589
+ + testField0590
+ + testField0591
+ + testField0592
+ + testField0593
+ + testField0594
+ + testField0595
+ + testField0596
+ + testField0597
+ + testField0598
+ + testField0599
+ + testField0600
+ + testField0601
+ + testField0602
+ + testField0603
+ + testField0604
+ + testField0605
+ + testField0606
+ + testField0607
+ + testField0608
+ + testField0609
+ + testField0610
+ + testField0611
+ + testField0612
+ + testField0613
+ + testField0614
+ + testField0615
+ + testField0616
+ + testField0617
+ + testField0618
+ + testField0619
+ + testField0620
+ + testField0621
+ + testField0622
+ + testField0623
+ + testField0624
+ + testField0625
+ + testField0626
+ + testField0627
+ + testField0628
+ + testField0629
+ + testField0630
+ + testField0631
+ + testField0632
+ + testField0633
+ + testField0634
+ + testField0635
+ + testField0636
+ + testField0637
+ + testField0638
+ + testField0639
+ + testField0640
+ + testField0641
+ + testField0642
+ + testField0643
+ + testField0644
+ + testField0645
+ + testField0646
+ + testField0647
+ + testField0648
+ + testField0649
+ + testField0650
+ + testField0651
+ + testField0652
+ + testField0653
+ + testField0654
+ + testField0655
+ + testField0656
+ + testField0657
+ + testField0658
+ + testField0659
+ + testField0660
+ + testField0661
+ + testField0662
+ + testField0663
+ + testField0664
+ + testField0665
+ + testField0666
+ + testField0667
+ + testField0668
+ + testField0669
+ + testField0670
+ + testField0671
+ + testField0672
+ + testField0673
+ + testField0674
+ + testField0675
+ + testField0676
+ + testField0677
+ + testField0678
+ + testField0679
+ + testField0680
+ + testField0681
+ + testField0682
+ + testField0683
+ + testField0684
+ + testField0685
+ + testField0686
+ + testField0687
+ + testField0688
+ + testField0689
+ + testField0690
+ + testField0691
+ + testField0692
+ + testField0693
+ + testField0694
+ + testField0695
+ + testField0696
+ + testField0697
+ + testField0698
+ + testField0699
+ + testField0700
+ + testField0701
+ + testField0702
+ + testField0703
+ + testField0704
+ + testField0705
+ + testField0706
+ + testField0707
+ + testField0708
+ + testField0709
+ + testField0710
+ + testField0711
+ + testField0712
+ + testField0713
+ + testField0714
+ + testField0715
+ + testField0716
+ + testField0717
+ + testField0718
+ + testField0719
+ + testField0720
+ + testField0721
+ + testField0722
+ + testField0723
+ + testField0724
+ + testField0725
+ + testField0726
+ + testField0727
+ + testField0728
+ + testField0729
+ + testField0730
+ + testField0731
+ + testField0732
+ + testField0733
+ + testField0734
+ + testField0735
+ + testField0736
+ + testField0737
+ + testField0738
+ + testField0739
+ + testField0740
+ + testField0741
+ + testField0742
+ + testField0743
+ + testField0744
+ + testField0745
+ + testField0746
+ + testField0747
+ + testField0748
+ + testField0749
+ + testField0750
+ + testField0751
+ + testField0752
+ + testField0753
+ + testField0754
+ + testField0755
+ + testField0756
+ + testField0757
+ + testField0758
+ + testField0759
+ + testField0760
+ + testField0761
+ + testField0762
+ + testField0763
+ + testField0764
+ + testField0765
+ + testField0766
+ + testField0767
+ + testField0768
+ + testField0769
+ + testField0770
+ + testField0771
+ + testField0772
+ + testField0773
+ + testField0774
+ + testField0775
+ + testField0776
+ + testField0777
+ + testField0778
+ + testField0779
+ + testField0780
+ + testField0781
+ + testField0782
+ + testField0783
+ + testField0784
+ + testField0785
+ + testField0786
+ + testField0787
+ + testField0788
+ + testField0789
+ + testField0790
+ + testField0791
+ + testField0792
+ + testField0793
+ + testField0794
+ + testField0795
+ + testField0796
+ + testField0797
+ + testField0798
+ + testField0799
+ + testField0800
+ + testField0801
+ + testField0802
+ + testField0803
+ + testField0804
+ + testField0805
+ + testField0806
+ + testField0807
+ + testField0808
+ + testField0809
+ + testField0810
+ + testField0811
+ + testField0812
+ + testField0813
+ + testField0814
+ + testField0815
+ + testField0816
+ + testField0817
+ + testField0818
+ + testField0819
+ + testField0820
+ + testField0821
+ + testField0822
+ + testField0823
+ + testField0824
+ + testField0825
+ + testField0826
+ + testField0827
+ + testField0828
+ + testField0829
+ + testField0830
+ + testField0831
+ + testField0832
+ + testField0833
+ + testField0834
+ + testField0835
+ + testField0836
+ + testField0837
+ + testField0838
+ + testField0839
+ + testField0840
+ + testField0841
+ + testField0842
+ + testField0843
+ + testField0844
+ + testField0845
+ + testField0846
+ + testField0847
+ + testField0848
+ + testField0849
+ + testField0850
+ + testField0851
+ + testField0852
+ + testField0853
+ + testField0854
+ + testField0855
+ + testField0856
+ + testField0857
+ + testField0858
+ + testField0859
+ + testField0860
+ + testField0861
+ + testField0862
+ + testField0863
+ + testField0864
+ + testField0865
+ + testField0866
+ + testField0867
+ + testField0868
+ + testField0869
+ + testField0870
+ + testField0871
+ + testField0872
+ + testField0873
+ + testField0874
+ + testField0875
+ + testField0876
+ + testField0877
+ + testField0878
+ + testField0879
+ + testField0880
+ + testField0881
+ + testField0882
+ + testField0883
+ + testField0884
+ + testField0885
+ + testField0886
+ + testField0887
+ + testField0888
+ + testField0889
+ + testField0890
+ + testField0891
+ + testField0892
+ + testField0893
+ + testField0894
+ + testField0895
+ + testField0896
+ + testField0897
+ + testField0898
+ + testField0899
+ + testField0900
+ + testField0901
+ + testField0902
+ + testField0903
+ + testField0904
+ + testField0905
+ + testField0906
+ + testField0907
+ + testField0908
+ + testField0909
+ + testField0910
+ + testField0911
+ + testField0912
+ + testField0913
+ + testField0914
+ + testField0915
+ + testField0916
+ + testField0917
+ + testField0918
+ + testField0919
+ + testField0920
+ + testField0921
+ + testField0922
+ + testField0923
+ + testField0924
+ + testField0925
+ + testField0926
+ + testField0927
+ + testField0928
+ + testField0929
+ + testField0930
+ + testField0931
+ + testField0932
+ + testField0933
+ + testField0934
+ + testField0935
+ + testField0936
+ + testField0937
+ + testField0938
+ + testField0939
+ + testField0940
+ + testField0941
+ + testField0942
+ + testField0943
+ + testField0944
+ + testField0945
+ + testField0946
+ + testField0947
+ + testField0948
+ + testField0949
+ + testField0950
+ + testField0951
+ + testField0952
+ + testField0953
+ + testField0954
+ + testField0955
+ + testField0956
+ + testField0957
+ + testField0958
+ + testField0959
+ + testField0960
+ + testField0961
+ + testField0962
+ + testField0963
+ + testField0964
+ + testField0965
+ + testField0966
+ + testField0967
+ + testField0968
+ + testField0969
+ + testField0970
+ + testField0971
+ + testField0972
+ + testField0973
+ + testField0974
+ + testField0975
+ + testField0976
+ + testField0977
+ + testField0978
+ + testField0979
+ + testField0980
+ + testField0981
+ + testField0982
+ + testField0983
+ + testField0984
+ + testField0985
+ + testField0986
+ + testField0987
+ + testField0988
+ + testField0989
+ + testField0990
+ + testField0991
+ + testField0992
+ + testField0993
+ + testField0994
+ + testField0995
+ + testField0996
+ + testField0997
+ + testField0998
+ + testField0999
+ + testField1000
+ + testField1001
+ + testField1002
+ + testField1003
+ + testField1004
+ + testField1005
+ + testField1006
+ + testField1007
+ + testField1008
+ + testField1009
+ + testField1010
+ + testField1011
+ + testField1012
+ + testField1013
+ + testField1014
+ + testField1015
+ + testField1016
+ + testField1017
+ + testField1018
+ + testField1019
+ + testField1020
+ + testField1021
+ + testField1022
+ + testField1023
+ + 0;
+ }
+
+ private static int testField0000 = 0;
+ private static int testField0001 = 1;
+ private static int testField0002 = 2;
+ private static int testField0003 = 3;
+ private static int testField0004 = 4;
+ private static int testField0005 = 5;
+ private static int testField0006 = 6;
+ private static int testField0007 = 7;
+ private static int testField0008 = 8;
+ private static int testField0009 = 9;
+ private static int testField0010 = 10;
+ private static int testField0011 = 11;
+ private static int testField0012 = 12;
+ private static int testField0013 = 13;
+ private static int testField0014 = 14;
+ private static int testField0015 = 15;
+ private static int testField0016 = 16;
+ private static int testField0017 = 17;
+ private static int testField0018 = 18;
+ private static int testField0019 = 19;
+ private static int testField0020 = 20;
+ private static int testField0021 = 21;
+ private static int testField0022 = 22;
+ private static int testField0023 = 23;
+ private static int testField0024 = 24;
+ private static int testField0025 = 25;
+ private static int testField0026 = 26;
+ private static int testField0027 = 27;
+ private static int testField0028 = 28;
+ private static int testField0029 = 29;
+ private static int testField0030 = 30;
+ private static int testField0031 = 31;
+ private static int testField0032 = 32;
+ private static int testField0033 = 33;
+ private static int testField0034 = 34;
+ private static int testField0035 = 35;
+ private static int testField0036 = 36;
+ private static int testField0037 = 37;
+ private static int testField0038 = 38;
+ private static int testField0039 = 39;
+ private static int testField0040 = 40;
+ private static int testField0041 = 41;
+ private static int testField0042 = 42;
+ private static int testField0043 = 43;
+ private static int testField0044 = 44;
+ private static int testField0045 = 45;
+ private static int testField0046 = 46;
+ private static int testField0047 = 47;
+ private static int testField0048 = 48;
+ private static int testField0049 = 49;
+ private static int testField0050 = 50;
+ private static int testField0051 = 51;
+ private static int testField0052 = 52;
+ private static int testField0053 = 53;
+ private static int testField0054 = 54;
+ private static int testField0055 = 55;
+ private static int testField0056 = 56;
+ private static int testField0057 = 57;
+ private static int testField0058 = 58;
+ private static int testField0059 = 59;
+ private static int testField0060 = 60;
+ private static int testField0061 = 61;
+ private static int testField0062 = 62;
+ private static int testField0063 = 63;
+ private static int testField0064 = 64;
+ private static int testField0065 = 65;
+ private static int testField0066 = 66;
+ private static int testField0067 = 67;
+ private static int testField0068 = 68;
+ private static int testField0069 = 69;
+ private static int testField0070 = 70;
+ private static int testField0071 = 71;
+ private static int testField0072 = 72;
+ private static int testField0073 = 73;
+ private static int testField0074 = 74;
+ private static int testField0075 = 75;
+ private static int testField0076 = 76;
+ private static int testField0077 = 77;
+ private static int testField0078 = 78;
+ private static int testField0079 = 79;
+ private static int testField0080 = 80;
+ private static int testField0081 = 81;
+ private static int testField0082 = 82;
+ private static int testField0083 = 83;
+ private static int testField0084 = 84;
+ private static int testField0085 = 85;
+ private static int testField0086 = 86;
+ private static int testField0087 = 87;
+ private static int testField0088 = 88;
+ private static int testField0089 = 89;
+ private static int testField0090 = 90;
+ private static int testField0091 = 91;
+ private static int testField0092 = 92;
+ private static int testField0093 = 93;
+ private static int testField0094 = 94;
+ private static int testField0095 = 95;
+ private static int testField0096 = 96;
+ private static int testField0097 = 97;
+ private static int testField0098 = 98;
+ private static int testField0099 = 99;
+ private static int testField0100 = 100;
+ private static int testField0101 = 101;
+ private static int testField0102 = 102;
+ private static int testField0103 = 103;
+ private static int testField0104 = 104;
+ private static int testField0105 = 105;
+ private static int testField0106 = 106;
+ private static int testField0107 = 107;
+ private static int testField0108 = 108;
+ private static int testField0109 = 109;
+ private static int testField0110 = 110;
+ private static int testField0111 = 111;
+ private static int testField0112 = 112;
+ private static int testField0113 = 113;
+ private static int testField0114 = 114;
+ private static int testField0115 = 115;
+ private static int testField0116 = 116;
+ private static int testField0117 = 117;
+ private static int testField0118 = 118;
+ private static int testField0119 = 119;
+ private static int testField0120 = 120;
+ private static int testField0121 = 121;
+ private static int testField0122 = 122;
+ private static int testField0123 = 123;
+ private static int testField0124 = 124;
+ private static int testField0125 = 125;
+ private static int testField0126 = 126;
+ private static int testField0127 = 127;
+ private static int testField0128 = 128;
+ private static int testField0129 = 129;
+ private static int testField0130 = 130;
+ private static int testField0131 = 131;
+ private static int testField0132 = 132;
+ private static int testField0133 = 133;
+ private static int testField0134 = 134;
+ private static int testField0135 = 135;
+ private static int testField0136 = 136;
+ private static int testField0137 = 137;
+ private static int testField0138 = 138;
+ private static int testField0139 = 139;
+ private static int testField0140 = 140;
+ private static int testField0141 = 141;
+ private static int testField0142 = 142;
+ private static int testField0143 = 143;
+ private static int testField0144 = 144;
+ private static int testField0145 = 145;
+ private static int testField0146 = 146;
+ private static int testField0147 = 147;
+ private static int testField0148 = 148;
+ private static int testField0149 = 149;
+ private static int testField0150 = 150;
+ private static int testField0151 = 151;
+ private static int testField0152 = 152;
+ private static int testField0153 = 153;
+ private static int testField0154 = 154;
+ private static int testField0155 = 155;
+ private static int testField0156 = 156;
+ private static int testField0157 = 157;
+ private static int testField0158 = 158;
+ private static int testField0159 = 159;
+ private static int testField0160 = 160;
+ private static int testField0161 = 161;
+ private static int testField0162 = 162;
+ private static int testField0163 = 163;
+ private static int testField0164 = 164;
+ private static int testField0165 = 165;
+ private static int testField0166 = 166;
+ private static int testField0167 = 167;
+ private static int testField0168 = 168;
+ private static int testField0169 = 169;
+ private static int testField0170 = 170;
+ private static int testField0171 = 171;
+ private static int testField0172 = 172;
+ private static int testField0173 = 173;
+ private static int testField0174 = 174;
+ private static int testField0175 = 175;
+ private static int testField0176 = 176;
+ private static int testField0177 = 177;
+ private static int testField0178 = 178;
+ private static int testField0179 = 179;
+ private static int testField0180 = 180;
+ private static int testField0181 = 181;
+ private static int testField0182 = 182;
+ private static int testField0183 = 183;
+ private static int testField0184 = 184;
+ private static int testField0185 = 185;
+ private static int testField0186 = 186;
+ private static int testField0187 = 187;
+ private static int testField0188 = 188;
+ private static int testField0189 = 189;
+ private static int testField0190 = 190;
+ private static int testField0191 = 191;
+ private static int testField0192 = 192;
+ private static int testField0193 = 193;
+ private static int testField0194 = 194;
+ private static int testField0195 = 195;
+ private static int testField0196 = 196;
+ private static int testField0197 = 197;
+ private static int testField0198 = 198;
+ private static int testField0199 = 199;
+ private static int testField0200 = 200;
+ private static int testField0201 = 201;
+ private static int testField0202 = 202;
+ private static int testField0203 = 203;
+ private static int testField0204 = 204;
+ private static int testField0205 = 205;
+ private static int testField0206 = 206;
+ private static int testField0207 = 207;
+ private static int testField0208 = 208;
+ private static int testField0209 = 209;
+ private static int testField0210 = 210;
+ private static int testField0211 = 211;
+ private static int testField0212 = 212;
+ private static int testField0213 = 213;
+ private static int testField0214 = 214;
+ private static int testField0215 = 215;
+ private static int testField0216 = 216;
+ private static int testField0217 = 217;
+ private static int testField0218 = 218;
+ private static int testField0219 = 219;
+ private static int testField0220 = 220;
+ private static int testField0221 = 221;
+ private static int testField0222 = 222;
+ private static int testField0223 = 223;
+ private static int testField0224 = 224;
+ private static int testField0225 = 225;
+ private static int testField0226 = 226;
+ private static int testField0227 = 227;
+ private static int testField0228 = 228;
+ private static int testField0229 = 229;
+ private static int testField0230 = 230;
+ private static int testField0231 = 231;
+ private static int testField0232 = 232;
+ private static int testField0233 = 233;
+ private static int testField0234 = 234;
+ private static int testField0235 = 235;
+ private static int testField0236 = 236;
+ private static int testField0237 = 237;
+ private static int testField0238 = 238;
+ private static int testField0239 = 239;
+ private static int testField0240 = 240;
+ private static int testField0241 = 241;
+ private static int testField0242 = 242;
+ private static int testField0243 = 243;
+ private static int testField0244 = 244;
+ private static int testField0245 = 245;
+ private static int testField0246 = 246;
+ private static int testField0247 = 247;
+ private static int testField0248 = 248;
+ private static int testField0249 = 249;
+ private static int testField0250 = 250;
+ private static int testField0251 = 251;
+ private static int testField0252 = 252;
+ private static int testField0253 = 253;
+ private static int testField0254 = 254;
+ private static int testField0255 = 255;
+ private static int testField0256 = 256;
+ private static int testField0257 = 257;
+ private static int testField0258 = 258;
+ private static int testField0259 = 259;
+ private static int testField0260 = 260;
+ private static int testField0261 = 261;
+ private static int testField0262 = 262;
+ private static int testField0263 = 263;
+ private static int testField0264 = 264;
+ private static int testField0265 = 265;
+ private static int testField0266 = 266;
+ private static int testField0267 = 267;
+ private static int testField0268 = 268;
+ private static int testField0269 = 269;
+ private static int testField0270 = 270;
+ private static int testField0271 = 271;
+ private static int testField0272 = 272;
+ private static int testField0273 = 273;
+ private static int testField0274 = 274;
+ private static int testField0275 = 275;
+ private static int testField0276 = 276;
+ private static int testField0277 = 277;
+ private static int testField0278 = 278;
+ private static int testField0279 = 279;
+ private static int testField0280 = 280;
+ private static int testField0281 = 281;
+ private static int testField0282 = 282;
+ private static int testField0283 = 283;
+ private static int testField0284 = 284;
+ private static int testField0285 = 285;
+ private static int testField0286 = 286;
+ private static int testField0287 = 287;
+ private static int testField0288 = 288;
+ private static int testField0289 = 289;
+ private static int testField0290 = 290;
+ private static int testField0291 = 291;
+ private static int testField0292 = 292;
+ private static int testField0293 = 293;
+ private static int testField0294 = 294;
+ private static int testField0295 = 295;
+ private static int testField0296 = 296;
+ private static int testField0297 = 297;
+ private static int testField0298 = 298;
+ private static int testField0299 = 299;
+ private static int testField0300 = 300;
+ private static int testField0301 = 301;
+ private static int testField0302 = 302;
+ private static int testField0303 = 303;
+ private static int testField0304 = 304;
+ private static int testField0305 = 305;
+ private static int testField0306 = 306;
+ private static int testField0307 = 307;
+ private static int testField0308 = 308;
+ private static int testField0309 = 309;
+ private static int testField0310 = 310;
+ private static int testField0311 = 311;
+ private static int testField0312 = 312;
+ private static int testField0313 = 313;
+ private static int testField0314 = 314;
+ private static int testField0315 = 315;
+ private static int testField0316 = 316;
+ private static int testField0317 = 317;
+ private static int testField0318 = 318;
+ private static int testField0319 = 319;
+ private static int testField0320 = 320;
+ private static int testField0321 = 321;
+ private static int testField0322 = 322;
+ private static int testField0323 = 323;
+ private static int testField0324 = 324;
+ private static int testField0325 = 325;
+ private static int testField0326 = 326;
+ private static int testField0327 = 327;
+ private static int testField0328 = 328;
+ private static int testField0329 = 329;
+ private static int testField0330 = 330;
+ private static int testField0331 = 331;
+ private static int testField0332 = 332;
+ private static int testField0333 = 333;
+ private static int testField0334 = 334;
+ private static int testField0335 = 335;
+ private static int testField0336 = 336;
+ private static int testField0337 = 337;
+ private static int testField0338 = 338;
+ private static int testField0339 = 339;
+ private static int testField0340 = 340;
+ private static int testField0341 = 341;
+ private static int testField0342 = 342;
+ private static int testField0343 = 343;
+ private static int testField0344 = 344;
+ private static int testField0345 = 345;
+ private static int testField0346 = 346;
+ private static int testField0347 = 347;
+ private static int testField0348 = 348;
+ private static int testField0349 = 349;
+ private static int testField0350 = 350;
+ private static int testField0351 = 351;
+ private static int testField0352 = 352;
+ private static int testField0353 = 353;
+ private static int testField0354 = 354;
+ private static int testField0355 = 355;
+ private static int testField0356 = 356;
+ private static int testField0357 = 357;
+ private static int testField0358 = 358;
+ private static int testField0359 = 359;
+ private static int testField0360 = 360;
+ private static int testField0361 = 361;
+ private static int testField0362 = 362;
+ private static int testField0363 = 363;
+ private static int testField0364 = 364;
+ private static int testField0365 = 365;
+ private static int testField0366 = 366;
+ private static int testField0367 = 367;
+ private static int testField0368 = 368;
+ private static int testField0369 = 369;
+ private static int testField0370 = 370;
+ private static int testField0371 = 371;
+ private static int testField0372 = 372;
+ private static int testField0373 = 373;
+ private static int testField0374 = 374;
+ private static int testField0375 = 375;
+ private static int testField0376 = 376;
+ private static int testField0377 = 377;
+ private static int testField0378 = 378;
+ private static int testField0379 = 379;
+ private static int testField0380 = 380;
+ private static int testField0381 = 381;
+ private static int testField0382 = 382;
+ private static int testField0383 = 383;
+ private static int testField0384 = 384;
+ private static int testField0385 = 385;
+ private static int testField0386 = 386;
+ private static int testField0387 = 387;
+ private static int testField0388 = 388;
+ private static int testField0389 = 389;
+ private static int testField0390 = 390;
+ private static int testField0391 = 391;
+ private static int testField0392 = 392;
+ private static int testField0393 = 393;
+ private static int testField0394 = 394;
+ private static int testField0395 = 395;
+ private static int testField0396 = 396;
+ private static int testField0397 = 397;
+ private static int testField0398 = 398;
+ private static int testField0399 = 399;
+ private static int testField0400 = 400;
+ private static int testField0401 = 401;
+ private static int testField0402 = 402;
+ private static int testField0403 = 403;
+ private static int testField0404 = 404;
+ private static int testField0405 = 405;
+ private static int testField0406 = 406;
+ private static int testField0407 = 407;
+ private static int testField0408 = 408;
+ private static int testField0409 = 409;
+ private static int testField0410 = 410;
+ private static int testField0411 = 411;
+ private static int testField0412 = 412;
+ private static int testField0413 = 413;
+ private static int testField0414 = 414;
+ private static int testField0415 = 415;
+ private static int testField0416 = 416;
+ private static int testField0417 = 417;
+ private static int testField0418 = 418;
+ private static int testField0419 = 419;
+ private static int testField0420 = 420;
+ private static int testField0421 = 421;
+ private static int testField0422 = 422;
+ private static int testField0423 = 423;
+ private static int testField0424 = 424;
+ private static int testField0425 = 425;
+ private static int testField0426 = 426;
+ private static int testField0427 = 427;
+ private static int testField0428 = 428;
+ private static int testField0429 = 429;
+ private static int testField0430 = 430;
+ private static int testField0431 = 431;
+ private static int testField0432 = 432;
+ private static int testField0433 = 433;
+ private static int testField0434 = 434;
+ private static int testField0435 = 435;
+ private static int testField0436 = 436;
+ private static int testField0437 = 437;
+ private static int testField0438 = 438;
+ private static int testField0439 = 439;
+ private static int testField0440 = 440;
+ private static int testField0441 = 441;
+ private static int testField0442 = 442;
+ private static int testField0443 = 443;
+ private static int testField0444 = 444;
+ private static int testField0445 = 445;
+ private static int testField0446 = 446;
+ private static int testField0447 = 447;
+ private static int testField0448 = 448;
+ private static int testField0449 = 449;
+ private static int testField0450 = 450;
+ private static int testField0451 = 451;
+ private static int testField0452 = 452;
+ private static int testField0453 = 453;
+ private static int testField0454 = 454;
+ private static int testField0455 = 455;
+ private static int testField0456 = 456;
+ private static int testField0457 = 457;
+ private static int testField0458 = 458;
+ private static int testField0459 = 459;
+ private static int testField0460 = 460;
+ private static int testField0461 = 461;
+ private static int testField0462 = 462;
+ private static int testField0463 = 463;
+ private static int testField0464 = 464;
+ private static int testField0465 = 465;
+ private static int testField0466 = 466;
+ private static int testField0467 = 467;
+ private static int testField0468 = 468;
+ private static int testField0469 = 469;
+ private static int testField0470 = 470;
+ private static int testField0471 = 471;
+ private static int testField0472 = 472;
+ private static int testField0473 = 473;
+ private static int testField0474 = 474;
+ private static int testField0475 = 475;
+ private static int testField0476 = 476;
+ private static int testField0477 = 477;
+ private static int testField0478 = 478;
+ private static int testField0479 = 479;
+ private static int testField0480 = 480;
+ private static int testField0481 = 481;
+ private static int testField0482 = 482;
+ private static int testField0483 = 483;
+ private static int testField0484 = 484;
+ private static int testField0485 = 485;
+ private static int testField0486 = 486;
+ private static int testField0487 = 487;
+ private static int testField0488 = 488;
+ private static int testField0489 = 489;
+ private static int testField0490 = 490;
+ private static int testField0491 = 491;
+ private static int testField0492 = 492;
+ private static int testField0493 = 493;
+ private static int testField0494 = 494;
+ private static int testField0495 = 495;
+ private static int testField0496 = 496;
+ private static int testField0497 = 497;
+ private static int testField0498 = 498;
+ private static int testField0499 = 499;
+ private static int testField0500 = 500;
+ private static int testField0501 = 501;
+ private static int testField0502 = 502;
+ private static int testField0503 = 503;
+ private static int testField0504 = 504;
+ private static int testField0505 = 505;
+ private static int testField0506 = 506;
+ private static int testField0507 = 507;
+ private static int testField0508 = 508;
+ private static int testField0509 = 509;
+ private static int testField0510 = 510;
+ private static int testField0511 = 511;
+ private static int testField0512 = 512;
+ private static int testField0513 = 513;
+ private static int testField0514 = 514;
+ private static int testField0515 = 515;
+ private static int testField0516 = 516;
+ private static int testField0517 = 517;
+ private static int testField0518 = 518;
+ private static int testField0519 = 519;
+ private static int testField0520 = 520;
+ private static int testField0521 = 521;
+ private static int testField0522 = 522;
+ private static int testField0523 = 523;
+ private static int testField0524 = 524;
+ private static int testField0525 = 525;
+ private static int testField0526 = 526;
+ private static int testField0527 = 527;
+ private static int testField0528 = 528;
+ private static int testField0529 = 529;
+ private static int testField0530 = 530;
+ private static int testField0531 = 531;
+ private static int testField0532 = 532;
+ private static int testField0533 = 533;
+ private static int testField0534 = 534;
+ private static int testField0535 = 535;
+ private static int testField0536 = 536;
+ private static int testField0537 = 537;
+ private static int testField0538 = 538;
+ private static int testField0539 = 539;
+ private static int testField0540 = 540;
+ private static int testField0541 = 541;
+ private static int testField0542 = 542;
+ private static int testField0543 = 543;
+ private static int testField0544 = 544;
+ private static int testField0545 = 545;
+ private static int testField0546 = 546;
+ private static int testField0547 = 547;
+ private static int testField0548 = 548;
+ private static int testField0549 = 549;
+ private static int testField0550 = 550;
+ private static int testField0551 = 551;
+ private static int testField0552 = 552;
+ private static int testField0553 = 553;
+ private static int testField0554 = 554;
+ private static int testField0555 = 555;
+ private static int testField0556 = 556;
+ private static int testField0557 = 557;
+ private static int testField0558 = 558;
+ private static int testField0559 = 559;
+ private static int testField0560 = 560;
+ private static int testField0561 = 561;
+ private static int testField0562 = 562;
+ private static int testField0563 = 563;
+ private static int testField0564 = 564;
+ private static int testField0565 = 565;
+ private static int testField0566 = 566;
+ private static int testField0567 = 567;
+ private static int testField0568 = 568;
+ private static int testField0569 = 569;
+ private static int testField0570 = 570;
+ private static int testField0571 = 571;
+ private static int testField0572 = 572;
+ private static int testField0573 = 573;
+ private static int testField0574 = 574;
+ private static int testField0575 = 575;
+ private static int testField0576 = 576;
+ private static int testField0577 = 577;
+ private static int testField0578 = 578;
+ private static int testField0579 = 579;
+ private static int testField0580 = 580;
+ private static int testField0581 = 581;
+ private static int testField0582 = 582;
+ private static int testField0583 = 583;
+ private static int testField0584 = 584;
+ private static int testField0585 = 585;
+ private static int testField0586 = 586;
+ private static int testField0587 = 587;
+ private static int testField0588 = 588;
+ private static int testField0589 = 589;
+ private static int testField0590 = 590;
+ private static int testField0591 = 591;
+ private static int testField0592 = 592;
+ private static int testField0593 = 593;
+ private static int testField0594 = 594;
+ private static int testField0595 = 595;
+ private static int testField0596 = 596;
+ private static int testField0597 = 597;
+ private static int testField0598 = 598;
+ private static int testField0599 = 599;
+ private static int testField0600 = 600;
+ private static int testField0601 = 601;
+ private static int testField0602 = 602;
+ private static int testField0603 = 603;
+ private static int testField0604 = 604;
+ private static int testField0605 = 605;
+ private static int testField0606 = 606;
+ private static int testField0607 = 607;
+ private static int testField0608 = 608;
+ private static int testField0609 = 609;
+ private static int testField0610 = 610;
+ private static int testField0611 = 611;
+ private static int testField0612 = 612;
+ private static int testField0613 = 613;
+ private static int testField0614 = 614;
+ private static int testField0615 = 615;
+ private static int testField0616 = 616;
+ private static int testField0617 = 617;
+ private static int testField0618 = 618;
+ private static int testField0619 = 619;
+ private static int testField0620 = 620;
+ private static int testField0621 = 621;
+ private static int testField0622 = 622;
+ private static int testField0623 = 623;
+ private static int testField0624 = 624;
+ private static int testField0625 = 625;
+ private static int testField0626 = 626;
+ private static int testField0627 = 627;
+ private static int testField0628 = 628;
+ private static int testField0629 = 629;
+ private static int testField0630 = 630;
+ private static int testField0631 = 631;
+ private static int testField0632 = 632;
+ private static int testField0633 = 633;
+ private static int testField0634 = 634;
+ private static int testField0635 = 635;
+ private static int testField0636 = 636;
+ private static int testField0637 = 637;
+ private static int testField0638 = 638;
+ private static int testField0639 = 639;
+ private static int testField0640 = 640;
+ private static int testField0641 = 641;
+ private static int testField0642 = 642;
+ private static int testField0643 = 643;
+ private static int testField0644 = 644;
+ private static int testField0645 = 645;
+ private static int testField0646 = 646;
+ private static int testField0647 = 647;
+ private static int testField0648 = 648;
+ private static int testField0649 = 649;
+ private static int testField0650 = 650;
+ private static int testField0651 = 651;
+ private static int testField0652 = 652;
+ private static int testField0653 = 653;
+ private static int testField0654 = 654;
+ private static int testField0655 = 655;
+ private static int testField0656 = 656;
+ private static int testField0657 = 657;
+ private static int testField0658 = 658;
+ private static int testField0659 = 659;
+ private static int testField0660 = 660;
+ private static int testField0661 = 661;
+ private static int testField0662 = 662;
+ private static int testField0663 = 663;
+ private static int testField0664 = 664;
+ private static int testField0665 = 665;
+ private static int testField0666 = 666;
+ private static int testField0667 = 667;
+ private static int testField0668 = 668;
+ private static int testField0669 = 669;
+ private static int testField0670 = 670;
+ private static int testField0671 = 671;
+ private static int testField0672 = 672;
+ private static int testField0673 = 673;
+ private static int testField0674 = 674;
+ private static int testField0675 = 675;
+ private static int testField0676 = 676;
+ private static int testField0677 = 677;
+ private static int testField0678 = 678;
+ private static int testField0679 = 679;
+ private static int testField0680 = 680;
+ private static int testField0681 = 681;
+ private static int testField0682 = 682;
+ private static int testField0683 = 683;
+ private static int testField0684 = 684;
+ private static int testField0685 = 685;
+ private static int testField0686 = 686;
+ private static int testField0687 = 687;
+ private static int testField0688 = 688;
+ private static int testField0689 = 689;
+ private static int testField0690 = 690;
+ private static int testField0691 = 691;
+ private static int testField0692 = 692;
+ private static int testField0693 = 693;
+ private static int testField0694 = 694;
+ private static int testField0695 = 695;
+ private static int testField0696 = 696;
+ private static int testField0697 = 697;
+ private static int testField0698 = 698;
+ private static int testField0699 = 699;
+ private static int testField0700 = 700;
+ private static int testField0701 = 701;
+ private static int testField0702 = 702;
+ private static int testField0703 = 703;
+ private static int testField0704 = 704;
+ private static int testField0705 = 705;
+ private static int testField0706 = 706;
+ private static int testField0707 = 707;
+ private static int testField0708 = 708;
+ private static int testField0709 = 709;
+ private static int testField0710 = 710;
+ private static int testField0711 = 711;
+ private static int testField0712 = 712;
+ private static int testField0713 = 713;
+ private static int testField0714 = 714;
+ private static int testField0715 = 715;
+ private static int testField0716 = 716;
+ private static int testField0717 = 717;
+ private static int testField0718 = 718;
+ private static int testField0719 = 719;
+ private static int testField0720 = 720;
+ private static int testField0721 = 721;
+ private static int testField0722 = 722;
+ private static int testField0723 = 723;
+ private static int testField0724 = 724;
+ private static int testField0725 = 725;
+ private static int testField0726 = 726;
+ private static int testField0727 = 727;
+ private static int testField0728 = 728;
+ private static int testField0729 = 729;
+ private static int testField0730 = 730;
+ private static int testField0731 = 731;
+ private static int testField0732 = 732;
+ private static int testField0733 = 733;
+ private static int testField0734 = 734;
+ private static int testField0735 = 735;
+ private static int testField0736 = 736;
+ private static int testField0737 = 737;
+ private static int testField0738 = 738;
+ private static int testField0739 = 739;
+ private static int testField0740 = 740;
+ private static int testField0741 = 741;
+ private static int testField0742 = 742;
+ private static int testField0743 = 743;
+ private static int testField0744 = 744;
+ private static int testField0745 = 745;
+ private static int testField0746 = 746;
+ private static int testField0747 = 747;
+ private static int testField0748 = 748;
+ private static int testField0749 = 749;
+ private static int testField0750 = 750;
+ private static int testField0751 = 751;
+ private static int testField0752 = 752;
+ private static int testField0753 = 753;
+ private static int testField0754 = 754;
+ private static int testField0755 = 755;
+ private static int testField0756 = 756;
+ private static int testField0757 = 757;
+ private static int testField0758 = 758;
+ private static int testField0759 = 759;
+ private static int testField0760 = 760;
+ private static int testField0761 = 761;
+ private static int testField0762 = 762;
+ private static int testField0763 = 763;
+ private static int testField0764 = 764;
+ private static int testField0765 = 765;
+ private static int testField0766 = 766;
+ private static int testField0767 = 767;
+ private static int testField0768 = 768;
+ private static int testField0769 = 769;
+ private static int testField0770 = 770;
+ private static int testField0771 = 771;
+ private static int testField0772 = 772;
+ private static int testField0773 = 773;
+ private static int testField0774 = 774;
+ private static int testField0775 = 775;
+ private static int testField0776 = 776;
+ private static int testField0777 = 777;
+ private static int testField0778 = 778;
+ private static int testField0779 = 779;
+ private static int testField0780 = 780;
+ private static int testField0781 = 781;
+ private static int testField0782 = 782;
+ private static int testField0783 = 783;
+ private static int testField0784 = 784;
+ private static int testField0785 = 785;
+ private static int testField0786 = 786;
+ private static int testField0787 = 787;
+ private static int testField0788 = 788;
+ private static int testField0789 = 789;
+ private static int testField0790 = 790;
+ private static int testField0791 = 791;
+ private static int testField0792 = 792;
+ private static int testField0793 = 793;
+ private static int testField0794 = 794;
+ private static int testField0795 = 795;
+ private static int testField0796 = 796;
+ private static int testField0797 = 797;
+ private static int testField0798 = 798;
+ private static int testField0799 = 799;
+ private static int testField0800 = 800;
+ private static int testField0801 = 801;
+ private static int testField0802 = 802;
+ private static int testField0803 = 803;
+ private static int testField0804 = 804;
+ private static int testField0805 = 805;
+ private static int testField0806 = 806;
+ private static int testField0807 = 807;
+ private static int testField0808 = 808;
+ private static int testField0809 = 809;
+ private static int testField0810 = 810;
+ private static int testField0811 = 811;
+ private static int testField0812 = 812;
+ private static int testField0813 = 813;
+ private static int testField0814 = 814;
+ private static int testField0815 = 815;
+ private static int testField0816 = 816;
+ private static int testField0817 = 817;
+ private static int testField0818 = 818;
+ private static int testField0819 = 819;
+ private static int testField0820 = 820;
+ private static int testField0821 = 821;
+ private static int testField0822 = 822;
+ private static int testField0823 = 823;
+ private static int testField0824 = 824;
+ private static int testField0825 = 825;
+ private static int testField0826 = 826;
+ private static int testField0827 = 827;
+ private static int testField0828 = 828;
+ private static int testField0829 = 829;
+ private static int testField0830 = 830;
+ private static int testField0831 = 831;
+ private static int testField0832 = 832;
+ private static int testField0833 = 833;
+ private static int testField0834 = 834;
+ private static int testField0835 = 835;
+ private static int testField0836 = 836;
+ private static int testField0837 = 837;
+ private static int testField0838 = 838;
+ private static int testField0839 = 839;
+ private static int testField0840 = 840;
+ private static int testField0841 = 841;
+ private static int testField0842 = 842;
+ private static int testField0843 = 843;
+ private static int testField0844 = 844;
+ private static int testField0845 = 845;
+ private static int testField0846 = 846;
+ private static int testField0847 = 847;
+ private static int testField0848 = 848;
+ private static int testField0849 = 849;
+ private static int testField0850 = 850;
+ private static int testField0851 = 851;
+ private static int testField0852 = 852;
+ private static int testField0853 = 853;
+ private static int testField0854 = 854;
+ private static int testField0855 = 855;
+ private static int testField0856 = 856;
+ private static int testField0857 = 857;
+ private static int testField0858 = 858;
+ private static int testField0859 = 859;
+ private static int testField0860 = 860;
+ private static int testField0861 = 861;
+ private static int testField0862 = 862;
+ private static int testField0863 = 863;
+ private static int testField0864 = 864;
+ private static int testField0865 = 865;
+ private static int testField0866 = 866;
+ private static int testField0867 = 867;
+ private static int testField0868 = 868;
+ private static int testField0869 = 869;
+ private static int testField0870 = 870;
+ private static int testField0871 = 871;
+ private static int testField0872 = 872;
+ private static int testField0873 = 873;
+ private static int testField0874 = 874;
+ private static int testField0875 = 875;
+ private static int testField0876 = 876;
+ private static int testField0877 = 877;
+ private static int testField0878 = 878;
+ private static int testField0879 = 879;
+ private static int testField0880 = 880;
+ private static int testField0881 = 881;
+ private static int testField0882 = 882;
+ private static int testField0883 = 883;
+ private static int testField0884 = 884;
+ private static int testField0885 = 885;
+ private static int testField0886 = 886;
+ private static int testField0887 = 887;
+ private static int testField0888 = 888;
+ private static int testField0889 = 889;
+ private static int testField0890 = 890;
+ private static int testField0891 = 891;
+ private static int testField0892 = 892;
+ private static int testField0893 = 893;
+ private static int testField0894 = 894;
+ private static int testField0895 = 895;
+ private static int testField0896 = 896;
+ private static int testField0897 = 897;
+ private static int testField0898 = 898;
+ private static int testField0899 = 899;
+ private static int testField0900 = 900;
+ private static int testField0901 = 901;
+ private static int testField0902 = 902;
+ private static int testField0903 = 903;
+ private static int testField0904 = 904;
+ private static int testField0905 = 905;
+ private static int testField0906 = 906;
+ private static int testField0907 = 907;
+ private static int testField0908 = 908;
+ private static int testField0909 = 909;
+ private static int testField0910 = 910;
+ private static int testField0911 = 911;
+ private static int testField0912 = 912;
+ private static int testField0913 = 913;
+ private static int testField0914 = 914;
+ private static int testField0915 = 915;
+ private static int testField0916 = 916;
+ private static int testField0917 = 917;
+ private static int testField0918 = 918;
+ private static int testField0919 = 919;
+ private static int testField0920 = 920;
+ private static int testField0921 = 921;
+ private static int testField0922 = 922;
+ private static int testField0923 = 923;
+ private static int testField0924 = 924;
+ private static int testField0925 = 925;
+ private static int testField0926 = 926;
+ private static int testField0927 = 927;
+ private static int testField0928 = 928;
+ private static int testField0929 = 929;
+ private static int testField0930 = 930;
+ private static int testField0931 = 931;
+ private static int testField0932 = 932;
+ private static int testField0933 = 933;
+ private static int testField0934 = 934;
+ private static int testField0935 = 935;
+ private static int testField0936 = 936;
+ private static int testField0937 = 937;
+ private static int testField0938 = 938;
+ private static int testField0939 = 939;
+ private static int testField0940 = 940;
+ private static int testField0941 = 941;
+ private static int testField0942 = 942;
+ private static int testField0943 = 943;
+ private static int testField0944 = 944;
+ private static int testField0945 = 945;
+ private static int testField0946 = 946;
+ private static int testField0947 = 947;
+ private static int testField0948 = 948;
+ private static int testField0949 = 949;
+ private static int testField0950 = 950;
+ private static int testField0951 = 951;
+ private static int testField0952 = 952;
+ private static int testField0953 = 953;
+ private static int testField0954 = 954;
+ private static int testField0955 = 955;
+ private static int testField0956 = 956;
+ private static int testField0957 = 957;
+ private static int testField0958 = 958;
+ private static int testField0959 = 959;
+ private static int testField0960 = 960;
+ private static int testField0961 = 961;
+ private static int testField0962 = 962;
+ private static int testField0963 = 963;
+ private static int testField0964 = 964;
+ private static int testField0965 = 965;
+ private static int testField0966 = 966;
+ private static int testField0967 = 967;
+ private static int testField0968 = 968;
+ private static int testField0969 = 969;
+ private static int testField0970 = 970;
+ private static int testField0971 = 971;
+ private static int testField0972 = 972;
+ private static int testField0973 = 973;
+ private static int testField0974 = 974;
+ private static int testField0975 = 975;
+ private static int testField0976 = 976;
+ private static int testField0977 = 977;
+ private static int testField0978 = 978;
+ private static int testField0979 = 979;
+ private static int testField0980 = 980;
+ private static int testField0981 = 981;
+ private static int testField0982 = 982;
+ private static int testField0983 = 983;
+ private static int testField0984 = 984;
+ private static int testField0985 = 985;
+ private static int testField0986 = 986;
+ private static int testField0987 = 987;
+ private static int testField0988 = 988;
+ private static int testField0989 = 989;
+ private static int testField0990 = 990;
+ private static int testField0991 = 991;
+ private static int testField0992 = 992;
+ private static int testField0993 = 993;
+ private static int testField0994 = 994;
+ private static int testField0995 = 995;
+ private static int testField0996 = 996;
+ private static int testField0997 = 997;
+ private static int testField0998 = 998;
+ private static int testField0999 = 999;
+ private static int testField1000 = 1000;
+ private static int testField1001 = 1001;
+ private static int testField1002 = 1002;
+ private static int testField1003 = 1003;
+ private static int testField1004 = 1004;
+ private static int testField1005 = 1005;
+ private static int testField1006 = 1006;
+ private static int testField1007 = 1007;
+ private static int testField1008 = 1008;
+ private static int testField1009 = 1009;
+ private static int testField1010 = 1010;
+ private static int testField1011 = 1011;
+ private static int testField1012 = 1012;
+ private static int testField1013 = 1013;
+ private static int testField1014 = 1014;
+ private static int testField1015 = 1015;
+ private static int testField1016 = 1016;
+ private static int testField1017 = 1017;
+ private static int testField1018 = 1018;
+ private static int testField1019 = 1019;
+ private static int testField1020 = 1020;
+ private static int testField1021 = 1021;
+ private static int testField1022 = 1022;
+ private static int testField1023 = 1023;
+}
diff --git a/test/901-hello-ti-agent/basics.cc b/test/901-hello-ti-agent/basics.cc
index 9166277..cbd7686 100644
--- a/test/901-hello-ti-agent/basics.cc
+++ b/test/901-hello-ti-agent/basics.cc
@@ -16,6 +16,8 @@
#include "901-hello-ti-agent/basics.h"
+#include <thread>
+
#include <jni.h>
#include <stdio.h>
#include <string.h>
@@ -159,5 +161,22 @@
return (current_phase == JVMTI_PHASE_LIVE) ? JNI_TRUE : JNI_FALSE;
}
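+// Helper run on a new native thread that never attaches itself to the
+// runtime: the JVMTI call must fail with JVMTI_ERROR_UNATTACHED_THREAD.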
+static void CallJvmtiFunction(jvmtiEnv* env, jclass klass, jvmtiError* err) {
+ jint n;
+ jmethodID* methods = nullptr;
+ *err = env->GetClassMethods(klass, &n, &methods);
+}
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_checkUnattached(
+ JNIEnv* env ATTRIBUTE_UNUSED, jclass Main_klass) {
+ jvmtiError res = JVMTI_ERROR_NONE;
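+  // Run the JVMTI call on a freshly created, never-attached native thread.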
+ std::thread t1(CallJvmtiFunction, jvmti_env, Main_klass, &res);
+ t1.join();
+ return res == JVMTI_ERROR_UNATTACHED_THREAD;
+}
+
} // namespace Test901HelloTi
} // namespace art
diff --git a/test/901-hello-ti-agent/expected.txt b/test/901-hello-ti-agent/expected.txt
index c4b24cb..eb5b6a2 100644
--- a/test/901-hello-ti-agent/expected.txt
+++ b/test/901-hello-ti-agent/expected.txt
@@ -3,6 +3,7 @@
VMInit
Hello, world!
Agent in live phase.
+Received expected error for unattached JVMTI calls
0
1
2
diff --git a/test/901-hello-ti-agent/src/Main.java b/test/901-hello-ti-agent/src/Main.java
index 4d62ed3..556e05b 100644
--- a/test/901-hello-ti-agent/src/Main.java
+++ b/test/901-hello-ti-agent/src/Main.java
@@ -21,6 +21,10 @@
if (checkLivePhase()) {
System.out.println("Agent in live phase.");
}
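+    // checkUnattached() is implemented natively in 901-hello-ti-agent/basics.cc.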
+ if (checkUnattached()) {
+ System.out.println("Received expected error for unattached JVMTI calls");
+ }
set(0); // OTHER
set(1); // GC
@@ -41,4 +44,5 @@
private static native boolean checkLivePhase();
private static native void setVerboseFlag(int flag, boolean value);
+ private static native boolean checkUnattached();
}
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 703b911..cc015b0 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -138,6 +138,7 @@
# specific version depending on the compiler.
ART_TEST_HOST_RUN_TEST_DEPENDENCIES := \
$(ART_HOST_EXECUTABLES) \
+ $(HOST_OUT_EXECUTABLES)/hprof-conv \
$(OUT_DIR)/$(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libtiagent) \
$(OUT_DIR)/$(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libtiagentd) \
$(OUT_DIR)/$(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libartagent) \
@@ -177,8 +178,6 @@
# Required for dx, jasmin, smali, dexmerger, jack.
host_prereq_rules += $(TEST_ART_RUN_TEST_DEPENDENCIES)
-host_prereq_rules += $(HOST_OUT_EXECUTABLES)/hprof-conv
-
# Classpath for Jack compilation for target.
target_prereq_rules := $(TARGET_JACK_CLASSPATH_DEPENDENCIES)
@@ -225,18 +224,6 @@
test-art-target-run-test-dependencies : $(target_prereq_rules)
test-art-run-test-dependencies : test-art-host-run-test-dependencies test-art-target-run-test-dependencies
-# Generate list of dependencies required for given target - HOST or TARGET, IMAGE_TYPE,
-# COMPILER_TYPE and ADDRESS_SIZE.
-$(foreach target, $(TARGET_TYPES), \
- $(foreach image, $(IMAGE_TYPES), \
- $(foreach compiler, $(COMPILER_TYPES), \
- $(foreach address_size, $(ALL_ADDRESS_SIZES), $(eval \
- $(call core-image-dependencies,$(target),$(image),$(compiler),$(address_size)))))))
-
-test-art-host-run-test-dependencies : $(host_prereq_rules)
-test-art-target-run-test-dependencies : $(target_prereq_rules)
-test-art-run-test-dependencies : test-art-host-run-test-dependencies test-art-target-run-test-dependencies
-
# Create a rule to build and run a test group of the following form:
# test-art-{1: host target}-run-test
define define-test-art-host-or-target-run-test-group
@@ -259,8 +246,6 @@
target_prereq_rules :=
core-image-dependencies :=
name-to-var :=
-ART_TEST_HOST_RUN_TEST_DEPENDENCIES :=
-TEST_ART_TARGET_SYNC_DEPS :=
define-test-art-host-or-target-run-test-group :=
TARGET_TYPES :=
COMPILER_TYPES :=
diff --git a/test/knownfailures.json b/test/knownfailures.json
index 2de34ca..abbdbb1 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -332,8 +332,11 @@
{
"tests": ["912-classes",
"616-cha",
- "616-cha-abstract"],
- "bug": "http://b/36344364 http://b36344221",
+ "616-cha-abstract",
+ "616-cha-interface",
+ "616-cha-miranda",
+ "616-cha-proxy-method-inline"],
+ "bug": "http://b/36344364 http://b/36344221",
"variant": "no-dex2oat | relocate-npatchoat"
},
{
diff --git a/test/run-test b/test/run-test
index 1715423..a6903ff 100755
--- a/test/run-test
+++ b/test/run-test
@@ -772,7 +772,9 @@
# Set a hard limit to encourage ART developers to increase the ulimit here if
# needed to support a test case rather than resetting the limit in the run
# script for the particular test in question.
-if ! ulimit -f -H 128000; then
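+# Without -H this sets both the soft and the hard limit; setting only the
+# hard limit can fail when the current soft limit is higher (e.g. unlimited).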
+if ! ulimit -f 128000; then
err_echo "ulimit file size setting failed"
fi
diff --git a/test/testrunner/run_build_test_target.py b/test/testrunner/run_build_test_target.py
index 835b678..e105da3 100755
--- a/test/testrunner/run_build_test_target.py
+++ b/test/testrunner/run_build_test_target.py
@@ -51,6 +51,8 @@
build_command += ' -j' + str(n_threads)
build_command += ' -C ' + env.ANDROID_BUILD_TOP
build_command += ' ' + target.get('target')
+ # Add 'dist' to avoid Jack issues b/36169180.
+ build_command += ' dist'
print build_command.split()
if subprocess.call(build_command.split()):
sys.exit(1)
diff --git a/test/testrunner/testrunner.py b/test/testrunner/testrunner.py
index 8a4bff1..13f341c 100755
--- a/test/testrunner/testrunner.py
+++ b/test/testrunner/testrunner.py
@@ -269,6 +269,18 @@
global semaphore
semaphore = threading.Semaphore(n_thread)
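+ # Disable colored output when stdout is not a terminal, e.g. when the
+ # testrunner's output is redirected to a build log.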
+ if not sys.stdout.isatty():
+ global COLOR_ERROR
+ global COLOR_PASS
+ global COLOR_SKIP
+ global COLOR_NORMAL
+ COLOR_ERROR = ''
+ COLOR_PASS = ''
+ COLOR_SKIP = ''
+ COLOR_NORMAL = ''
+
def run_tests(tests):
"""Creates thread workers to run the tests.
@@ -915,6 +925,7 @@
if options['gdb_arg']:
gdb_arg = options['gdb_arg']
timeout = options['timeout']
+
return test
def main():
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index e0aae46..55b2c59 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -141,13 +141,6 @@
names: ["org.apache.harmony.tests.java.lang.ClassTest#test_forNameLjava_lang_String"]
},
{
- description: "TimeZoneTest.testAllDisplayNames times out, needs investigation",
- result: EXEC_TIMEOUT,
- modes: [device],
- names: ["libcore.java.util.TimeZoneTest#testAllDisplayNames"],
- bug: 22786792
-},
-{
description: "Lack of IPv6 on some buildbot slaves",
result: EXEC_FAILED,
names: ["libcore.io.OsTest#test_byteBufferPositions_sendto_recvfrom_af_inet6",